```json
{
    "source": "jddixon/optionz",
    "score": 3
}
```
#### File: optionz/tests/test_base_options.py
```python
import time
import unittest
from rnglib import SimpleRNG
from optionz import Option
# pylint: disable=too-few-public-methods
class EmptyClass():
""" Stub class, for testing. """
pass
# pylint: disable=unused-argument
def simple_adder(self, a__, b__):
""" Simplest possible adder method. """
return a__ + b__
class TestBaseOption(unittest.TestCase):
""" Test the [Base]Option class. """
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
def test_constructor(self):
""" Test the constuctor. """
opt_a = Option(name='fred', surname='jones')
# it's a dictionary
self.assertTrue('name' in opt_a)
        self.assertTrue('surname' in opt_a)
self.assertFalse('fred' in opt_a)
# dots work
self.assertEqual(opt_a.name, 'fred')
# pylint: disable=no-member
self.assertEqual(opt_a.surname, 'jones')
opt_b = Option(name='fred', surname='jones')
self.assertEqual(opt_b.name, 'fred')
# pylint: disable=no-member
self.assertEqual(opt_b.surname, 'jones')
self.assertEqual(opt_a, opt_a)
self.assertEqual(opt_a, opt_b)
opt_c = Option(name='john', surname='smith')
# dictionary
self.assertTrue('name' in opt_c)
self.assertTrue('surname' in opt_c)
self.assertFalse('john' in opt_c)
self.assertFalse('smith' in opt_c)
# dots
self.assertEqual(opt_c.name, 'john')
# pylint: disable=no-member
self.assertEqual(opt_c.surname, 'smith')
self.assertNotEqual(opt_a, opt_c)
# assignment to dotted option
opt_b.name = 'george'
self.assertEqual(opt_b.name, 'george')
if __name__ == '__main__':
unittest.main()
```
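The constructor test above treats `Option` both as a dictionary (membership via `in`) and as an object with readable and assignable attributes. As a purely illustrative sketch of that combined behavior (this is not the optionz implementation), a dict subclass with attribute access could look like this:
```python
class DotDict(dict):
    """ Dict subclass with attribute-style read and write access (sketch). """

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError as exc:
            raise AttributeError(name) from exc

    def __setattr__(self, name, value):
        self[name] = value


opt = DotDict(name='fred', surname='jones')
assert 'name' in opt and opt.surname == 'jones'
opt.name = 'george'                  # assignment through the dot works too
assert opt['name'] == 'george'
```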
#### File: optionz/tests/test_named_tuples.py
```python
import unittest
from collections import namedtuple
class TestNamedTuples(unittest.TestCase):
""" Test behavior of named tuples. """
def test_functionality(self):
"""
Nested named tuples work more or less as expected. Lower-level
tuples must be built first, because the tuples are immutable.
"""
# creation of lower-level tuple
two_pair = namedtuple('TwoPair', ['c__', 'd__'])
duo = two_pair(13, 'dddD')
# create upper-level tuple
threesome = namedtuple('Threesome', ['a__', 'b__', 'e__'])
trio = threesome(a__='val0', b__=duo, e__=42)
# attribute notation
self.assertEqual(trio.a__, 'val0')
self.assertEqual(trio.b__.c__, 13)
self.assertEqual(trio.b__.d__, 'dddD')
self.assertEqual(trio.e__, 42)
# indexed access
self.assertEqual(trio[0], 'val0')
self.assertEqual(trio[1][0], 13)
self.assertEqual(trio[1][1], 'dddD')
self.assertEqual(trio[2], 42)
# Immutability, attribute access.
try:
trio.a__ = 997
self.fail("trio.a__ isn't immutable") # pragma: no cover
except AttributeError:
self.assertEqual(trio.a__, 'val0')
try:
trio.b__ = 'happiness'
self.fail("trio.b__ isn't immutable") # pragma: no cover
except AttributeError:
# __eq__ works too
self.assertEqual(trio.b__, duo)
try:
trio.b__.c__ = 'foo'
self.fail("trio.b__.c__ isn't immutable") # pragma: no cover
except AttributeError:
self.assertEqual(trio.b__.c__, 13)
# Immutability, indexed assignment: we get a TypeError:
# "object does not support item assignment"
try:
trio[1][1] = 1942
self.fail("trio[1][1] isn't immutable") # pragma: no cover
except TypeError:
self.assertEqual(trio[1][1], 'dddD')
try:
# pylint: disable=unsupported-assignment-operation
trio[2] = 'baz'
self.fail("trio[2] isn't immutable") # pragma: no cover
except TypeError:
self.assertEqual(trio[2], 42)
if __name__ == '__main__':
unittest.main()
```
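For reference, the standard-library way to get a "modified" copy of an immutable named tuple is `_replace()`, which builds a new tuple rather than mutating in place:
```python
from collections import namedtuple

TwoPair = namedtuple('TwoPair', ['c__', 'd__'])
duo = TwoPair(13, 'dddD')
duo2 = duo._replace(c__=99)          # returns a new tuple; duo is unchanged
assert duo.c__ == 13 and duo2.c__ == 99
```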
#### File: optionz/tests/test_optionz.py
```python
import time
import unittest
from rnglib import SimpleRNG
from optionz import Optionz as Z
from optionz import (ValType, BoolOption, ChoiceOption,
FloatOption, IntOption, ListOption, StrOption)
class TestOptionz(unittest.TestCase):
""" Test the basic Optionz classes. """
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
# utility functions #############################################
# actual unit tests #############################################
def test_bare_optionz(self):
""" Create an Optionz instance, check for expected attibutes. """
my_optz = Z('fred')
self.assertEqual(my_optz.name, 'fred')
self.assertEqual(my_optz.desc, None)
self.assertEqual(my_optz.epilog, None)
self.assertEqual(len(my_optz), 0)
my_optz = Z('frank', 'frivolous', 'fabulous')
self.assertEqual(my_optz.name, 'frank')
self.assertEqual(my_optz.desc, 'frivolous')
self.assertEqual(my_optz.epilog, 'fabulous')
self.assertEqual(len(my_optz), 0)
def test_z_option(self):
""" Populate an Optionz object, check for expected attr. """
z_name = self.rng.next_file_name(8)
z_desc = self.rng.next_file_name(64)
z_epilog = self.rng.next_file_name(64)
my_optz = Z(z_name, z_desc, z_epilog)
self.assertEqual(my_optz.name, z_name)
self.assertEqual(my_optz.desc, z_desc)
self.assertEqual(my_optz.epilog, z_epilog)
self.assertEqual(len(my_optz), 0)
# booleans --------------------------------------------------
b_dflt_val = True
b_desc = "I'm small"
bool_opt = BoolOption('bO', default=b_dflt_val, desc=b_desc)
self.assertEqual(bool_opt.name, 'bO')
self.assertEqual(bool_opt.default, b_dflt_val)
self.assertEqual(bool_opt.desc, b_desc)
# name valType default desc
b_check = my_optz.add_option('bO', ValType.BOOL, b_dflt_val, b_desc)
self.assertEqual(len(my_optz), 1)
self.assertEqual(bool_opt, b_check)
# choice lists ----------------------------------------------
# NOTE We should probably require that list elements be of
# compatible types. For the moment we just assume that elements
# are all strings.
# succeeds if default in list of choices ----------
my_size = 2 + self.rng.next_int16(4) # so in [2..5]
choice = self.rng.next_file_name(8)
choices = [choice]
while len(choices) < my_size:
if choice not in choices:
choices.append(choice)
choice = self.rng.next_file_name(8)
c_dflt_val = choices[self.rng.next_int16(my_size)]
c_desc = 'a list'
choice_opt = ChoiceOption('cO', choices, c_dflt_val, c_desc)
self.assertEqual(choice_opt.name, 'cO')
self.assertEqual(choice_opt.choices, choices)
self.assertEqual(choice_opt.default, c_dflt_val)
self.assertEqual(choice_opt.desc, "a list")
# fails if default is NOT in list of choices ------
my_size = 2 + self.rng.next_int16(4) # so in [2..5]
choice = self.rng.next_file_name(8)
b_choices = [choice]
while len(b_choices) < my_size:
if choice not in b_choices:
b_choices.append(choice)
choice = self.rng.next_file_name(8)
dflt_val = self.rng.next_file_name(8)
        while dflt_val in b_choices:
dflt_val = self.rng.next_file_name(8)
try:
ChoiceOption('bC', choices, default=dflt_val, desc="a list")
self.fail('added default value not in list of choices')
except BaseException:
pass
c_check = my_optz.add_choice_option('cO', choices, c_dflt_val, c_desc)
self.assertEqual(len(my_optz), 2)
self.assertEqual(choice_opt, c_check)
# floats ----------------------------------------------------
f_dflt_val = self.rng.next_real()
f_desc = 'bubbly'
float_opt = FloatOption('fO', default=f_dflt_val, desc=f_desc)
self.assertEqual(float_opt.name, 'fO')
self.assertEqual(float_opt.default, f_dflt_val)
self.assertEqual(float_opt.desc, f_desc)
# name valType default desc
f_check = my_optz.add_option('fO', ValType.FLOAT, f_dflt_val, f_desc)
self.assertEqual(len(my_optz), 3)
self.assertEqual(float_opt, f_check)
# ints ------------------------------------------------------
i_dflt_val = self.rng.next_int32()
i_desc = 'discrete'
int_opt = IntOption('iO', default=i_dflt_val, desc=i_desc)
self.assertEqual(int_opt.name, 'iO')
self.assertEqual(int_opt.default, i_dflt_val)
self.assertEqual(int_opt.desc, i_desc)
# name valType default desc
i_check = my_optz.add_option('iO', ValType.INT, i_dflt_val, i_desc)
self.assertEqual(len(my_optz), 4)
self.assertEqual(int_opt, i_check)
# lists -----------------------------------------------------
size_val = self.rng.next_int16()
# select polarity of size randomly
if self.rng.next_boolean():
size_val = - size_val
l_desc = "chunky"
list_opt = ListOption('lO', default=size_val, desc=l_desc)
self.assertEqual(list_opt.name, 'lO')
self.assertEqual(list_opt.default, size_val)
self.assertEqual(list_opt.size, size_val)
self.assertEqual(list_opt.desc, l_desc)
zero_val = 0
var_list_opt = ListOption('zO', default=zero_val, desc="skinny")
self.assertEqual(var_list_opt.name, 'zO')
self.assertEqual(var_list_opt.default, zero_val)
self.assertEqual(var_list_opt.desc, "skinny")
# name valType default desc
l_check = my_optz.add_option('lO', ValType.LIST, size_val, l_desc)
self.assertEqual(len(my_optz), 5)
self.assertEqual(list_opt, l_check)
# strings ---------------------------------------------------
s_dflt_val = self.rng.next_file_name(12)
s_desc = "wiggly"
str_opt = StrOption('sO', default=s_dflt_val, desc=s_desc)
self.assertEqual(str_opt.name, 'sO')
self.assertEqual(str_opt.default, s_dflt_val)
self.assertEqual(str_opt.desc, s_desc)
# name valType default desc
s_check = my_optz.add_option('sO', ValType.STR, s_dflt_val, s_desc)
self.assertEqual(len(my_optz), 6)
self.assertEqual(str_opt, s_check)
if __name__ == '__main__':
unittest.main()
```
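The ChoiceOption portion of the test above relies on the rule that a default value, if supplied, must be one of the listed choices. A minimal sketch of that validation, for illustration only (not the optionz code):
```python
class ChoiceOptionSketch:
    """ Option whose default, if given, must be one of the choices. """

    def __init__(self, name, choices, default=None, desc=None):
        if default is not None and default not in choices:
            raise ValueError(
                "default %r is not among the choices %r" % (default, choices))
        self.name = name
        self.choices = list(choices)
        self.default = default
        self.desc = desc


# ChoiceOptionSketch('cO', ['a', 'b'], default='c') would raise ValueError
```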
```json
{
    "source": "jddixon/projlocator",
    "score": 3
}
```
#### File: projlocator/tests/test_proj_dir_from_name.py
```python
import time
import unittest
from rnglib import SimpleRNG
from projlocator import proj_dir_from_name
class TestProjDirFromName(unittest.TestCase):
""" Verify correct results from proj_dir_from_name(). """
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
def test_mapping(self):
"""
Check functionality with cases expected to succeed and other
cases expected to fail.
"""
# dir -> lang tests -----------------------------------------
try:
proj_dir_from_name(None)
self.fail("didn't catch missing project name")
# pylint: disable=bare-except
except BaseException:
pass
# failure to match should return ""
self.assertEqual(proj_dir_from_name('/'), "")
self.assertEqual(proj_dir_from_name('foo'), "")
# these names must be filtered out
self.assertEqual(proj_dir_from_name('dot'), "")
self.assertEqual(proj_dir_from_name('ghp.css'), "")
self.assertEqual(proj_dir_from_name('img'), "")
self.assertEqual(proj_dir_from_name('LICENSE.md'), "")
self.assertEqual(proj_dir_from_name('TODO'), "")
# these are real project names
self.assertEqual(proj_dir_from_name('alertz'),
'/home/jdd/dev/py/alertz')
self.assertEqual(proj_dir_from_name('buildlist'),
'/home/jdd/dev/py/buildlist')
self.assertEqual(proj_dir_from_name('bindex'),
'/home/jdd/dev/py/bindex')
self.assertEqual(proj_dir_from_name('cryptoserver_go'),
'/home/jdd/dev/go/src/github.com/jddixon/' +
'cryptoserver_go')
self.assertEqual(proj_dir_from_name('fieldz'),
'/home/jdd/dev/py/fieldz')
self.assertEqual(proj_dir_from_name('gotwitgo'),
'/home/jdd/dev/go/src/github.com/jddixon/gotwitgo')
self.assertEqual(proj_dir_from_name('pzog'),
'/home/jdd/dev/py/pzog')
self.assertEqual(proj_dir_from_name('ringd'),
'/home/jdd/dev/py/ringd')
self.assertEqual(proj_dir_from_name('xgo_go'),
'/home/jdd/dev/go/src/github.com/jddixon/xgo_go')
self.assertEqual(proj_dir_from_name('xlreg_ml'),
'/home/jdd/dev/ml/xlreg_ml')
self.assertEqual(proj_dir_from_name('magicsack'),
'/home/jdd/dev/py/magicsack')
self.assertEqual(proj_dir_from_name('merkletree'),
'/home/jdd/dev/py/merkletree')
self.assertEqual(proj_dir_from_name('nlhtree_py'),
'/home/jdd/dev/py/nlhtree_py')
self.assertEqual(proj_dir_from_name('rnglib'),
'/home/jdd/dev/py/rnglib')
self.assertEqual(proj_dir_from_name('xl_test_data'),
'/home/jdd/dev/dat/xl_test_data')
self.assertEqual(proj_dir_from_name('xlreg_c'),
'/home/jdd/dev/c/xlreg_c')
self.assertEqual(proj_dir_from_name('xlreg_cpp'),
'/home/jdd/dev/cpp/xlreg_cpp')
self.assertEqual(proj_dir_from_name('xlreg_java'),
'/home/jdd/dev/java/xlreg_java')
self.assertEqual(proj_dir_from_name('xlreg_rb'),
'/home/jdd/dev/rb/xlreg_rb')
# TOP LEVEL PROJECT(S)
self.assertEqual(proj_dir_from_name('xlattice'),
'/home/jdd/dev/xlattice')
# these have been returned incorrectly ======================
if __name__ == '__main__':
unittest.main()
```
#### File: projlocator/tests/test_proj_list.py
```python
import unittest
from projlocator import PROJ_LIST_MAP, add_to_proj_list
# from rnglib import SimpleRNG
class TestProjList(unittest.TestCase):
""" Verify that the project list is correct and is read correctly. """
def setUp(self):
pass
def tearDown(self):
pass
def check(self, project, rel_path):
""" Verify the relative path for the project is as expected. """
self.assertEqual(PROJ_LIST_MAP[project], rel_path)
def test_proj_adder(self):
""" Verify that the add_to_proj_list() funtion works correctly. """
self.assertEqual(add_to_proj_list('foo_c'), 'c/foo_c')
self.assertEqual(add_to_proj_list('foo_cpp'), 'cpp/foo_cpp')
self.assertEqual(add_to_proj_list('foo_go'),
'go/src/github.com/jddixon/foo_go')
self.assertEqual(add_to_proj_list('foo_java'), 'java/foo_java')
self.assertEqual(add_to_proj_list('foo_ml'), 'ml/foo_ml')
self.assertEqual(add_to_proj_list('foo_py'), 'py/foo_py')
self.assertEqual(add_to_proj_list('foo_rb'), 'rb/foo_rb')
try:
# pylint: disable=no-value-for-parameter
add_to_proj_list() # missing required parameter
            self.fail('successfully added project without name')
except TypeError:
pass
try:
add_to_proj_list('foo') # missing relPath
            self.fail('successfully added project without relPath')
except RuntimeError:
pass
try:
add_to_proj_list('foo_py', 'foo.cpp') # wrong relPath
            self.fail('successfully added project with wrong relPath')
except RuntimeError:
pass
try:
add_to_proj_list('pysloc', 'py') # existing project
            self.fail('successfully added existing project!')
except RuntimeError:
pass
def test_proj_list_map(self):
""" Verift that existing projects have the correct relative path. """
self.check('alertz', 'py/alertz')
self.check('gotwitgo', 'go/src/github.com/jddixon/gotwitgo')
self.check('xlattice', 'xlattice')
if __name__ == '__main__':
unittest.main()
```
#### File: projlocator/tests/test_rel_path_for_projects.py
```python
import time
import unittest
from rnglib import SimpleRNG
from projlocator import proj_rel_path_from_name
class TestRelPathForProject(unittest.TestCase):
"""
Exercise the code which determines the relative path given a project name.
"""
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
# utility functions #############################################
# actual unit tests #############################################
def test_rel_path(self):
# dir -> lang tests -----------------------------------------
try:
proj_rel_path_from_name(None)
self.fail("didn't catch missing project name")
# pylint: disable=bare-except
except BaseException:
pass
try:
proj_rel_path_from_name('')
self.fail("didn't catch empty project name")
# pylint: disable=bare-except
except BaseException:
pass
# failure to match should return ""
self.assertEqual(proj_rel_path_from_name('/'), "")
self.assertEqual(proj_rel_path_from_name('foo'), "")
# these names must be filtered out
self.assertEqual(proj_rel_path_from_name('dot'), "")
self.assertEqual(proj_rel_path_from_name('ghp.css'), "")
self.assertEqual(proj_rel_path_from_name('img'), "")
self.assertEqual(proj_rel_path_from_name('LICENSE.md'), "")
self.assertEqual(proj_rel_path_from_name('TODO'), "")
# these are real project names
for pair in [
('cryptoserver_go',
'go/src/github.com/jddixon/cryptoserver_go'),
('ctries_go', 'go/src/github.com/jddixon/ctries_go'),
('merkletree', 'py/merkletree'),
('nlp', 'py/nlp'),
('projlocator', 'py/projlocator'),
('pysloc', 'py/pysloc'),
('rnglib', 'py/rnglib'),
('xgo_go', 'go/src/github.com/jddixon/xgo_go'),
('xlreg_c', 'c/xlreg_c'),
('xlreg_cpp', 'cpp/xlreg_cpp'),
('xlreg_java', 'java/xlreg_java'),
('xlreg_rb', 'rb/xlreg_rb'),
('xlreg_ml', 'ml/xlreg_ml'),
('xlreg_py', 'py/xlreg_py'),
# top level project(s)
('xlattice', 'xlattice'),
]:
self.assertEqual(proj_rel_path_from_name(pair[0]), pair[1])
# these are phoney project names -- we want the system to guess
for pair in [
('foo_c', 'c/foo_c'),
('foo_go', 'go/src/github.com/jddixon/foo_go'),
('foo_cpp', 'cpp/foo_cpp'),
('foo_java', 'java/foo_java'),
('foo_rb', 'rb/foo_rb'),
('foo_ml', 'ml/foo_ml'),
('foo_py', 'py/foo_py'),
]:
self.assertEqual(proj_rel_path_from_name(pair[0]), pair[1])
if __name__ == '__main__':
unittest.main()
```
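The phony-name cases above show the suffix convention the locator follows when a project is not already in the stored project list (PROJ_LIST_MAP): `_c` maps to `c/`, `_go` to `go/src/github.com/jddixon/`, and so on, while unmatched names yield an empty string. A rough sketch of that guessing step; the constant and helper names here are assumptions for illustration, not part of projlocator:
```python
DEV_BASE = '/home/jdd/dev'       # absolute paths are DEV_BASE + '/' + rel_path
SUFFIX2PREFIX = {
    '_c': 'c', '_cpp': 'cpp', '_go': 'go/src/github.com/jddixon',
    '_java': 'java', '_ml': 'ml', '_py': 'py', '_rb': 'rb',
}


def guess_rel_path(name):
    """ Guess a relative path from the name's suffix; '' if nothing matches. """
    for suffix, prefix in SUFFIX2PREFIX.items():
        if name.endswith(suffix):
            return '%s/%s' % (prefix, name)
    return ''


assert guess_rel_path('foo_go') == 'go/src/github.com/jddixon/foo_go'
assert guess_rel_path('foo') == ''
```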
```json
{
    "source": "jddixon/pysloc",
    "score": 3
}
```
#### File: pysloc/tests/test_augeas_comments.py
```python
import unittest
from argparse import Namespace
from pysloc import count_lines_augeas
class TestAugeasComments(unittest.TestCase):
""" Test counting lines in augeas files. """
def setUp(self):
self.options = Namespace()
self.options.already = set()
self.options.verbose = False
def tearDown(self):
pass
def test_name_to_func_map(self):
""" Test counting lines in known test Augeas file. """
test_file = 'tests/commentsForAugeas'
lines, sloc = count_lines_augeas(test_file, self.options, 'ml')
self.assertEqual(lines, 107)
self.assertEqual(sloc, 45)
if __name__ == '__main__':
unittest.main()
```
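The counter tests in this project all follow the same contract: a counter takes a path, an options namespace, and a language tag, and returns a `(total_lines, sloc)` pair, where SLOC excludes blank and comment-only lines. A minimal sketch of that contract for a '#'-commented format, illustrative only and not pysloc's counter:
```python
def count_lines_sharp_sketch(path, options, lang):
    """ Return (total_lines, sloc); SLOC skips blank lines and '#' comments. """
    _ = options, lang                # unused in this sketch
    lines = sloc = 0
    with open(path, 'r', encoding='utf-8') as file:
        for line in file:
            lines += 1
            stripped = line.strip()
            if stripped and not stripped.startswith('#'):
                sloc += 1
    return lines, sloc
```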
#### File: pysloc/tests/test_ocaml_comments.py
```python
import unittest
from argparse import Namespace
from pysloc import count_lines_ocaml
class TestOCamlComments(unittest.TestCase):
""" Test OCaml line counters. """
def setUp(self):
self.options = Namespace()
self.options.already = set()
self.options.verbose = False
def tearDown(self):
pass
def test_name_to_func_map(self):
""" Verify that line counts for a known OCaml file are correct. """
test_file = 'tests/commentsForOCaml'
lines, sloc = count_lines_ocaml(test_file, self.options, 'ml')
self.assertEqual(lines, 39)
self.assertEqual(sloc, 15)
if __name__ == '__main__':
unittest.main()
```
#### File: pysloc/tests/test_octave_comments.py
```python
import unittest
from argparse import Namespace
from pysloc import count_lines_occam
class TestOctaveComments(unittest.TestCase):
""" Test line counters for Octave. """
def setUp(self):
self.options = Namespace()
self.options.already = set()
self.options.verbose = False
def tearDown(self):
pass
# utility functions #############################################
# actual unit tests #############################################
def test_name_to_func_map(self):
""" Verify that line counts for a known Octave file are correct. """
test_file = 'tests/commentsForOctave'
lines, sloc = count_lines_occam(test_file, self.options, 'octave')
self.assertEqual(lines, 79)
self.assertEqual(sloc, 25)
if __name__ == '__main__':
unittest.main()
```
#### File: pysloc/tests/test_protobuf_comments.py
```python
import unittest
from argparse import Namespace
from pysloc import count_lines_protobuf, MapHolder
class TestProtobufComments(unittest.TestCase):
""" Verify that line counters for protobuf work correctly. """
def setUp(self):
pass
def tearDown(self):
pass
# utility functions #############################################
# actual unit tests #############################################
def test_name_to_func_map(self):
""" Verify that line counts are correct for a known protobuf file. """
test_file = 'tests/commentsForProtobuf'
options = Namespace()
options.already = set()
options.ex_re = None
options.map_holder = MapHolder()
options.verbose = False
lines, sloc = count_lines_protobuf(test_file, options, 'py')
self.assertEqual(lines, 71)
self.assertEqual(sloc, 46)
if __name__ == '__main__':
unittest.main()
```
#### File: pysloc/tests/test_q.py
```python
import unittest
from pysloc import (MapHolder,
count_lines_double_dash, count_lines_fortran,
count_lines_java_style, count_lines_not_sharp,
count_lines_perl, count_lines_protobuf, count_lines_python,
count_lines_snobol, count_lines_tex,
count_lines_txt)
class TestQ(unittest.TestCase):
"""
Tests the operation of the Q class, which knows about counters.
"""
def setUp(self):
self.map_holder = MapHolder()
def tearDown(self):
pass
# utility functions #############################################
# actual unit tests #############################################
#################################################################
# TEST FILE NAMES beginning with 'yy' should exist in the test
# directory; those beginning with 'zz' should not exist.
#################################################################
def test_ext2lang(self):
""" exhaustive test of mapping extension to short lang name """
# DEBUG
print("DIR(MAP_HOLDER)")
print(dir(self.map_holder))
# END
# expect failure
self.assertEqual(self.map_holder.ext2lang(None), None)
self.assertEqual(self.map_holder.ext2lang(''), None)
self.assertEqual(self.map_holder.ext2lang('foo'), None)
# expect success
self.assertEqual(self.map_holder.ext2lang('C'), 'cpp')
self.assertEqual(self.map_holder.ext2lang('cc'), 'cpp')
self.assertEqual(self.map_holder.ext2lang('cpp'), 'cpp')
self.assertEqual(self.map_holder.ext2lang('c++'), 'cpp')
self.assertEqual(self.map_holder.ext2lang('cxx'), 'cpp')
self.assertEqual(self.map_holder.ext2lang('h'), 'c')
self.assertEqual(self.map_holder.ext2lang('hh'), 'cpp')
self.assertEqual(self.map_holder.ext2lang('hpp'), 'cpp')
self.assertEqual(self.map_holder.ext2lang('adb'), 'ada')
self.assertEqual(self.map_holder.ext2lang('ads'), 'ada')
self.assertEqual(self.map_holder.ext2lang('aug'), 'augeas')
self.assertEqual(self.map_holder.ext2lang('awk'), 'awk')
self.assertEqual(self.map_holder.ext2lang('css'), 'css')
self.assertEqual(self.map_holder.ext2lang('flattened'), 'for')
self.assertEqual(self.map_holder.ext2lang('f90'), 'f90+')
self.assertEqual(self.map_holder.ext2lang('f95'), 'f90+')
self.assertEqual(self.map_holder.ext2lang('f03'), 'f90+')
self.assertEqual(self.map_holder.ext2lang('f08'), 'f90+')
self.assertEqual(self.map_holder.ext2lang('f15'), 'f90+')
self.assertEqual(self.map_holder.ext2lang('for'), 'for')
self.assertEqual(self.map_holder.ext2lang('go'), 'go')
self.assertEqual(self.map_holder.ext2lang('gperf'), 'gperf')
self.assertEqual(self.map_holder.ext2lang('hs'), 'hs')
self.assertEqual(self.map_holder.ext2lang('html'), 'html')
self.assertEqual(self.map_holder.ext2lang('json'), 'json')
self.assertEqual(self.map_holder.ext2lang('java'), 'java')
self.assertEqual(self.map_holder.ext2lang('js'), 'js')
self.assertEqual(self.map_holder.ext2lang('loc_'), 'lex')
self.assertEqual(self.map_holder.ext2lang('lisp'), 'lisp')
self.assertEqual(self.map_holder.ext2lang('m4'), 'm4')
self.assertEqual(self.map_holder.ext2lang('md'), 'md')
self.assertEqual(self.map_holder.ext2lang('occ'), 'occ')
self.assertEqual(self.map_holder.ext2lang('proto'), 'proto')
self.assertEqual(self.map_holder.ext2lang('pl'), 'perl')
self.assertEqual(self.map_holder.ext2lang('pm'), 'perl')
self.assertEqual(self.map_holder.ext2lang('pxd'), 'cython')
self.assertEqual(self.map_holder.ext2lang('py'), 'py')
self.assertEqual(self.map_holder.ext2lang('pyx'), 'cython')
self.assertEqual(self.map_holder.ext2lang('R'), 'R') # short name
self.assertEqual(self.map_holder.ext2lang('r'), 'R') # short name
self.assertEqual(self.map_holder.ext2lang('scala'), 'scala')
self.assertEqual(self.map_holder.ext2lang('sh'), 'sh')
self.assertEqual(self.map_holder.ext2lang('sno'), 'sno')
self.assertEqual(self.map_holder.ext2lang('tex'), 'tex')
self.assertEqual(self.map_holder.ext2lang('toml'), 'toml')
self.assertEqual(self.map_holder.ext2lang('y'), 'yacc')
self.assertEqual(self.map_holder.ext2lang('yaml'), 'yaml')
def test_irregular_ext2lang(self):
""" Exercise the extention-to-language function. """
cpp_holder = MapHolder('cpp')
self.assertEqual(cpp_holder.ext2lang('h'), 'cpp')
occ_holder = MapHolder('occ')
self.assertEqual(occ_holder.ext2lang('inc'), 'occ')
def test_get_counter(self):
""" Exercise the get_counter() function. """
# expect failure if unknown lang and not a command line argument
self.assertEqual(self.map_holder.get_counter(None, False), None)
self.assertEqual(self.map_holder.get_counter('', False), None)
self.assertEqual(self.map_holder.get_counter('foo', False), None)
# on the command line we are more generous
self.assertEqual(
self.map_holder.get_counter(
None, True), count_lines_not_sharp)
self.assertEqual(
self.map_holder.get_counter(
'', True), count_lines_not_sharp)
self.assertEqual(
self.map_holder.get_counter(
'foo', True), count_lines_not_sharp)
# where the language is known we should always succeed
# ... whether this is a command line argument
self.assertEqual(
self.map_holder.get_counter(
'ada', True), count_lines_double_dash)
self.assertEqual(
self.map_holder.get_counter(
'awk', True), count_lines_not_sharp)
self.assertEqual(
self.map_holder.get_counter(
'cython',
True),
count_lines_python)
self.assertEqual(
self.map_holder.get_counter(
'for', True), count_lines_fortran)
self.assertEqual(
self.map_holder.get_counter(
'hs', True), count_lines_double_dash)
self.assertEqual(
self.map_holder.get_counter(
'json', True), count_lines_txt)
self.assertEqual(
self.map_holder.get_counter(
'lex', True), count_lines_java_style)
self.assertEqual(
self.map_holder.get_counter(
'm4', True), count_lines_not_sharp)
self.assertEqual(
self.map_holder.get_counter(
'occ', True), count_lines_double_dash)
self.assertEqual(
self.map_holder.get_counter(
'perl', True), count_lines_perl)
self.assertEqual(
self.map_holder.get_counter(
'proto',
True),
count_lines_protobuf)
self.assertEqual(
self.map_holder.get_counter(
'sno', True), count_lines_snobol)
self.assertEqual(
self.map_holder.get_counter(
'tex', True), count_lines_tex)
self.assertEqual(
self.map_holder.get_counter(
'txt', True), count_lines_txt)
self.assertEqual(
self.map_holder.get_counter(
'yacc', True), count_lines_java_style)
self.assertEqual(
self.map_holder.get_counter(
'yaml', True), count_lines_not_sharp)
# ... or not
self.assertEqual(
self.map_holder.get_counter(
'py', False), count_lines_python)
self.assertEqual(
self.map_holder.get_counter(
'sno', False), count_lines_snobol)
def test_get_long_name(self):
""" sh is omitted """
# expect failure
self.assertEqual(self.map_holder.get_long_name(None), None)
self.assertEqual(self.map_holder.get_long_name(''), None)
self.assertEqual(self.map_holder.get_long_name('foo'), None)
# expect success
self.assertEqual(self.map_holder.get_long_name('ada'), 'Ada')
self.assertEqual(self.map_holder.get_long_name('aug'), 'augeas')
self.assertEqual(self.map_holder.get_long_name('awk'), 'awk')
self.assertEqual(self.map_holder.get_long_name('cython'), 'cython')
self.assertEqual(self.map_holder.get_long_name('for'), 'FORTRAN')
self.assertEqual(self.map_holder.get_long_name('gen'), 'generic')
self.assertEqual(self.map_holder.get_long_name('go'), 'golang')
self.assertEqual(self.map_holder.get_long_name('hs'), 'haskell')
self.assertEqual(self.map_holder.get_long_name('html'), 'html')
self.assertEqual(self.map_holder.get_long_name('java'), 'java')
self.assertEqual(self.map_holder.get_long_name('json'), 'json')
self.assertEqual(self.map_holder.get_long_name('m4'), 'm4')
self.assertEqual(self.map_holder.get_long_name('md'), 'markdown')
self.assertEqual(self.map_holder.get_long_name('objc'), 'Objective C')
self.assertEqual(self.map_holder.get_long_name('occ'), 'Occam')
self.assertEqual(self.map_holder.get_long_name('perl'), 'Perl')
self.assertEqual(self.map_holder.get_long_name('proto'), 'proto')
self.assertEqual(self.map_holder.get_long_name('re2c'), 're2c')
self.assertEqual(self.map_holder.get_long_name('scala'), 'scala')
self.assertEqual(self.map_holder.get_long_name('sno'), 'snobol4')
self.assertEqual(self.map_holder.get_long_name('tex'), 'TeX/LaTeX')
self.assertEqual(self.map_holder.get_long_name('toml'), 'toml')
self.assertEqual(self.map_holder.get_long_name('yaml'), 'yaml')
def test_guess_lang_from_filename(self):
""" Exercise guess_lang() function on various names. """
# expect failure --------------------------------------------
lang, is_test = self.map_holder.guess_lang('./', None, is_cli_arg=True)
self.assertEqual(lang, None)
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang('./', '', is_cli_arg=True)
self.assertEqual(lang, None)
self.assertEqual(is_test, False)
# not recognized but on command line, so use generic counter
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo', is_cli_arg=True)
self.assertEqual(lang, 'gen')
self.assertEqual(is_test, False)
# if not recognized and not on command line, fail -----------
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo', is_cli_arg=False)
self.assertEqual(lang, None)
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'./', 'go', is_cli_arg=False)
self.assertEqual(lang, None)
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'./', 'py', is_cli_arg=False)
self.assertEqual(lang, None)
self.assertEqual(is_test, False)
# no extension, not on command line -------------------------
lang, is_test = self.map_holder.guess_lang(
'./', 'joego', is_cli_arg=False)
self.assertEqual(lang, None)
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'./', 'py', is_cli_arg=False)
self.assertEqual(lang, None)
self.assertEqual(is_test, False)
# if known language should always get language --------------
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo.go', is_cli_arg=True)
self.assertEqual(lang, 'go')
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo.go', is_cli_arg=False)
self.assertEqual(lang, 'go')
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo_test.go', is_cli_arg=True)
self.assertEqual(lang, 'go')
self.assertEqual(is_test, True)
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo_test.go', is_cli_arg=False)
self.assertEqual(lang, 'go')
self.assertEqual(is_test, True)
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo.l', is_cli_arg=True)
self.assertEqual(lang, 'lex')
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo.l', is_cli_arg=False)
self.assertEqual(lang, 'lex')
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo.occ', is_cli_arg=True)
self.assertEqual(lang, 'occ')
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo.occ', is_cli_arg=False)
self.assertEqual(lang, 'occ')
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo.py', is_cli_arg=True)
self.assertEqual(lang, 'py')
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo.py', is_cli_arg=False)
self.assertEqual(lang, 'py')
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'tests', 'test_foo.py', is_cli_arg=True)
self.assertEqual(lang, 'py')
self.assertEqual(is_test, True)
lang, is_test = self.map_holder.guess_lang(
'tests', 'test_foo.py', is_cli_arg=False)
self.assertEqual(lang, 'py')
self.assertEqual(is_test, True)
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo.sno', is_cli_arg=True)
self.assertEqual(lang, 'sno')
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo.sno', is_cli_arg=False)
self.assertEqual(lang, 'sno')
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo.y', is_cli_arg=True)
self.assertEqual(lang, 'yacc')
self.assertEqual(is_test, False)
lang, is_test = self.map_holder.guess_lang(
'tests', 'yy_foo.y', is_cli_arg=False)
self.assertEqual(lang, 'yacc')
self.assertEqual(is_test, False)
        # DON'T KNOW TEST PATTERN FOR SNOBOL
def test_non_code_ext(self):
""" Exercise non_code_ext() function on various extensions."""
# expect failure
self.assertEqual(self.map_holder.non_code_ext(None), False)
self.assertEqual(self.map_holder.non_code_ext(''), False)
self.assertEqual(self.map_holder.non_code_ext('yy_foo'), False)
# expect success
self.assertEqual(self.map_holder.non_code_ext('jar'), True)
self.assertEqual(self.map_holder.non_code_ext('md'), True)
self.assertEqual(self.map_holder.non_code_ext('pyc'), True)
def test_non_code_dir(self):
""" Exercise non_code_dir() function on various names."""
# expect failure
self.assertEqual(self.map_holder.non_code_dir('src'), False)
self.assertEqual(self.map_holder.non_code_dir('tests'), False)
# expect success
self.assertEqual(self.map_holder.non_code_dir('.git'), True)
self.assertEqual(self.map_holder.non_code_dir('__pycache__'), True)
def test_non_code_file(self):
""" Exercise non_code_file() function on various names."""
# expect failure
self.assertEqual(self.map_holder.non_code_file(None), False)
self.assertEqual(self.map_holder.non_code_file(''), False)
self.assertEqual(self.map_holder.non_code_file('yy_foo'), False)
self.assertEqual(self.map_holder.non_code_file('__pycache__'), False)
# expect success
self.assertEqual(self.map_holder.non_code_file('AUTHORS'), True)
self.assertEqual(self.map_holder.non_code_file('CONTRIBUTORS'), True)
self.assertEqual(self.map_holder.non_code_file('COPYING'), True)
self.assertEqual(self.map_holder.non_code_file(
'COPYING.AUTOCONF.EXCEPTION'), True)
self.assertEqual(self.map_holder.non_code_file('COPYING.GNUBL'), True)
self.assertEqual(self.map_holder.non_code_file('COPYING.LIB'), True)
self.assertEqual(self.map_holder.non_code_file('LICENSE'), True)
self.assertEqual(self.map_holder.non_code_file('NEWS'), True)
self.assertEqual(self.map_holder.non_code_file('PATENTS'), True)
self.assertEqual(self.map_holder.non_code_file('README'), True)
self.assertEqual(self.map_holder.non_code_file('TODO'), True)
if __name__ == '__main__':
unittest.main()
```
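Taken together, the assertions above describe a two-step dispatch: map a file extension to a short language name, then map the language name to a counter function. A usage sketch built only from names the test itself imports:
```python
from pysloc import MapHolder, count_lines_python

holder = MapHolder()
lang = holder.ext2lang('py')                 # -> 'py'
counter = holder.get_counter(lang, False)    # -> count_lines_python
assert lang == 'py' and counter is count_lines_python
```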
#### File: pysloc/tests/test_uncomment_java.py
```python
import unittest
from pysloc import uncomment_java
class TestUncommentJava(unittest.TestCase):
""" Test uncomment function Java-like languages. """
def setUp(self):
pass
def tearDown(self):
pass
def test_uncomment_java(self):
""" Verify that uncommenting snippets of Java works correctly. """
line = ''
code, in_comment = uncomment_java(line, True)
self.assertEqual(code, '')
self.assertEqual(in_comment, True)
code, in_comment = uncomment_java(line, False)
self.assertEqual(code, '')
self.assertEqual(in_comment, False)
line = '/**/'
code, in_comment = uncomment_java(line, False)
self.assertEqual(code, '')
self.assertEqual(in_comment, False)
line = '/*'
code, in_comment = uncomment_java(line, False)
self.assertEqual(code, '')
self.assertEqual(in_comment, True)
line = '*/'
code, in_comment = uncomment_java(line, True)
self.assertEqual(code, '')
self.assertEqual(in_comment, False)
line = 'a/* */b/**/c'
code, in_comment = uncomment_java(line, False)
self.assertEqual(code, 'abc')
self.assertEqual(in_comment, False)
line = '//'
code, in_comment = uncomment_java(line, False)
self.assertEqual(code, '')
self.assertEqual(in_comment, False)
line = '/* abc //'
code, in_comment = uncomment_java(line, False)
self.assertEqual(code, '')
self.assertEqual(in_comment, True)
line = 'abc // def '
code, in_comment = uncomment_java(line, False)
self.assertEqual(code, 'abc ')
self.assertEqual(in_comment, False)
if __name__ == '__main__':
unittest.main()
```
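The assertions above pin down the interface: `uncomment_java(line, in_comment)` returns the non-comment code on the line plus the block-comment state to carry into the next line. A sketch that satisfies exactly the cases asserted above (not pysloc's implementation):
```python
def uncomment_java_sketch(line, in_comment):
    """ Strip // and /* */ comments from one line; return (code, in_comment). """
    code = []
    i = 0
    while i < len(line):
        if in_comment:
            end = line.find('*/', i)
            if end == -1:                      # comment runs past end of line
                return ''.join(code), True
            i = end + 2
            in_comment = False
        elif line.startswith('//', i):         # rest of line is a comment
            break
        elif line.startswith('/*', i):
            in_comment = True
            i += 2
        else:
            code.append(line[i])
            i += 1
    return ''.join(code), in_comment


assert uncomment_java_sketch('a/* */b/**/c', False) == ('abc', False)
assert uncomment_java_sketch('/* abc //', False) == ('', True)
```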
```json
{
    "source": "jddixon/pzog",
    "score": 2
}
```
#### File: src/pzog/__init__.py
```python
__version__ = '0.6.13'
__version_date__ = '2018-03-23'
__all__ = ['__version__', '__version_date__',
'PZOG_MAX_MSG', 'PZOG_PORT', 'RING_IP_ADDR',
# methods
'ring_size', ]
# the maximum number of bytes in a message
PZOG_MAX_MSG = 512
PZOG_PORT = 55552 # for this version, v0.2.x
# these are indexed in this order
RING_IP_ADDR = [('losaltos', '192.168.152.253'),
('test', '192.168.152.10'),
('losgatos', '192.168.136.254'),
('supermicro', '192.168.144.16'),
('guadalupe', '192.168.152.254'), ]
def ring_size():
    """ Return the number of (name, IP address) pairs in the ring. """
    return len(RING_IP_ADDR)
```
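A trivial usage sketch of the constants and function defined above, assuming the package is importable as `pzog`:
```python
from pzog import PZOG_MAX_MSG, PZOG_PORT, RING_IP_ADDR, ring_size

assert ring_size() == len(RING_IP_ADDR) == 5
print("pzog listens on port", PZOG_PORT,
      "with messages of at most", PZOG_MAX_MSG, "bytes")
```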
#### File: pzog/tests/test_node.py
```python
import time
import hashlib
# import os
# import sys
import unittest
from Crypto.PublicKey import RSA as rsa
# from Crypto.Signature import PKCS1_v1_5 as pkcs1
from xlattice import HashTypes, check_hashtype
# from xlattice.node import Node
from rnglib import SimpleRNG
RNG = SimpleRNG(time.time())
class TestNode(unittest.TestCase):
"""
Tests an XLattice-style Node, including its sign() and verify()
functions, using SHA1, SHA2(56), and SHA3
"""
def setUp(self):
pass
def tearDown(self):
pass
def check_node(self, node, hashtype):
"""
Verify that the basic capabilities of an XLattice Node are present.
"""
assert node is not None
pub = node.pub_key
id_ = node.node_id
if hashtype == HashTypes.SHA1:
self.assertEqual(20, len(id_))
sha = hashlib.sha1()
elif hashtype == HashTypes.SHA2:
self.assertEqual(32, len(id_))
sha = hashlib.sha256()
elif hashtype == HashTypes.SHA3:
self.assertEqual(32, len(id_))
# pylint: disable=no-member
sha = hashlib.sha3_256()
sha.update(pub.exportKey())
expected_id = sha.digest()
self.assertEqual(expected_id, id_)
# make a random array of bytes
count = 16 + RNG.next_int16(256)
msg = bytearray(count)
RNG.next_bytes(msg)
# sign it and verify that it verifies
sig = node.sign(msg)
self.assertTrue(node.verify(msg, sig))
# flip some bits and verify that it doesn't verify with the same sig
msg[0] = msg[0] ^ 0x36
self.assertFalse(node.verify(msg, sig))
# ---------------------------------------------------------------
def do_test_generated_rsa_key(self, hashtype):
""" Run tests on a generated Node for a specific hashtype. """
assert hashtype # XXX hack: stop warnings
# node = Node(hashtype=hashtype) # no RSA key provided, so creates one
# self.check_node(node, hashtype)
def test_generated_rsa_key(self):
""" Run basic tests for all supported hash types. """
for hashtype in HashTypes:
self.do_test_generated_rsa_key(hashtype)
# ---------------------------------------------------------------
def do_test_with_openssl_key(self, hashtype):
""" Run tests using an OpenSSL key for the specified hashtypes. """
check_hashtype(hashtype)
# import an openSSL-generated 2048-bit key (this becomes a
# string constant in this program)
with open('tests/openssl2k.pem', 'r') as file:
pem_key = file.read()
key = rsa.importKey(pem_key)
assert key is not None
self.assertTrue(key.has_private())
# XXX COMMENTED THIS OUT TO SILENCE WARNINGS
# XXX Need ck_priv
# node = Node(hashtype=hashtype, sk_priv=key)
# self.check_node(node, hashtype)
# The _RSAobj.publickey() returns a raw key.
# self.assertEqual(key.publickey().exportKey(),
# node.pub_key.exportKey())
# -----------------------------------------------------------
# CLEAN THIS UP: node.key and node.pubKey should return
# stringified objects, but node._privateKey and _pubKey should
# be binary
# -----------------------------------------------------------
def test_with_open_ssl_key(self):
""" Run tests using an OpenSSL key for all supported hashtypes. """
for hashtype in HashTypes:
self.do_test_with_openssl_key(hashtype)
if __name__ == '__main__':
unittest.main()
```
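check_node() above expects a node's ID to be the digest of its exported RSA public key: 20 bytes for SHA1, 32 bytes for SHA2 or SHA3. A minimal sketch of that derivation; the function name and the bytes-in/bytes-out interface are assumptions for illustration:
```python
import hashlib


def derive_node_id(exported_pub_key, use_sha3=False):
    """ Hash an exported public key (bytes) to produce a node ID. """
    sha = hashlib.sha3_256() if use_sha3 else hashlib.sha256()
    sha.update(exported_pub_key)
    return sha.digest()              # 32 bytes for the SHA2/SHA3 cases
```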
```json
{
    "source": "jddixon/rnglib",
    "score": 3
}
```
#### File: src/rnglib/__init__.py
```python
import os
import random
import re
__version__ = '1.3.10'
__version_date__ = '2019-03-19'
__all__ = [ \
# constants, so to speak
'__version__', '__version_date__',
'MAX_INT16', 'MAX_INT32', 'MAX_INT64',
'FILE_NAME_CHARS', 'FILE_NAME_STARTERS', 'FILE_NAME_OTHER_CHARS',
# functions
'valid_file_name',
# classes
'SimpleRNG', 'SystemRNG', 'SecureRNG', 'DataFile'
]
# we pray for constant folding - and we would prefer that these be const
MAX_INT16 = 65536
MAX_INT32 = 65536 * 65536
MAX_INT64 = 65536 * 65536 * 65536 * 65536
# characters we permit in file names, other than the first position
FILE_NAME_CHARS = \
r'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_-.'
# patterns used in recognizing valid file names
FILE_NAME_STARTERS = \
r'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_'
FILE_NAME_OTHER_CHARS = \
r'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_\-\.'
VALID_FILE_NAME_PAT = \
r'^[' + FILE_NAME_STARTERS + '][' + FILE_NAME_OTHER_CHARS + ']*$'
VALID_FILE_NAME_RE = re.compile(VALID_FILE_NAME_PAT)
def valid_file_name(name):
""" Return whether the name matches the regular expression. """
match = VALID_FILE_NAME_RE.match(name)
return match is not None
# -------------------------------------------------------------------
class DataFile(object):
""" This appears to be a stub USED ONLY IN TESTING """
def __init__(self, name, parent=None):
self._name = name
self._parent = parent
@property
def name(self):
""" Return the name of the data file. """
return self._name
@property
def path(self):
""" Return a relative or absolute path to the data file. """
if self._parent:
pth = os.path.join(self._parent.path, self._name)
else:
pth = self._name
return pth
@property
def parent(self):
""" Return the name of the data file's parent. """
return self._parent
def __eq__(self, other):
""" Return whether two data files are the same. """
if self is other:
return True
if other is None or self.name != other.name:
return False
if self.parent and self.parent != other.parent:
return False
return self._parent == other.parent
# -------------------------------------------------------------------
class CommonFunc(object):
"""
Parent class for RNG classes.
This class contains convenience functions to be added to Random.
"""
def random(self):
""" Subclasses must override. """
raise NotImplementedError
def next_boolean(self):
""" Return a quasi-random boolean value. """
return self.random() >= 0.5
def next_byte(self, max_=256):
""" Return a quasi-random byte value between 0 and 255 inclusive. """
if max_ < 1:
max_ = 1
elif max_ > 256:
max_ = 256
return int(max_ * self.random())
def _rand_bytes(self, count):
_ = self
for _ in range(count):
yield random.getrandbits(8)
def next_bytes(self, buf):
"""
buf is a bytearray. Fill it with random bytes.
This is the version for the Mersenne Twister. SystemRNG and
SecureRNG should override.
"""
if buf is not None:
count = len(buf)
if count <= 64:
val = bytearray(self._rand_bytes(count))
else:
val = bytearray(os.urandom(count))
buf[:] = val
def some_bytes(self, count):
""" return a bytearray of N random bytes """
buffer = bytearray(count)
self.next_bytes(buffer)
return buffer
def next_int16(self, max_=65536):
""" Return a quasi-random 16-bit int < max_. """
if (max_ <= 0) or (max_ > 65536):
max_ = 65536
return int(max_ * self.random())
def next_int32(self, max_=(65536 * 65536)):
""" Return a quasi-random 32-bit int < max_. """
if (max_ <= 0) or ((65536 * 65536) < max_):
max_ = (65536 * 65536)
return int(max_ * self.random())
def next_int64(self, max_=(65536 * 65536 * 65536 * 65536)):
""" Return a quasi-random 64-bit int < max_. """
if (max_ <= 0) or ((65536 * 65536 * 65536 * 65536) < max_):
max_ = (65536 * 65536 * 65536 * 65536)
return int(max_ * self.random())
def next_real(self):
"""
Return a quasi-random floating-point number in the range [0..1).
"""
return self.random()
# ---------------------------------------------------------------
# These produce strings which are acceptable POSIX file names
# and also advance a cursor by a multiple of 64 bits. All strings
    # are at least one byte and less than max_len bytes in length. We
# arbitrarily limit file names to less than 256 characters.
def _next_file_name(self, name_len):
""" Always returns at least one character. """
max_starter_ndx = len(FILE_NAME_STARTERS)
ndx = self.next_byte(max_starter_ndx)
name = FILE_NAME_STARTERS[ndx]
max_char_ndx = len(FILE_NAME_CHARS)
for _ in range(name_len - 1):
ndx = self.next_byte(max_char_ndx)
char = FILE_NAME_CHARS[ndx]
name = name + char
return name
def next_file_name(self, max_len):
""" Return a legal file name with 0 < length < max_len). """
if max_len < 2:
max_len = 2 # this is a ceiling which cannot be reached
name_len = 0
while name_len == 0:
name_len = self.next_byte(max_len) # so len < 256
while True:
name = self._next_file_name(name_len)
if name and (name.find("..") == -1):
break
return name
# These are operations on the file system. Directory depth is at least 1
# and no more than 'depth'. Likewise for width, the number of
# files in a directory, where a file is either a data file or a
# subdirectory. The number of bytes in a file is at least min_len and
# less than max_len.
# Subdirectory names may be random
def next_data_file(self, dir_name, max_len, min_len=0):
"""
Return a data file in directory dir_name with a quasi-random name
        and contents. The file is at least min_len bytes long and less than
max_len bytes long. Parameters are silently converted to reasonable
values if necessary.
"""
if min_len < 0:
min_len = 0
if max_len < min_len + 1:
max_len = min_len + 1
# loop until name does not match existing file
path_to_file = "%s/%s" % (dir_name, self.next_file_name(16))
while os.path.exists(path_to_file):
path_to_file = "%s/%s" % (dir_name, self.next_file_name(16))
count = min_len + int(self.random() * (max_len - min_len))
data = self.some_bytes(count)
with open(path_to_file, "wb") as file:
file.write(data)
# could check file size with file.tell()
return (count, path_to_file)
# BUGS
# * on at least one occasion with width = 4 only 3 files/directories
# were created at the top level (2 were subdirs)
# DEFICIENCIES:
# * no control over percentage of directories
# * no guarantee that depth will be reached
def next_data_dir(self, path_to_dir, depth, width, max_len, min_len=0):
""" Creates a directory tree populated with data files. """
# number of directory levels; 1 means no subdirectories
if depth < 1:
depth = 1
# number of members (files, subdirectories) at each level
if width < 1:
width = 1
if not os.path.exists(path_to_dir):
os.makedirs(path_to_dir)
subdir_so_far = 0
for i in range(width):
if depth > 1:
if (self.random() > 0.25) and (
(i < width - 1) or (subdir_so_far > 0)):
# 25% are subdirs
# data file i
# SPECIFICATION ERROR: file name may not be unique
(_, path_to_file) = self.next_data_file(
path_to_dir, max_len, min_len)
_ = path_to_file
else:
# directory
subdir_so_far += 1
# create unique name
file_name = self.next_file_name(16)
path_to_subdir = os.path.join(path_to_dir, file_name)
self.next_data_dir(path_to_subdir, depth - 1, width,
max_len, min_len)
else:
# data file
# SPECIFICATION ERROR: file name may not be unique
(_, path_to_leaf) = self.next_data_file(
path_to_dir, max_len, min_len)
_ = path_to_leaf # suppress warning ?
class SimpleRNG(random.Random, CommonFunc):
""" if salt is None, uses time of day as salt """
def __init__(self, salt=None):
super().__init__(salt) # in first parent
class SystemRNG(random.SystemRandom, CommonFunc):
"""
A more secure random number generator getting numbers from the
system's /dev/urandom. This will be slower than SimpleRNG but
not so very slow as an RNG using /dev/random, which will block
until enough entropy accumulates.
"""
def __init__(self, salt=None):
super().__init__() # "useless super delegation" ?
_ = salt # make pylint happy
def getstate(self):
""" Implements abstract function. """
raise NotImplementedError('not implemented, stateless RNG')
def setstate(self, state):
""" Implements abstract function. """
raise NotImplementedError('not implemented, stateless RNG')
def next_byte(self, max_=256):
"""
Return a quasi-random byte value between 0 and max_ - 1 inclusive.
"""
if max_ < 1:
max_ = 1
elif max_ > 256:
max_ = 256
val = os.urandom(1)[0]
if max_ < 256:
val *= float(max_) / 256
return int(val)
def next_bytes(self, buf):
"""
buf is a bytearray. Fill it with random bytes.
"""
if buf is not None:
count = len(buf)
buf[:] = bytearray(os.urandom(count))
class SecureRandom(random.Random):
"""
Overrides Random.random(), stubs the other 5 functions.
"""
BPF = 53 # bits in a Python float
RECIP_BPF = 2 ** -BPF
def __init__(self, salt=None):
super().__init__() # useless super delegation ?
_ = salt # to suppress pylint complaints
def _random(self, k):
""" Read /dev/random for k bytes: blocks. """
assert k >= 0
with open('/dev/random', 'rb') as file:
return file.read(k)
def random(self):
""" Return a random value in the range [0..1) """
# DEBUG
print("SecureRandom.random")
# END
return (int.from_bytes(self._random(7), 'little') >> 3) * \
SecureRandom.RECIP_BPF
# def seed(self): # a=None, version=2):
@staticmethod
def seed(a=None, version=2):
""" Unused abstract method. """
# _,_ = a, version
raise NotImplementedError('not implemented, stateless RNG')
# return
def jumpahead(self):
""" Unused abstract method. """
pass
def getstate(self):
""" Implements abstract function. """
raise NotImplementedError('not implemented, stateless RNG')
def setstate(self, state):
""" Implements abstract function. """
_ = state # suppress warnings
raise NotImplementedError('not implemented, stateless RNG')
def _notimplemented(self):
""" Implements abstract function. """
raise NotImplementedError()
class SecureRNG(SecureRandom, CommonFunc):
"""
SecureRandom plus the common functions,
"""
def __init__(self, salt=0):
super().__init__() # in first parent, I hope
# self.seed(salt)
def _notimplemented(self):
""" Implements abstract function. """
raise NotImplementedError()
def next_byte(self, max_=256):
"""
Return a quasi-random byte value between 0 and max_ - 1 inclusive.
"""
if max_ < 1:
max_ = 1
elif max_ > 256:
max_ = 256
val = self._random(1)[0]
if max_ < 256:
val *= float(max_) / 256
return int(val)
def next_bytes(self, buf):
"""
buf is a bytearray. Fill it with random bytes.
"""
if buf is not None:
count = len(buf)
buf[:] = bytearray(self._random(count))
```
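A short usage sketch for the classes defined above, seeding SimpleRNG with the current time as the library's own tests do:
```python
import time
from rnglib import SimpleRNG

rng = SimpleRNG(time.time())
name = rng.next_file_name(16)        # POSIX-safe name, 1..15 characters
data = rng.some_bytes(32)            # bytearray of 32 quasi-random bytes
print(name, data.hex())
```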
#### File: rnglib/tests/test_valid_file_name.py
```python
import unittest
from rnglib import valid_file_name
class TestValidFileName(unittest.TestCase):
""" Exercise the valid_file_name() function. """
def test_file_names(self):
"""
Verify that known good and known bad names succeed or fail
as appropriate.
"""
self.assertTrue(valid_file_name('1'))
self.assertTrue(valid_file_name('_'))
self.assertTrue(valid_file_name('_.'))
self.assertTrue(valid_file_name('_-'))
self.assertTrue(valid_file_name('1abc._'))
self.assertTrue(valid_file_name('abc._def'))
self.assertTrue(valid_file_name('QRS.T..UV_def'))
self.assertFalse(valid_file_name(''))
self.assertFalse(valid_file_name('-'))
self.assertFalse(valid_file_name('~'))
self.assertFalse(valid_file_name('$'))
self.assertFalse(valid_file_name('?'))
self.assertFalse(valid_file_name('!'))
self.assertFalse(valid_file_name('.'))
self.assertFalse(valid_file_name('1abc ._')) # contains a space
self.assertFalse(valid_file_name('1abc\t._')) # contains a tab
self.assertFalse(valid_file_name('QRS.T..UV_def$')) # dollar sign
if __name__ == '__main__':
unittest.main()
```
```json
{
    "source": "jddixon/upax",
    "score": 3
}
```
#### File: src/upax/consistency.py
```python
from upax import UpaxError
from upax.ftlog import BoundLog, FileReader # , LogEntry
from upax.server import BlockingServer
from upax.walker import UWalker
__all__ = ['check', ]
def setup_server(options):
""" Add server configuration info to the set of options. """
options.uServer = BlockingServer(options.u_path, options.hashtype)
def shutdown_server(options):
""" Shut down a upax server if it is running. """
if options.uServer:
options.uServer.close()
def walk_u(options):
"""
Returns a list of content keys in the selected region of U,
the region being defined by a two hex digit start point and
a maximum number of entries to be included.
"""
www = UWalker(just_keys=options.just_keys,
limit=options.limit,
start_at=options.start_at,
u_path=options.u_path,
hashtype=options.hashtype,
verbose=options.verbose)
keys = www.walk()
return keys
def check(options):
"""
Examines U and its log (U/L), reports inconsistencies, and
possibly takes action to correct them.
If the --repair argument is present, will locate any content files
in U that are not in the log and add them with the int form of the
time of the run as the timestamp, U/nodeID as the nodeID, this program
as the source, and the content key as the path. By default the program
assumes that we are using SHA1 to calculate content keys.
In this implementation no attempt is made to verify that the
content key accurately reflects what is in the file.
Also, whereas every file in U is examined, no attempt is made to
    verify that every entry in the log corresponds to a file in U. The
    not very coherent idea is that log entries may describe files on
    other machines and so may be used to retrieve them. Presumably a
future version of this program will have a map from valid nodeIDs
to end points (fully qualified domain names and port numbers for
peers), allowing utilities to fetch files from the source host
by content key.
"""
options.uServer = None
try:
setup_server(options) # gets locks on U and U0
except UpaxError:
if options.uServer is None:
print("have you set usingSHA correctly?")
else:
raise
if options.uServer is not None:
_do_server_shutdown(options)
def _do_server_shutdown(options):
repairing = options.repairing
verbose = options.verbose
try:
# LOG: keyed by hash, later entries with same hash should
# overwrite earlier
options.reader = FileReader(options.u_path, options.hashtype)
options.log = BoundLog(options.reader, options.hashtype)
log = options.log
# U: sorted content keys
keys = walk_u(options)
# for now, just check whether each content key has a log
# entry
for key in keys:
if key in log.index:
if verbose:
log_e = log.index[key]
print(("%s in ndx, src '%s'" % (key, log_e.src)))
else:
if repairing:
entry = log.add_entry(options.timestamp, key,
options.myNodeID,
options.app_name, key)
if verbose:
print(("ADDED TO LOG: %s" % entry))
else:
if verbose:
print(("%s is not in the log" % key))
# DEBUG -------------------------------------------------
if verbose:
print(("COUNT OF ITEMS CHECKED IN U: %s" % len(keys)))
print(("NUMBER OF LOG ENTRIES: %s" % len(options.log)))
# END ---------------------------------------------------
finally:
try:
if options.log is not None:
options.log.close()
except AttributeError:
pass
shutdown_server(options) # releases lock on U
```
#### File: src/upax/ftlog.py
```python
import os
import re
# import sys
from collections.abc import Container, Sized
from xlattice import (HashTypes, check_hashtype, # u,
SHA1_HEX_NONE, SHA2_HEX_NONE, SHA3_HEX_NONE,
BLAKE2B_HEX_NONE)
from upax import UpaxError
from upax.node import check_hex_node_id_160, check_hex_node_id_256
__all__ = ['ATEXT', 'AT_FREE',
'PATH_RE',
'BODY_LINE_1_RE', 'BODY_LINE_256_RE',
'IGNORABLE_RE',
# classes
'Log', 'BoundLog', 'LogEntry',
'Reader', 'FileReader', 'StringReader', ]
# -------------------------------------------------------------------
# CLASS LOG AND SUBCLASSES
# -------------------------------------------------------------------
# Take care: this pattern is used in xlmfilter, possibly elsewhere
# this is RFC2822's atext; *,+,?,- are escaped; needs to be enclosed in []+
ATEXT = r"[a-z0-9!#$%&'\*\+/=\?^_`{|}~\-]+"
AT_FREE = ATEXT + r'(?:\.' + ATEXT + r')*'
# this permits an RFC2822 message ID but is a little less restrictive
PATH_PAT = AT_FREE + r'(?:@' + AT_FREE + ')?'
PATH_RE = re.compile(PATH_PAT, re.I)
BODY_LINE_1_PAT =\
r'^(\d+) ([0-9a-f]{40}) ([0-9a-f]{40}) "([^"]*)" (%s)$' % PATH_PAT
BODY_LINE_1_RE = re.compile(BODY_LINE_1_PAT, re.I)
BODY_LINE_256_PAT =\
r'^(\d+) ([0-9a-f]{64}) ([0-9a-f]{64}) "([^"]*)" (%s)$' % PATH_PAT
BODY_LINE_256_RE = re.compile(BODY_LINE_256_PAT, re.I)
IGNORABLE_PAT = '(^ *$)|^ *#'
IGNORABLE_RE = re.compile(IGNORABLE_PAT)
class Log(Container, Sized):
"""a fault-tolerant log"""
def __init__(self, reader, hashtype):
self._hashtype = hashtype
(timestamp, prev_log_hash, prev_master, entries, index) = reader.read()
self._timestamp = timestamp # seconds from epoch
self._prev_hash = prev_log_hash # SHA1/3 hash of previous Log
if hashtype == HashTypes.SHA1:
check_hex_node_id_160(self._prev_hash)
else:
check_hex_node_id_256(self._prev_hash)
self._prev_master = prev_master # nodeID of master writing prev log
if hashtype == HashTypes.SHA1:
check_hex_node_id_160(self._prev_master)
else:
check_hex_node_id_256(self._prev_master)
self._entries = entries # a list
self._index = index # a map, hash => entry
def __contains__(self, key):
""" Return whether this key is in the Log. """
return key in self._index
def __len__(self):
""" Return the length of this Log. """
return len(self._entries)
def __str__(self):
"""used for serialization, so includes newline"""
# first line
if self._hashtype == HashTypes.SHA1:
fmt = "%013u %40s %40s\n"
else:
fmt = "%013u %64s %64s\n"
ret = fmt % (self._timestamp, self._prev_hash, self._prev_master)
# list of entries
for entry in self._entries:
ret += str(entry) # woefully inefficient :-)
return ret
def add_entry(self, tstamp, key, node_id, src, path):
"""
Create a LogEntry with the given timestamp, key, nodeID, src, and path.
If the LogEntry is already present in the Log, return a reference to
the existing LogEntry. Otherwise, add the LogEntry to the list and
index it by key.
"""
entry = LogEntry(tstamp, key, node_id, src, path)
if key in self._index:
existing = self._index[key]
if entry == existing:
return existing # silently ignore duplicates
self._entries.append(entry) # increases size of list
self._index[key] = entry # overwrites any earlier duplicates
return entry
def get_entry(self, key):
""" Given a key, return the corresponding LogEntry or None. """
if key not in self._index:
return None
return self._index[key]
@property
def entries(self):
""" Return the list of LogEntries. """
return self._entries
@property
def index(self):
""" Return the index by key into the list of LogEntries. """
return self._index
@property
def prev_hash(self):
""" Return the content hash of the previous Log. """
return self._prev_hash
@property
def prev_master(self):
"""
Return the ID of the master of the previous Log.
"""
return self._prev_master
@property
def timestamp(self):
""" Return the timestamp for this Log. """
return self._timestamp
class BoundLog(Log):
""" A fult tolerant log bound to a file. """
def __init__(self, reader, hashtype=HashTypes.SHA2,
u_path=None, base_name='L'):
super(). __init__(reader, hashtype)
self.fd_ = None
self.is_open = False # for appending
overwriting = False
if u_path:
self.u_path = u_path
self.base_name = base_name
overwriting = True
else:
if isinstance(reader, FileReader):
self.u_path = reader.u_path
self.base_name = reader.base_name
overwriting = False
else:
msg = "no target uPath/baseName specified"
raise UpaxError(msg)
self.path_to_log = "%s/%s" % (self.u_path, self.base_name)
if overwriting:
            with open(self.path_to_log, 'w') as file:
                log_contents = super(BoundLog, self).__str__()
                file.write(log_contents)
self.fd_ = open(self.path_to_log, 'a')
self.is_open = True
def add_entry(self, tstamp, key, node_id, src, path):
if not self.is_open:
msg = "log file %s is not open for appending" % self.path_to_log
raise UpaxError(msg)
# XXX NEED TO THINK ABOUT THE ORDER OF OPERATIONS HERE
entry = super(
BoundLog,
self).add_entry(tstamp, key, node_id, src, path)
stringified = str(entry)
self.fd_.write(stringified)
return entry
def flush(self):
"""
Flush the log.
This should write the contents of any internal buffers to disk,
but no particular behavior is guaranteed.
"""
self.fd_.flush()
def close(self):
""" Close the log. """
self.fd_.close()
self.is_open = False
# -------------------------------------------------------------------
class LogEntry():
"""
The entry made upon adding a file to the Upax content-keyed data store.
    This consists of a timestamp; an SHA content key (the hash of the
    contents of the file); the NodeID identifying the contributor; its
    source (which may be a program name); and a UNIX/POSIX path
    associated with the file. The path will normally be relative.
"""
__slots__ = ['_timestamp', '_key', '_node_id', '_src', '_path', ]
def __init__(self,
timestamp, key, node_id, source, pathToDoc):
self._timestamp = timestamp # seconds from epoch
if key is None:
raise UpaxError('LogEntry key may not be None')
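        # NB: `hashtype` below is really a length test (True iff the key is a
        # 40-digit SHA1 hex value); the comparisons against HashTypes.SHA1
        # only behave as intended if that enum member compares equal to True.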
hashtype = len(key) == 40
self._key = key # 40 or 64 hex digits, content hash
if hashtype == HashTypes.SHA1:
check_hex_node_id_160(self._key)
else:
check_hex_node_id_256(self._key)
if node_id is None:
raise UpaxError('LogEntry nodeID may not be None')
self._node_id = node_id # 40/64 digits, node providing entry
# XXX This is questionable. Why can't a node with a SHA1 id store
# a datum with a SHA3 key?
if hashtype == HashTypes.SHA1:
check_hex_node_id_160(self._node_id)
else:
check_hex_node_id_256(self._node_id)
self._src = source # tool or person responsible
self._path = pathToDoc # file name
@property
def key(self):
"""
        Return the 40- or 64-character hex SHA hash associated with the entry.
This is an SHA content hash.
"""
return self._key
@property
def node_id(self):
""" Return the 40- or 64-byte NodeID associated with the entry. """
return self._node_id
@property
def path(self):
""" Return the POSIX path associated with the LogEntry. """
return self._path
@property
def src(self):
""" Return the 'src' associated with the LogEntry. """
return self._src
@property
def timestamp(self):
""" Return the time at which the LogEntry was created. """
return self._timestamp
@property
def hashtype(self):
""" XXX WRONG should return key length, allowing 64 or 40. """
return len(self._key) == 40
# used in serialization, so newlines are intended
def __str__(self):
if self.hashtype == HashTypes.SHA1:
fmt = '%013u %40s %40s "%s" %s\n'
else:
fmt = '%013u %64s %64s "%s" %s\n'
return fmt % (self._timestamp, self._key,
self._node_id, self._src, self._path)
def __eq__(self, other):
return isinstance(other, LogEntry) and\
self._timestamp == other.timestamp and\
self._key == other.key and\
self._node_id == other.node_id and\
self._src == other.src and\
self._path == other.path
def __ne__(self, other):
return not self.__eq__(other)
def equals(self, other):
"""
        The function usually known as __eq__. XXX DEPRECATED
"""
return self.__eq__(other)
# -------------------------------------------------------------------
# CLASS READER AND SUBCLASSES
# -------------------------------------------------------------------
class Reader(object):
"""
    Would prefer to be able to handle this through something like a Java
    Reader, so that we could test with a StringReader but then use a
    FileReader in production.  If it is a file, file.readlines(sizeHint)
    supposedly has very good performance for larger sizeHint, say 100KB.
    It appears that lines returned need to be rstripped, which wastefully
    requires copying.
    For our purposes, string input can just be split on newlines, which
    has the benefit of effectively chomping at the same time.
"""
# __slots__ = ['_entries', '_index', '_lines', '_hashtype',
# 'FIRST_LINE_RE', ]
def __init__(self, lines, hashtype):
check_hashtype(hashtype)
self._hashtype = hashtype
if hashtype == HashTypes.SHA1:
first_line_pat = r'^(\d{13}) ([0-9a-f]{40}) ([0-9a-f]{40})$'
else:
first_line_pat = r'^(\d{13}) ([0-9a-f]{64}) ([0-9a-f]{64})$'
self.first_line_re = re.compile(first_line_pat, re.I)
# XXX verify that argument is an array of strings
self._lines = lines
ndx_last = len(self._lines) - 1
# strip newline from last line if present
if ndx_last >= 1:
self._lines[ndx_last] = self._lines[ndx_last].rstrip('\n')
# Entries are a collection, a list. We also need a dictionary
# that accesses each log entry using its hash.
self._entries = [] # the empty list
self._index = dict() # mapping hash => entry
@property
def hashtype(self):
""" Return the type of SHA hash used. """
return self._hashtype
def read(self):
"""
The first line contains timestamp, hash, nodeID for previous Log.
Succeeding lines look like
timestamp hash nodeID src path
In both cases timestamp is an unsigned int, the number of
milliseconds since the epoch. It can be printed with %13u.
The current value (April 2011) is about 1.3 trillion (1301961973000).
"""
first_line = None
if self._lines:
first_line = self._lines[0]
if first_line:
match = re.match(self.first_line_re, first_line)
if not match:
print("NO MATCH, FIRST LINE; hashtype = %s" % self.hashtype)
print((" FIRST LINE: '%s'" % first_line))
raise UpaxError("no match on first line; giving up")
timestamp = int(match.group(1))
prev_log_hash = match.group(2)
prev_master = match.group(3)
del self._lines[0] # so we can cleanly iterate
else:
# no first line
timestamp = 0
if self._hashtype == HashTypes.SHA1:
prev_log_hash = SHA1_HEX_NONE
prev_master = SHA1_HEX_NONE
elif self._hashtype == HashTypes.SHA2:
prev_log_hash = SHA2_HEX_NONE
prev_master = SHA2_HEX_NONE
elif self._hashtype == HashTypes.SHA3:
prev_log_hash = SHA3_HEX_NONE
prev_master = SHA3_HEX_NONE
elif self._hashtype == HashTypes.BLAKE2B:
prev_log_hash = BLAKE2B_HEX_NONE
prev_master = BLAKE2B_HEX_NONE
else:
raise NotImplementedError
entries = []
index = dict()
for line in self._lines:
# Read each successive line, creating an entry for each and
# indexing each. Ignore blank lines and those beginning with
# a hash ('#')
match = re.match(IGNORABLE_RE, line)
if match:
continue
if self._hashtype == HashTypes.SHA1:
match = re.match(BODY_LINE_1_RE, line)
else:
match = re.match(BODY_LINE_256_RE, line)
if match:
tstamp = int(match.group(1))
key = match.group(2)
node_id = match.group(3)
src = match.group(4)
path = match.group(5)
# constructor should catch invalid fields
entry = LogEntry(tstamp, key, node_id, src, path)
entries.append(entry)
index[key] = entry
else:
msg = "not a valid log entry line: '%s'" % line
raise UpaxError(msg)
return (timestamp, prev_log_hash, prev_master, entries, index)
# -------------------------------------------------------------------
class FileReader(Reader):
"""
Accept uPath and optionally log file name, read entire file into
a string array, pass to Reader.
"""
__slots__ = ['_u_path', '_base_name', '_log_file', ]
# XXX CHECK ORDER OF ARGUMENTS
def __init__(self, u_path, hashtype=False, base_name="L"):
if not os.path.exists(u_path):
raise UpaxError("no such directory %s" % u_path)
self._u_path = u_path
self._base_name = base_name
self._log_file = "%s/%s" % (self._u_path, base_name)
with open(self._log_file, 'r') as file:
contents = file.read()
lines = contents.split('\n')
super(FileReader, self).__init__(lines, hashtype)
@property
def base_name(self):
""" Return the base name of the log file. """
return self._base_name
@property
def log_file(self):
""" Return the path to the log file. """
return self._log_file
@property
def u_path(self):
""" Return the path to uDir, the content-keyed store. """
return self._u_path
# -------------------------------------------------------------------
class StringReader(Reader):
"""
Accept a (big) string, convert to a string array, pass to Reader
"""
def __init__(self, bigString, hashtype=False):
# split on newlines
lines = bigString.split('\n')
super().__init__(lines, hashtype)
```
|
{
"source": "jddixon/xlattice_py",
"score": 4
}
|
#### File: src/xlattice/address.py
```python
from abc import ABCMeta, abstractmethod
class Address(metaclass=ABCMeta):
""" XLattice's Address abstraction. """
@abstractmethod
def __eq__(self, other): # -> bool
""" Whether this Address equals another. """
return False
@abstractmethod
def hashcode(self): # -> int
""" Return a reasonably distributed hash for the Address. """
return 0
@abstractmethod
def __str__(self): # -> str
""" Return a string representation of the Address. """
pass
```
#### File: src/xlattice/connector.py
```python
from abc import ABCMeta, abstractmethod
class Connector(metaclass=ABCMeta):
""" Used to establish a Connetion with another Node-like entity. """
@abstractmethod
def connect(self, near_end, blocking): # raises IOException
"""
Establish a Connection with another entity using the transport
and address in the EndPoint.
        @param near_end local end point to use for connection
@param blocking whether the new Connection is to be blocking
@raises IOException if not in appropriate state
"""
pass
def get_far_end(self): # -> EndPoint
"""
Return the Acceptor EndPoint that this Connector is used to
        establish connections to.
"""
pass
```
|
{
"source": "jddixon/xlcrypto_py",
"score": 3
}
|
#### File: xlcrypto/filters/__init__.py
```python
from threading import Lock
# from binascii import b2a_hex
from copy import deepcopy
from math import exp
from xlcrypto import XLFilterError
__all__ = ['MIN_M', 'MIN_K', 'BloomSHA', 'NibbleCounters']
# EXPORTED CONSTANTS ------------------------------------------------
MIN_M = 2   # minimum filter size exponent (the filter has 2**m bits)
MIN_K = 1 # minimum number of 'hash functions'
# PRIVATE CONSTANTS -------------------------------------------------
SIZEOF_UINT64 = 8 # bytes
# ===================================================================
class BloomSHA(object):
"""
A Bloom filter for sets of Secure Hash Algorithm (SHA) digests.
A Bloom filter uses a set of k hash functions to determine set
membership. Each hash function produces a value in the range 0..M-1.
The filter is of size M bits, where M is a power of two. To add a
member to the set, apply each hash function to the new member and set
the corresponding bit in the filter. For M very large relative to k,
this will normally set k bits in the filter. To check whether x is a
member of the set, apply each of the k hash functions to x and check
whether the corresponding bits are set in the filter. If any are not
set, x is definitely not a member. If all are set, x may be a member.
The probability of error (the false positive rate) is
f = (1 - e^(-kN/M))^k
where N is the number of filter (set) members.
This class takes advantage of the fact that SHA digests are good-
quality pseudo-random numbers. The k hash functions are the values
    of distinct sets of bits taken from the SHA hash.  The number of bits
in the filter, M, is constrained to be a power of 2; M == 2**m. The
number of bits in each hash function may not exceed floor(m/k), or as
we say in Python, m//k.
This class is designed to be thread-safe, but this has not been
exhaustively tested.
"""
def __init__(self, m=20, k=8, key_bytes=20):
"""
Creates a filter with 2**m bits and k 'hash functions',
where each hash function is a portion of the SHA digest.
@param m determines number of bits in filter, defaults to 20
@param k number of hash functions, defaults to 8
@param key_bytes length in bytes of keys acceptable to the filter
"""
m = int(m) # must be an int
if m < MIN_M:
raise XLFilterError("m = %d but must be > %d" % (m, MIN_M))
key_bytes = int(key_bytes) # must be an int
if key_bytes <= 0:
raise XLFilterError("must specify a positive key length")
key_bits = key_bytes * 8 # length of keys in bits
k = int(k) # must be an int
if k < MIN_K:
raise XLFilterError(
"too many hash functions (%d) for filter size" % k)
if k * m > key_bits:
k = key_bits // m # rounds down to number that will fit
self._mm = m
self._kk = k
self._key_bytes = key_bytes
self._key_count = 0
# convenience variables
self._filter_bits = 1 << m
self._filter_bytes = (self._filter_bits + 7) // 8 # round up
self._filter = bytearray(self._filter_bytes)
self._lock = Lock()
# DEBUG
# print("Bloom ctor: m %d, k %d, filter_bits %d, filter_bytes %d" % (
# self._mm, self._kk, self._filter_bits, self._filter_bytes))
# END
@property
def m(self):
""" Return m, the number of bits in the filter (default == 20). """
return self._mm
@property
def k(self):
""" Return k, the number of hash functions. """
return self._kk
@property
def key_bytes(self):
""" Length in bytes of acceptable keys (default == 20 bytes). """
return self._key_bytes
def _do_clear(self):
""" Clear the filter, unsynchronized. """
for i in range(self._filter_bytes):
self._filter[i] = 0
def clear(self):
""" Clear the filter, synchronized version. """
try:
self._lock.acquire()
self._do_clear()
self._key_count = 0
# jdd added 2005-02-19
finally:
self._lock.release()
def __len__(self):
"""
        Return the number of keys which have been inserted.  This
        class (BloomSHA) does not guarantee uniqueness in any sense:
        if the same key is added N times, the number of set members
        reported will increase by N.
"""
try:
self._lock.acquire()
return self._key_count
finally:
self._lock.release()
@property
def capacity(self):
""" Return number of bits in filter. """
return self._filter_bits
def false_positives(self, n=0):
"""
@param n number of set members
@return approximate False positive rate
"""
if n == 0:
n = self._key_count
return (1 - exp(-self._kk * n / self._filter_bits)) ** self._kk
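    # Worked example (illustrative): with the defaults m=20 (M = 2**20 bits)
    # and k=8, n=1000 inserted keys give
    #   f = (1 - e**(-8*1000/2**20))**8  ~= 1.1e-17
    # rising to about (1 - 1/e)**8 ~= 0.025 when n reaches M/k = 131072.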
def insert(self, keysel):
"""
Add a key to the set represented by the filter.
        XXX This version does not maintain 4-bit counters; it is not
a counting Bloom filter.
@param keysel KeySelector for key (SHA digest)
"""
if keysel is None:
raise XLFilterError("KeySelector may not be None")
bitsel, bytesel = keysel.bitsel, keysel.bytesel
try:
self._lock.acquire()
for i in range(self._kk):
self._filter[bytesel[i]] |= (1 << bitsel[i])
self._key_count += 1
# DEBUG
# print("key count := %d" % self._key_count)
# END
finally:
self._lock.release()
def _is_member(self, keysel):
"""
Whether a key is in the filter. Sets up the bit and byte offset
arrays.
@param keysel KeySelector for key (SHA digest)
@return True if b is in the filter
"""
bitsel, bytesel = keysel.bitsel, keysel.bytesel
for i in range(self._kk):
check_byte = self._filter[bytesel[i]]
if (check_byte & (1 << bitsel[i])) == 0:
return False
return True
def is_member(self, keysel):
"""
Whether a key is in the filter. External interface, internally
synchronized.
@param keysel KeySelector for a key (SHA digest)
@return True if b is in the filter
"""
if keysel is None:
raise XLFilterError("KeySelector may not be None")
try:
self._lock.acquire()
return self._is_member(keysel)
finally:
self._lock.release()
# ===================================================================
class KeySelector(object):
def __init__(self, key, bloom):
if not key:
raise XLFilterError(
"key being added to KeySelector may not be None or empty")
self._key = bytes(deepcopy(key)) # so immutable
# XXX Weak test.
if bloom is None:
raise XLFilterError("bloom may not be None")
key_bytes = bloom.key_bytes
if len(key) != key_bytes:
raise XLFilterError(
"key of length %d but fltr expects length of %d bytes" % (
len(key), key_bytes))
m, k = bloom.m, bloom.k
# DEBUG
# print("KeySelector: m = %d, k = %d" % (m, k))
# END
bitsel = [0] * k # ints used to select flag bits
bytesel = [0] * k # ints used to select flag bytes
# Given a key, populate the byte and bit offset arrays, each
# of which has k elements. The low order 3 bits are used to
        # select a bit within a byte.  The higher order bits are used
        # to select the byte.
# convert the bytes of the key to a single long int
i = int.from_bytes(key, 'little') # signed=False
# extract the k bit and byte selectors
for j in range(k):
bitsel[j] = i & 0x7 # get 3 bits selecting bit in byte
i >>= 3
byte_mask = (1 << (m - 3)) - 1
bytesel[j] = i & byte_mask
i >>= m - 3
self._bitsel = bitsel
self._bytesel = bytesel
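        # Bit-budget sketch (illustrative): with m=20 and k=8, each hash
        # function consumes 3 bits (bit-in-byte) plus m-3 = 17 bits (byte
        # selector), so k*m = 160 bits -- exactly one 20-byte SHA1 digest,
        # which is why key_bytes defaults to 20 in BloomSHA.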
@property
def bitsel(self):
""" Return the bit selector. """
return self._bitsel
@property
def bytesel(self):
""" Return the byte selector. """
return self._bytesel
@property
def key(self):
""" Return the value of the key associated with the selector. """
return self._key
# ===================================================================
class NibbleCounters(object):
"""
Maintain a set of 4-bit counters, one for each bit in a BloomSHA.
Counters are stored in bytes, two counters per byte.
The presence of the counters allows keys to be removed without
having to recalculate the entire BloomSHA.
As it stands, this class is not thread-safe. Using classes are
expected to provide synchronization.
"""
def __init__(self, m=20): # default is for SHA1
        self._nibble_count = 1 << m  # i.e., 2**m; one 4-bit counter per filter bit
self._counters = bytearray(self._nibble_count // 2)
def clear(self):
""" Zero out all of the counters. Unsynchronized. """
        for i in range(len(self._counters)):
self._counters[i] = 0 # zeroes out two counters
def inc(self, filter_bit):
"""
Increment the nibble, ignoring any overflow.
@param filter_bit offset of bit in the filter
@return value of nibble after operation
"""
if filter_bit < 0:
raise XLFilterError("filter bit offset cannot be negative.")
if filter_bit >= self._nibble_count:
raise XLFilterError("filter bit offset %d out of range" %
filter_bit)
byte_offset = filter_bit // 2
upper_nibble = filter_bit & 1 # interpreted as boolean
cur_byte = self._counters[byte_offset]
if upper_nibble:
value = cur_byte >> 4
else:
value = cur_byte & 0xf
# DEBUG
# print("bit %6d: value 0x%x => " % (filter_bit, value), end='')
# END
if value < 0xf:
value += 1 # increment counter, ignoring any overflow
# DEBUG
# print("0x%x " % value, end='')
# END
if upper_nibble:
self._counters[byte_offset] &= 0x0f # mask off existing value
self._counters[byte_offset] |= (value << 4)
else:
self._counters[byte_offset] &= 0xf0 # mask off low-order nibble
self._counters[byte_offset] |= value
# DEBUG
# print(" counters: 0x%02x => 0x%02x" % (
# cur_byte, self._counters[byte_offset]))
# END
return value
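    # Packing sketch (illustrative): two 4-bit counters share each byte, so
    # filter_bit 5 lives in byte 5 // 2 == 2 and, because 5 & 1 == 1, in the
    # upper nibble of that byte; filter_bit 4 uses the lower nibble.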
def dec(self, filter_bit):
"""
        Decrement the nibble, ignoring any underflow.
        @param filter_bit offset of bit in the filter
@return value of nibble after operation
"""
if filter_bit < 0:
raise XLFilterError("filter bit offset cannot be negative.")
if filter_bit >= self._nibble_count:
raise XLFilterError("filter bit offset %d out of range" %
filter_bit)
byte_offset = filter_bit // 2
upper_nibble = filter_bit & 1 # interpreted as boolean
cur_byte = self._counters[byte_offset]
if upper_nibble:
value = cur_byte >> 4
else:
value = cur_byte & 0xf
# DEBUG
# print("bit %6d: value 0x%x => " % (filter_bit, value), end='')
# END
if value > 0:
value -= 1 # decrement counter, ignoring underflow
# DEBUG
# print("0x%x " % value, end='')
# END
if upper_nibble:
self._counters[byte_offset] &= 0x0f # mask off existing value
self._counters[byte_offset] |= value << 4
else:
self._counters[byte_offset] &= 0xf0 # mask off low-order nibble
self._counters[byte_offset] |= value
# DEBUG
# print(" counters: 0x%02x => 0x%02x" % (
# cur_byte, self._counters[byte_offset]))
# END
return value
# ===================================================================
class CountingBloom(BloomSHA):
"""
Counting version of the Bloom filter.
Adds a 4-bit counter to each bit in the Bloom filter, enabling members
to be removed from the set without having to recreate the filter from
scratch.
"""
# self._cb_lock is CountingBloom lock
# self._lock is BloomSHA lock, so super._lock; MAY NEED isolating
# functions
def __init__(self, m=20, k=8, key_bytes=20):
super().__init__(m, k, key_bytes)
self._counters = NibbleCounters(m)
self._cb_lock = Lock() # coarse lock on nibble counters
def clear(self):
"""
Clear both the underlying filter in the superclass and the
bit counters maintained here.
XXX Possible deadlock.
"""
# XXX ORDER IN WHICH LOCKS ARE OBTAINED MUST BE THE SAME EVERYWHERE.
try:
self._cb_lock.acquire()
super().clear() # BloomSHA; otherwise unsynchronized
self._counters.clear() # nibble counters; otherwise unsync
finally:
self._cb_lock.release()
def insert(self, keysel):
"""
Add a key to the set represented by the filter, updating counters
as it does so. Overflows are silently ignored.
        @param keysel KeySelector for the key (SHA digest)
"""
bytesel, bitsel = keysel.bytesel, keysel.bitsel
filter_bit = []
for i in range(self._kk):
filter_bit.append((bytesel[i] << 3) + bitsel[i])
try:
self._cb_lock.acquire()
super().insert(keysel) # add to BloomSHA
for i in range(self._kk):
self._counters.inc(filter_bit[i]) # increment counter
finally:
self._cb_lock.release()
def remove(self, keysel):
"""
Remove a key from the set, updating counters while doing so.
If the key is not a member of the set, no action is taken.
However, if it is a member (a) the count is decremented,
(b) all bit counters are decremented, and (c) where the bit
counter goes to zero the corresponding bit in the filter is
zeroed.
@param keysel KeySelector for the key to be removed.
"""
if not self.is_member(keysel):
return
bytesel, bitsel = keysel.bytesel, keysel.bitsel
filter_bit = []
for i in range(self._kk):
filter_bit.append((bytesel[i] << 3) + bitsel[i])
try:
self._cb_lock.acquire()
present = self.is_member(keysel)
if present:
for i in range(self._kk):
new_count = self._counters.dec(filter_bit[i])
if new_count == 0:
# mask out the relevant bit
val = self._filter[bytesel[i]] & ~(1 << bitsel[i])
self._filter[bytesel[i]] = val
if self._key_count > 0:
self._key_count -= 1
finally:
self._cb_lock.release()
```
#### File: src/xlcrypto/padding.py
```python
from xlcrypto import XLCryptoError  # raised below; assumed to be exported by the package
def pkcs7_padding(data, block_size):
block_size = int(block_size)
if block_size < 1:
raise XLCryptoError("impossible block size")
if not data:
length = 0
else:
length = len(data)
# we want from 1 to block_size bytes of padding
n_blocks = int((length + block_size - 1) / block_size)
rem = n_blocks * block_size - length
if rem == 0:
rem = block_size
padding = bytearray(rem) # that many null bytes
for iii in range(rem):
padding[iii] = rem # padding bytes set to length of padding
return padding
def add_pkcs7_padding(data, block_size):
if block_size <= 1:
raise XLCryptoError("impossible block size")
else:
padding = pkcs7_padding(data, block_size)
if not data:
out = padding
else:
out = data + padding
return out
# The data passed is presumed to have PKCS7 padding. If possible, return
# a copy of the data without the padding. Return an error if the padding
# is incorrect.
def strip_pkcs7_padding(data, block_size):
if block_size <= 1:
raise XLCryptoError("impossible block size")
elif not data:
raise XLCryptoError("cannot strip padding from empty data")
len_data = len(data)
if len_data < block_size:
raise XLCryptoError("data too short to have any padding")
else:
# examine the very last byte: it must be padding and must
# contain the number of padding bytes added
len_padding = data[len_data - 1]
if len_padding < 1 or len_data < len_padding:
raise XLCryptoError("incorrect PKCS7 padding")
else:
out = data[:len_data - len_padding]
return out
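# Worked example (illustrative): with block_size=8, add_pkcs7_padding(b"hello", 8)
# appends b"\x03\x03\x03" (three bytes, each holding the pad length 3), and
# strip_pkcs7_padding() on the result returns b"hello"; an empty input gains a
# full block of eight 0x08 bytes.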
```
#### File: xlcrypto_py/tests/test_counting_bloom.py
```python
import time
import unittest
from hashlib import sha1, sha256 as sha2
from rnglib import SimpleRNG
from xlcrypto import XLFilterError
from xlcrypto.filters import CountingBloom, KeySelector
RNG = SimpleRNG(time.time())
class TestCountingBloom(unittest.TestCase):
""" Exercise the CountingBloom filter. """
def setUp(self):
self.m = 20 # M = 2**m is number of bits in filter
        self.k = 8  # number of hash functions
self.key_bytes = 20 # so these are SHA1s
self.keys = [] # new byte[100][20]
def test_empty_filter(self):
""" Verify that empty CountingBloom has expected properties. """
fltr = CountingBloom(self.m, self.k, self.key_bytes)
self.assertEqual(0, len(fltr),
"brand new fltr isn't empty")
self.assertEqual(2 << (self.m - 1), fltr.capacity,
"filter capacity is wrong")
def test_param_exceptions(self):
"""
Verify that out of range or otherwise unacceptable constructor
parameters are caught.
"""
# m (m_exp) checks
try:
CountingBloom(-5)
self.fail("didn't catch negative filter size exponent")
except XLFilterError:
pass
try:
CountingBloom(0)
self.fail("didn't catch zero filter size exponent")
except XLFilterError:
pass
# checks on k (hash_count)
try:
CountingBloom(20, -1)
self.fail("didn't catch zero hash function count")
except XLFilterError:
pass
try:
CountingBloom(20, 0)
self.fail("didn't catch zero hash function count")
except XLFilterError:
pass
try:
CountingBloom(3, 0)
self.fail("didn't catch invalid hash function count")
except XLFilterError:
pass
try:
CountingBloom(247, 0)
self.fail("didn't catch invalid hash function count")
except XLFilterError:
pass
try:
CountingBloom(20, 8, -47)
self.fail("didn't catch invalid key_bytes")
except XLFilterError:
pass
try:
CountingBloom(20, 8, 0)
self.fail("didn't catch key_bytes==0")
except XLFilterError:
pass
def do_test_sha_inserts(self, m, k, num_key):
""" Test CountingBloom for specific parameters. """
keys = []
# set up distinct keys, each the hash of a unique value
for i in range(num_key):
sha = sha1()
stuff = RNG.some_bytes(20) # 20 quasi-random bytes
stuff[0] = i # guarantee uniqueness
sha.update(stuff)
keys.append(stuff)
fltr = CountingBloom(m, k, key_bytes=20)
# DEBUG
# print("test_sha_inserts: len of new filter is %d" % len(fltr))
# END
for i in range(num_key):
# DEBUG
if i != len(fltr):
print(" before %d-th insert length of filter is %d" % (
i, len(fltr)))
# END
self.assertEqual(i, len(fltr))
keysel = KeySelector(keys[i], fltr)
self.assertFalse(fltr.is_member(keysel),
"key %d not yet in set, but found!" % i)
fltr.insert(keysel) # add key to fltr
for i in range(num_key):
keysel = KeySelector(keys[i], fltr)
self.assertTrue(fltr.is_member(keysel),
"key " + str(i) +
" has been added but not found in set")
def test_sha_inserts(self):
""" Test CountingBloom for various parameter settings. """
self.do_test_sha_inserts(self.m, self.k, 16) # default values
self.do_test_sha_inserts(14, 8, 16) # stride = 9
self.do_test_sha_inserts(13, 8, 16) # stride = 8
self.do_test_sha_inserts(12, 8, 16) # stride = 7
self.do_test_sha_inserts(14, 7, 16) # stride = 9
self.do_test_sha_inserts(13, 7, 16) # stride = 8
self.do_test_sha_inserts(12, 7, 16) # stride = 7
self.do_test_sha_inserts(14, 6, 16) # stride = 9
self.do_test_sha_inserts(13, 6, 16) # stride = 8
self.do_test_sha_inserts(12, 6, 16) # stride = 7
self.do_test_sha_inserts(14, 5, 16) # stride = 9
self.do_test_sha_inserts(13, 5, 16) # stride = 8
self.do_test_sha_inserts(12, 5, 16) # stride = 7
def do_test_sha2_inserts(self, m, k, num_key):
""" Test CountingBloom2 for specific parameters. """
keys = []
# set up distinct keys, each the hash of a unique value
for i in range(num_key):
sha = sha2()
stuff = RNG.some_bytes(32) # 32 quasi-random bytes
stuff[0] = i # guarantee uniqueness
sha.update(stuff)
keys.append(stuff)
fltr = CountingBloom(m, k, key_bytes=32)
# DEBUG
# print("test_sha2_inserts: len of new filter is %d" % len(fltr))
# END
for i in range(num_key):
# DEBUG
if i != len(fltr):
print(" before %d-th insert length of filter is %d" % (
i, len(fltr)))
# END
self.assertEqual(i, len(fltr))
keysel = KeySelector(keys[i], fltr)
self.assertFalse(fltr.is_member(keysel),
"key %d not yet in set, but found!" % i)
fltr.insert(keysel) # add key to fltr
for i in range(num_key):
keysel = KeySelector(keys[i], fltr)
self.assertTrue(fltr.is_member(keysel),
"key %d has been added but not found in set" % i)
def test_sha2_inserts(self):
""" Test SHA2 version of CountingBloom filter. """
self.do_test_sha2_inserts(32, 8, 16)
self.do_test_sha2_inserts(16, 16, 16)
if __name__ == '__main__':
unittest.main()
```
#### File: xlcrypto_py/tests/test_nibble_counters.py
```python
import time
import unittest
# from hashlib import sha1, sha256 as sha2
from rnglib import SimpleRNG
from xlcrypto import XLFilterError
from xlcrypto.filters import BloomSHA, NibbleCounters
RNG = SimpleRNG(time.time())
class TestNibbleCounters(unittest.TestCase):
"""
Tests the counters associated with Bloom filters for sets whose members
are 20- or 32-byte SHA digests.
"""
def do_nibble_test_bit(self, counters, filter_bit):
""" Count up through all possible values and beyond. """
value = 0
# DEBUG
# print("do_nibble_test_bit: filter_bit %6d" % filter_bit)
# END
for i in range(18):
# DEBUG
# print(" up %2d" % i)
# END
value = counters.inc(filter_bit)
if i < 15:
self.assertEqual(value, i + 1,
"bit %d: error adding 1 to %d" % (
filter_bit, i))
else:
self.assertEqual(value, 15,
"bit %d: overflow error" % filter_bit)
# count back down
for i in range(18):
# DEBUG
# print(" down %2d" % i)
# END
value = counters.dec(filter_bit)
if i < 15:
self.assertEqual(value, 14 - i,
("bit %d filter_bit: " +
"error subtracting 1 from %d") % (
filter_bit, 15 - i))
else:
self.assertEqual(value, 0,
"bit %d: underflow error" % filter_bit)
def do_nibble_test(self, m):
""" Run tests for specific value of m. """
fltr = BloomSHA(m) # used only to calculate capacity
fltr_size = fltr.capacity
self.assertEqual(fltr_size, 1 << m)
counters = NibbleCounters(m)
# verify we get exceptions for bits out of range
        try:
            _ = counters.inc(-1)
            self.fail("didn't catch negative filter_bit in inc()")
        except XLFilterError:
            pass
        try:
            _ = counters.inc(fltr_size)
            self.fail("didn't catch out-of-range filter_bit in inc()")
        except XLFilterError:
            pass
        try:
            _ = counters.dec(-1)
            self.fail("didn't catch negative filter_bit in dec()")
        except XLFilterError:
            pass
        try:
            _ = counters.dec(fltr_size)
            self.fail("didn't catch out-of-range filter_bit in dec()")
        except XLFilterError:
            pass
# test top bits, bottom bits, and some in the middle
self.do_nibble_test_bit(counters, 0)
self.do_nibble_test_bit(counters, 1)
self.do_nibble_test_bit(counters, fltr_size - 2)
self.do_nibble_test_bit(counters, fltr_size - 1)
for _ in range(4):
bit = 2 + RNG.next_int16(fltr_size - 4)
self.do_nibble_test_bit(counters, bit)
def test_nibs(self):
""" Run tests for various values of m. """
self.do_nibble_test(16)
self.do_nibble_test(17)
self.do_nibble_test(20)
self.do_nibble_test(24)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jddixon/xlnodeid_py",
"score": 3
}
|
#### File: src/xlnodeid/__init__.py
```python
from binascii import b2a_hex
from copy import deepcopy
from xlattice import SHA1_BIN_LEN, SHA2_BIN_LEN, SHA3_BIN_LEN
__version__ = '0.0.12'
__version_date__ = '2018-03-23'
__all__ = ['__version__', '__version_date__',
'XLNodeIDError', 'XLNodeID']
class XLNodeIDError(RuntimeError):
""" General purpose exception for the package. """
class XLNodeID(object):
""" Unique identifier for an XLattice Node. """
def __init__(self, ident):
if not ident:
raise XLNodeIDError("id may not be None or empty")
if not isinstance(ident, (bytearray, bytes)):
raise XLNodeIDError("NodeID value must be bytes-like")
length = len(ident)
if length != SHA1_BIN_LEN and length != SHA2_BIN_LEN and \
length != SHA3_BIN_LEN:
raise XLNodeIDError("invalid nodeID length %d" % length)
# it's a valid ID, so deep copy it
self._node_id = bytes(deepcopy(ident))
@property
def value(self):
""" Return a deep copy of the underlying byte sequence. """
return deepcopy(self._node_id)
@staticmethod
def is_valid_node_id(val):
""" Return whether val is a valid XLNodeID value. """
if not val:
return False
if not isinstance(val, bytes) and not isinstance(val, bytearray):
return False
length = len(val)
if length != SHA1_BIN_LEN and length != SHA2_BIN_LEN and \
length != SHA3_BIN_LEN:
return False
return True
def clone(self):
""" Return a deep copy of the XLNodeID instance. """
return XLNodeID(self._node_id)
def __eq__(self, other):
if not isinstance(other, XLNodeID):
return False
return self._node_id == other.value
def __lt__(self, other):
return self._node_id < other.value
def __str__(self):
        return b2a_hex(self._node_id).decode('ascii')  # __str__ must return str, not bytes
def __len__(self):
return len(self._node_id)
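# Usage sketch (illustrative; assumes SHA1_BIN_LEN == 20):
#   nid = XLNodeID(bytes(20))        # a valid, if all-zero, 20-byte node ID
#   len(nid) == 20 and nid == nid.clone()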
```
|
{
"source": "jddixon/xltransport_py",
"score": 3
}
|
#### File: xltransport_py/tests/test_ipv4addr.py
```python
import time
import unittest
from rnglib import SimpleRNG
from xltransport import IPv4Address, AddressError
class TestIPv4Address(unittest.TestCase):
""" Exercise code handling IPv4 addresses. """
A1234 = '1.2.3.4'
A1235 = '1.2.3.5'
def setUp(self):
self.rng = SimpleRNG(time.time())
self.v4addr = None
self.v4addr2 = None
def tearDown(self):
pass
def test_bad_addr(self):
""" Verify that bad addresses are rejected. """
try:
IPv4Address("1.2.3.4", -92)
self.fail("didn't catch out of range port number -92")
except AddressError:
# success
pass
try:
IPv4Address("1.2.3.4", 65536)
self.fail("didn't catch out of range port number 2^16")
except AddressError:
# success
pass
try:
IPv4Address("1.2.3.4.5", 5)
self.fail("didn't catch 1.2.3.4.5")
# except AddressError:
except OSError:
# success
pass
# XXX SPECIFICATION ERROR: arg should be bytes-like
# self.assertFalse(IPv4Address.is_valid_address(None))
# self.assertFalse(IPv4Address.is_valid_address("0"))
# self.assertFalse(IPv4Address.is_valid_address("0.0"))
# self.assertFalse(IPv4Address.is_valid_address("0.0.0"))
# self.assertTrue(IPv4Address.is_valid_address("0.0.0.0"))
# self.assertFalse(IPv4Address.is_valid_address("0.0.0.0.0"))
def test_v4_addr_with_port(self):
""" Test ipv4 addresses with a port number. """
v4addr = IPv4Address(self.A1234, 97)
self.assertIsNotNone(v4addr)
self.assertEqual(97, v4addr.port)
# HACKED FROM JAVA, NEEDS CLEANING UP
# iaddr = v4addr.get_inet_address() # InetAddress
# byte_addr = v4addr.get_ip_address()
# self.assertEqual(4, byte_addr.length)
# addr_from_ia = iaddr.get_address()
# self.assertEqual(4, addr_from_ia.length)
# for ndx in range(4):
# self.assertEqual(byte_addr[ndx], addr_from_ia[ndx])
# v4addr2 = IPv4Address(byte_addr, 97)
# self.assertIsNotNone(v4addr2)
# self.assertEqual(v4addr, v4addr2)
# self.assertEqual("1.2.3.4:97", v4addr2.__str__())
def test_equal(self):
""" Confirm that __equal__() works. """
v4addr = IPv4Address(self.A1234, 52)
self.assertEqual(v4addr, v4addr)
v4addr2 = IPv4Address(self.A1235, 52) # different IP
self.assertIsNotNone(v4addr)
self.assertFalse(v4addr == v4addr2)
v4addr2 = IPv4Address(self.A1234, 53) # different port
self.assertFalse(v4addr == v4addr2)
v4addr2 = IPv4Address(self.A1234, 52) # same IP and port
self.assertEqual(v4addr, v4addr2)
def test_private_ips(self):
""" Verify that private addresses are recognized as such. """
# Web server running in 10/8
v4addr = IPv4Address("10.0.0.1", 80)
addr = v4addr.nbo_host
self.assertEqual(10, addr[0])
self.assertEqual(0, addr[1])
self.assertEqual(0, addr[2])
self.assertEqual(1, addr[3])
self.assertTrue(IPv4Address.is_private(addr))
self.assertEqual(80, v4addr.port)
# Web server running in 128.0/16
v4addr = IPv4Address("172.16.31.10", 8080)
addr = v4addr.nbo_host
self.assertEqual(128, addr[0])
self.assertEqual(0, addr[1])
self.assertEqual(12, addr[2])
self.assertEqual(121, addr[3])
self.assertTrue(IPv4Address.is_private(addr))
self.assertEqual(8080, v4addr.port)
addr = IPv4Address("127.255.0.4", 8080).nbo_host
self.assertFalse(IPv4Address.is_private(addr))
addr = IPv4Address("172.16.58.3", 8080).nbo_host
self.assertFalse(IPv4Address.is_private(addr))
# Web server running in 172.16/12
v4addr = IPv4Address("172.30.0.1", 443)
addr = v4addr.nbo_host
self.assertEqual(172, addr[0])
self.assertEqual(30, addr[1])
self.assertEqual(0, addr[2])
self.assertEqual(1, addr[3])
self.assertTrue(IPv4Address.is_private(addr))
self.assertEqual(443, v4addr.port)
addr = IPv4Address("192.168.127.12", 443).nbo_host
self.assertFalse(IPv4Address.is_private(addr))
addr = IPv4Address("172.16.58.3", 443).nbo_host
self.assertFalse(IPv4Address.is_private(addr))
def test_rfc3330(self):
""" Verify that RFC 3330 addresses are recognized. """
test = bytearray([0, 0, 0, 1]) # gets modified
# 0/8
self.assertTrue(IPv4Address.is_rfc3330(test))
self.assertTrue(IPv4Address.is_rfc3330_not_private(test))
# 14/8
test[0] = 14
self.assertTrue(IPv4Address.is_rfc3330(test))
# 24/8
test[0] = 24
self.assertTrue(IPv4Address.is_rfc3330(test))
# 127/0
test[0] = 127
self.assertTrue(IPv4Address.is_rfc3330(test))
# 169.254/16, link local
test = IPv4Address("169.254.0.1", 443).nbo_host
self.assertEqual(169, test[0])
self.assertEqual(254, test[1])
self.assertTrue(IPv4Address.is_rfc3330(test))
self.assertFalse(IPv4Address.is_private(test))
# 192.0.2.0/24, test net
test = IPv4Address("192.0.2.14", 443).nbo_host
self.assertEqual(192, test[0])
self.assertTrue(IPv4Address.is_rfc3330(test))
self.assertFalse(IPv4Address.is_private(test))
# 192.168.127.12/24, 6to4 relay anycast
test = IPv4Address("192.168.3.11", 443).nbo_host
self.assertEqual(192, test[0])
self.assertTrue(IPv4Address.is_rfc3330(test))
# 198.18/15, benchmark testing
test = IPv4Address("198.18.99.14", 443).nbo_host
self.assertEqual(198, test[0])
self.assertTrue(IPv4Address.is_rfc3330(test))
test = IPv4Address("198.19.99.14", 443).nbo_host
self.assertTrue(IPv4Address.is_rfc3330(test))
# 224/4, multicast
test = IPv4Address("172.16.58.3", 443).nbo_host
self.assertEqual(224, test[0])
self.assertTrue(IPv4Address.is_rfc3330(test))
# 240/4, reserved
v4addr = IPv4Address("240.18.99.14", 443)
test = v4addr.nbo_host
self.assertEqual(240, test[0])
self.assertTrue(IPv4Address.is_rfc3330(test))
self.assertEqual("240.18.99.14:443", v4addr.__str__())
# XXX DNS LOOKUP NOT YET SUPPORTED
# def test_constructor_with_host_name(self):
# v4addr = IPv4Address("www.xlattice.org", 80)
# test = v4addr.nbo_host
# # known to be globally routable ;-)
# self.assertFalse(IPv4Address.is_rfc3330(test))
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jddixon/xlutil_py",
"score": 2
}
|
#### File: xlutil_py/tests/test_cft_log_for_py.py
```python
import os
import sys
import time
import unittest
import xlattice
from rnglib import SimpleRNG
sys.path.insert(0, 'build/lib.linux-x86_64-3.6')  # for the .so; must precede the import
# pylint: disable=no-name-in-module
from cFTLogForPy import (
    init_cft_logger, open_cft_log, log_msg, close_cft_logger)
class TestCFTLogForPy(unittest.TestCase):
""" Test the C fault-tolerant log for Python. """
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
def unique_file_name(self):
""" Create a locally unique file name under tmp/. """
log_file = "tmp/foo%04x" % self.rng.next_int16()
while os.path.exists(log_file):
log_file = "tmp/foo%04x" % self.rng.next_int16()
return log_file
# actual unit tests #############################################
def test_version(self):
""" Display library version info. """
version = xlattice.__version__
print("VERSION %s" % version, end=' ')
if version >= '0.5.1':
print(" %s" % xlattice.__version_date__)
else:
self.fail("have loaded an old version of the library")
def test_open_and_close(self):
""" Test open and close functions. """
log_file = self.unique_file_name()
status = init_cft_logger()
status = open_cft_log(log_file)
self.assertEqual(0, status)
time.sleep(0.2)
if status:
print("open_cft_log failed, status is %d" % status)
else:
time.sleep(0.2)
status = close_cft_logger()
time.sleep(0.2)
self.assertEqual(0, status)
        self.assertTrue(os.path.exists(log_file))
class DumbLogger:
""" A very simple logger for testing. """
def __init__(self):
self.log_file = "tmp/dumb.log"
def open_log(self):
""" Open the dummy logger. """
init_cft_logger()
return open_cft_log(self.log_file)
def close_log(self):
""" Close the dummy logger. """
return close_cft_logger()
def test_dumb_logger(self):
""" Run some simple tests of the DumbLogger. """
logger = self.DumbLogger()
time.sleep(0.1)
status = logger.open_log()
self.assertEqual(0, status)
time.sleep(0.1)
status = logger.close_log()
time.sleep(0.1)
self.assertEqual(0, status)
self.assertTrue(os.path.exists(logger.log_file))
def test_initialization(self):
"""Tests init, log messages, close, with sleeps. """
log_file = self.unique_file_name()
init_cft_logger()
log_ndx = open_cft_log(log_file)
if log_ndx:
print("open_cft_logger failed, log_ndx is %d" % log_ndx)
else:
log_msg(log_ndx, "now is the winter of our discontent\n")
log_msg(log_ndx, "made glorious summer by this son of York\n")
log_msg(log_ndx, "buffers are temporarily multiples of 64B\n")
log_msg(log_ndx,
"... so these few message should overflow a page\n")
# print "ABOUT TO WRITE BLOCK 0"; sys.stdout.flush()
for n__ in range(128):
# ....x....1....x....2....x....3....x....4...
log_msg(
log_ndx,
"padding ljlkjk;ljlj;k;lklj;j;kjkljklj %04x\n" %
n__)
# we see 0 .. 87 consistently, so 88 * 43 = 3784 bytes
# have been written and the next line would take it to
# print '%3d' % n, ; sys.stdout.flush() # DEBUG
# print # DEBUG
# print "BLOCK 0 WRITTEN"; sys.stdout.flush()
time.sleep(0.2) # NOTE
for n__ in range(128):
log_msg(
log_ndx, "padding ljlkjk;ljlj;k;lklj;j;kjkljklj %04x\n" %
(n__ + 128))
# print "BLOCK 1 WRITTEN"; sys.stdout.flush()
time.sleep(0.2) # NOTE
for n__ in range(128):
log_msg(
log_ndx, "padding ljlkjk;ljlj;k;lklj;j;kjkljklj %04x\n" %
(n__ + 256))
# print "BLOCK 2 WRITTEN"; sys.stdout.flush()
time.sleep(0.2) # NOTE
for n__ in range(128):
log_msg(
log_ndx, "padding ljlkjk;ljlj;k;lklj;j;kjkljklj %04x\n" %
(n__ + 384))
# print "BLOCK 3 WRITTEN"; sys.stdout.flush()
# close_cft_logger(None)
# print "BRANCHING TO close_cft_logger"; sys.stdout.flush() # NOT
# :1SEEN
close_cft_logger()
# print "close_cft_logger returns %s" % str(junk);
# sys.stdout.flush()
time.sleep(0.2) # NOTE_AT_END
if __name__ == '__main__':
unittest.main()
```
#### File: xlutil_py/tests/test_cft_log_obj.py
```python
import os
import sys
import time
import unittest
import xlattice
sys.path.insert(0, 'build/lib.linux-x86_64-3.4')  # for the .so; must precede the imports
# pylint: disable=no-name-in-module
import cFTLogForPy
# pylint: disable=no-name-in-module
from cFTLogForPy import (
    # open_cft_log, log_msg not imported
    init_cft_logger, close_cft_logger)
from rnglib import SimpleRNG
class TestCLogObj(unittest.TestCase):
""" Test the C fault-tolerant log for python. """
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
# utility functions #############################################
def unique_file_name(self):
""" Create a file under tmp/ with a locally unique name. """
log_file = "tmp/foo%04x" % self.rng.next_int16()
while os.path.exists(log_file):
log_file = "tmp/foo%04x" % self.rng.next_int16()
return log_file
# actual unit tests #############################################
def test_version_and_max_log(self):
""" Display the library version number. """
version = xlattice.__version__
print("VERSION %s" % version, end=' ')
if version >= '0.5.1':
print(" %s" % xlattice.__version_date__)
# pylint: disable=no-member
self.assertEqual(16, cFTLogForPy.max_log)
else:
print(" THIS IS AN OLD VERSION OF THE LIBRARY")
def test_ctor(self):
"""
Verify that the constructor creates a logger with sensible props.
"""
init_cft_logger()
log_file = self.unique_file_name()
# pylint: disable=no-member
obj = cFTLogForPy.LogForPy()
obj.init(log_file)
self.assertEqual(0, obj.ndx())
self.assertEqual(0, obj.count())
self.assertEqual(log_file, obj.log_file())
def test_count(self):
""" Verify that the message count is correct. """
messages = ["now is the winter of our discontent\n",
"made glorious summer by this son of York\n",
"and all the clouds that lowered upon our house\n",
"... and so forth and so on\n", ]
log_file = self.unique_file_name()
# this 3-line stanza needs to be shortened
init_cft_logger()
obj = cFTLogForPy.LogForPy() # pylint: disable=no-member
obj.init(log_file)
self.assertEqual(log_file, obj.log_file()) # must follow init()
log_ndx = obj.ndx()
self.assertEqual(0, log_ndx)
expected = 0
count = obj.count()
self.assertEqual(expected, count)
for msg in messages:
# pylint: disable=no-member
obj.log_msg(msg)
expected += 1
count = obj.count()
self.assertEqual(expected, count)
# XXX OLD MODULE-LEVEL FUNC
status = close_cft_logger(log_ndx)
# print("close_cft_logger returns %s" % str(status))
self.assertEqual(status, 0)
if __name__ == '__main__':
unittest.main()
```
#### File: xlutil_py/tests/test_context.py
```python
import unittest
from xlattice import Context
class TestContext(unittest.TestCase):
""" Verify the XLattice Context functions as expected. """
def setUp(self):
self.ctx = Context()
def tearDown(self):
pass
def test_empty(self):
self.assertEqual(0, len(self.ctx))
self.assertEqual(None, self.ctx.parent)
self.assertEqual(None, self.ctx.lookup("foo"))
def test_adding_nones(self):
try:
self.ctx.bind(None, "bar")
self.fail("bind with None name succeeded!")
except Exception:
# success
pass
try:
self.ctx.bind("foo", None)
self.fail("bind with None object succeeded!")
except Exception:
# success
pass
def test_simple_bindings(self):
self.ctx.bind("foo", "that was foo")
self.ctx.bind("bar", "that was bar")
self.assertEqual(2, self.ctx.size())
self.assertEqual("that was foo", self.ctx.lookup("foo"))
self.assertEqual("that was bar", self.ctx.lookup("bar"))
def test_nested_contexts(self):
ctx1 = Context(self.ctx)
ctx2 = Context(ctx1)
self.assertTrue(self.ctx == ctx1.parent)
self.assertTrue(ctx1 == ctx2.parent)
self.ctx.bind("foo", "bar0")
ctx1.bind("foo", "bar1")
ctx2.bind("foo", "bar2")
self.assertEqual("bar2", ctx2.lookup("foo"))
ctx2.unbind("foo")
self.assertEqual("bar1", ctx2.lookup("foo"))
ctx1.unbind("foo")
self.assertEqual("bar0", ctx2.lookup("foo"))
self.ctx.unbind("foo")
self.assertIsNone(ctx2.lookup("foo"))
self.ctx.bind("wombat", "<NAME>")
self.assertEqual("<NAME>", ctx2.lookup("wombat"))
# ctx99 = ctx2.parent = None
ctx2.parent = None
# self.assertEqual(ctx99, ctx2)
self.assertIsNone(ctx2.parent)
self.assertIsNone(ctx2.lookup("wombat")) # broke chain of contexts
if __name__ == "__main__":
unittest.main()
```
#### File: xlutil_py/tests/test_lib_ev.py
```python
import os
import signal
import sys
import time
import unittest
sys.path.insert(0, 'build/lib.linux-x86_64-2.7')  # for the .so; must precede the pyev import
import pyev
from rnglib import SimpleRNG
def sig_cb(watcher, revents):
""" Handle keyboard interrupt. """
print("\n<KEYBOARD INTERRUPT>")
loop = watcher.loop
# pylint inexplicably reports EVBREAK_ALL not a member of pyev
# pylint: disable=no-member
loop.stop(pyev.EVBREAK_ALL)
def guillotine_cb(watcher, revents):
""" Kill the event loop. """
# pylint: disable=no-member
watcher.loop.stop(pyev.EVBREAK_ALL)
def timer_cb(watcher, revents):
""" Timed callback, right out of the book. """
watcher.data += 1
print("timer.data: {0}".format(watcher.data))
print("timer.loop.iteration: {0}".format(watcher.loop.iteration))
print("timer.loop.now(): {0}".format(watcher.loop.now()))
TICK = 0.051
LIFETIME = 1.720
class TestLibev(unittest.TestCase):
""" Test libev functions used by this package. """
def setUp(self):
self.fd_ = None
self.log_name = None
self.loop = None
self.rng = SimpleRNG(time.time())
def tearDown(self):
if self.loop:
self.loop.stop()
# utility functions #############################################
def setup_async_logging(self):
""" Set up async loggig for a test. """
os.makedirs('tmp', exist_ok=True, mode=0o755)
# pylint: disable=no-member
self.loop = pyev.default_loop()
self.log_name = 'tmp/log%05x' % self.rng.next_int32(1024 * 1024)
# never used, never closed !
self.fd_ = os.open(self.log_name,
os.O_CREAT | os.O_APPEND | os.O_NONBLOCK, # flags
0o644) # mode
# set up watchers ##################################
# ticks every second
timer = self.loop.timer(0, TICK, timer_cb, 0)
timer.start()
# kills the event loop after LIFETIME seconds
life_is_short = self.loop.timer(LIFETIME, 0, guillotine_cb, 0)
life_is_short.start()
# lets the keyboard end things early
sig_handler = self.loop.signal(signal.SIGINT, sig_cb)
sig_handler.start()
self.loop.start()
# actual unit tests #############################################
def test_async_log(self):
""" Test the async log -- in a primitive way. """
t00 = time.time()
self.setup_async_logging()
t01 = time.time()
delta_t = 1.0 * (t01 - t00)
self.assertTrue(delta_t >= LIFETIME and delta_t < LIFETIME + 0.005)
if __name__ == '__main__':
unittest.main()
```
#### File: xlutil_py/tests/test_two_logs.py
```python
import os
import shutil
import sys
import unittest
from xlutil.ftlog import LogMgr
sys.path.insert(0, 'build/lib.linux-x86_64-3.4') # for the .so
class TestTwoLogs(unittest.TestCase):
""" Test concurrent use of more than one log. """
def setUp(self):
pass
def tearDown(self):
pass
# utility functions #############################################
# actual unit tests #############################################
def test_with_single_message(self):
""" Log a single message, close log, verify correct state. """
path_to_logs = os.path.join('tmp', 'logs')
if os.path.exists(path_to_logs):
shutil.rmtree(path_to_logs)
# -- open ---------------------------------------------------
def show_log_handle(handle):
""" Dump the log handle for testing. """
print("HANDLE: %s as %d writing to %s" % (
handle.base_name, handle.lfd, handle.log_file,))
mgr = LogMgr(path_to_logs)
foo_log = mgr.open('foo')
foo_log.log('oh hello, foo')
show_log_handle(foo_log) # DEBUG
bar_log = mgr.open('bar')
bar_log.log('oh hello, bar')
# showLogHandle(bar_log) # DEBUG
# print("TEST_TWO: closing")
sys.stdout.flush()
# -- close --------------------------------------------------
mgr.close()
# -- test our expectations ----------------------------------
expected_log_file = os.path.join(path_to_logs, 'foo.log')
self.assertEqual(expected_log_file, foo_log.log_file_name)
self.assertTrue(os.path.exists(expected_log_file))
with open(expected_log_file, 'r') as file:
contents = file.read()
contents = contents.strip()
self.assertTrue(contents.endswith('oh hello, foo')) # END FOO
if bar_log:
expected_log_file = os.path.join(path_to_logs, 'bar.log')
self.assertEqual(expected_log_file, bar_log.log_file_name)
self.assertTrue(os.path.exists(expected_log_file))
with open(expected_log_file, 'r') as file:
contents = file.read()
contents = contents.strip()
self.assertTrue(contents.endswith('oh hello, bar')) # END BAR
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jddobson/PythonTesting",
"score": 4
}
|
#### File: firstProject/src/collatz.py
```python
def run():
val = int(input())
while(val > 1):
val = compute(val)
def compute(value):
if(isEven(value)):
value = value // 2
print(value)
return value
else:
value = 3 * value + 1
print(value)
return value
def isEven(value):
if(value % 2 == 0):
return True
else:
return False
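# Trace sketch (illustrative): compute(6) prints and returns 3; successive
# calls then yield 10, 5, 16, 8, 4, 2, 1, so run() with input 6 prints the
# Collatz sequence 3 10 5 16 8 4 2 1 and stops.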
```
|
{
"source": "jddocea/threatminer-for-sdl",
"score": 3
}
|
#### File: jddocea/threatminer-for-sdl/setup.py
```python
import os.path
import sys
import json
# get user input and return the updated configuration
def configure(config):
# if config.json was properly setup
if config:
print("NOTE: To delete an existing field, Enter a space when prompted for that field.\n")
host = raw_input("Host [" + config["databaseConnection"]["host"] + "]: ")
user = raw_input("Username [" + config["databaseConnection"]["user"] + "]: ")
password = raw_input("Password [" + config["databaseConnection"]["password"] + "]: ")
db = raw_input("DB [" + config["databaseConnection"]["db"] + "]: ")
charset = raw_input("Charset [" + config["databaseConnection"]["charset"] + "]: ")
port = raw_input("Port [" + config["databaseConnection"]["port"] + "]: ")
caCert = raw_input("CA File (For ssl connection to database. If not using ssl, press space and then enter.) [" + config["databaseConnection"]["caCert"] + "]: ")
isDeveloping = raw_input("Is Developing? (If deploying to production, press space and then enter.) [" + config["databaseConnection"]["isDeveloping"] + "]: ")
if host:
if host.isspace():
config["databaseConnection"]["host"] = ""
else:
config["databaseConnection"]["host"] = host
if user:
if user.isspace():
config["databaseConnection"]["user"] = ""
else:
config["databaseConnection"]["user"] = user
if password:
if password.isspace():
config["databaseConnection"]["password"] = ""
else:
config["databaseConnection"]["password"] = password
if db:
if db.isspace():
config["databaseConnection"]["db"] = ""
else:
config["databaseConnection"]["db"] = db
if charset:
if charset.isspace():
config["databaseConnection"]["charset"] = ""
else:
config["databaseConnection"]["charset"] = charset
if port:
if port.isspace():
config["databaseConnection"]["port"] = ""
else:
config["databaseConnection"]["port"] = port
if caCert:
if caCert.isspace():
config["databaseConnection"]["caCert"] = ""
else:
config["databaseConnection"]["caCert"] = caCert
if isDeveloping:
if isDeveloping.isspace():
config["databaseConnection"]["isDeveloping"] = ""
else:
config["databaseConnection"]["isDeveloping"] = isDeveloping
else:
print("Enter database configuration information:\n")
config = {"databaseConnection": {"db": "", "host": "", "user": "", "password": "", "charset": "", "port": ""}}
config["databaseConnection"]["host"] = raw_input("Host: ")
config["databaseConnection"]["user"] = raw_input("Username: ")
config["databaseConnection"]["password"] = raw_input("Password: ")
config["databaseConnection"]["db"] = raw_input("DB: ")
config["databaseConnection"]["charset"] = raw_input("Charset: ")
config["databaseConnection"]["port"] = raw_input("Port: ")
config["databaseConnection"]["caCert"] = raw_input("CA File (For ssl connection to database. If not using ssl, hit enter.): ")
config["databaseConnection"]["isDeveloping"] = raw_input("Is Developing (If deploying to production, hit enter.)?: ")
return config
# if config is valid, return configuration information currently in config.json
# otherwise return None
def getConfig():
# open config.json and load into config
try:
configFile = open("config.json")
except:
return None
with configFile as f:
config = json.load(f)
configFile.close()
if "databaseConnection" not in config:
return None
keys = ["host", "user", "password", "db", "charset", "port"]
for key in keys:
if key not in config["databaseConnection"]:
return None
return config
# Overwrites config.json with new config information
def writeConfig(config):
with open('config.json', 'w+') as outfile:
json.dump(config, outfile)
with open('scripts/config.json', 'w+') as outfile:
json.dump(config, outfile)
with open('server/config.json', 'w+') as outfile:
json.dump(config, outfile)
with open('database/config.json', 'w+') as outfile:
json.dump(config, outfile)
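# For reference, the written config.json has this shape (values illustrative):
# {
#   "databaseConnection": {
#     "host": "localhost", "user": "sdl_user", "password": "...", "db": "threatminer",
#     "charset": "utf8", "port": "3306", "caCert": "", "isDeveloping": "yes"
#   }
# }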
def init():
writeConfig(configure(getConfig()))
init()
```
|
{
"source": "jddunn/dementia-progression-analysis",
"score": 3
}
|
#### File: jddunn/dementia-progression-analysis/test_network.py
```python
import os
import argparse
import numpy as np
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras import applications
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
import math
import cv2
import random
import imutils
top_model_weights_path = ""
train_data_dir = "data/train"
validation_data_dir = "data/validation"
data_type = ""
img_width, img_height = 256, 256
def predict(image_path):
print("Predicting " + image_path)
filename = image_path.split('/')[len(image_path.split('/'))-1]
# load the class_indices saved in the earlier step
class_dictionary = np.load('oasis_cross-sectional_class_indices' + '_' + data_type + '.npy').item()
num_classes = len(class_dictionary)
orig = cv2.imread(image_path)
orig = imutils.resize(orig, width=600) # Make images bigger (training data is in high-quality format)
image = load_img(image_path, target_size=(img_width, img_height))
# image = cv2.resize(image, (img_width, img_height), interpolation = cv2.INTER_NEAREST)
image = img_to_array(image)
# important! otherwise the predictions will be '0'
image = image / 255
image = np.expand_dims(image, axis=0)
# build the VGG16 network
model = applications.VGG16(include_top=False, weights='imagenet')
# get the bottleneck prediction from the pre-trained VGG16 model
bottleneck_prediction = model.predict(image)
# build top model
model = Sequential()
model.add(Flatten(input_shape=bottleneck_prediction.shape[1:]))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='sigmoid'))
model.load_weights(top_model_weights_path)
# use the bottleneck prediction on the top model to get the final
# classification
probs = model.predict_proba(bottleneck_prediction)
classes = model.predict_classes(bottleneck_prediction)
# print(str(classes))
# print(str(probs))
class_predicted = model.predict_classes(bottleneck_prediction)
# print(str(class_predicted))
probabilities = model.predict_proba(bottleneck_prediction)
inID = class_predicted[0]
# print()
inv_map = {v: k for k, v in class_dictionary.items()}
label = str(inv_map[inID]) + " - " + str(probs)
print(label)
cv2.putText(orig, label, (20, 45),
cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
# cv2.imshow("Classification", orig)
cv2.imwrite("test_results" + "/" + inv_map[inID] + "/" + filename, orig)
cv2.waitKey(0)
cv2.destroyAllWindows()
def send_from_dir(path):
is_dir = os.path.isdir(path)
if is_dir:
for each in os.listdir(path):
predict(path + "/" + each)
else:
predict(path)
if __name__ == '__main__':
# Command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--file", required=True,
help="path to image file or directory of images to test")
ap.add_argument("-t", "--type", required=True,
help="type of dataset / model to train (options: FSL_SEG, PROCESSED, or RAW)")
args = vars(ap.parse_args())
data_type = args["type"]
if data_type == 'FSL_SEG':
img_width, img_height = 176, 208
train_data_dir = train_data_dir + "/" + data_type
validation_data_dir = validation_data_dir + "/" + data_type
top_model_weights_path = "oasis_cross-sectional" + "_" + data_type + ".h5"
path = args["file"]
send_from_dir(path)
cv2.destroyAllWindows()
```
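A hypothetical invocation of the script above, inferred from its argparse arguments; the data paths are assumptions, while the weights and class-indices filenames follow the patterns used in the code.

```python
# Hypothetical command lines (paths are assumptions):
#   python test_network.py --file data/validation/FSL_SEG --type FSL_SEG
#   python test_network.py --file data/validation/RAW/some_scan.png --type RAW
# --type selects the trained weights file oasis_cross-sectional_<type>.h5 and the
# class-indices file oasis_cross-sectional_class_indices_<type>.npy, both of which
# must already exist from training.
```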
|
{
"source": "jddunn/restless",
"score": 2
}
|
#### File: components/classifier/classifier.py
```python
import sys
import os
import time
import concurrent.futures
import asyncio
# make dep imports work when running in dir and in outside scripts
PACKAGE_PARENT = "../../.."
SCRIPT_DIR = os.path.dirname(
os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__)))
)
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
try:
from restless.components.utils import utils as utils
from restless.components.nlp import NLP
except Exception as e:
from ..utils import utils as utils
from ..nlp import NLP
logging = utils.logger
logger = utils.logger.logger
colored = logging.colored
same_line = logging.same_line
flush = logging.flush
misc = utils.misc
from multiprocessing import cpu_count
class Classifier:
def __init__(self, load_default_hann_model=False):
self.nlp = NLP(load_default_hann_model=load_default_hann_model)
return
def get_classification_results(
self, fname: str, benign: float, malicious: float
) -> None:
parent_path_to_fname = misc.get_parent_path_to_fname(fname)
        short_fname = fname.split("/")[-1]
classified_file_result = {
"filename": fname,
"benign": benign,
"malicious": malicious,
"time_scanned": misc.make_ts(),
}
        # Colorized percentages
colored_fname = (
colored(parent_path_to_fname, "gray")
+ colored("/", "bold")
+ " "
+ colored(short_fname, ["gray", "underline", "bold"])
)
colored_benign = misc.prob_to_percentage(benign)
colored_malicious = misc.prob_to_percentage(malicious)
clr_b = "d_gray" # benign color(s)
clr_m = "d_gray" # malicious color(s)
if benign > 0.3:
clr_b = "yellow" if benign < 0.45 else ["yellow", "bold"]
if benign >= 0.6:
clr_b = ["green"] if benign < 0.8 else ["green", "bold"]
if malicious > 0.15:
clr_m = "yellow" if malicious < 0.25 else ["yellow", "bold"]
if malicious >= 0.4:
clr_m = "red" if malicious >= 0.6 and malicious <= 0.8 else ["red", "bold"]
colored_benign = colored(colored_benign, clr_b)
colored_malicious = colored(colored_malicious, clr_m)
logger.info(
"{} {} {} predicted: {} {} and {} {}.".format(
colored("Scanned", "white"),
colored_fname,
colored("-", "d_gray"),
colored_benign,
colored("benign", clr_b),
colored_malicious,
colored("malicious", clr_m),
)
)
return classified_file_result
async def analyze_scanned_files(
self, file_results: list, default_malware_prob_threshold: float = 0.6
) -> tuple:
all_results = []
potential_malware_results = (
[]
) # Only results that pass default_malware_prob_threshold
files_scanned = len(file_results)
# Remove none from our results (meaning those files did not have any
# extractable metadata for our classifier, for now at least)
file_results = [res for res in file_results if res is not None]
if not self._filter_initial_scan_results(
files_scanned
): # Checks to see if we have any files that can be classified
return all_results, potential_malware_results
count = (
len(file_results) - 1 if len(file_results) - 1 > 0 else len(file_results)
)
logger.info(
colored(
"Sending {} files to the malware analysis / defense pipeline.".format(
colored(str(count), ["d_gray", "underline", "bold"]), "bold"
)
)
)
# Classification pipeline
for file_result in file_results:
fname = file_result[0]
features = file_result[1]
# Send features to NLP / HAN pipeline
matrix_results = self.nlp.hann.build_feature_matrix_from_input_arr(features)
result = (fname, self.nlp.hann.predict(matrix_results))
benign = float(result[1][0])
malicious = float(result[1][1])
# Classify our results
res = self.get_classification_results(fname, benign, malicious)
if res["malicious"] >= default_malware_prob_threshold:
potential_malware_results.append(res)
all_results.append(res)
flush(newline=True)
logger.info(
"\tRestless scanned a total of {} files, with {} sent to the malware classification / defense pipeline.".format(
colored(files_scanned, ["d_gray", "bold", "underline"]),
colored(len(all_results), ["gray", "bold", "underline"]),
)
)
flush(newline=True)
return all_results, potential_malware_results
def _filter_initial_scan_results(self, files_scanned: int) -> bool:
"""Checks to see if we have any scanned files that can be analyzed."""
if files_scanned == 0:
logger.success(
colored(
"Found no files that were scannable for malware (checked {} files).".format(
colored(str(files_scanned), ["bold", "underline"])
)
)
+ colored(" The system seems to be safe.", ["bold", "green"])
)
return False
else:
return True
```
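A minimal usage sketch for the classifier above. The import path, sample filename, and feature vector are placeholders; in restless the `(filename, features)` pairs come from the scanner's feature-extraction step.

```python
# Usage sketch only; not the pipeline entry point used by restless itself.
import asyncio

from restless.components.classifier.classifier import Classifier  # assumed import path

extracted_features = []  # placeholder for a real extracted feature vector

classifier = Classifier(load_default_hann_model=True)
file_results = [("/tmp/sample.exe", extracted_features)]
all_results, flagged = asyncio.run(
    classifier.analyze_scanned_files(file_results, default_malware_prob_threshold=0.6)
)
for result in flagged:
    print(result["filename"], result["malicious"])
```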
#### File: components/utils/db_caller.py
```python
import pyspark
conf = pyspark.SparkConf().setMaster("local[*]").setAppName("restless-db")
sc = pyspark.SparkContext.getOrCreate(conf=conf)
class DB_Caller:
"""
Wrapper for database calls. Private methods will be called by higher-level `Utils`. Currently only supports Spark.
"""
def __init__(self):
self.context = sc
pass
```
#### File: components/utils/utils.py
```python
import os
import subprocess
from .logger import Logger
from .stats import Stats
from .stats_vis import StatsVis
from .misc import Misc
# from .db_caller import DB_Caller
logger = Logger()
stats = Stats()
stats_vis = StatsVis()
misc = Misc()
# db = DB_Caller()
class Utils:
"""
Various tools, including logging, database, and other high-level functions.
"""
def __init__(self):
self.logger = logger
self.stats = stats
self.stats_vis = stats_vis
self.misc = misc
# self.db = db
pass
```
#### File: restless/tests/test_rest_api.py
```python
import unittest
import os
from fastapi import FastAPI
from fastapi.testclient import TestClient
app = FastAPI()
@app.get("/")
async def read_main():
return {"msg": "Hello World"}
class MainAPITests(unittest.TestCase):
def test_base_api_setup(self):
client = TestClient(app)
response = client.get("/")
assert response.status_code == 200
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "Jdduryea/Jamtools",
"score": 3
}
|
#### File: Jdduryea/Jamtools/bin_coverage.py
```python
import numpy as np
import sys
import pandas as pd
from os import listdir
from os.path import isfile, join
import os
import xlrd
from collections import defaultdict
import matplotlib.pyplot as plt
import pylab
import types
import collections
## Plotting function
def histogram(data,filename):
# data to plot
n_groups = len(data)
data1 = data.values()
#data2 = non_cancer.values()
# create plot
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.8
rects1 = plt.bar(index + bar_width, data1, bar_width,
alpha=opacity,
color='b',
label='CpGs')
# rects2 = plt.bar(index + 1.5*bar_width, data1, bar_width,
# alpha=opacity,
# color='#2188CD',
# label='Cancer')
hfont = {'fontname':'Arial'}
plt.xlabel('Num CpGs sites',**hfont)
plt.ylabel('Percentage of CpGs covered genome wide',**hfont)
plt.xticks(index + bar_width, range(1, len(data)+1), **hfont)
plt.tight_layout()
# Higher resolution
pylab.savefig(filename,format='png', dpi=500)
fig = plt.figure()
plt.close(fig)
#plt.show()
bin_size = 200 # Default bin size, vary this for experiments
genome_bins = {} # contains bin data
total_cpgs_in_genome = 0 # count total cpgs across genome
cpgs_per_chrome = {}
# Read each chromosome file
for filename in os.listdir(os.getcwd()):
if filename[-3:] == "txt": # If this is a text file
chrome_name = filename[:filename.index(".")]
my_file = open(filename,"r")
bins = defaultdict(lambda:[])
# Keeps track of cpgs
all_cpgs_in_chrm = []
for line in my_file:
            data = line.split() # line is tab separated, first entry is chrm number, second is position
cpg_pos = int(data[1])
all_cpgs_in_chrm.append(cpg_pos)
bin_num = int(cpg_pos/(bin_size+0.0)) # Divide by bin size to find bin number
bins[bin_num].append(chrome_name+"-"+data[1])
total_cpgs_in_genome += 1 # Count cpgs
genome_bins[chrome_name] = bins
cpgs_per_chrome[chrome_name] = len(all_cpgs_in_chrm) # Distribution of CpGs
## Now that we have the data, look at the coverage
num_cpgs_per_bin = defaultdict(lambda:0)
# Go through each chromosome
for chrm in genome_bins:
for chrm_bin in genome_bins[chrm]: # Find bins
num_cpgs = len(genome_bins[chrm][chrm_bin])
num_cpgs_per_bin[num_cpgs] += num_cpgs
data = []
for x in num_cpgs_per_bin:
data.append(num_cpgs_per_bin[x])
plot_data = {}
for i in range(len(data)):
plot_data[i+1] = (100.0*sum(data[i:]))/total_cpgs_in_genome
print plot_data
histogram(plot_data, "bin200Data.png")
```
|
{
"source": "jdeanwallace/tinypilot",
"score": 3
}
|
#### File: app/hid/keyboard.py
```python
from hid import keycodes as hid
from hid import write as hid_write
_MODIFIER_KEYCODES = [
hid.KEYCODE_LEFT_CTRL, hid.KEYCODE_LEFT_SHIFT, hid.KEYCODE_LEFT_ALT,
hid.KEYCODE_LEFT_META, hid.KEYCODE_RIGHT_CTRL, hid.KEYCODE_RIGHT_SHIFT,
hid.KEYCODE_RIGHT_ALT, hid.KEYCODE_RIGHT_META
]
def send_keystroke(keyboard_path, control_keys, hid_keycode):
    # Build a single 8-byte HID report: byte 0 carries the modifier bitmask and
    # byte 2 the keycode; a separate all-zero report (release_keys) releases the keys.
buf = [0] * 8
buf[0] = control_keys
buf[2] = hid_keycode
hid_write.write_to_hid_interface(keyboard_path, buf)
# If it's not a modifier keycode, add a message indicating that the key
# should be released after it is sent.
if hid_keycode not in _MODIFIER_KEYCODES:
release_keys(keyboard_path)
def release_keys(keyboard_path):
hid_write.write_to_hid_interface(keyboard_path, [0] * 8)
```
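The 8-byte buffer written above follows the standard USB HID boot-keyboard report layout (modifier bitmask in byte 0, keycode in byte 2). A usage sketch; the device path and the raw HID values are assumptions rather than constants defined in this repository's `keycodes` module.

```python
# Sketch assuming the app's hid package is importable; the numeric values are
# standard USB HID usage-table numbers, not names defined by this repository.
from hid import keyboard

KEYBOARD_PATH = "/dev/hidg0"  # typical USB gadget device node (assumption)
LEFT_CTRL_BIT = 0x01          # modifier byte: bit 0 is left ctrl
KEYCODE_C = 0x06              # HID usage ID for the "c" key

# Send Ctrl+C; send_keystroke releases the keys itself because 0x06 is not a
# modifier keycode.
keyboard.send_keystroke(KEYBOARD_PATH, LEFT_CTRL_BIT, KEYCODE_C)
```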
#### File: tinypilot/app/update.py
```python
import enum
import glob
import logging
import os
import subprocess
import iso8601
import update_result
import utc
logger = logging.getLogger(__name__)
class Error(Exception):
pass
class AlreadyInProgressError(Error):
pass
class Status(enum.Enum):
NOT_RUNNING = 0
IN_PROGRESS = 1
DONE = 2
def __str__(self):
return str(self.name)
UPDATE_SCRIPT_PATH = '/opt/tinypilot-privileged/update'
# Cutoff under which an update is considered "recently" completed. It should be
# just long enough that it's the one we see right after a device reboot but not
# so long that there's risk of it being confused with the result from a later
# update attempt.
_RECENT_UPDATE_THRESHOLD_SECONDS = 60 * 3
_RESULT_FILE_DIR = os.path.expanduser('~/logs')
# Result files are prefixed with UTC timestamps in ISO-8601 format.
_UPDATE_RESULT_FILENAME_FORMAT = '%s-update-result.json'
def start_async():
"""Launches the update service asynchronously.
Launches the tinypilot-update systemd service in the background. If the
service is already running, raises an exception.
Raises:
AlreadyInProgressError if the update process is already running.
"""
current_state, _ = get_current_state()
if current_state == Status.IN_PROGRESS:
raise AlreadyInProgressError('An update is already in progress')
subprocess.Popen(
('sudo', '/usr/sbin/service', 'tinypilot-updater', 'start'))
def get_current_state():
"""Retrieves the current state of the update process.
    Checks the state of any actively running update job, or of a job that
    finished recently (within _RECENT_UPDATE_THRESHOLD_SECONDS), and returns
    the status and error state.
Returns:
A two-tuple where the first value is a Status enum and the second is a
string containing the error associated with a recently completed update
job. If the job completed successfully, the error string is empty.
"""
if _is_update_process_running():
return Status.IN_PROGRESS, None
recent_result = _get_latest_update_result()
if not recent_result:
return Status.NOT_RUNNING, None
return Status.DONE, recent_result.error
def get_result_path(timestamp):
"""Retrieves the associated file path for a result file for a timestamp."""
return os.path.join(
_RESULT_FILE_DIR,
_UPDATE_RESULT_FILENAME_FORMAT % iso8601.to_string(timestamp))
def _is_update_process_running():
lines = subprocess.check_output(
('ps', '-auxwe')).decode('utf-8').splitlines()
for line in lines:
if UPDATE_SCRIPT_PATH in line:
return True
return False
def _get_latest_update_result():
result_files = glob.glob(
os.path.join(_RESULT_FILE_DIR, _UPDATE_RESULT_FILENAME_FORMAT % '*'))
if not result_files:
return None
# Filenames start with a timestamp, so the last one lexicographically is the
# most recently created file.
most_recent_result_file = sorted(result_files)[-1]
with open(most_recent_result_file) as result_file:
most_recent_result = update_result.read(result_file)
# Ignore the result if it's too old.
delta = utc.now() - most_recent_result.timestamp
if delta.total_seconds() > _RECENT_UPDATE_THRESHOLD_SECONDS:
return None
return most_recent_result
```
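A small polling sketch built on `start_async` and `get_current_state`; it assumes the module is importable as `update`, as the app's own flat imports suggest.

```python
# Usage sketch only.
import time

import update

try:
    update.start_async()
except update.AlreadyInProgressError:
    pass  # an update was already running; just wait for it to finish

status, error = update.get_current_state()
while status == update.Status.IN_PROGRESS:
    time.sleep(5)
    status, error = update.get_current_state()

print("update finished with status %s, error: %r" % (status, error))
```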
|
{
"source": "jdeath/core",
"score": 2
}
|
#### File: components/minecraft_server/sensor.py
```python
from typing import Any, Dict
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TIME_MILLISECONDS
from homeassistant.helpers.typing import HomeAssistantType
from . import MinecraftServer, MinecraftServerEntity
from .const import (
ATTR_PLAYERS_LIST,
DOMAIN,
ICON_LATENCY_TIME,
ICON_PLAYERS_MAX,
ICON_PLAYERS_ONLINE,
ICON_PROTOCOL_VERSION,
ICON_VERSION,
ICON_MOTD,
NAME_LATENCY_TIME,
NAME_PLAYERS_MAX,
NAME_PLAYERS_ONLINE,
NAME_PROTOCOL_VERSION,
NAME_VERSION,
NAME_MOTD,
UNIT_PLAYERS_MAX,
UNIT_PLAYERS_ONLINE,
UNIT_PROTOCOL_VERSION,
UNIT_VERSION,
UNIT_MOTD,
)
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Minecraft Server sensor platform."""
server = hass.data[DOMAIN][config_entry.unique_id]
# Create entities list.
entities = [
MinecraftServerVersionSensor(server),
MinecraftServerProtocolVersionSensor(server),
MinecraftServerLatencyTimeSensor(server),
MinecraftServerPlayersOnlineSensor(server),
MinecraftServerPlayersMaxSensor(server),
MinecraftServerMOTDSensor(server),
]
# Add sensor entities.
async_add_entities(entities, True)
class MinecraftServerSensorEntity(MinecraftServerEntity):
"""Representation of a Minecraft Server sensor base entity."""
def __init__(
self,
server: MinecraftServer,
type_name: str,
icon: str = None,
unit: str = None,
device_class: str = None,
) -> None:
"""Initialize sensor base entity."""
super().__init__(server, type_name, icon, device_class)
self._state = None
self._unit = unit
@property
def available(self) -> bool:
"""Return sensor availability."""
return self._server.online
@property
def state(self) -> Any:
"""Return sensor state."""
return self._state
@property
def unit_of_measurement(self) -> str:
"""Return sensor measurement unit."""
return self._unit
class MinecraftServerVersionSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server version sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize version sensor."""
super().__init__(
server=server, type_name=NAME_VERSION, icon=ICON_VERSION, unit=UNIT_VERSION
)
async def async_update(self) -> None:
"""Update version."""
self._state = self._server.version
class MinecraftServerProtocolVersionSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server protocol version sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize protocol version sensor."""
super().__init__(
server=server,
type_name=NAME_PROTOCOL_VERSION,
icon=ICON_PROTOCOL_VERSION,
unit=UNIT_PROTOCOL_VERSION,
)
async def async_update(self) -> None:
"""Update protocol version."""
self._state = self._server.protocol_version
class MinecraftServerLatencyTimeSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server latency time sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize latency time sensor."""
super().__init__(
server=server,
type_name=NAME_LATENCY_TIME,
icon=ICON_LATENCY_TIME,
unit=TIME_MILLISECONDS,
)
async def async_update(self) -> None:
"""Update latency time."""
self._state = self._server.latency_time
class MinecraftServerPlayersOnlineSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server online players sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize online players sensor."""
super().__init__(
server=server,
type_name=NAME_PLAYERS_ONLINE,
icon=ICON_PLAYERS_ONLINE,
unit=UNIT_PLAYERS_ONLINE,
)
async def async_update(self) -> None:
"""Update online players state and device state attributes."""
self._state = self._server.players_online
device_state_attributes = None
players_list = self._server.players_list
if players_list is not None:
if len(players_list) != 0:
device_state_attributes = {ATTR_PLAYERS_LIST: self._server.players_list}
self._device_state_attributes = device_state_attributes
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return players list in device state attributes."""
return self._device_state_attributes
class MinecraftServerPlayersMaxSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server maximum number of players sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize maximum number of players sensor."""
super().__init__(
server=server,
type_name=NAME_PLAYERS_MAX,
icon=ICON_PLAYERS_MAX,
unit=UNIT_PLAYERS_MAX,
)
async def async_update(self) -> None:
"""Update maximum number of players."""
self._state = self._server.players_max
class MinecraftServerMOTDSensor(MinecraftServerSensorEntity):
"""Representation of a Minecraft Server maximum number of players sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize maximum number of players sensor."""
super().__init__(
server=server,
type_name=NAME_MOTD,
icon=ICON_MOTD,
unit=UNIT_MOTD,
)
async def async_update(self) -> None:
"""Update maximum number of players."""
self._state = self._server.motd
```
|
{
"source": "jdeaver/noms",
"score": 2
}
|
#### File: noms/tools/roll.py
```python
import argparse, os, os.path, subprocess, sys, shutil, urlparse
from collections import defaultdict
def main():
parser = argparse.ArgumentParser(description='Dependency snapshotter')
parser.add_argument('url')
parser.add_argument('--path', help=(
'path to store the dependency at, defaults to vendor/[url without protocol]'))
parser.add_argument('--incl', action='append', help=(
'subdirectories of the dependency to check out, relative to the path. '
'Defaults to root. Evaluated before --excl.'))
parser.add_argument('--version', default='HEAD', help=(
'version of the dependency to snapshot, defaults to HEAD'))
args = parser.parse_args()
url = urlparse.urlparse(args.url)
if url.scheme == '':
print 'Invalid url: no scheme'
sys.exit(1)
def rel(subdir):
if subdir is not None and os.path.isabs(subdir):
print 'subdirectory %s must be a relative path' % subdir
sys.exit(1)
return subdir
incl = None
if args.incl is not None:
incl = [rel(i) for i in args.incl]
if not os.path.isdir('.git'):
print '%s must be run from the root of a repository' % sys.argv[0]
sys.exit(1)
path = url.path
if path.startswith('/'):
path = path[1:]
if path.endswith('.git'):
path = path[0:len(path) - 4]
depdir = args.path
if depdir is None:
depdir = os.path.join('vendor', url.netloc, path)
shutil.rmtree(depdir, True)
parent = os.path.dirname(depdir)
if not os.path.isdir(parent):
os.makedirs(parent)
os.chdir(parent)
# Kinda sucks to clone entire repo to get a particular version, but:
# http://stackoverflow.com/questions/3489173/how-to-clone-git-repository-with-specific-revision-changeset
subprocess.check_call(['git', 'clone', args.url])
os.chdir(os.path.basename(depdir))
subprocess.check_call(['git', 'reset', '--hard', args.version])
head = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
f = open('.version', 'w')
f.write('%s\n%s\n' % (args.url, head))
f.close()
shutil.rmtree('.git')
if os.path.isdir('vendor'):
deps = [dirName for dirName, _, files in os.walk('vendor') if files]
if deps:
print '\nWarning!'
print ' %s contains one or more dependencies which will need to be vendored as well:' % args.url
print ' -', '\n - '.join(deps)
shutil.rmtree('vendor')
if incl is not None:
inclPaths = []
inclParentToName = defaultdict(set)
for dir in incl:
if not os.path.isdir(dir):
print 'Warning: --incl directory %s does not exist, skipping.' % dir
else:
path = os.path.abspath(dir)
parent, name = os.path.split(path)
inclPaths.append(path)
inclParentToName[parent].add(name)
for (dirpath, dirnames, _) in os.walk(os.getcwd()):
if dirpath in inclParentToName:
# Don't descend into included subdirectories
for n in inclParentToName[dirpath]:
dirnames.remove(n)
elif not any(p.startswith(dirpath) for p in inclPaths):
# Remove directories that aren't an ancestor of the included.
print 'rm subdirectory: %s' % dirpath
shutil.rmtree(dirpath)
if __name__ == '__main__':
main()
```
|
{
"source": "jdebacker/taxdata",
"score": 2
}
|
#### File: taxdata/puf_stage1/updatesoi.py
```python
import argparse
import pandas as pd
from pathlib import Path
from collections import defaultdict
CUR_PATH = Path(__file__).resolve().parent
# these are all of the columns used in the table 1.4 spreadsheet. The pattern
# used for this list is (SOI_estimates variable name, col1, col2, col3, col4)
# each of those columns is needed because pandas will read the spreadsheet as
# a DataFrame with multiple column labels. If the IRS changes the format of
# the table 1.4 spreadsheet, this script will break and you'll need to update
# the columns in this list. You will also need to update this list if you add
# a new variable to the estimates.
TABLE14COLS = [
(
"SS_return",
"Social security benefits",
"Total [1]",
"Unnamed: 69_level_2",
"Number of\nreturns",
),
("INTS", "Taxable interest", "Unnamed: 8_level_1", "Unnamed: 8_level_2", "Amount"),
(
"DIVS",
"Ordinary dividends",
"Unnamed: 12_level_1",
"Unnamed: 12_level_2",
"Amount",
),
("SCHCI", "Business or profession", "Net\nincome", "Unnamed: 20_level_2", "Amount"),
("SCHCL", "Business or profession", "Net\nloss", "Unnamed: 22_level_2", "Amount"),
(
"CGNS",
"Sales of capital assets reported on Form 1040, Schedule D [2]",
"Taxable\nnet gain",
"Unnamed: 26_level_2",
"Amount",
),
("Pension", "Pensions and\nannuities", "Unnamed: 38_level_1", "Taxable", "Amount"),
(
"SCHEI",
"Total rental and royalty",
"Net\nincome",
"Unnamed: 52_level_2",
"Amount",
),
(
"SCHEI",
"Partnership and S corporation",
"Net\nincome",
"Unnamed: 56_level_2",
"Amount",
),
("SCHEI", "Estate and trust", "Net\nincome", "Unnamed: 60_level_2", "Amount"),
("SCHEL", "Total rental and royalty", "Net\nloss", "Unnamed: 54_level_2", "Amount"),
(
"SCHEL",
"Partnership and S corporation",
"Net\nloss",
"Unnamed: 58_level_2",
"Amount",
),
("SCHEL", "Estate and trust", "Net\nloss", "Unnamed: 62_level_2", "Amount"),
("SS", "Social security benefits", "Total [1]", "Unnamed: 70_level_2", "Amount"),
(
"UCOMP",
"Unemployment compensation",
"Unnamed: 68_level_1",
"Unnamed: 68_level_2",
"Amount",
),
]
# these lists are the indices for the wage bins used to split up wage targets
# in the SOI estimates.
PUFWAGES = [
(2, 2),
(3, 4),
(5, 6),
(7, 8),
(9, 9),
(10, 10),
(11, 11),
(12, 12),
(13, 13),
(14, 14),
(15, 15),
(17, 20),
]
CPSWAGES = [(2, 4), (5, 6), (7, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 20)]
def update_soi(year, datapath, wage_indicies, file_):
"""
Update SOI estimates for a given year by reading and parsing the given SOI
spreadsheets
    Parameters
    ----------
    year: year of the estimates you're adding
    datapath: path to the necessary files from the SOI
    wage_indicies: wage-bin index ranges (see PUFWAGES and CPSWAGES); the PUF
        and CPS estimates use different wage bins
file_: "cps" or "puf"
"""
single, married, hoh = table12(year, datapath)
dep_return = table23(year, datapath)
ipd = table21(year, datapath)
nonwages, wages = table14(year, wage_indicies, datapath)
values = [
single,
married,
hoh,
nonwages["SS_return"],
dep_return,
nonwages["INTS"],
nonwages["DIVS"],
nonwages["SCHCI"],
nonwages["SCHCL"],
nonwages["CGNS"],
nonwages["Pension"],
nonwages["SCHEI"],
nonwages["SCHEL"],
nonwages["SS"],
nonwages["UCOMP"],
ipd,
]
if file_ == "cps":
values = values[:-1]
values += wages
return values
def table12(year, datapath):
"""
Extract the total returns from single, joint, and married filers from
SOI table 1.2
"""
def numfilers(data, col):
nreturns = "Number\nof\nreturns"
allreturns = "All returns, total"
return data[col][nreturns].loc[allreturns].values[0].astype(int)
file_ = Path(datapath, f"{str(year)[-2:]:}in12ms.xls")
data = pd.read_excel(file_, header=[2, 3, 4], index_col=0)
single = numfilers(data, "Returns of single persons")
col = "Returns of married persons filing jointly and returns of surviving spouses"
married1 = numfilers(data, col)
married2 = numfilers(data, "Returns of married persons filing separately")
married = married1 + married2
hoh = numfilers(data, "Returns of heads of households")
return single, married, hoh
def table23(year, datapath):
"""
    Returns number of dependent exemptions from SOI table 2.3
"""
file_ = f"{str(year)[-2:]}in23ar.xls"
data = pd.read_excel(Path(datapath, file_), header=[2, 3, 4], index_col=0)
col = "Exemptions for dependents"
nexemp = "Number\nof\nexemptions"
allreturns = "All returns, total"
return data[col]["Total"][nexemp].loc[allreturns].astype(int)
def table21(year, datapath):
"""
Return interest paid deduction amount from table 2.1
Parameters
----------
year: integer representing the year of the table
datapath: path to the directory holding the SOI files
"""
file_ = f"{str(year)[-2:]}in21id.xls"
data = pd.read_excel(Path(datapath, file_), header=[2, 3, 4, 5, 6], index_col=0)
itemded = "Itemized deductions"
ipd = "Interest paid deduction"
un = "Unnamed: 83_level_3"
if year == "2017":
un = "Unnamed: 85_level_3"
allrets = "All returns, total"
return data[itemded][ipd]["Total"][un]["Amount"].loc[allrets].astype(int)
def table14_nonwages(data, cols):
"""
Extracts all needed data except wages from table 1.4
Parameters
----------
data: SOI data
cols: See TABLE14COLS
"""
values = defaultdict(int)
for col in cols:
val = data[col[1:]].loc["All returns, total"].astype(int)
values[col[0]] += val
return values
def table14_wages(data, indicies):
"""
Return all of the wage totals
Parameters
----------
data: table 1.4 data
indicies: See PUFWAGES and CPSWAGES
"""
was = []
assert len(data) == 21 # they sometimes change up the wage bins they use
data = data["Salaries and wages"]["Unnamed: 6_level_1"]["Unnamed: 6_level_2"]
for i, j in indicies:
val = data.iloc[i : j + 1].sum()[0].astype(int)
was.append(val)
return was
def table14(year, wage_indicies, datapath):
"""
Grabs everything from table 1.4 by calling the two other table 1.4
functions.
Parameters
----------
year: year we're pulling the data for
wage_indicies: see PUFWAGES and CPSWAGES
datapath: path to directory where the SOI data is stored
"""
data = pd.read_excel(
Path(datapath, f"{str(year)[-2:]}in14ar.xls"), header=[2, 3, 4, 5], index_col=0
)
data = data.iloc[:21]
nonwages = table14_nonwages(data, TABLE14COLS)
wages = table14_wages(data, wage_indicies)
return nonwages, wages
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("year", help="Year of the update", type=str)
parser.add_argument(
"path", help="Path to a directory with all of the SOI files needed", type=str
)
args = parser.parse_args()
year = args.year
datapath = args.path
puf_vals = update_soi(year, datapath, PUFWAGES, "puf")
pufpath = Path(CUR_PATH, "SOI_estimates.csv")
puf_soi = pd.read_csv(pufpath, index_col=0)
puf_soi[year] = puf_vals
cpspath = Path(CUR_PATH, "..", "cps_stage1", "SOI_estimates.csv")
cps_soi = pd.read_csv(cpspath, index_col=0)
cps_vals = update_soi(year, datapath, CPSWAGES, "cps")
cps_soi[year] = cps_vals
puf_soi.to_csv(pufpath)
cps_soi.to_csv(cpspath)
```
#### File: taxdata/cps/splitincome.py
```python
import numpy as np
def split_income(data):
"""
Split up income variables
"""
np.random.seed(79)
# Qualified dividends
ALL_QUALIFIED_PROB = 0.429 # % of units where all dividends are qualified
NO_QUALIFIED_PROB = 0.093 # % of units where no dividends are qualified
# % of units where either all or no dividends are qualified
NON_AVG_PROB = ALL_QUALIFIED_PROB + NO_QUALIFIED_PROB
QUALIFIED_FRAC = 0.678 # % of dividends that are qualified among remaining
# determine qualified dividend percentage
probs = np.random.random(len(data["divs"]))
qualified = np.ones(len(data["divs"]))
qualified = np.where(
(probs > ALL_QUALIFIED_PROB) & (probs <= NON_AVG_PROB), 0.0, qualified
)
qualified = np.where(probs > NON_AVG_PROB, QUALIFIED_FRAC, qualified)
data["e00650"] = data["divs"] * qualified
# Split interest income into taxable and tax exempt
SLOPE = 0.068
RATIO = 0.46
prob = 1.0 - SLOPE * (data["interest"] * 1e-3)
uniform_rn = np.random.random(len(prob))
data["e00300"] = np.where(
uniform_rn < prob, data["interest"], data["interest"] * RATIO
)
data["e00400"] = data["interest"] - data["e00300"]
# Pensions and annuities
probs = np.random.random(len(data["e01500"]))
FULL_TAXABLE_PROB = 0.612
ZERO_TAX_PROB = 0.073
NON_AVG_PROB = FULL_TAXABLE_PROB + ZERO_TAX_PROB
AVG_TAXABLE_AMOUNT = 0.577
# determine taxability
taxability = np.ones(len(data["e01500"]))
taxability = np.where(
(probs > FULL_TAXABLE_PROB) & (probs <= NON_AVG_PROB), 0.0, taxability
)
taxability = np.where(probs > NON_AVG_PROB, AVG_TAXABLE_AMOUNT, taxability)
data["e01700"] = data["e01500"] * taxability
return data
```
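A toy illustration of `split_income`; the amounts are invented, a plain dict of NumPy arrays stands in for the CPS tax-unit file, and the import path is an assumption.

```python
# Toy example; amounts are invented, not CPS data.
import numpy as np

from splitincome import split_income  # assumed import path

data = {
    "divs": np.array([1000.0, 0.0, 250.0]),      # total dividends
    "interest": np.array([500.0, 2000.0, 0.0]),  # total interest income
    "e01500": np.array([0.0, 12000.0, 3000.0]),  # total pensions and annuities
}
data = split_income(data)
print(data["e00650"])                  # qualified dividends
print(data["e00300"], data["e00400"])  # taxable vs. tax-exempt interest
print(data["e01700"])                  # taxable pensions and annuities
```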
#### File: taxdata/cps/targeting.py
```python
import pandas as pd
import numpy as np
from .constants import FIPS_DICT
def target(cps, state_data_link):
"""
Read state level income information and adjust CPS data accordingly
"""
state_data = pd.read_csv(state_data_link, index_col="STATE", thousands=",")
# only use aggregate data
state_data = state_data[state_data["AGI_STUB"] == 0].copy()
# map income variables in the CPS and IRS data
# TODO: Add imputed variables
VAR_MAP = {
"A00200": ["e00200p", "e00200s"],
"A00300": ["e00300"],
"A00600": ["divs"],
"A00650": ["e00650"],
"A00900": ["e00900p", "e00900s"],
"A02300": ["e02300"],
"A03240": ["DPAD"],
"A01400": ["TIRAD"],
"A03270": ["SEHEALTH"],
"A03210": ["SLINT"],
"A07180": ["CDC"],
}
# dictionary to hold factors
factor_dict = {}
# loop through each state and variable
for var, cps_vars in VAR_MAP.items():
factor_dict[var] = []
for state, fips in FIPS_DICT.items():
sub_cps = cps[cps["fips"] == fips]
target = state_data[var][state] * 1000 # scale up IRS data
# only count filers
cps_uw_total = sub_cps[cps_vars].sum(axis=1) * sub_cps["filer"]
cps_sum = (cps_uw_total * sub_cps["s006"]).sum()
# compute factor
factor = target / cps_sum
factor_dict[var].append(factor)
# create a DataFrame with the factors
factor_df = pd.DataFrame(factor_dict)
factor_df.index = FIPS_DICT.values()
# export factors
factor_df.to_csv("state_factors.csv")
# apply factors
for var, cps_vars in VAR_MAP.items():
factor_array = factor_df[var][cps["fips"]].values
for v in cps_vars:
cps[v] *= factor_array
# recalculate total income
cps["e00200"] = cps["e00200p"] + cps["e00200s"]
cps["e00900"] = cps["e00900p"] + cps["e00900s"]
cps["e02100"] = cps["e02100p"] + cps["e02100s"]
cps["e00650"] = np.minimum(cps["divs"], cps["e00650"])
cps["tot_inc"] = cps[
[
"e00200",
"e00300",
"e00400",
"e00900",
"divs",
"e00800",
"e01500",
"rents",
"e02100",
"e02400",
"CGAGIX",
"e02300",
]
].sum(axis=1)
assert np.allclose(cps["e00900"], cps[["e00900p", "e00900s"]].sum(axis=1))
assert np.allclose(cps["e02100"], cps[["e02100p", "e02100s"]].sum(axis=1))
assert np.allclose(cps["e00200"], cps[["e00200p", "e00200s"]].sum(axis=1))
return cps
```
|
{
"source": "jdebecdelievre/PyFME",
"score": 2
}
|
#### File: pyfme/aero/avl.py
```python
import numpy as np
nl = np.linalg
import os
import subprocess
import sys
import pandas as pd
FORCES = ['CL', 'CD', 'CY', 'Cl', 'Cm', 'Cn']
STAB = [F+d for F in ['CL', 'CY', 'Cl', 'Cm', 'Cn'] for d in ['a', 'b', 'p', 'q', 'r']]
CONT = [F+d for F in ['CL', 'CY', 'Cl', 'Cm', 'Cn'] for d in ['d1', 'd2', 'd3']]
# Note: missing drag
class avl_run():
def __init__(self, geom_file, num_control_surfaces, run_file="runs", path_to_avl='.'):
self.geom_file = geom_file
self.run_file = run_file
self.avl = path_to_avl
self.num_ctrl = num_control_surfaces
def run(self, state, controls):
"""
State is a stack of [alpha, beta, phat, qhat, rhat] horizontal vectors
        Controls is a stack of [elevator, aileron, rudder] vectors
"""
if controls.ndim > 1:
assert controls.shape[0] == state.shape[0]
else:
state = np.expand_dims(state, 0)
controls = np.expand_dims(controls, 0)
N = controls.shape[0]
# Modify run file
f = open(self.run_file, 'w')
for i in range(N):
print(state[i])
alpha, beta, phat, qhat, rhat = state[i]
elevator, aileron, rudder = controls[i]
f.write(f"""
---------------------------------------------
Run case {i+1}: -unnamed-
alpha -> alpha = {alpha}
beta -> beta = {beta}
pb/2V -> pb/2V = {phat}
qc/2V -> qc/2V = {qhat}
rb/2V -> rb/2V = {rhat}
elevator -> elevator = {elevator}
aileron -> aileron = {aileron}
rudder -> rudder = {rudder}
alpha = {alpha} deg
beta = {beta} deg
pb/2V = {phat}
qc/2V = {qhat}
rb/2V = {rhat}
CL = 0.310719
CDo = 0.00000
bank = 0.00000 deg
elevation = 0.00000 deg
heading = 0.00000 deg
Mach = 0.00000
velocity = 5.00000 Lunit/Tunit
density = 1.12500 Munit/Lunit^3
grav.acc. = 9.81000 Lunit/Tunit^2
turn_rad. = 0.00000 Lunit
load_fac. = 1.00000
X_cg = 0.300000 Lunit
Y_cg = 0.00000 Lunit
Z_cg = 0.00000 Lunit
mass = 5.00000 Munit
Ixx = 1.00000 Munit-Lunit^2
Iyy = 0.02000 Munit-Lunit^2
Izz = 1.00000 Munit-Lunit^2
Ixy = 0.00000 Munit-Lunit^2
Iyz = 0.00000 Munit-Lunit^2
Izx = 0.00000 Munit-Lunit^2
visc CL_a = 0.00000
visc CL_u = 0.00000
visc CM_a = 0.00000
visc CM_u = 0.00000
""")
f.close()
# Create bash script
f = open('cmd_file.run', 'w')
# f.write(f"LOAD {self.geom_file}\n") # load geom file
f.write(f'PLOP\ng\n\n') # disable graphics
f.write(f"CASE {self.run_file}\nOPER\n")
for i in range(N):
results_file = f"rslt_{i}.stab"
f.write(f"{i+1}\nx\nst\n{results_file}\n")
f.write("\n\nQUIT")
f.close()
# Run bash
with open('cmd_file.run', 'r') as commands:
avl_run = subprocess.Popen([f"{self.avl}\\avl.exe", self.geom_file],
stderr=sys.stderr,
stdout=open(os.devnull, 'w'),
stdin=subprocess.PIPE)
for line in commands:
avl_run.stdin.write(line.encode('utf-8'))
avl_run.communicate()
avl_run.wait()
# sort out results
data = pd.DataFrame({k: 0.0 for k in FORCES + STAB + CONT}, index=np.arange(N))
data['de'] = controls[:, 0]
data['da'] = controls[:, 1]
data['dr'] = controls[:, 2]
data['alpha'] = state[:, 0]
data['beta'] = state[:, 1]
data['p'] = state[:, 2]
data['q'] = state[:, 3]
data['r'] = state[:, 4]
for i in range(N):
with open(f"rslt_{i}.stab", 'r') as f:
lines = f.readlines()
data.Cl[i] = float(lines[19][33:41].strip())
data.Cm[i] = float(lines[20][33:41].strip())
data.Cn[i] = float(lines[21][33:41].strip())
data.CL[i] = float(lines[23][10:20].strip())
data.CD[i] = float(lines[24][10:20].strip())
data.CY[i] = float(lines[20][10:20].strip())
num_ctrl = self.num_ctrl # number of control surfaces
data.CLa[i] = float(lines[36 + num_ctrl][24:34].strip()) # CL_a
data.CYa[i] = float(lines[37 + num_ctrl][24:34].strip()) # CY_a
data.Cla[i] = float(lines[38 + num_ctrl][24:34].strip()) # Cl_a
data.Cma[i] = float(lines[39 + num_ctrl][24:34].strip()) # Cm_a
data.Cna[i] = float(lines[40 + num_ctrl][24:34].strip()) # Cn_a
data.CLb[i] = float(lines[36 + num_ctrl][43:54].strip()) # CL_b
data.CYb[i] = float(lines[37 + num_ctrl][43:54].strip()) # CY_b
data.Clb[i] = float(lines[38 + num_ctrl][43:54].strip()) # Cl_b
data.Cmb[i] = float(lines[39 + num_ctrl][43:54].strip()) # Cm_b
data.Cnb[i] = float(lines[40 + num_ctrl][43:54].strip()) # Cn_b
data.CLp[i] = float(lines[44 + num_ctrl][24:34].strip())
data.CLq[i] = float(lines[44 + num_ctrl][43:54].strip())
data.CLr[i] = float(lines[44 + num_ctrl][65:74].strip())
data.CYp[i] = float(lines[45 + num_ctrl][24:34].strip())
data.CYq[i] = float(lines[45 + num_ctrl][43:54].strip())
data.CYr[i] = float(lines[45 + num_ctrl][65:74].strip())
data.Clp[i] = float(lines[46 + num_ctrl][24:34].strip())
data.Clq[i] = float(lines[46 + num_ctrl][43:54].strip())
data.Clr[i] = float(lines[44 + num_ctrl][65:74].strip())
data.Cmp[i] = float(lines[47 + num_ctrl][24:34].strip())
data.Cmq[i] = float(lines[47 + num_ctrl][43:54].strip())
data.Cmr[i] = float(lines[44 + num_ctrl][65:74].strip())
data.Cnp[i] = float(lines[48 + num_ctrl][24:34].strip())
data.Cnq[i] = float(lines[48 + num_ctrl][43:54].strip())
data.Cnr[i] = float(lines[48 + num_ctrl][65:74].strip())
INI = [24,43,65]
FIN = [34,54,74]
for n_ctrl in range(num_ctrl):
data['CLd'+str(n_ctrl + 1)][i] = float(lines[52 + num_ctrl]
[INI[n_ctrl]:FIN[n_ctrl]].strip()) # CL_a
data['CYd'+str(n_ctrl + 1)][i] = float(lines[53 + num_ctrl]
[INI[n_ctrl]:FIN[n_ctrl]].strip()) # CY_a
data['Cld'+str(n_ctrl + 1)][i] = float(lines[54 + num_ctrl]
[INI[n_ctrl]:FIN[n_ctrl]].strip()) # Cl_a
data['Cmd'+str(n_ctrl + 1)][i] = float(lines[55 + num_ctrl]
[INI[n_ctrl]:FIN[n_ctrl]].strip()) # Cm_a
data['Cnd'+str(n_ctrl + 1)][i] = float(lines[56 + num_ctrl]
[INI[n_ctrl]:FIN[n_ctrl]].strip()) # Cn_a
os.remove(f"rslt_{i}.stab")
os.remove(self.run_file)
os.remove('cmd_file.run')
return(data)
##### TODO : try to see if I can leave an AVL session open
# def count_control_surfaces(geomfile):
# with open(geomfile,'r') as f:
# for line in f:
#
if __name__ == "__main__":
states = []
controls = []
for al in np.linspace(-10,20,30):
# for de in np.linspace(-26,28,10):
# controls.append(np.array([de,0,0]))
# states.append(np.array([al,0,0,0,0]))
# for da in np.linspace(-15, 10):
# controls.append(np.array([0,da,0]))
# states.append(np.array([al,0,0,0,0]))
# for dr in np.linspace(-5,5,10):
# controls.append(np.array([0,0,dr]))
states.append(np.array([al,0,0,0,0]))
controls.append(np.array([0,0,0]))
states = np.array(states)
controls = np.array(controls)
# states = np.array([np.arange(5)/100, np.arange(5)/600])
# controls = np.array([np.arange(3)/3, np.arange(3)/4])
a = avl_run(num_control_surfaces=3, geom_file='hypo', run_file='runs')
data = a.run(states, controls)
data.to_pickle('MeterSpanUAV.pkl')
```
#### File: pyfme/aircrafts/basisLinear.py
```python
import numpy as np
import pdb
import json
from pyfme.aircrafts.aircraft import Aircraft, ConventionalControls
from pyfme.models.constants import slugft2_2_kgm2, lbs2kg
from pyfme.utils.coordinates import wind2body, body2wind
from copy import deepcopy as cp
from collections import namedtuple
from pyfme.environment import Conditions
import os
pth = os.path.dirname(os.path.realpath(__file__))
inertia_attributes=[
'mass',
'inertia']
aero_attributes = [
'CL_0',
'CM_0',
'CL_alpha',
'CL_q',
'CL_delta_elev',
'CM_alpha2',
'CM_alpha',
'CM_q',
'CM_delta_elev',
'e',
'CD_0',
'CL_MAX',
'CY_beta',
'CY_p',
'CY_r',
'CY_delta_rud',
'Cl_beta',
'Cl_p',
'Cl_r',
'Cl_delta_rud',
'Cl_delta_aile',
'CN_beta',
'CN_p_al',
'CN_r_cl',
'CN_r_0',
'CN_delta_rud',
'CN_delta_aile']
geometrical_attributes =[
'Sw',
'chord',
'span'
]
class BasisLinear(Aircraft):
"""
Cessna 172
The Cessna 172 is a blablabla...
"""
def __init__(self, aircraft_file):
super().__init__()
# set to 0 by default
for d in aero_attributes+geometrical_attributes+inertia_attributes:
setattr(self, d, 0)
# set to loaded values
with open(aircraft_file, 'r') as f:
aircraft_file = json.load(f)
for d in aircraft_file:
setattr(self, d, aircraft_file[d])
self.inertia_inverse = np.linalg.inv(self.inertia)
self.AR = self.span**2/self.Sw
def get_controls(self, t, controls_sequence):
return ConventionalControls().evaluate_sequence(t, controls_sequence)
def _calculate_aero_lon_forces_moments_coeffs(self, alpha, V, state, controls):
"""
Simplified dynamics for the Cessna 172: strictly linear dynamics.
Stability derivatives are considered constant, the value for small angles is kept.
Parameters
----------
state
Returns
-------
"""
delta_elev = controls.delta_elevator
alpha_RAD = alpha # rad
c = self.chord # m
p, q, r = state.omega.T # rad/s
CL = (
self.CL_0 +
self.CL_alpha * alpha_RAD +
# self.CL_alpha*np.sin(2*np.pi*alpha_RAD)/2/np.pi +
self.CL_delta_elev*delta_elev +
self.CL_q * q * c/(2*V)
)
# STALL
# CL = CL * (abs(CL) < self.CL_MAX) + np.sign(CL)*self.CL_MAX*(1-(abs(CL) < self.CL_MAX))
CD = self.CD_0 + CL**2/(self.AR*self.e*np.pi)
CM = (
self.CM_0 +
(self.CM_alpha2*alpha + self.CM_alpha)*alpha +
self.CM_delta_elev * delta_elev +
self.CM_q * q * c/(2*V)
)
return CL, CD, CM
def _calculate_aero_lat_forces_moments_coeffs(self, alpha, beta, V, state, controls):
delta_aile = controls.delta_aileron # rad
delta_rud = controls.delta_rudder # rad
b = self.span
p, q, r = state.omega.T
# Recompute CL
delta_elev = np.rad2deg(controls.delta_elevator)
CL = (
self.CL_0 +
self.CL_alpha*alpha +
self.CL_delta_elev*delta_elev +
self.CL_q * q * self.chord/(2*V)
)
# CL = CL * (abs(CL) < self.CL_MAX) + np.sign(CL)*self.CL_MAX*(1-(abs(CL) < self.CL_MAX))
CY = (
self.CY_beta * beta +
self.CY_delta_rud * delta_rud +
b/(2 * V) * (self.CY_p * p + self.CY_r * r)
)
Cl = (
self.Cl_beta * beta +
self.Cl_delta_aile * delta_aile +
self.Cl_delta_rud * delta_rud +
b/(2 * V) * (self.Cl_p * p + self.Cl_r * r)
)
# b/(2 * V) * (self.Cl_p * p + self.Cl_r_cl * CL * r)
CN = (
self.CN_beta * beta +
(self.CN_delta_aile*delta_aile) +
self.CN_delta_rud * delta_rud +
b/(2 * V) * (self.CN_p_al*alpha * p + (self.CN_r_cl*CL**2 + self.CN_r_0) * r)
)
# b/(2 * V) * (self.CN_p_al*alpha_DEG * p + (self.CN_r_cl*CL**2 + self.CN_r_0) * r)
return CY, Cl, CN
def _calculate_thrust_forces_moments(self, TAS, conditions, controls):
return 0
def _calculate_aero_forces_moments(self, conditions, state, controls):
q_inf = conditions.q_inf
V = conditions.TAS
alpha = conditions.alpha
beta = conditions.beta
Sw = self.Sw
c = self.chord
b = self.span
CL, CD, Cm = self._calculate_aero_lon_forces_moments_coeffs(alpha, V, state, controls)
CY, Cl, Cn = self._calculate_aero_lat_forces_moments_coeffs(alpha, beta, V, state, controls)
L = q_inf * Sw * CL
D = q_inf * Sw * CD
Y = q_inf * Sw * CY
l = q_inf * Sw * b * Cl
m = q_inf * Sw * c * Cm
n = q_inf * Sw * b * Cn
return L, D, Y, l, m, n
def calculate_derivatives(self, state, environment, controls, eps=1e-3):
"""
Calculate dimensional derivatives of the forces at the vicinity of the state.
The output consists in 2 dictionaries, one for force one for moment
key: type of variables derivatives are taken for
val : 3x3 np array with X,Y,Z and L,M,N as columns, and the variable we differentiate against in lines
(u,v,w ; phi,theta,psi ; p,q,r ; x,y,z)
"""
names = {'velocity': ['u', 'v', 'w'],
'omega': ['p', 'q', 'r'],
'acceleration': ['w_dot']}
Fnames = ['X', 'Y', 'Z']
Mnames = ['L', 'M', 'N']
# F, M = self.calculate_forces_and_moments(state, environment, controls)
# Rotation for stability derivatives in stability axis
V = np.sqrt(state.velocity.u**2 + state.velocity.v**2 + state.velocity.w**2)
alpha = np.arctan2(state.velocity.w, state.velocity.u)
beta = np.arcsin(state.velocity.v / V)
derivatives = {}
for keyword in names.keys():
for i in range(len(names[keyword])):
eps_v0 = np.zeros(3)
# plus perturb
eps_v0[i] = eps/2
eps_vec = wind2body(eps_v0, alpha, beta)
state.perturbate(eps_vec, keyword)
forces_p, moments_p = self.calculate_forces_and_moments(state, environment, controls)
forces_p = body2wind(forces_p, alpha, beta)
moments_p = body2wind(moments_p, alpha, beta)
state.cancel_perturbation()
# minus perturb
eps_v0[i] = - eps/2
eps_vec = wind2body(eps_v0, alpha, beta)
state.perturbate(eps_vec, keyword)
forces_m, moments_m = self.calculate_forces_and_moments(state, environment, controls)
forces_m = body2wind(forces_m, alpha, beta)
moments_m = body2wind(moments_m, alpha, beta)
state.cancel_perturbation()
k = names[keyword][i]
for j in range(3):
# print(Fnames[j] + k, forces[j])
derivatives[Fnames[j] + k] = (forces_p[j] - forces_m[j]) / eps
derivatives[Mnames[j] + k] = (moments_p[j] - moments_m[j]) / eps
return derivatives
class Linear(Aircraft):
def __init__(self, aircraft_file=None, alpha_dot=False):
super().__init__()
aircraft_file = os.path.join(pth, 'linear', 'linear.json') if aircraft_file is None else aircraft_file
# set to loaded values
with open(aircraft_file, 'r') as f:
aircraft_file = json.load(f)
for d in aircraft_file:
setattr(self, d, aircraft_file[d])
if alpha_dot == False:
self.CL_alpha_dot = 0.0
self.CM_alpha_dot = 0.0
self.state_dot = None
self.AR = self.span**2/self.Sw
self.store = []
self.inertia_inverse = np.linalg.inv(self.inertia)
def get_controls(self, t, controls_sequence):
return ConventionalControls().evaluate_sequence(t, controls_sequence)
def _calculate_aero_lon_forces_moments_coeffs(self, alpha, V, state, controls):
delta_elev = controls.delta_elevator # deg
c = self.chord # m
p, q, r = state.omega.T # rad/s
D_alpha = alpha - self.alpha_0
state_dot = self.state_dot # trick to avoid having to extend state space
        if self.state_dot is None:
alpha_dot = 0.
else:
alpha_dot = (self.state_dot.w * state.u - self.state_dot.u * state.w) / (state.u**2 + state.w**2)
# self.state_dot = None
self.store.append(alpha_dot)
# alpha_dot = 0
CL = (
self.CL_0 +
self.CL_alpha * D_alpha +
self.CL_delta_elev * delta_elev +
self.CL_q * q * c/(2*V) +
self.CL_alpha_dot * alpha_dot
)
CD = (
self.CD_0 + 2/(self.AR*self.e*np.pi)*CL*self.CL_alpha*D_alpha
)
CM = (
self.CM_0 +
self.CM_alpha * D_alpha +
self.CM_delta_elev * delta_elev +
self.CM_q * q * c/(2*V) +
self.CM_alpha_dot * alpha_dot
)
return CL, CD, CM
def _calculate_aero_lat_forces_moments_coeffs(self, alpha, beta, V, state, controls):
delta_aile = controls.delta_aileron # deg
delta_rud = controls.delta_rudder # deg
delta_elev = controls.delta_elevator # deg
b = self.span
p, q, r = state.omega.T
D_alpha = alpha - self.alpha_0
# Recompute CL\
state_dot = self.state_dot # trick to avoid having to extend state space
        if self.state_dot is None:
alpha_dot = 0.
else:
alpha_dot = (self.state_dot.w * state.u - self.state_dot.u * state.w) / (state.u**2 + state.w**2)
# CL = self.CL
CL = (
self.CL_0 +
self.CL_alpha * D_alpha +
self.CL_delta_elev * delta_elev +
self.CL_q * q * self.chord/(2*V) +
self.CL_alpha_dot * alpha_dot
)
CY = (
self.CY_beta * beta +
self.CY_delta_rud * delta_rud +
b/(2 * V) * (self.CY_p * p + self.CY_r * r)
)
Cl = (
self.Cl_beta * beta +
self.Cl_delta_aile * delta_aile +
self.Cl_delta_rud * delta_rud +
b/(2 * V) * (self.Cl_p * p + self.Cl_r * r)
)
CN = (
self.CN_beta * beta +
self.CN_delta_aile * delta_aile +
self.CN_delta_rud * delta_rud +
b/(2 * V) * (self.CN_p * p + self.CN_r * r)
)
return CY, Cl, CN
def _calculate_thrust_forces_moments(self, TAS, conditions, controls):
return 0
def _calculate_aero_forces_moments(self, conditions, state, controls):
q_inf = conditions.q_inf
V = conditions.TAS
alpha = conditions.alpha
beta = conditions.beta
Sw = self.Sw
c = self.chord
b = self.span
CL, CD, Cm = self._calculate_aero_lon_forces_moments_coeffs(alpha, V, state, controls)
CY, Cl, Cn = self._calculate_aero_lat_forces_moments_coeffs(alpha, beta, V, state, controls)
L = q_inf * Sw * CL
D = q_inf * Sw * CD
Y = q_inf * Sw * CY
l = q_inf * Sw * b * Cl
m = q_inf * Sw * c * Cm
n = q_inf * Sw * b * Cn
return L, D, Y, l, m, n
```
#### File: pyfme/environment/wind.py
```python
import numpy as np
class NoWind(object):
def horizon(self, state):
# Wind velocity: FROM North to South, FROM East to West,
return np.zeros((state.N, 3), dtype=float)
def body(self, state):
# Wind velocity in the UPSIDE direction
return np.zeros((state.N, 3), dtype=float)
```
#### File: pyfme/utils/input_generator.py
```python
from abc import abstractmethod
from numpy import vectorize, float64
from numpy import sin, pi
import numpy as np
from scipy.signal import chirp
from scipy.interpolate import UnivariateSpline as uspline
def vectorize_float(method):
vect_method = vectorize(method, otypes=[float64])
def wrapper(self, *args, **kwargs):
return vect_method(self, *args, **kwargs)
return wrapper
# TODO: documentation
class Control(object):
@abstractmethod
def _fun(self, t):
raise NotImplementedError
def __call__(self, t):
r = self._fun(t)
return np.squeeze(r)
def __add__(self, other):
control = Control()
control._fun = lambda t: self(t) + other(t)
control._vec_fun = vectorize(control._fun, otypes=[float64])
return control
def __sub__(self, other):
control = Control()
control._fun = lambda t: self(t) - other(t)
control._vec_fun = vectorize(control._fun, otypes=[float64])
return control
def __mul__(self, other):
control = Control()
control._fun = lambda t: self(t) * other(t)
control._vec_fun = vectorize(control._fun, otypes=[float64])
return control
class Constant(Control):
def __init__(self, offset=0):
self.offset = offset
def _fun(self, t):
return np.ones_like(t)*self.offset
class Chirp(Control):
def __init__(self, amplitude, length, initial_freq, final_freq):
n_points = length/final_freq * 100 # 100 points per period
self.C = lambda t: amplitude * chirp(t, initial_freq, length, final_freq)
def _fun(self, t):
return self.C(t)
class Step(Control):
def __init__(self, t_init, T, A, offset=0):
self.t_init = t_init
self.T = T
self.A = A
self.offset = offset
self.t_fin = self.t_init + self.T
def _fun(self, t):
value = self.offset
value = value + self.A*((self.t_init <= t) * (t <= self.t_fin))
return value
class Doublet(Control):
def __init__(self, t_init, T, A, offset=0):
self.t_init = t_init
self.T = T
self.A = A
self.offset = offset
self.t_fin1 = self.t_init + self.T / 2
self.t_fin2 = self.t_init + self.T
def _fun(self, t):
value = self.offset + self.A / 2*((self.t_init <= t) * (t < self.t_fin1))\
- self.A / 2*((self.t_fin1 < t) * (t <= self.t_fin2))
return value
class Ramp(Control):
def __init__(self, t_init, T, A, offset=0):
self.t_init = t_init
self.T = T
self.A = A
self.offset = offset
self.slope = self.A / self.T
self.t_fin = self.t_init + self.T
def _fun(self, t):
value = self.offset + self.slope * (t - self.t_init) * \
((self.t_init <= t) * (t <= self.t_fin))
return value
class Harmonic(Control):
def __init__(self, t_init, T, A, freq, phase=0, offset=0):
super().__init__()
self.t_init = t_init
self.t_fin = t_init + T
self.A = A
self.freq = freq
self.phase = phase
self.offset = offset
def _fun(self, t):
value = self.offset + (self.A/2 * sin(2 * pi * self.freq * (t - self.t_init) +
self.phase)) * ((self.t_init <= t) * (t <= self.t_fin))
return value
```
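The `Control` objects above are callable on scalar or array times and compose with `+`, `-`, and `*`, so control time histories can be built by combining them. A short usage sketch (import path inferred from the package layout):

```python
# Usage sketch: a trimmed elevator input with a doublet starting at t = 1 s.
import numpy as np

from pyfme.utils.input_generator import Constant, Doublet

t = np.linspace(0, 10, 1001)
delta_elev = Constant(offset=0.05) + Doublet(t_init=1.0, T=2.0, A=0.1)
values = delta_elev(t)  # offset outside [1 s, 3 s], offset +/- A/2 inside the doublet
```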
#### File: utils/tests/test_anemometry_tascaseas.py
```python
from numpy.testing import (assert_almost_equal)
from pyfme.utils.anemometry import (tas2eas, eas2tas, cas2eas, eas2cas,
tas2cas, cas2tas)
from pyfme.models.constants import RHO_0, P_0
from pyfme.environment.atmosphere import ISA1976
atmosphere = ISA1976()
def test_tas2eas():
# sea level
tas = 275
eas_expected = 275
eas = tas2eas(tas, RHO_0)
assert_almost_equal(eas, eas_expected)
# Test at 11000m
_, _, rho, _ = atmosphere(11000)
tas = 275
eas_expected = 149.88797172756003
eas = tas2eas(tas, rho)
assert_almost_equal(eas, eas_expected)
def test_eas2tas():
# sea level
eas = 149.88797172756003
tas_expected = 149.88797172756003
tas = eas2tas(eas, RHO_0)
assert_almost_equal(tas, tas_expected)
# Test at 11000m
_, _, rho, _ = atmosphere(11000)
eas = 149.88797172756003
tas_expected = 275
tas = eas2tas(eas, rho)
assert_almost_equal(tas, tas_expected)
def test_tas2cas():
# sea level
tas = 275
cas_expected = 275
cas = tas2cas(tas, P_0, RHO_0)
assert_almost_equal(cas, cas_expected)
# Test at 11000m
_, p, rho, _ = atmosphere(11000)
tas = 275
cas_expected = 162.03569680495048
cas = tas2cas(tas, p, rho)
assert_almost_equal(cas, cas_expected)
def test_cas2tas():
# sea level
cas = 275
tas_expected = 275
tas = cas2tas(cas, P_0, RHO_0)
assert_almost_equal(tas, tas_expected)
# Test at 11000m
_, p, rho, _ = atmosphere(11000)
cas = 162.03569680495048
tas_expected = 275
tas = cas2tas(cas, p, rho)
assert_almost_equal(tas, tas_expected)
def test_cas2eas():
# sea level
cas = 275
eas_expected = 275
eas = cas2eas(cas, P_0, RHO_0)
assert_almost_equal(eas, eas_expected)
# Test at 11000m
_, p, rho, _ = atmosphere(11000)
cas = 162.03569680495048
eas_expected = 149.88797172756003
eas = cas2eas(cas, p, rho)
assert_almost_equal(eas, eas_expected)
def test_eas2cas():
# sea level
eas = 275
cas_expected = 275
cas = eas2cas(eas, P_0, RHO_0)
assert_almost_equal(cas, cas_expected)
# Test at 11000m
_, p, rho, _ = atmosphere(11000)
eas = 149.88797172756003
cas_expected = 162.03569680495048
cas = eas2cas(eas, p, rho)
assert_almost_equal(cas, cas_expected)
```
#### File: utils/tests/test_coordinates.py
```python
import pytest
import numpy as np
from numpy.testing import (assert_array_almost_equal)
from pyfme.utils.coordinates import (body2hor, hor2body,
check_theta_phi_psi_range,
hor2wind, wind2hor,
check_gamma_mu_chi_range,
body2wind, wind2body,
check_alpha_beta_range)
def test_check_theta_range():
wrong_values = (3 * np.pi, - 3 * np.pi)
for value in wrong_values:
# 0 is always a correct value
with pytest.raises(ValueError) as excinfo:
check_theta_phi_psi_range(value, 0, 0)
assert ("ValueError: Theta value is not inside correct range"
in excinfo.exconly())
def test_check_phi_range():
wrong_values = (3 * np.pi, - 3 * np.pi)
for value in wrong_values:
# 0 is always a correct value
with pytest.raises(ValueError) as excinfo:
check_theta_phi_psi_range(0, value, 0)
assert ("ValueError: Phi value is not inside correct range"
in excinfo.exconly())
def test_check_psi_range():
wrong_values = (3 * np.pi, - 3 * np.pi)
for value in wrong_values:
# 0 is always a correct value
with pytest.raises(ValueError) as excinfo:
check_theta_phi_psi_range(0, 0, value)
assert ("ValueError: Psi value is not inside correct range"
in excinfo.exconly())
def test_body2hor():
# Test with a pitch rotation
vector_body = np.array([1, 1, 1])
theta, phi, psi = np.deg2rad(45), 0, 0
vector_hor = body2hor(vector_body, theta, phi, psi)
vector_hor_expected = np.array([2 * 0.70710678118654757, 1, 0])
assert_array_almost_equal(vector_hor, vector_hor_expected)
# Test with a roll rotation
vector_body = np.array([1, 1, 1])
theta, phi, psi = 0, np.deg2rad(45), 0
vector_hor = body2hor(vector_body, theta, phi, psi)
vector_hor_expected = np.array([1, 0, 2 * 0.70710678118654757])
assert_array_almost_equal(vector_hor, vector_hor_expected)
# Test with a yaw rotation
vector_body = np.array([1, 1, 1])
theta, phi, psi = 0, 0, np.deg2rad(45)
vector_hor = body2hor(vector_body, theta, phi, psi)
vector_hor_expected = np.array([0, 2 * 0.70710678118654757, 1])
assert_array_almost_equal(vector_hor, vector_hor_expected)
def test_hor2body():
# Test with a pitch rotation
vector_hor = np.array([2 * 0.70710678118654757, 1, 0])
theta, phi, psi = np.deg2rad(45), 0, 0
vector_body_expected = np.array([1, 1, 1])
vector_body = hor2body(vector_hor, theta, phi, psi)
assert_array_almost_equal(vector_body, vector_body_expected)
# Test with a roll rotation
vector_hor = np.array([1, 0, 2 * 0.70710678118654757])
theta, phi, psi = 0, np.deg2rad(45), 0
vector_body_expected = np.array([1, 1, 1])
vector_body = hor2body(vector_hor, theta, phi, psi)
assert_array_almost_equal(vector_body, vector_body_expected)
# Test with a yaw rotation
vector_hor = np.array([0, 2 * 0.70710678118654757, 1])
theta, phi, psi = 0, 0, np.deg2rad(45)
vector_body_expected = np.array([1, 1, 1])
vector_body = hor2body(vector_hor, theta, phi, psi)
assert_array_almost_equal(vector_body, vector_body_expected)
def test_check_gamma_mu_chi_range():
wrong_values = (3 * np.pi, - 3 * np.pi)
for value in wrong_values:
# 0 is always a correct value
angles = [0, 0, 0]
for ii in range(3):
angles[ii] = value
with pytest.raises(ValueError):
check_gamma_mu_chi_range(*angles)
def test_check_gamma_range():
wrong_values = (3 * np.pi, - 3 * np.pi)
for value in wrong_values:
# 0 is always a correct value
with pytest.raises(ValueError) as excinfo:
check_gamma_mu_chi_range(value, 0, 0)
assert ("ValueError: Gamma value is not inside correct range"
in excinfo.exconly())
def test_check_mu_range():
wrong_values = (3 * np.pi, - 3 * np.pi)
for value in wrong_values:
# 0 is always a correct value
with pytest.raises(ValueError) as excinfo:
check_gamma_mu_chi_range(0, value, 0)
assert ("ValueError: Mu value is not inside correct range"
in excinfo.exconly())
def test_check_chi_range():
wrong_values = (3 * np.pi, - 3 * np.pi)
for value in wrong_values:
# 0 is always a correct value
with pytest.raises(ValueError) as excinfo:
check_gamma_mu_chi_range(0, 0, value)
assert ("ValueError: Chi value is not inside correct range"
in excinfo.exconly())
def test_wind2hor():
# Test with a pitch rotation
vector_wind = np.array([1, 1, 1])
gamma, mu, chi = np.deg2rad(45), 0, 0
vector_hor = wind2hor(vector_wind, gamma, mu, chi)
vector_hor_expected = np.array([2 * 0.70710678118654757, 1, 0])
assert_array_almost_equal(vector_hor, vector_hor_expected)
# Test with a roll rotation
vector_wind = np.array([1, 1, 1])
gamma, mu, chi = 0, np.deg2rad(45), 0
vector_hor = wind2hor(vector_wind, gamma, mu, chi)
vector_hor_expected = np.array([1, 0, 2 * 0.70710678118654757])
assert_array_almost_equal(vector_hor, vector_hor_expected)
# Test with a yaw rotation
vector_wind = np.array([1, 1, 1])
gamma, mu, chi = 0, 0, np.deg2rad(45)
vector_hor = wind2hor(vector_wind, gamma, mu, chi)
vector_hor_expected = np.array([0, 2 * 0.70710678118654757, 1])
assert_array_almost_equal(vector_hor, vector_hor_expected)
def test_hor2wind():
# Test with a pitch rotation
vector_hor = np.array([2 * 0.70710678118654757, 1, 0])
gamma, mu, chi = np.deg2rad(45), 0, 0
vector_wind_expected = np.array([1, 1, 1])
vector_wind = hor2wind(vector_hor, gamma, mu, chi)
assert_array_almost_equal(vector_wind, vector_wind_expected)
# Test with a roll rotation
vector_hor = np.array([1, 0, 2 * 0.70710678118654757])
gamma, mu, chi = 0, np.deg2rad(45), 0
vector_wind_expected = np.array([1, 1, 1])
vector_wind = hor2wind(vector_hor, gamma, mu, chi)
assert_array_almost_equal(vector_wind, vector_wind_expected)
# Test with a yaw rotation
vector_hor = np.array([0, 2 * 0.70710678118654757, 1])
gamma, mu, chi = 0, 0, np.deg2rad(45)
vector_wind_expected = np.array([1, 1, 1])
vector_wind = hor2wind(vector_hor, gamma, mu, chi)
assert_array_almost_equal(vector_wind, vector_wind_expected)
def test_check_alpha_beta_range():
wrong_values = (3 * np.pi, - 3 * np.pi)
for value in wrong_values:
# 0 is always a correct value
angles = [0, 0]
for ii in range(2):
angles[ii] = value
with pytest.raises(ValueError):
check_alpha_beta_range(*angles)
def test_check_alpha_range():
wrong_values = (3 * np.pi, - 3 * np.pi)
for value in wrong_values:
# 0 is always a correct value
with pytest.raises(ValueError) as excinfo:
check_alpha_beta_range(value, 0)
assert ("ValueError: Alpha value is not inside correct range"
in excinfo.exconly())
def test_check_beta_range():
wrong_values = (3 * np.pi, - 3 * np.pi)
for value in wrong_values:
# 0 is always a correct value
with pytest.raises(ValueError) as excinfo:
check_alpha_beta_range(0, value)
assert ("ValueError: Beta value is not inside correct range"
in excinfo.exconly())
def test_wind2body():
# Test with an increment of the angle of attack
vector_wind = np.array([1, 1, 1])
alpha, beta = np.deg2rad(45), 0
vector_body = wind2body(vector_wind, alpha, beta)
vector_body_expected = np.array([0, 1, 2 * 0.70710678118654757])
assert_array_almost_equal(vector_body, vector_body_expected)
# Test with an increment of the sideslip angle
vector_wind = np.array([1, 1, 1])
alpha, beta = 0, np.deg2rad(45)
vector_body = wind2body(vector_wind, alpha, beta)
vector_body_expected = np.array([0, 2 * 0.70710678118654757, 1])
assert_array_almost_equal(vector_body, vector_body_expected)
def test_body2wind():
# Test with an increment of the angle of attack
vector_body = np.array([0, 1, 2 * 0.70710678118654757])
alpha, beta = np.deg2rad(45), 0
vector_wind = body2wind(vector_body, alpha, beta)
vector_wind_expected = np.array([1, 1, 1])
assert_array_almost_equal(vector_wind, vector_wind_expected)
# Test with an increment of the sideslip angle
vector_body = np.array([0, 2 * 0.70710678118654757, 1])
alpha, beta = 0, np.deg2rad(45)
vector_wind = body2wind(vector_body, alpha, beta)
vector_wind_expected = np.array([1, 1, 1])
assert_array_almost_equal(vector_wind, vector_wind_expected)
```
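The rotation helpers exercised above are mutually inverse, so a quick round-trip check is a convenient sanity test outside the suite. A minimal sketch, assuming `pyfme` is installed and reusing the same function signatures as the tests:
```python
import numpy as np
from pyfme.utils.coordinates import body2hor, hor2body

# Rotate a body-axes vector into the local-horizon frame and back; the round
# trip should recover the original vector for any set of Euler angles.
v_body = np.array([1.0, 2.0, 3.0])
theta, phi, psi = np.deg2rad([5.0, 10.0, 15.0])
v_hor = body2hor(v_body, theta, phi, psi)
v_back = hor2body(v_hor, theta, phi, psi)
np.testing.assert_array_almost_equal(v_back, v_body)
```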
|
{
"source": "jdebloat/jdebloat",
"score": 2
}
|
#### File: jdebloat/scripts/benchmark.py
```python
import sys
import json
import tempfile
import re
import shutil
import csv
import os
import zipfile
from subprocess import check_output, run, CalledProcessError, DEVNULL
from pathlib import Path
from contextlib import contextmanager
# https://stackoverflow.com/questions/431684/how-do-i-change-directory-cd-in-python
@contextmanager
def changedir(dir):
prevdir = os.getcwd()
try:
os.chdir(os.path.expanduser(str(dir)))
yield
finally:
os.chdir(prevdir)
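# A minimal usage sketch for `changedir` (hypothetical path): the working
# directory is changed only for the duration of the `with` block and is
# restored afterwards, even if the body raises.
#
#     with changedir("~/some/project"):
#         print(os.getcwd())   # now inside ~/some/project
#     # back in the previous working directory here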
def read(*args, **kwargs):
try:
return check_output(args, universal_newlines=True, **kwargs).splitlines()
except CalledProcessError as e:
        print('Failed while running:', ' '.join('{!r}'.format(c) for c in args), file=sys.stderr)
raise
def git(*cmd, work_folder="."):
args = []
args += ['-C', str(work_folder)]
args += [str(c) for c in cmd]
return read("git", *args)
def extract_gitinfo(benchmark):
with changedir(benchmark):
url = git("remote", "get-url", "origin")[0]
rev = git("rev-list", "-n", 1, "HEAD")[0]
return { "id": benchmark.name, "url": url, "rev": rev }
def build(benchmark):
with changedir(benchmark):
run([ "mvn",
"-Dmaven.repo.local=libs",
"install",
"--batch-mode",
"-fn"], stdout=DEVNULL)
def extract_classpath(benchmark, scope):
with changedir(benchmark):
lines = read("mvn", "dependency:build-classpath",
"-Dmaven.repo.local=libs",
"-DincludeScope={}".format(scope),
"--batch-mode")
classpath = []
for line in lines:
if line.startswith("[INFO]"): continue
for x in line.strip().split(":"):
if not x: continue
l = [str(Path.cwd()), x]
prefix = os.path.commonprefix(l)
if str(prefix) != str(Path.cwd()): continue
classpath.append(x)
return classpath
def extract_jar(jar, tofolder):
Path(tofolder).mkdir(parents=True, exist_ok=True)
run(["unzip", "-qo", str(jar), "-d", str(tofolder)])
def copy_files(src, dst):
for file in src.rglob("*"):
if not file.is_file(): continue
dst_file = dst / file.relative_to(src)
dst_file.parent.mkdir(parents=True, exist_ok=True)
shutil.rmtree(str(dst_file), ignore_errors=True)
shutil.copyfile(str(file), str(dst_file))
def make_jar(srcs, libs, jar):
with tempfile.TemporaryDirectory() as stage_folder:
for lib in libs:
extract_jar(lib, stage_folder)
for src in srcs:
copy_files(src, stage_folder)
jar.parent.mkdir(parents=True, exist_ok=True)
absjar = jar.parent.resolve() / jar.name
with changedir(stage_folder):
for a in Path(stage_folder).glob("**/*.SF"):
a.unlink()
for a in Path(stage_folder).glob("**/*.RSA"):
a.unlink()
for a in Path(stage_folder).glob("**/*.DES"):
a.unlink()
run(["jar", "cf", str(absjar), "."])
def extract_testclasses(target):
expr = re.compile(r'.*/surefire-reports/TEST-(.*)\.xml$')
test_classes = []
for x in (target / "surefire-reports").rglob("*.xml"):
test_classes.append(expr.match(str(x)).group(1))
return test_classes
def main(argv):
excludedtest, benchmark, extract = [Path(a) for a in argv[1:]]
excluded = set(excludedtest.read_text().splitlines())
shutil.rmtree(str(extract), ignore_errors=True)
extract.mkdir(parents=True, exist_ok=True)
if os.path.exists(str(benchmark / ".git")):
dct = extract_gitinfo(benchmark)
else:
dct = {}
print("Looking at: " + str(benchmark))
if not (benchmark / "target").exists():
print("Building...")
build(benchmark)
targets = list(benchmark.glob("*/target/classes"))
test_targets = list(benchmark.glob("*/target/test-classes"))
resources = list(benchmark.glob("*/src/test/resources"))
if (benchmark / "target").exists():
targets.append(benchmark / "target" / "classes")
test_targets.append(benchmark / "target" / "test-classes")
resources.append(benchmark / "src" / "test" / "resources")
test_classes = set.union(*[set(extract_testclasses(t.parent)) for t in test_targets]) - excluded
(extract / "test.classes.txt").write_text('\n'.join(test_classes) + '\n')
make_jar(targets, set(), extract / "app.jar")
compile_cp = set(extract_classpath(benchmark, "compile"))
make_jar([], compile_cp, extract / "lib.jar")
test_cp = set(extract_classpath(benchmark, "test"))
make_jar(test_targets, test_cp - compile_cp, extract / "test.jar")
for t in resources:
x = t.parent.parent
copy_files(t, extract / "src" / t.relative_to(x))
dct["classpath"] = {
"lib": sorted(compile_cp),
"test": sorted(test_cp - compile_cp)
}
dct["test"] = sorted(test_classes)
with open(str(extract / "extract.json"), "w") as w:
json.dump(dct, w)
if __name__ == "__main__":
main(sys.argv)
```
|
{
"source": "jdebloat/jreduce",
"score": 2
}
|
#### File: jreduce/scripts/reduceall.py
```python
import os
import csv
import sys
import subprocess
from multiprocessing import Pool
from subprocess import DEVNULL, call, check_call, Popen, PIPE, STDOUT, check_output, TimeoutExpired
from shutil import rmtree
from pathlib import Path
import logging
logger = logging.getLogger('reduceall')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stderr)
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('(%(asctime)s)-%(processName)s: %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
info = logger.info
debug = logger.debug
reductors = [ "ddmin:graph", "gbired", "ddmin:verify", "ddmin:graph-sort"]
def run_jreduce(reductor, classes, work_dir, prop, max_iterations=1000):
info("Running {}".format(work_dir))
try:
call(
["jreduce", "-v",
"--cp", str(classes),
"-p", "class",
"-r", reductor,
"--work-dir", str(work_dir),
"-m", str(max_iterations),
] + prop, stdout=DEVNULL, stderr=DEVNULL, timeout=3600)
except Exception as e:
info("{}: {}".format(e, work_dir))
info("Done running {}".format(work_dir))
def run_property(prop, output_folder):
stdout = output_folder / "stdout.log"
with open(str(stdout), "wb") as out:
p = Popen(["bash", str(prop.resolve()), "classes"],
stderr=STDOUT, stdout=PIPE, cwd=str(output_folder))
for line in p.stdout:
out.write(line)
n = p.wait()
if n != 0:
info("Property did not succeed; return code = {}".format(n))
else:
info("Property succeded, see stdout: {}".format(stdout))
def compute_reduction(jar, prop, output_folder, pool):
classes = output_folder / "classes"
if not classes.exists():
classes.mkdir(parents=True)
if not check_call(["unzip", str(jar.resolve())], stderr=DEVNULL, stdout=DEVNULL, cwd=str(classes)) == 0:
info("Could not unzip {}, continuing..".format(jar.resolve()))
return
if not (output_folder / "stdout.log").exists():
pool.apply_async(run_property, (prop, output_folder))
for red in reductors:
folder = output_folder / red
if not folder.exists():
pool.apply_async(run_jreduce, (red, classes, folder, [str(prop)]))
def analyse_jar(jar, prop, basefolder, pool):
name = jar.stem
prop_name = prop.stem
output_folder = basefolder / prop_name / name
compute_reduction(jar, prop, output_folder, pool)
dct = { "name" : name, "prop": prop_name}
for red in reductors:
iters = 0
final = -1
score = 0
best_score = 0
try:
with open(str(output_folder / red / "progress.csv")) as c:
for line in csv.DictReader(c):
if "class-closure-" + red in line["Step"]:
iters += 1
if "after-class-closure" in line["Step"]:
final = line["Classes"]
diffname = output_folder / red / "after-class-closure.diff"
call(
"diff --left-column --side-by-side {} {} > {}"
.format(output_folder / "stdout.log",
output_folder / red / line["Step"] / "stderr.log",
diffname),
shell=True)
with open(str(diffname)) as r:
for line in r:
best_score +=1
if line.endswith("(\n") or line.endswith("<\n"):
score +=1
except:
pass
dct[red + " iter"] = iters
dct[red + " size"] = final
dct[red + " change"] = 0 if best_score == 0 else score / best_score
classes = list((output_folder / "classes").glob("**/*.class"))
dct["classes"] = len(classes)
return dct
if __name__ == "__main__":
cmd, basefolder, *args = sys.argv
i = args.index("--")
props = [Path(prop) for prop in args[:i]]
jars = [Path(jar) for jar in args[i+1:]]
writer = csv.DictWriter(sys.stdout,
["name", "prop", "classes"] +
sum([[red + " " + kind for red in reductors ] for kind in ["iter", "size", "change"]], []))
writer.writeheader()
with Pool() as p:
for jar in jars:
for prop in props:
r = analyse_jar(jar, prop, Path(basefolder), p)
if r:
writer.writerow(r)
p.close()
p.join()
```
|
{
"source": "jdebp/neovim",
"score": 2
}
|
#### File: neovim/scripts/gen_vimdoc.py
```python
import argparse
import os
import re
import sys
import shutil
import textwrap
import subprocess
import collections
import msgpack
import logging
from xml.dom import minidom
MIN_PYTHON_VERSION = (3, 5)
if sys.version_info < MIN_PYTHON_VERSION:
print("requires Python {}.{}+".format(*MIN_PYTHON_VERSION))
sys.exit(1)
# DEBUG = ('DEBUG' in os.environ)
INCLUDE_C_DECL = ('INCLUDE_C_DECL' in os.environ)
INCLUDE_DEPRECATED = ('INCLUDE_DEPRECATED' in os.environ)
log = logging.getLogger(__name__)
LOG_LEVELS = {
logging.getLevelName(level): level for level in [
logging.DEBUG, logging.INFO, logging.ERROR
]
}
fmt_vimhelp = False # HACK
text_width = 78
script_path = os.path.abspath(__file__)
base_dir = os.path.dirname(os.path.dirname(script_path))
out_dir = os.path.join(base_dir, 'tmp-{target}-doc')
filter_cmd = '%s %s' % (sys.executable, script_path)
seen_funcs = set()
msgs = [] # Messages to show on exit.
lua2dox_filter = os.path.join(base_dir, 'scripts', 'lua2dox_filter')
CONFIG = {
'api': {
'mode': 'c',
'filename': 'api.txt',
# String used to find the start of the generated part of the doc.
'section_start_token': '*api-global*',
# Section ordering.
'section_order': [
'vim.c',
'buffer.c',
'window.c',
'win_config.c',
'tabpage.c',
'ui.c',
'extmark.c',
],
# List of files/directories for doxygen to read, separated by blanks
'files': os.path.join(base_dir, 'src/nvim/api'),
# file patterns used by doxygen
'file_patterns': '*.h *.c',
# Only function with this prefix are considered
'fn_name_prefix': 'nvim_',
# Section name overrides.
'section_name': {
'vim.c': 'Global',
},
# For generated section names.
'section_fmt': lambda name: f'{name} Functions',
# Section helptag.
'helptag_fmt': lambda name: f'*api-{name.lower()}*',
# Per-function helptag.
'fn_helptag_fmt': lambda fstem, name: f'*{name}()*',
# Module name overrides (for Lua).
'module_override': {},
# Append the docs for these modules, do not start a new section.
'append_only': [],
},
'lua': {
'mode': 'lua',
'filename': 'lua.txt',
'section_start_token': '*lua-vim*',
'section_order': [
'vim.lua',
'shared.lua',
'uri.lua',
'ui.lua',
],
'files': ' '.join([
os.path.join(base_dir, 'src/nvim/lua/vim.lua'),
os.path.join(base_dir, 'runtime/lua/vim/shared.lua'),
os.path.join(base_dir, 'runtime/lua/vim/uri.lua'),
os.path.join(base_dir, 'runtime/lua/vim/ui.lua'),
]),
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {
'lsp.lua': 'core',
},
'section_fmt': lambda name: f'Lua module: {name.lower()}',
'helptag_fmt': lambda name: f'*lua-{name.lower()}*',
'fn_helptag_fmt': lambda fstem, name: f'*{fstem}.{name}()*',
'module_override': {
# `shared` functions are exposed on the `vim` module.
'shared': 'vim',
'uri': 'vim',
'ui': 'vim.ui',
},
'append_only': [
'shared.lua',
],
},
'lsp': {
'mode': 'lua',
'filename': 'lsp.txt',
'section_start_token': '*lsp-core*',
'section_order': [
'lsp.lua',
'buf.lua',
'diagnostic.lua',
'codelens.lua',
'handlers.lua',
'util.lua',
'log.lua',
'rpc.lua',
'protocol.lua',
],
'files': ' '.join([
os.path.join(base_dir, 'runtime/lua/vim/lsp'),
os.path.join(base_dir, 'runtime/lua/vim/lsp.lua'),
]),
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {'lsp.lua': 'lsp'},
'section_fmt': lambda name: (
'Lua module: vim.lsp'
if name.lower() == 'lsp'
else f'Lua module: vim.lsp.{name.lower()}'),
'helptag_fmt': lambda name: (
'*lsp-core*'
if name.lower() == 'lsp'
else f'*lsp-{name.lower()}*'),
'fn_helptag_fmt': lambda fstem, name: (
f'*vim.lsp.{name}()*'
if fstem == 'lsp' and name != 'client'
else (
'*vim.lsp.client*'
# HACK. TODO(justinmk): class/structure support in lua2dox
if 'lsp.client' == f'{fstem}.{name}'
else f'*vim.lsp.{fstem}.{name}()*')),
'module_override': {},
'append_only': [],
},
'diagnostic': {
'mode': 'lua',
'filename': 'diagnostic.txt',
'section_start_token': '*diagnostic-api*',
'section_order': [
'diagnostic.lua',
],
'files': os.path.join(base_dir, 'runtime/lua/vim/diagnostic.lua'),
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {'diagnostic.lua': 'diagnostic'},
'section_fmt': lambda _: 'Lua module: vim.diagnostic',
'helptag_fmt': lambda _: '*diagnostic-api*',
'fn_helptag_fmt': lambda fstem, name: f'*vim.{fstem}.{name}()*',
'module_override': {},
'append_only': [],
},
'treesitter': {
'mode': 'lua',
'filename': 'treesitter.txt',
'section_start_token': '*lua-treesitter-core*',
'section_order': [
'treesitter.lua',
'language.lua',
'query.lua',
'highlighter.lua',
'languagetree.lua',
],
'files': ' '.join([
os.path.join(base_dir, 'runtime/lua/vim/treesitter.lua'),
os.path.join(base_dir, 'runtime/lua/vim/treesitter/'),
]),
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {},
'section_fmt': lambda name: (
'Lua module: vim.treesitter'
if name.lower() == 'treesitter'
else f'Lua module: vim.treesitter.{name.lower()}'),
'helptag_fmt': lambda name: (
'*lua-treesitter-core*'
if name.lower() == 'treesitter'
else f'*treesitter-{name.lower()}*'),
'fn_helptag_fmt': lambda fstem, name: (
f'*{name}()*'
if name != 'new'
else f'*{fstem}.{name}()*'),
# 'fn_helptag_fmt': lambda fstem, name: (
# f'*vim.treesitter.{name}()*'
# if fstem == 'treesitter'
# else (
# '*vim.lsp.client*'
# # HACK. TODO(justinmk): class/structure support in lua2dox
# if 'lsp.client' == f'{fstem}.{name}'
# else f'*vim.lsp.{fstem}.{name}()*')),
'module_override': {},
'append_only': [],
}
}
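# Worked example of the formatting hooks above (values follow directly from the
# lambdas; the section name is illustrative): for the 'api' target and a section
# named "Buffer",
#   section_fmt("Buffer")  -> "Buffer Functions"
#   helptag_fmt("Buffer")  -> "*api-buffer*"
#   fn_helptag_fmt("buffer", "nvim_buf_get_lines") -> "*nvim_buf_get_lines()*"
# while for the 'lua' target, helptag_fmt("uri") -> "*lua-uri*".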
param_exclude = (
'channel_id',
)
# Annotations are displayed as line items after API function descriptions.
annotation_map = {
'FUNC_API_FAST': '{fast}',
'FUNC_API_CHECK_TEXTLOCK': 'not allowed when |textlock| is active',
}
# Tracks `xrefsect` titles. As of this writing, used only for separating
# deprecated functions.
xrefs = set()
# Raises an error with details about `o` if `cond()` is callable and returns
# True, or if `cond` is truthy, or if `cond` is a value contained in `o`.
def debug_this(o, cond=True):
name = ''
if not isinstance(o, str):
try:
name = o.nodeName
o = o.toprettyxml(indent=' ', newl='\n')
except Exception:
pass
if ((callable(cond) and cond())
or (not callable(cond) and cond)
or (not callable(cond) and cond in o)):
raise RuntimeError('xxx: {}\n{}'.format(name, o))
# Appends a message to a list which will be printed on exit.
def msg(s):
msgs.append(s)
# Print all collected messages.
def msg_report():
for m in msgs:
print(f' {m}')
# Print collected messages, then throw an exception.
def fail(s):
msg_report()
raise RuntimeError(s)
def find_first(parent, name):
"""Finds the first matching node within parent."""
sub = parent.getElementsByTagName(name)
if not sub:
return None
return sub[0]
def iter_children(parent, name):
"""Yields matching child nodes within parent."""
for child in parent.childNodes:
if child.nodeType == child.ELEMENT_NODE and child.nodeName == name:
yield child
def get_child(parent, name):
"""Gets the first matching child node."""
for child in iter_children(parent, name):
return child
return None
def self_or_child(n):
"""Gets the first child node, or self."""
if len(n.childNodes) == 0:
return n
return n.childNodes[0]
def clean_text(text):
"""Cleans text.
Only cleans superfluous whitespace at the moment.
"""
return ' '.join(text.split()).strip()
def clean_lines(text):
"""Removes superfluous lines.
    The beginning and end of the string are trimmed. Empty lines are collapsed.
"""
return re.sub(r'\A\n\s*\n*|\n\s*\n*\Z', '', re.sub(r'(\n\s*\n+)+', '\n\n', text))
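# Example (computed from the two regexes above):
#   clean_lines("\n\na\n\n\nb\n\n")  ->  "a\n\nb"
# i.e. leading and trailing blank lines are stripped and interior runs of
# blank lines collapse to a single blank line.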
def is_blank(text):
return '' == clean_lines(text)
def get_text(n, preformatted=False):
"""Recursively concatenates all text in a node tree."""
text = ''
if n.nodeType == n.TEXT_NODE:
return n.data
if n.nodeName == 'computeroutput':
for node in n.childNodes:
text += get_text(node)
return '`{}` '.format(text)
for node in n.childNodes:
if node.nodeType == node.TEXT_NODE:
text += node.data if preformatted else clean_text(node.data)
elif node.nodeType == node.ELEMENT_NODE:
text += ' ' + get_text(node, preformatted)
return text
# Gets the length of the last line in `text`, excluding newline ("\n") char.
def len_lastline(text):
lastnl = text.rfind('\n')
if -1 == lastnl:
return len(text)
if '\n' == text[-1]:
return lastnl - (1 + text.rfind('\n', 0, lastnl))
return len(text) - (1 + lastnl)
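# Examples (computed from the definition above):
#   len_lastline("ab\ncd")  -> 2   # length of "cd"
#   len_lastline("ab\n")    -> 2   # length of "ab"; the trailing newline is ignored
#   len_lastline("abc")     -> 3   # no newline at all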
def len_lastline_withoutindent(text, indent):
n = len_lastline(text)
return (n - len(indent)) if n > len(indent) else 0
# Returns True if node `n` contains only inline (not block-level) elements.
def is_inline(n):
# if len(n.childNodes) == 0:
# return n.nodeType == n.TEXT_NODE or n.nodeName == 'computeroutput'
for c in n.childNodes:
if c.nodeType != c.TEXT_NODE and c.nodeName != 'computeroutput':
return False
if not is_inline(c):
return False
return True
def doc_wrap(text, prefix='', width=70, func=False, indent=None):
"""Wraps text to `width`.
First line is prefixed with `prefix`, subsequent lines are aligned.
If `func` is True, only wrap at commas.
"""
if not width:
# return prefix + text
return text
# Whitespace used to indent all lines except the first line.
indent = ' ' * len(prefix) if indent is None else indent
indent_only = (prefix == '' and indent is not None)
if func:
lines = [prefix]
for part in text.split(', '):
if part[-1] not in ');':
part += ', '
if len(lines[-1]) + len(part) > width:
lines.append(indent)
lines[-1] += part
return '\n'.join(x.rstrip() for x in lines).rstrip()
# XXX: Dummy prefix to force TextWrapper() to wrap the first line.
if indent_only:
prefix = indent
tw = textwrap.TextWrapper(break_long_words=False,
break_on_hyphens=False,
width=width,
initial_indent=prefix,
subsequent_indent=indent)
result = '\n'.join(tw.wrap(text.strip()))
# XXX: Remove the dummy prefix.
if indent_only:
result = result[len(indent):]
return result
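# Usage sketch (illustrative only): with func=True the text is broken only at
# ', ' boundaries, e.g.
#   doc_wrap('{buffer}, {start}, {end}, {strict_indexing}, {replacement})',
#            prefix='nvim_buf_set_lines(', width=40, func=True)
# keeps the prefix on the first line and indents each continuation line by the
# prefix length; with func=False the text is wrapped by textwrap at `width`.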
def max_name(names):
if len(names) == 0:
return 0
return max(len(name) for name in names)
def update_params_map(parent, ret_map, width=62):
"""Updates `ret_map` with name:desc key-value pairs extracted
from Doxygen XML node `parent`.
"""
params = collections.OrderedDict()
for node in parent.childNodes:
if node.nodeType == node.TEXT_NODE:
continue
name_node = find_first(node, 'parametername')
if name_node.getAttribute('direction') == 'out':
continue
name = get_text(name_node)
if name in param_exclude:
continue
params[name.strip()] = node
max_name_len = max_name(params.keys()) + 8
# `ret_map` is a name:desc map.
for name, node in params.items():
desc = ''
desc_node = get_child(node, 'parameterdescription')
if desc_node:
desc = fmt_node_as_vimhelp(
desc_node, width=width, indent=(' ' * max_name_len))
ret_map[name] = desc
return ret_map
def render_node(n, text, prefix='', indent='', width=62):
"""Renders a node as Vim help text, recursively traversing all descendants."""
global fmt_vimhelp
global has_seen_preformatted
def ind(s):
return s if fmt_vimhelp else ''
text = ''
# space_preceding = (len(text) > 0 and ' ' == text[-1][-1])
# text += (int(not space_preceding) * ' ')
if n.nodeName == 'preformatted':
o = get_text(n, preformatted=True)
ensure_nl = '' if o[-1] == '\n' else '\n'
text += '>{}{}\n<'.format(ensure_nl, o)
elif is_inline(n):
text = doc_wrap(get_text(n), indent=indent, width=width)
elif n.nodeName == 'verbatim':
# TODO: currently we don't use this. The "[verbatim]" hint is there as
# a reminder that we must decide how to format this if we do use it.
text += ' [verbatim] {}'.format(get_text(n))
elif n.nodeName == 'listitem':
for c in n.childNodes:
result = render_node(
c,
text,
indent=indent + (' ' * len(prefix)),
width=width
)
if is_blank(result):
continue
text += indent + prefix + result
elif n.nodeName in ('para', 'heading'):
for c in n.childNodes:
if (is_inline(c)
and '' != get_text(c).strip()
and text
and ' ' != text[-1]):
text += ' '
text += render_node(c, text, indent=indent, width=width)
elif n.nodeName == 'itemizedlist':
for c in n.childNodes:
text += '{}\n'.format(render_node(c, text, prefix='• ',
indent=indent, width=width))
elif n.nodeName == 'orderedlist':
i = 1
for c in n.childNodes:
if is_blank(get_text(c)):
text += '\n'
continue
text += '{}\n'.format(render_node(c, text, prefix='{}. '.format(i),
indent=indent, width=width))
i = i + 1
elif n.nodeName == 'simplesect' and 'note' == n.getAttribute('kind'):
text += '\nNote:\n '
for c in n.childNodes:
text += render_node(c, text, indent=' ', width=width)
text += '\n'
elif n.nodeName == 'simplesect' and 'warning' == n.getAttribute('kind'):
text += 'Warning:\n '
for c in n.childNodes:
text += render_node(c, text, indent=' ', width=width)
text += '\n'
elif (n.nodeName == 'simplesect'
and n.getAttribute('kind') in ('return', 'see')):
text += ind(' ')
for c in n.childNodes:
text += render_node(c, text, indent=' ', width=width)
elif n.nodeName == 'computeroutput':
return get_text(n)
else:
raise RuntimeError('unhandled node type: {}\n{}'.format(
n.nodeName, n.toprettyxml(indent=' ', newl='\n')))
return text
def para_as_map(parent, indent='', width=62):
"""Extracts a Doxygen XML <para> node to a map.
Keys:
'text': Text from this <para> element
'params': <parameterlist> map
'return': List of @return strings
'seealso': List of @see strings
'xrefs': ?
"""
chunks = {
'text': '',
'params': collections.OrderedDict(),
'return': [],
'seealso': [],
'xrefs': []
}
# Ordered dict of ordered lists.
groups = collections.OrderedDict([
('params', []),
('return', []),
('seealso', []),
('xrefs', []),
])
# Gather nodes into groups. Mostly this is because we want "parameterlist"
# nodes to appear together.
text = ''
kind = ''
last = ''
if is_inline(parent):
# Flatten inline text from a tree of non-block nodes.
text = doc_wrap(render_node(parent, ""), indent=indent, width=width)
else:
prev = None # Previous node
for child in parent.childNodes:
if child.nodeName == 'parameterlist':
groups['params'].append(child)
elif child.nodeName == 'xrefsect':
groups['xrefs'].append(child)
elif child.nodeName == 'simplesect':
last = kind
kind = child.getAttribute('kind')
if kind == 'return' or (kind == 'note' and last == 'return'):
groups['return'].append(child)
elif kind == 'see':
groups['seealso'].append(child)
elif kind in ('note', 'warning'):
text += render_node(child, text, indent=indent, width=width)
else:
raise RuntimeError('unhandled simplesect: {}\n{}'.format(
child.nodeName, child.toprettyxml(indent=' ', newl='\n')))
else:
if (prev is not None
and is_inline(self_or_child(prev))
and is_inline(self_or_child(child))
and '' != get_text(self_or_child(child)).strip()
and text
and ' ' != text[-1]):
text += ' '
text += render_node(child, text, indent=indent, width=width)
prev = child
chunks['text'] += text
# Generate map from the gathered items.
if len(groups['params']) > 0:
for child in groups['params']:
update_params_map(child, ret_map=chunks['params'], width=width)
for child in groups['return']:
chunks['return'].append(render_node(
child, '', indent=indent, width=width))
for child in groups['seealso']:
chunks['seealso'].append(render_node(
child, '', indent=indent, width=width))
for child in groups['xrefs']:
# XXX: Add a space (or any char) to `title` here, otherwise xrefs
# ("Deprecated" section) acts very weird...
title = get_text(get_child(child, 'xreftitle')) + ' '
xrefs.add(title)
xrefdesc = get_text(get_child(child, 'xrefdescription'))
chunks['xrefs'].append(doc_wrap(xrefdesc, prefix='{}: '.format(title),
width=width) + '\n')
return chunks
def fmt_node_as_vimhelp(parent, width=62, indent=''):
"""Renders (nested) Doxygen <para> nodes as Vim :help text.
NB: Blank lines in a docstring manifest as <para> tags.
"""
rendered_blocks = []
def fmt_param_doc(m):
"""Renders a params map as Vim :help text."""
max_name_len = max_name(m.keys()) + 4
out = ''
for name, desc in m.items():
name = ' {}'.format('{{{}}}'.format(name).ljust(max_name_len))
out += '{}{}\n'.format(name, desc)
return out.rstrip()
def has_nonexcluded_params(m):
"""Returns true if any of the given params has at least
one non-excluded item."""
if fmt_param_doc(m) != '':
return True
for child in parent.childNodes:
para = para_as_map(child, indent, width)
# Generate text from the gathered items.
chunks = [para['text']]
if len(para['params']) > 0 and has_nonexcluded_params(para['params']):
chunks.append('\nParameters: ~')
chunks.append(fmt_param_doc(para['params']))
if len(para['return']) > 0:
chunks.append('\nReturn: ~')
for s in para['return']:
chunks.append(s)
if len(para['seealso']) > 0:
chunks.append('\nSee also: ~')
for s in para['seealso']:
chunks.append(s)
for s in para['xrefs']:
chunks.append(s)
rendered_blocks.append(clean_lines('\n'.join(chunks).strip()))
rendered_blocks.append('')
return clean_lines('\n'.join(rendered_blocks).strip())
def extract_from_xml(filename, target, width):
"""Extracts Doxygen info as maps without formatting the text.
Returns two maps:
1. Functions
2. Deprecated functions
The `fmt_vimhelp` global controls some special cases for use by
fmt_doxygen_xml_as_vimhelp(). (TODO: ugly :)
"""
global xrefs
global fmt_vimhelp
xrefs.clear()
fns = {} # Map of func_name:docstring.
deprecated_fns = {} # Map of func_name:docstring.
dom = minidom.parse(filename)
compoundname = get_text(dom.getElementsByTagName('compoundname')[0])
for member in dom.getElementsByTagName('memberdef'):
if member.getAttribute('static') == 'yes' or \
member.getAttribute('kind') != 'function' or \
member.getAttribute('prot') == 'private' or \
get_text(get_child(member, 'name')).startswith('_'):
continue
loc = find_first(member, 'location')
if 'private' in loc.getAttribute('file'):
continue
return_type = get_text(get_child(member, 'type'))
if return_type == '':
continue
if return_type.startswith(('ArrayOf', 'DictionaryOf')):
parts = return_type.strip('_').split('_')
return_type = '{}({})'.format(parts[0], ', '.join(parts[1:]))
name = get_text(get_child(member, 'name'))
annotations = get_text(get_child(member, 'argsstring'))
if annotations and ')' in annotations:
annotations = annotations.rsplit(')', 1)[-1].strip()
# XXX: (doxygen 1.8.11) 'argsstring' only includes attributes of
# non-void functions. Special-case void functions here.
if name == 'nvim_get_mode' and len(annotations) == 0:
annotations += 'FUNC_API_FAST'
annotations = filter(None, map(lambda x: annotation_map.get(x),
annotations.split()))
params = []
type_length = 0
for param in iter_children(member, 'param'):
param_type = get_text(get_child(param, 'type')).strip()
param_name = ''
declname = get_child(param, 'declname')
if declname:
param_name = get_text(declname).strip()
elif CONFIG[target]['mode'] == 'lua':
# XXX: this is what lua2dox gives us...
param_name = param_type
param_type = ''
if param_name in param_exclude:
continue
if fmt_vimhelp and param_type.endswith('*'):
param_type = param_type.strip('* ')
param_name = '*' + param_name
type_length = max(type_length, len(param_type))
params.append((param_type, param_name))
        # Handle object-oriented style functions: when the return type's
        # second token names a parent and the parameters include "self",
        # qualify the function name as "<parent>:<name>".
if return_type.startswith('function') \
and len(return_type.split(' ')) >= 2 \
and any(x[1] == 'self' for x in params):
split_return = return_type.split(' ')
name = f'{split_return[1]}:{name}'
c_args = []
for param_type, param_name in params:
c_args.append((' ' if fmt_vimhelp else '') + (
'%s %s' % (param_type.ljust(type_length), param_name)).strip())
if not fmt_vimhelp:
pass
else:
fstem = '?'
if '.' in compoundname:
fstem = compoundname.split('.')[0]
fstem = CONFIG[target]['module_override'].get(fstem, fstem)
vimtag = CONFIG[target]['fn_helptag_fmt'](fstem, name)
prefix = '%s(' % name
suffix = '%s)' % ', '.join('{%s}' % a[1] for a in params
if a[0] not in ('void', 'Error'))
if not fmt_vimhelp:
c_decl = '%s %s(%s);' % (return_type, name, ', '.join(c_args))
signature = prefix + suffix
else:
c_decl = textwrap.indent('%s %s(\n%s\n);' % (return_type, name,
',\n'.join(c_args)),
' ')
# Minimum 8 chars between signature and vimtag
lhs = (width - 8) - len(vimtag)
if len(prefix) + len(suffix) > lhs:
signature = vimtag.rjust(width) + '\n'
signature += doc_wrap(suffix, width=width, prefix=prefix,
func=True)
else:
signature = prefix + suffix
signature += vimtag.rjust(width - len(signature))
paras = []
brief_desc = find_first(member, 'briefdescription')
if brief_desc:
for child in brief_desc.childNodes:
paras.append(para_as_map(child))
desc = find_first(member, 'detaileddescription')
if desc:
for child in desc.childNodes:
paras.append(para_as_map(child))
log.debug(
textwrap.indent(
re.sub(r'\n\s*\n+', '\n',
desc.toprettyxml(indent=' ', newl='\n')), ' ' * 16))
fn = {
'annotations': list(annotations),
'signature': signature,
'parameters': params,
'parameters_doc': collections.OrderedDict(),
'doc': [],
'return': [],
'seealso': [],
}
if fmt_vimhelp:
fn['desc_node'] = desc # HACK :(
for m in paras:
if 'text' in m:
if not m['text'] == '':
fn['doc'].append(m['text'])
if 'params' in m:
# Merge OrderedDicts.
fn['parameters_doc'].update(m['params'])
if 'return' in m and len(m['return']) > 0:
fn['return'] += m['return']
if 'seealso' in m and len(m['seealso']) > 0:
fn['seealso'] += m['seealso']
if INCLUDE_C_DECL:
fn['c_decl'] = c_decl
if 'Deprecated' in str(xrefs):
deprecated_fns[name] = fn
elif name.startswith(CONFIG[target]['fn_name_prefix']):
fns[name] = fn
xrefs.clear()
fns = collections.OrderedDict(sorted(
fns.items(),
key=lambda key_item_tuple: key_item_tuple[0].lower()))
deprecated_fns = collections.OrderedDict(sorted(deprecated_fns.items()))
return (fns, deprecated_fns)
def fmt_doxygen_xml_as_vimhelp(filename, target):
"""Entrypoint for generating Vim :help from from Doxygen XML.
Returns 3 items:
1. Vim help text for functions found in `filename`.
2. Vim help text for deprecated functions.
"""
global fmt_vimhelp
fmt_vimhelp = True
fns_txt = {} # Map of func_name:vim-help-text.
deprecated_fns_txt = {} # Map of func_name:vim-help-text.
fns, _ = extract_from_xml(filename, target, width=text_width)
for name, fn in fns.items():
# Generate Vim :help for parameters.
if fn['desc_node']:
doc = fmt_node_as_vimhelp(fn['desc_node'])
if not doc:
doc = 'TODO: Documentation'
annotations = '\n'.join(fn['annotations'])
if annotations:
annotations = ('\n\nAttributes: ~\n' +
textwrap.indent(annotations, ' '))
i = doc.rfind('Parameters: ~')
if i == -1:
doc += annotations
else:
doc = doc[:i] + annotations + '\n\n' + doc[i:]
if INCLUDE_C_DECL:
doc += '\n\nC Declaration: ~\n>\n'
doc += fn['c_decl']
doc += '\n<'
func_doc = fn['signature'] + '\n'
func_doc += textwrap.indent(clean_lines(doc), ' ' * 16)
# Verbatim handling.
func_doc = re.sub(r'^\s+([<>])$', r'\1', func_doc, flags=re.M)
split_lines = func_doc.split('\n')
start = 0
while True:
try:
start = split_lines.index('>', start)
except ValueError:
break
try:
end = split_lines.index('<', start)
except ValueError:
break
split_lines[start + 1:end] = [
(' ' + x).rstrip()
for x in textwrap.dedent(
"\n".join(
split_lines[start+1:end]
)
).split("\n")
]
start = end
func_doc = "\n".join(split_lines)
if 'Deprecated' in xrefs:
deprecated_fns_txt[name] = func_doc
elif name.startswith(CONFIG[target]['fn_name_prefix']):
fns_txt[name] = func_doc
xrefs.clear()
fmt_vimhelp = False
return ('\n\n'.join(list(fns_txt.values())),
'\n\n'.join(list(deprecated_fns_txt.values())))
def delete_lines_below(filename, tokenstr):
"""Deletes all lines below the line containing `tokenstr`, the line itself,
and one line above it.
"""
lines = open(filename).readlines()
i = 0
found = False
for i, line in enumerate(lines, 1):
if tokenstr in line:
found = True
break
if not found:
raise RuntimeError(f'not found: "{tokenstr}"')
i = max(0, i - 2)
with open(filename, 'wt') as fp:
fp.writelines(lines[0:i])
def main(config, args):
"""Generates:
1. Vim :help docs
2. *.mpack files for use by API clients
Doxygen is called and configured through stdin.
"""
for target in CONFIG:
if args.target is not None and target != args.target:
continue
mpack_file = os.path.join(
base_dir, 'runtime', 'doc',
CONFIG[target]['filename'].replace('.txt', '.mpack'))
if os.path.exists(mpack_file):
os.remove(mpack_file)
output_dir = out_dir.format(target=target)
log.info("Generating documentation for %s in folder %s",
target, output_dir)
debug = args.log_level >= logging.DEBUG
p = subprocess.Popen(
['doxygen', '-'],
stdin=subprocess.PIPE,
# silence warnings
# runtime/lua/vim/lsp.lua:209: warning: argument 'foo' not found
stderr=(subprocess.STDOUT if debug else subprocess.DEVNULL))
p.communicate(
config.format(
input=CONFIG[target]['files'],
output=output_dir,
filter=filter_cmd,
file_patterns=CONFIG[target]['file_patterns'])
.encode('utf8')
)
if p.returncode:
sys.exit(p.returncode)
fn_map_full = {} # Collects all functions as each module is processed.
sections = {}
intros = {}
sep = '=' * text_width
base = os.path.join(output_dir, 'xml')
dom = minidom.parse(os.path.join(base, 'index.xml'))
# generate docs for section intros
for compound in dom.getElementsByTagName('compound'):
if compound.getAttribute('kind') != 'group':
continue
groupname = get_text(find_first(compound, 'name'))
groupxml = os.path.join(base, '%s.xml' %
compound.getAttribute('refid'))
group_parsed = minidom.parse(groupxml)
doc_list = []
brief_desc = find_first(group_parsed, 'briefdescription')
if brief_desc:
for child in brief_desc.childNodes:
doc_list.append(fmt_node_as_vimhelp(child))
desc = find_first(group_parsed, 'detaileddescription')
if desc:
doc = fmt_node_as_vimhelp(desc)
if doc:
doc_list.append(doc)
intros[groupname] = "\n".join(doc_list)
for compound in dom.getElementsByTagName('compound'):
if compound.getAttribute('kind') != 'file':
continue
filename = get_text(find_first(compound, 'name'))
if filename.endswith('.c') or filename.endswith('.lua'):
xmlfile = os.path.join(base,
'{}.xml'.format(compound.getAttribute('refid')))
# Extract unformatted (*.mpack).
fn_map, _ = extract_from_xml(xmlfile, target, width=9999)
# Extract formatted (:help).
functions_text, deprecated_text = fmt_doxygen_xml_as_vimhelp(
os.path.join(base, '{}.xml'.format(
compound.getAttribute('refid'))), target)
if not functions_text and not deprecated_text:
continue
else:
name = os.path.splitext(
os.path.basename(filename))[0].lower()
sectname = name.upper() if name == 'ui' else name.title()
doc = ''
intro = intros.get(f'api-{name}')
if intro:
doc += '\n\n' + intro
if functions_text:
doc += '\n\n' + functions_text
if INCLUDE_DEPRECATED and deprecated_text:
doc += f'\n\n\nDeprecated {sectname} Functions: ~\n\n'
doc += deprecated_text
if doc:
filename = os.path.basename(filename)
sectname = CONFIG[target]['section_name'].get(
filename, sectname)
title = CONFIG[target]['section_fmt'](sectname)
helptag = CONFIG[target]['helptag_fmt'](sectname)
sections[filename] = (title, helptag, doc)
fn_map_full.update(fn_map)
if len(sections) == 0:
fail(f'no sections for target: {target}')
if len(sections) > len(CONFIG[target]['section_order']):
raise RuntimeError(
'found new modules "{}"; update the "section_order" map'.format(
set(sections).difference(CONFIG[target]['section_order'])))
docs = ''
i = 0
for filename in CONFIG[target]['section_order']:
try:
title, helptag, section_doc = sections.pop(filename)
except KeyError:
msg(f'warning: empty docs, skipping (target={target}): {filename}')
msg(f' existing docs: {sections.keys()}')
continue
i += 1
if filename not in CONFIG[target]['append_only']:
docs += sep
docs += '\n%s%s' % (title,
helptag.rjust(text_width - len(title)))
docs += section_doc
docs += '\n\n\n'
docs = docs.rstrip() + '\n\n'
docs += ' vim:tw=78:ts=8:ft=help:norl:\n'
doc_file = os.path.join(base_dir, 'runtime', 'doc',
CONFIG[target]['filename'])
delete_lines_below(doc_file, CONFIG[target]['section_start_token'])
with open(doc_file, 'ab') as fp:
fp.write(docs.encode('utf8'))
fn_map_full = collections.OrderedDict(sorted(fn_map_full.items()))
with open(mpack_file, 'wb') as fp:
fp.write(msgpack.packb(fn_map_full, use_bin_type=True))
if not args.keep_tmpfiles:
shutil.rmtree(output_dir)
msg_report()
def filter_source(filename):
name, extension = os.path.splitext(filename)
if extension == '.lua':
p = subprocess.run([lua2dox_filter, filename], stdout=subprocess.PIPE)
op = ('?' if 0 != p.returncode else p.stdout.decode('utf-8'))
print(op)
else:
"""Filters the source to fix macros that confuse Doxygen."""
with open(filename, 'rt') as fp:
print(re.sub(r'^(ArrayOf|DictionaryOf)(\(.*?\))',
lambda m: m.group(1)+'_'.join(
re.split(r'[^\w]+', m.group(2))),
fp.read(), flags=re.M))
def parse_args():
targets = ', '.join(CONFIG.keys())
ap = argparse.ArgumentParser(
description="Generate helpdoc from source code")
ap.add_argument(
"--log-level", "-l", choices=LOG_LEVELS.keys(),
default=logging.getLevelName(logging.ERROR), help="Set log verbosity"
)
ap.add_argument('source_filter', nargs='*',
help="Filter source file(s)")
ap.add_argument('-k', '--keep-tmpfiles', action='store_true',
help="Keep temporary files")
ap.add_argument('-t', '--target',
help=f'One of ({targets}), defaults to "all"')
return ap.parse_args()
Doxyfile = textwrap.dedent('''
OUTPUT_DIRECTORY = {output}
INPUT = {input}
INPUT_ENCODING = UTF-8
FILE_PATTERNS = {file_patterns}
RECURSIVE = YES
INPUT_FILTER = "{filter}"
EXCLUDE =
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS = */private/* */health.lua */_*.lua
EXCLUDE_SYMBOLS =
EXTENSION_MAPPING = lua=C
EXTRACT_PRIVATE = NO
GENERATE_HTML = NO
GENERATE_DOCSET = NO
GENERATE_HTMLHELP = NO
GENERATE_QHP = NO
GENERATE_TREEVIEW = NO
GENERATE_LATEX = NO
GENERATE_RTF = NO
GENERATE_MAN = NO
GENERATE_DOCBOOK = NO
GENERATE_AUTOGEN_DEF = NO
GENERATE_XML = YES
XML_OUTPUT = xml
XML_PROGRAMLISTING = NO
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = NO
MARKDOWN_SUPPORT = YES
''')
if __name__ == "__main__":
args = parse_args()
print("Setting log level to %s" % args.log_level)
args.log_level = LOG_LEVELS[args.log_level]
log.setLevel(args.log_level)
log.addHandler(logging.StreamHandler())
if len(args.source_filter) > 0:
filter_source(args.source_filter[0])
else:
main(Doxyfile, args)
# vim: set ft=python ts=4 sw=4 tw=79 et :
```
|
{
"source": "jdechalendar/sesi",
"score": 3
}
|
#### File: sesi/py_notebooks/myunits.py
```python
def units():
u = {
"GJ_per_mmbtu": 1.055, # src: Google
"kwh_per_mmbtu": 293.07,
"kWh_per_tonhr": 3.5, # src: Google
"GJ_per_kwh": 0.0036
}
u["GJ_per_ton"] = u["kWh_per_tonhr"] * u["GJ_per_kwh"]
return u
```
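A minimal usage sketch for the conversion table above (assuming the module is importable as `myunits`; the quantities are illustrative):
```python
from myunits import units

u = units()
heat_mmbtu = 10.0
print(heat_mmbtu * u["GJ_per_mmbtu"])   # 10.55 GJ
print(heat_mmbtu * u["kwh_per_mmbtu"])  # 2930.7 kWh
print(u["GJ_per_ton"])                  # 3.5 * 0.0036 = 0.0126 GJ per ton-hour of cooling
```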
|
{
"source": "jdechalendar/tracking_emissions",
"score": 2
}
|
#### File: tracking_emissions/src/AMPD_1.py
```python
import os
import numpy as np
import logging
from load import AMPD, EGRID
DATA_PATH = os.getenv('DATA_PATH')
if DATA_PATH is None:
raise ValueError("DATA_PATH needs to be set")
def AMPD_1():
'''
PLNT-level cleaning.
'''
logger = logging.getLogger('clean')
logger.info("Starting AMPD_1")
# Load result from step 0
ampd = AMPD(step=0)
# Load egrid data
egrid_plnt = EGRID(sheet_name='PLNT16')
# Restrict to states in Con-US
egrid_plnt.df = egrid_plnt.df[~egrid_plnt.df.PSTATABB.isin(['AK', 'HI'])]
# Drop the AMPD plants that do not have enough timestamps
x = ampd.df.loc[:, ["ORISPL_CODE", "OP_DATE_TIME"]].groupby(
'ORISPL_CODE').count()
to_drop = x.mask(x > 8600).dropna()
print("Dropping %d plants out of %d that do not have enough timestamps" % (
len(to_drop), len(x)))
ampd.df = ampd.df[~ampd.df.ORISPL_CODE.isin(to_drop.index.values)]
egrid_orispl = set(egrid_plnt.df.ORISPL.values)
ampd_orispl = set(ampd.df.ORISPL_CODE.values)
egrid_only = egrid_orispl - ampd_orispl
ampd_only = ampd_orispl - egrid_orispl
print("%d are in egrid but not in ampd" % len(egrid_only))
print("%d are in ampd but not in egrid" % len(ampd_only))
# Drop the 11 AMPD plants that are not in EGRID
ampd.df = ampd.df[~ampd.df.ORISPL_CODE.isin(ampd_only)]
# For this step, also drop the EGRID plants that are not in AMPD
egrid_plnt.df = egrid_plnt.df[~egrid_plnt.df.ORISPL.isin(egrid_only)]
# Calculate AMPD annual totals
ampd_ann = ampd.df.loc[:, ['ORISPL_CODE', 'CO2', 'SO2', 'NOX']].groupby(
'ORISPL_CODE').sum()
# Prepare EGRID unadjusted data
egrid_un_ann = egrid_plnt.df.loc[:, ['ORISPL', 'UNCO2', 'UNSO2', 'UNNOX']]
egrid_un_ann.columns = ['ORISPL_CODE', 'CO2', 'SO2', 'NOX']
egrid_un_ann.set_index('ORISPL_CODE', inplace=True)
egrid_un_ann.fillna(0, inplace=True)
egrid_un_ann.sort_index(inplace=True)
# Prepare EGRID adjusted data
egrid_ann = egrid_plnt.df.loc[:, [
'ORISPL', 'PLCO2AN', 'PLSO2AN', 'PLNOXAN']]
egrid_ann.columns = ['ORISPL_CODE', 'CO2', 'SO2', 'NOX']
egrid_ann.set_index('ORISPL_CODE', inplace=True)
egrid_ann.sort_index(inplace=True)
egrid_ann.fillna(0, inplace=True)
# Check that we now have the same plants in both
logger.debug(ampd_ann.index.equals(egrid_un_ann.index))
logger.info("Checking %d plants from AMPD against EGRID unadj."
% len(egrid_un_ann.index))
# Check EGRID unadjusted data against AMPD annual totals
logger.info("Checking EGRID unadjusted data against AMPD annual totals")
diff = egrid_un_ann - ampd_ann
tol = 10 # metric tonne
diff = diff[(diff.CO2.abs() > tol) | (diff.SO2.abs() > tol)
| (diff.NOX.abs() > tol)]
logger.debug(diff.describe())
# Check that all of the plants have 8,784 timesteps
timesteps = ampd.df.loc[:, ['ORISPL_CODE', 'OP_DATE_TIME']].groupby(
'ORISPL_CODE').count()
logger.debug(np.sum(~(timesteps == 8784)))
# try to reconcile ampd with egrid unadjusted
for code in diff.index.values:
for col in ["CO2", "SO2", "NOX"]:
ampd.df.loc[ampd.df.ORISPL_CODE == code, col] += diff.loc[
code, col] / 8784
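    # Illustrative arithmetic (hypothetical plant): if a plant's EGRID
    # unadjusted CO2 total exceeds its AMPD hourly sum by 8,784 tonnes, each of
    # the 8,784 hourly CO2 values is bumped up by 1 tonne, so annual totals
    # match while the shape of the hourly profile is preserved.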
# Check results
ampd_ann2 = ampd.df.loc[:, ['ORISPL_CODE', 'CO2', 'SO2', 'NOX']].groupby(
'ORISPL_CODE').sum()
diff2 = egrid_un_ann - ampd_ann2
tol = 10 # metric tonne
logger.debug(diff2.describe())
if len(diff2[(diff2.CO2.abs() > tol) | (diff2.SO2.abs() > tol)
| (diff2.NOX.abs() > tol)]) > 0:
logger.warn("Cleaning did not go as expected")
# Now reconcile ampd with egrid adjusted - using first multiplication for
    # CHP and then subtraction for biomass. Note that this is not perfect for
# those plants that have both CHP and biomass flags
logger.info("Dealing with CHP plants (excluding biomass)")
df_tmp = egrid_plnt.df[(egrid_plnt.df.CHPFLAG == "Yes") & ~(
egrid_plnt.df.RMBMFLAG == "Yes")]
logger.debug(len(df_tmp))
egrid_ann = df_tmp.loc[:, ['ORISPL', 'PLCO2AN', 'PLSO2AN', 'PLNOXAN']]
egrid_ann.columns = ['ORISPL_CODE', 'CO2', 'SO2', 'NOX']
egrid_ann.set_index('ORISPL_CODE', inplace=True)
egrid_ann.sort_index(inplace=True)
egrid_ann.fillna(0, inplace=True)
egrid_un_ann = df_tmp.loc[:, ['ORISPL', 'UNCO2', 'UNSO2', 'UNNOX']]
egrid_un_ann.columns = ['ORISPL_CODE', 'CO2', 'SO2', 'NOX']
egrid_un_ann.set_index('ORISPL_CODE', inplace=True)
egrid_un_ann.fillna(0, inplace=True)
egrid_un_ann.sort_index(inplace=True)
logger.debug(egrid_un_ann.sum())
logger.debug(egrid_ann.sum())
ratios = egrid_ann / egrid_un_ann
tol = .01 # metric tonne
logger.debug(len(ratios))
ratios.fillna(0, inplace=True)
logger.debug(ratios.describe())
# try to reconcile ampd with egrid adjusted for CHP
for code in ratios.index.values:
for col in ["CO2", "SO2", "NOX"]:
ampd.df.loc[ampd.df.ORISPL_CODE == code, col] *= ratios.loc[
code, col]
logger.info("Dealing with biomass plants (including CHP)")
df_tmp = egrid_plnt.df[(egrid_plnt.df.RMBMFLAG == "Yes")]
print(len(df_tmp))
egrid_ann = df_tmp.loc[:, ['ORISPL', 'PLCO2AN', 'PLSO2AN', 'PLNOXAN']]
egrid_ann.columns = ['ORISPL_CODE', 'CO2', 'SO2', 'NOX']
egrid_ann.set_index('ORISPL_CODE', inplace=True)
egrid_ann.sort_index(inplace=True)
egrid_ann.fillna(0, inplace=True)
egrid_un_ann = df_tmp.loc[:, ['ORISPL', 'UNCO2', 'UNSO2', 'UNNOX']]
egrid_un_ann.columns = ['ORISPL_CODE', 'CO2', 'SO2', 'NOX']
egrid_un_ann.set_index('ORISPL_CODE', inplace=True)
egrid_un_ann.fillna(0, inplace=True)
egrid_un_ann.sort_index(inplace=True)
logger.debug(egrid_un_ann.sum())
logger.debug(egrid_ann.sum())
diff = egrid_ann - egrid_un_ann
tol = 1 # metric tonne
logger.debug(len(diff))
logger.debug(diff.describe())
# try to reconcile ampd with egrid adjusted for biomass
for code in diff.index.values:
for col in ["CO2", "SO2", "NOX"]:
ampd.df.loc[ampd.df.ORISPL_CODE == code, col] += diff.loc[
code, col] / 8784
# Recalculate AMPD annual totals
logger.info("Final round of adjustments")
ampd_ann2 = ampd.df.loc[:, ['ORISPL_CODE', 'CO2', 'SO2', 'NOX']].groupby(
'ORISPL_CODE').sum()
egrid_ann = egrid_plnt.df.loc[:, [
'ORISPL', 'PLCO2AN', 'PLSO2AN', 'PLNOXAN']]
egrid_ann.columns = ['ORISPL_CODE', 'CO2', 'SO2', 'NOX']
egrid_ann.set_index('ORISPL_CODE', inplace=True)
egrid_ann.sort_index(inplace=True)
egrid_ann.fillna(0, inplace=True)
# Check EGRID unadjusted data against AMPD annual totals
diff2 = egrid_ann - ampd_ann2
tol = 1 # metric tonne
logger.debug(len(diff2))
diff2 = diff2[(diff2.CO2.abs() > tol) | (diff2.SO2.abs() > tol)
| (diff2.NOX.abs() > tol)]
logger.debug(diff2.describe())
logger.debug(len(diff2))
# try to reconcile ampd with egrid adjusted for the final plants
for code in diff2.index.values:
for col in ["CO2", "SO2", "NOX"]:
ampd.df.loc[ampd.df.ORISPL_CODE == code, col] += diff2.loc[
code, col] / 8784
# final check
ampd_ann3 = ampd.df.loc[:, ['ORISPL_CODE', 'CO2', 'SO2', 'NOX']].groupby(
'ORISPL_CODE').sum()
egrid_ann = egrid_plnt.df.loc[:, [
'ORISPL', 'PLCO2AN', 'PLSO2AN', 'PLNOXAN']]
egrid_ann.columns = ['ORISPL_CODE', 'CO2', 'SO2', 'NOX']
egrid_ann.set_index('ORISPL_CODE', inplace=True)
egrid_ann.sort_index(inplace=True)
egrid_ann.fillna(0, inplace=True)
diff = egrid_ann - ampd_ann3
tol = 1 # metric tonne
logger.debug(len(diff))
logger.debug(diff.describe())
# Save data
logger.info("AMPD 1 - Saving data")
fileNm_out = os.path.join(DATA_PATH, 'analysis', 'AMPD_1.csv')
ampd.df.to_csv(fileNm_out)
```
#### File: tracking_emissions/src/load.py
```python
import os
import pandas as pd
import logging
import re
DATA_PATH = os.getenv('DATA_PATH')
if DATA_PATH is None:
raise ValueError("DATA_PATH needs to be set")
class BA_DATA(object):
    '''Class to handle BA-level data. The EBA dataset provides generation,
    consumption, the trade matrix and total interchange, either at the BA or
    at the regional (EIA-defined) level. User guide:
https://www.eia.gov/realtime_grid/docs/userguide-knownissues.pdf
Timestamps are in UTC.
EBA data columns
----------------
D: Demand
NG: Net Generation
TI: Total Interchange - (positive if exports)
ID: Interchange with directly connected balancing authorities - (positive
if exports)
Consistency requirements
------------------------
- Interchange data is antisymmetric: ID[i,j] == -ID[j,i]
- Total trade with interchange data: TI == sum(ID[i,:])
- Balance equation for total trade, demand, generation: TI + D == NG
Methods
-------
get_cols(self, r) : generate column names for regions r for a given field.
Attributes
----------
regions : are in alphabetical order
df : raw dataframe
'''
# Convenience dictionary to refer to keys
# call KEY['D']%ri to get demand for region ri
KEYS = {"E": {'D': 'EBA.%s-ALL.D.H', 'NG': 'EBA.%s-ALL.NG.H',
'TI': 'EBA.%s-ALL.TI.H', 'ID': 'EBA.%s-%s.ID.H'},
"CO2": {'D': "CO2_%s_D", 'NG': "CO2_%s_NG",
"TI": "CO2_%s_TI", "ID": "CO2_%s-%s_ID"},
"SO2": {'D': "SO2_%s_D", 'NG': "SO2_%s_NG",
"TI": "SO2_%s_TI", "ID": "SO2_%s-%s_ID"},
"NOX": {'D': "NOX_%s_D", 'NG': "NOX_%s_NG",
"TI": "NOX_%s_TI", "ID": "NOX_%s-%s_ID"},
"CO2i": {'D': "CO2i_%s_D", 'NG': "CO2i_%s_NG"},
"SO2i": {'D': "SO2i_%s_D", 'NG': "SO2i_%s_NG"},
"NOXi": {'D': "NOXi_%s_D", 'NG': "NOXi_%s_NG"}}
def __init__(self, step=None, fileNm=None, df=None, variable="E",
dataset="EBA"):
self.logger = logging.getLogger('load')
if df is not None:
self.df = df
else:
if step is not None:
fileNm = os.path.join(
DATA_PATH, 'analysis', '%s_%d.csv' % (dataset, step))
if fileNm is None:
fileNm = os.path.join(DATA_PATH, "analysis", "EBA_0.csv")
self.df = pd.read_csv(fileNm, index_col=0, parse_dates=True)
self.variable = variable
self.regions = self._parse_data_cols()
self.fileNm = fileNm
self.KEY = self.KEYS[variable]
def get_cols(self, r=None, field="D"):
if r is None:
r = self.regions
if isinstance(r, str):
r = [r]
return [self.KEY[field] % ir for ir in r]
def get_trade_partners(self, ba):
partners = []
for ba2 in self.regions:
if ((self.KEY["ID"] % (ba, ba2) in self.df.columns)
and (self.KEY["ID"] % (ba2, ba) in self.df.columns)):
partners += [ba2]
return partners
def _parse_data_cols(self):
'''
Checks:
- Consistent number of regions for demand / generation / total
interchange / trade matrix
Returns the list of regions
'''
regions = set([re.split(r"\.|-|_", el)[1] for el in self.df.columns])
D_cols = [re.split(r"\.|-|_", el)[1] for el in self.df.columns if 'D'
in re.split(r"\.|-|_", el)]
NG_cols = [re.split(r"\.|-|_", el)[1] for el in self.df.columns if 'NG'
in re.split(r"\.|-|_", el)]
TI_cols = [re.split(r"\.|-|_", el)[1] for el in self.df.columns if 'TI'
in re.split(r"\.|-|_", el)]
ID_cols = [re.split(r"\.|-|_", el)[1] for el in self.df.columns if 'ID'
in re.split(r"\.|-|_", el)]
ID_cols2 = [re.split(r"\.|-|_", el)[2] for el in self.df.columns if
'ID' in re.split(r"\.|-|_", el)]
if len(NG_cols) != len(D_cols):
self.logger.warn(
'Inconsistent columns: len(NG_cols) != len(D_cols)')
if set(NG_cols) != regions:
self.logger.warn(
'Inconsistent columns: set(NG_cols) != regions')
if not ("i" in self.variable):
if len(NG_cols) != len(TI_cols):
self.logger.warn(
'Inconsistent columns: len(NG_cols) != len(TI_cols)')
if set(NG_cols) != set(ID_cols):
self.logger.warn(
'Inconsistent columns: set(NG_cols) != set(ID_cols)')
if set(NG_cols) != set(ID_cols2):
self.logger.warn(
'Inconsistent columns: set(NG_cols) != set(ID_cols2)')
return sorted(list(regions))
def get_trade_out(self, r=None):
if r is None:
r = self.regions
if isinstance(r, str):
r = [r]
cols = []
for ir2 in self.regions:
cols += [self.KEY['ID'] % (ir, ir2) for ir in r]
return [c for c in cols if c in self.df.columns]
def checkBA(self, ba, tol=1e-2, log_level=logging.INFO):
'''
        Sanity checks for balancing area `ba`: flags NaNs, violations of the
        balance equation TI + D == NG, mismatches between TI and sum(ID),
        asymmetries in ID[i, j] == -ID[j, i], and negative D or NG values.
'''
logger = self.logger
log_level_old = logger.level
logger.setLevel(log_level)
logger.debug("Checking %s" % ba)
partners = self.get_trade_partners(ba)
# NaNs
for field in ["D", "NG", "TI"]:
ind_na = self.df.loc[:, self.get_cols(r=ba, field=field)[0]].isna()
cnt_na = ind_na.sum()
if cnt_na != 0:
logger.error("There are still %d nans for %s field %s" %
(cnt_na, ba, field))
for ba2 in partners:
cnt_na = self.df.loc[:, self.KEY["ID"] % (ba, ba2)].isna().sum()
if cnt_na != 0:
logger.error("There are still %d nans for %s-%s" %
(cnt_na, ba, ba2))
# TI+D == NG
res1 = self.df.loc[:, self.get_cols(r=ba, field="NG")[0]] - (
self.df.loc[:, self.get_cols(r=ba, field="D")[0]]
+ self.df.loc[:, self.get_cols(r=ba, field="TI")[0]])
if (res1.abs() > tol).sum() != 0:
logger.error("%s: TI+D == NG violated" % ba)
# TI == ID.sum()
res2 = (
self.df.loc[:, self.get_cols(r=ba, field="TI")[0]]
- self.df.loc[:, [self.KEY["ID"] % (ba, ba2) for ba2 in partners]]\
.sum(axis=1))
if (res2.abs() > tol).sum() != 0:
logger.error("%s: TI == ID.sum()violated" % ba)
# ID[i,j] == -ID[j,i]
for ba2 in partners:
res3 = (self.df.loc[:, self.KEY["ID"] % (ba, ba2)]
+ self.df.loc[:, self.KEY["ID"] % (ba2, ba)])
if (res3.abs() > tol).sum() != 0:
logger.error("%s-%s: ID[i,j] == -ID[j,i] violated" % (ba, ba2))
# D and NG negative
for field in ["D", "NG"]:
ind_neg = self.df.loc[:, self.get_cols(r=ba, field=field)[0]] < 0
cnt_neg = ind_neg.sum()
if cnt_neg != 0:
logger.error("%s: there are %d <0 values for field %s" %
(ba, cnt_neg, field))
logger.setLevel(log_level_old)
class AMPD(object):
'''
Class to handle the AMPD data.
'''
def __init__(self, step=None, fileNm=None):
self.logger = logging.getLogger('load')
if step is not None:
fileNm = os.path.join(DATA_PATH, 'analysis', 'AMPD_%d.csv' % step)
if fileNm is None:
fileNm = os.path.join(DATA_PATH, 'analysis', 'AMPD_0.csv')
self.fileNm = fileNm
if step < 2:
self.df = pd.read_csv(fileNm, parse_dates=['OP_DATE_TIME'],
infer_datetime_format=True)
elif step == 2:
self.df = pd.read_csv(fileNm, index_col=0, parse_dates=True)
self.logger.info('Loading AMPD from %s' % self.fileNm)
class EGRID(object):
'''
Simple class to handle EGRID data.
The eGrid dataset contains a list of plants in the US including:
- ORISPL code
- Plant name
- Operator name
- Balancing authority
- State
- Geographical coordinates
- Nominal capacity
'''
def __init__(self, fileNm=None, sheet_name='BA16'):
self.logger = logging.getLogger('load')
if fileNm is None:
fileNm = os.path.join(
DATA_PATH,
"raw/EGRID/egrid2016_all_files/egrid2016_data_metric.xlsx")
self.df = pd.read_excel(fileNm, sheet_name=sheet_name, header=1)
self.fileNm = fileNm
self.sheet_name = sheet_name
self.logger.info('Loading EGRID sheet %s' % self.sheet_name)
def get_groups(self, grp_type='BACODE'):
'''
Method get_groups returns a dictionary of the form:
{grp_type: {state:[plant_codes]}}
This can then be used to aggregate AMPD data according to grp_type.
        Options for parameter grp_type are: BACODE, NERC, SUBRGN.
'''
if self.sheet_name != "PLNT16":
raise ValueError("Cannot call this function with sheet %s!"
% self.sheet_name)
        return self.df.groupby([grp_type])[['PSTATABB', 'ORISPL']]\
.apply(lambda df: df.groupby(["PSTATABB"])['ORISPL']
.apply(list).to_dict()).to_dict()
```
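The nested mapping that `get_groups` documents is easiest to read with a tiny hand-built stand-in. The sketch below is illustrative only: the balancing-authority codes and plant IDs are invented, and only the `{group: {state: [plant_codes]}}` shape comes from the docstring above.
```python
# Illustrative only: same shape as EGRID.get_groups(grp_type='BACODE'),
# with invented balancing authorities, states and ORISPL plant codes.
groups = {
    "CISO": {"CA": [302, 341, 377]},
    "ERCO": {"TX": [3470, 3478]},
}

# Typical downstream use: count plants per balancing authority before
# aggregating unit-level AMPD data.
for ba_code, states in groups.items():
    n_plants = sum(len(codes) for codes in states.values())
    print(ba_code, n_plants)
```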
|
{
"source": "JDechery/polylearn",
"score": 2
}
|
#### File: polylearn/polylearn/factorization_machine.py
```python
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from sklearn.preprocessing import add_dummy_feature
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_array
from sklearn.utils.extmath import safe_sparse_dot, row_norms
from sklearn.externals import six
try:
from sklearn.exceptions import NotFittedError
except ImportError:
class NotFittedError(ValueError, AttributeError):
pass
from lightning.impl.dataset_fast import get_dataset
from .base import _BasePoly, _PolyClassifierMixin, _PolyRegressorMixin
from .kernels import _poly_predict
from .cd_direct_fast import _cd_direct_ho
class _BaseFactorizationMachine(six.with_metaclass(ABCMeta, _BasePoly)):
@abstractmethod
def __init__(self, degree=2, loss='squared', n_components=2, alpha=1,
beta=1, tol=1e-6, fit_lower='explicit', fit_linear=True,
warm_start=False, init_lambdas='ones', max_iter=10000,
verbose=False, random_state=None):
self.degree = degree
self.loss = loss
self.n_components = n_components
self.alpha = alpha
self.beta = beta
self.tol = tol
self.fit_lower = fit_lower
self.fit_linear = fit_linear
self.warm_start = warm_start
self.init_lambdas = init_lambdas
self.max_iter = max_iter
self.verbose = verbose
self.random_state = random_state
def _augment(self, X):
# for factorization machines, we add a dummy column for each order.
if self.fit_lower == 'augment':
k = 2 if self.fit_linear else 1
for _ in range(self.degree - k):
X = add_dummy_feature(X, value=1)
return X
def fit(self, X, y):
"""Fit factorization machine to training data.
Parameters
----------
X : array-like or sparse, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : Estimator
Returns self.
"""
if self.degree > 3:
raise ValueError("FMs with degree >3 not yet supported.")
X, y = self._check_X_y(X, y)
X = self._augment(X)
n_features = X.shape[1] # augmented
X_col_norms = row_norms(X.T, squared=True)
dataset = get_dataset(X, order="fortran")
rng = check_random_state(self.random_state)
loss_obj = self._get_loss(self.loss)
if not (self.warm_start and hasattr(self, 'w_')):
self.w_ = np.zeros(n_features, dtype=np.double)
if self.fit_lower == 'explicit':
n_orders = self.degree - 1
else:
n_orders = 1
if not (self.warm_start and hasattr(self, 'P_')):
self.P_ = 0.01 * rng.randn(n_orders, self.n_components, n_features)
if not (self.warm_start and hasattr(self, 'lams_')):
if self.init_lambdas == 'ones':
self.lams_ = np.ones(self.n_components)
elif self.init_lambdas == 'random_signs':
self.lams_ = np.sign(rng.randn(self.n_components))
else:
raise ValueError("Lambdas must be initialized as ones "
"(init_lambdas='ones') or as random "
"+/- 1 (init_lambdas='random_signs').")
y_pred = self._get_output(X)
converged, self.n_iter_ = _cd_direct_ho(
self.P_, self.w_, dataset, X_col_norms, y, y_pred,
self.lams_, self.degree, self.alpha, self.beta, self.fit_linear,
self.fit_lower == 'explicit', loss_obj, self.max_iter,
self.tol, self.verbose)
if not converged:
warnings.warn("Objective did not converge. Increase max_iter.")
return self
def _get_output(self, X):
y_pred = _poly_predict(X, self.P_[0, :, :], self.lams_, kernel='anova',
degree=self.degree)
if self.fit_linear:
y_pred += safe_sparse_dot(X, self.w_)
if self.fit_lower == 'explicit' and self.degree == 3:
# degree cannot currently be > 3
y_pred += _poly_predict(X, self.P_[1, :, :], self.lams_,
kernel='anova', degree=2)
return y_pred
def _predict(self, X):
if not hasattr(self, "P_"):
raise NotFittedError("Estimator not fitted.")
X = check_array(X, accept_sparse='csc', dtype=np.double)
X = self._augment(X)
return self._get_output(X)
class FactorizationMachineRegressor(_BaseFactorizationMachine,
_PolyRegressorMixin):
"""Factorization machine for regression (with squared loss).
Parameters
----------
degree : int >= 2, default: 2
Degree of the polynomial. Corresponds to the order of feature
interactions captured by the model. Currently only supports
degrees up to 3.
n_components : int, default: 2
Number of basis vectors to learn, a.k.a. the dimension of the
low-rank parametrization.
alpha : float, default: 1
Regularization amount for linear term (if ``fit_linear=True``).
beta : float, default: 1
Regularization amount for higher-order weights.
tol : float, default: 1e-6
Tolerance for the stopping condition.
fit_lower : {'explicit'|'augment'|None}, default: 'explicit'
Whether and how to fit lower-order, non-homogeneous terms.
- 'explicit': fits a separate P directly for each lower order.
- 'augment': adds the required number of dummy columns (columns
that are 1 everywhere) in order to capture lower-order terms.
Adds ``degree - 2`` columns if ``fit_linear`` is true, or
``degree - 1`` columns otherwise, to account for the linear term.
- None: only learns weights for the degree given. If ``degree == 3``,
for example, the model will only have weights for third-order
feature interactions.
fit_linear : {True|False}, default: True
Whether to fit an explicit linear term <w, x> to the model, using
coordinate descent. If False, the model can still capture linear
effects if ``fit_lower == 'augment'``.
warm_start : boolean, optional, default: False
Whether to use the existing solution, if available. Useful for
computing regularization paths or pre-initializing the model.
init_lambdas : {'ones'|'random_signs'}, default: 'ones'
How to initialize the predictive weights of each learned basis. The
lambdas are not trained; using alternate signs can theoretically
improve performance if the kernel degree is even. The default value
of 'ones' matches the original formulation of factorization machines
(Rendle, 2010).
To use custom values for the lambdas, ``warm_start`` may be used.
max_iter : int, optional, default: 10000
Maximum number of passes over the dataset to perform.
verbose : boolean, optional, default: False
Whether to print debugging information.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use for
initializing the parameters.
Attributes
----------
self.P_ : array, shape [n_orders, n_components, n_features]
The learned basis functions.
``self.P_[0, :, :]`` is always available, and corresponds to
interactions of order ``self.degree``.
``self.P_[i, :, :]`` for i > 0 corresponds to interactions of order
``self.degree - i``, available only if ``self.fit_lower='explicit'``.
self.w_ : array, shape [n_features]
The learned linear model, completing the FM.
Only present if ``self.fit_linear`` is true.
self.lams_ : array, shape [n_components]
The predictive weights.
References
----------
Polynomial Networks and Factorization Machines:
New Insights and Efficient Training Algorithms.
<NAME>, <NAME>, <NAME>, <NAME>.
In: Proceedings of ICML 2016.
http://mblondel.org/publications/mblondel-icml2016.pdf
Factorization machines.
<NAME>
In: Proceedings of IEEE 2010.
"""
def __init__(self, degree=2, n_components=2, alpha=1, beta=1, tol=1e-6,
fit_lower='explicit', fit_linear=True, warm_start=False,
init_lambdas='ones', max_iter=10000, verbose=False,
random_state=None):
super(FactorizationMachineRegressor, self).__init__(
degree, 'squared', n_components, alpha, beta, tol, fit_lower,
fit_linear, warm_start, init_lambdas, max_iter, verbose,
random_state)
class FactorizationMachineClassifier(_BaseFactorizationMachine,
_PolyClassifierMixin):
"""Factorization machine for classification.
Parameters
----------
degree : int >= 2, default: 2
Degree of the polynomial. Corresponds to the order of feature
interactions captured by the model. Currently only supports
degrees up to 3.
loss : {'logistic'|'squared_hinge'|'squared'}, default: 'squared_hinge'
Which loss function to use.
- logistic: L(y, p) = log(1 + exp(-yp))
- squared hinge: L(y, p) = max(1 - yp, 0)²
- squared: L(y, p) = 0.5 * (y - p)²
n_components : int, default: 2
Number of basis vectors to learn, a.k.a. the dimension of the
low-rank parametrization.
alpha : float, default: 1
Regularization amount for linear term (if ``fit_linear=True``).
beta : float, default: 1
Regularization amount for higher-order weights.
tol : float, default: 1e-6
Tolerance for the stopping condition.
fit_lower : {'explicit'|'augment'|None}, default: 'explicit'
Whether and how to fit lower-order, non-homogeneous terms.
- 'explicit': fits a separate P directly for each lower order.
- 'augment': adds the required number of dummy columns (columns
that are 1 everywhere) in order to capture lower-order terms.
Adds ``degree - 2`` columns if ``fit_linear`` is true, or
``degree - 1`` columns otherwise, to account for the linear term.
- None: only learns weights for the degree given. If ``degree == 3``,
for example, the model will only have weights for third-order
feature interactions.
fit_linear : {True|False}, default: True
Whether to fit an explicit linear term <w, x> to the model, using
coordinate descent. If False, the model can still capture linear
effects if ``fit_lower == 'augment'``.
warm_start : boolean, optional, default: False
Whether to use the existing solution, if available. Useful for
computing regularization paths or pre-initializing the model.
init_lambdas : {'ones'|'random_signs'}, default: 'ones'
How to initialize the predictive weights of each learned basis. The
lambdas are not trained; using alternate signs can theoretically
improve performance if the kernel degree is even. The default value
of 'ones' matches the original formulation of factorization machines
(Rendle, 2010).
To use custom values for the lambdas, ``warm_start`` may be used.
max_iter : int, optional, default: 10000
Maximum number of passes over the dataset to perform.
verbose : boolean, optional, default: False
Whether to print debugging information.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use for
initializing the parameters.
Attributes
----------
self.P_ : array, shape [n_orders, n_components, n_features]
The learned basis functions.
``self.P_[0, :, :]`` is always available, and corresponds to
interactions of order ``self.degree``.
``self.P_[i, :, :]`` for i > 0 corresponds to interactions of order
``self.degree - i``, available only if ``self.fit_lower='explicit'``.
self.w_ : array, shape [n_features]
The learned linear model, completing the FM.
Only present if ``self.fit_linear`` is true.
self.lams_ : array, shape [n_components]
The predictive weights.
References
----------
Polynomial Networks and Factorization Machines:
New Insights and Efficient Training Algorithms.
<NAME>, <NAME>, <NAME>, <NAME>.
In: Proceedings of ICML 2016.
http://mblondel.org/publications/mblondel-icml2016.pdf
Factorization machines.
<NAME>
In: Proceedings of IEEE 2010.
"""
def __init__(self, degree=2, loss='squared_hinge', n_components=2, alpha=1,
beta=1, tol=1e-6, fit_lower='explicit', fit_linear=True,
warm_start=False, init_lambdas='ones', max_iter=10000,
verbose=False, random_state=None):
super(FactorizationMachineClassifier, self).__init__(
degree, loss, n_components, alpha, beta, tol, fit_lower,
fit_linear, warm_start, init_lambdas, max_iter, verbose,
random_state)
```
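A short usage sketch (not part of the source file): it assumes the package-level `polylearn` import path and the scikit-learn-style `predict` supplied by `_PolyRegressorMixin`; the toy data are invented.
```python
import numpy as np

from polylearn import FactorizationMachineRegressor  # import path assumed

rng = np.random.RandomState(0)
X = rng.randn(200, 10)
y = X[:, 0] * X[:, 1] + X[:, 2]  # one pairwise interaction plus a linear effect

fm = FactorizationMachineRegressor(degree=2, n_components=4,
                                   fit_linear=True, random_state=0)
fm.fit(X, y)
print(np.mean((y - fm.predict(X)) ** 2))  # training MSE, should be small
```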
#### File: polylearn/tests/test_cd_linear.py
```python
from nose.tools import assert_less_equal, assert_greater_equal
from numpy.testing import assert_array_almost_equal
import numpy as np
from sklearn.utils.validation import assert_all_finite
from polylearn.cd_linear_fast import _cd_linear_epoch
from polylearn.loss_fast import Squared, SquaredHinge, Logistic
from lightning.impl.dataset_fast import get_dataset
rng = np.random.RandomState(0)
X = rng.randn(50, 10)
w_true = rng.randn(10)
y = np.dot(X, w_true)
X_ds = get_dataset(X, order='fortran')
X_col_norm_sq = (X ** 2).sum(axis=0)
n_iter = 100
def _fit_linear(X, y, alpha, n_iter, loss, callback=None):
n_samples, n_features = X.shape
X_col_norm_sq = (X ** 2).sum(axis=0)
X_ds = get_dataset(X, order='fortran')
w_init = np.zeros(n_features)
y_pred = np.zeros(n_samples)
for _ in range(n_iter):
viol = _cd_linear_epoch(w_init, X_ds, y, y_pred, X_col_norm_sq,
alpha, loss)
if callback is not None:
callback(w_init, viol)
return w_init
class Callback(object):
def __init__(self, X, y, alpha):
self.X = X
self.y = y
self.alpha = alpha
self.losses_ = []
def __call__(self, w, viol):
y_pred = np.dot(self.X, w)
lv = np.mean((y_pred - self.y) ** 2)
lv += 2 * self.alpha * np.sum(w ** 2)
self.losses_.append(lv)
def test_cd_linear_fit():
loss = Squared()
alpha = 1e-6
cb = Callback(X, y, alpha)
w = _fit_linear(X, y, alpha, n_iter, loss, cb)
assert_array_almost_equal(w_true, w)
assert_less_equal(cb.losses_[1], cb.losses_[0])
assert_less_equal(cb.losses_[-1], cb.losses_[0])
def check_cd_linear_clf(loss):
alpha = 1e-3
y_bin = np.sign(y)
w = _fit_linear(X, y_bin, alpha, n_iter, loss)
y_pred = np.dot(X, w)
accuracy = np.mean(np.sign(y_pred) == y_bin)
assert_greater_equal(accuracy, 0.97,
msg="classification loss {}".format(loss))
def test_cd_linear_clf():
for loss in (Squared(), SquaredHinge(), Logistic()):
yield check_cd_linear_clf, loss
def test_cd_linear_offset():
loss = Squared()
alpha = 1e-3
w_a = np.zeros_like(w_true)
w_b = np.zeros_like(w_true)
    n_samples = X.shape[0]
    y_pred_a = np.zeros(n_samples)
    y_pred_b = np.zeros(n_samples)
    y_offset = np.arange(n_samples).astype(np.double)
# one epoch with offset
_cd_linear_epoch(w_a, X_ds, y, y_pred_a + y_offset, X_col_norm_sq, alpha,
loss)
# one epoch with shifted target
_cd_linear_epoch(w_b, X_ds, y - y_offset, y_pred_b, X_col_norm_sq, alpha,
loss)
assert_array_almost_equal(w_a, w_b)
def test_cd_linear_trivial():
# trivial example that failed due to gh#4
loss = Squared()
alpha = 1e-5
n_features = 100
x = np.zeros((1, n_features))
x[0, 1] = 1
y = np.ones(1)
cb = Callback(x, y, alpha)
w = _fit_linear(x, y, alpha, n_iter=20, loss=loss, callback=cb)
assert_all_finite(w)
assert_all_finite(cb.losses_)
```
|
{
"source": "jdecid/DICOM-Factory",
"score": 3
}
|
#### File: dicom_factory/tests/factory.py
```python
import unittest
from dicom_factory.factory import DicomFactory
class TestFactory(unittest.TestCase):
def test_create_factory_with_custom_data_size_works_properly(self):
data_size = (100, 100)
factory_args = {'Rows': data_size[0], 'Columns': data_size[1]}
dicom = DicomFactory.build(factory_args)
self.assertEqual(data_size, dicom.pixel_array.shape)
def test_create_factory_with_custom_series_adds_series_description(self):
expected_series = 'leg'
factory_args = {'SeriesDescription': expected_series}
dicom = DicomFactory.build(factory_args)
self.assertEqual(expected_series, dicom.SeriesDescription)
def test_create_factory_with_unsupported_arguments_raises_value_error(self):
with self.assertRaises(ValueError):
factory_args = {'FakeArg': 123}
DicomFactory.build(factory_args)
```
|
{
"source": "jdecouchant/PAG",
"score": 3
}
|
#### File: javaCode/scripts/6_cdf_bandwidth_vs.py
```python
import sys
import os
import re
import scipy.stats as stats
from matplotlib.pyplot import *
from numpy import *
# Goal: follow the average bandwidth of all nodes over time
if len(sys.argv) == 1:
print "Goal: Give the cdf of the average bandwidth of each node during the overall session"
print "Usage: ./4_cdf_bandwidth.py bargossip_dir bargossip_label cofree_dir cofree_label"
sys.exit()
def x_cdf(dir):
avg_list = []
nb_nodes = 0
for filename in os.listdir(dir):
if re.search("downloadBandwidth", filename) == None:
continue
avg_bdw = 0
f = open(dir+"/"+filename, "r")
array_line = map(int, f.readline().split(' '))
nodeId = int(array_line[0])
nb_nodes += 1
nb_round = 0
for line in f:
array_line = map(int, line.split(' '))
roundId = array_line[0]
nodeState = array_line[1]
bdwTotal = array_line[2]
bdwUpdates = array_line[3]
bdwLog = array_line[4]
nb_round += 1
avg_bdw += bdwTotal
avg_list.append(avg_bdw / nb_round)
f.close()
precision = 0.1
max_bdw = 1400.0
min_bdw = 0.0
res = [0] * int(((max_bdw - min_bdw)/precision))
for avg in avg_list:
index = 0
value = min_bdw
while avg > value and index < ((max_bdw - min_bdw)/precision):
res[index] += 1
value += precision
index += 1
for i in range(len(res)):
res[i] = 100 - (res[i] * 100)/nb_nodes
x = [0] * int(((max_bdw - min_bdw)/precision))
value = min_bdw
for index in range(len(res)):
x[index] = value
value += precision
return (x, res)
# Main code
(x0,y0) = x_cdf(sys.argv[1])
(x1,y1) = x_cdf(sys.argv[3])
plot(x0, y0, 'k', linewidth=2, label=sys.argv[2]) # k for black
plot(x1, y1, 'k:', linewidth=2, label=sys.argv[4]) # k for black
#p2 = plot(roundList, bdwUpdatesList, 'k--', linewidth=2, label="Updates part")
#p3 = plot(roundList, bdwLogList, 'k:', linewidth=2, label="Log part")
#plt.xticks(tf)
#xt = linspace(1, len(jitteredRoundsList), 4)
#xticks(xt)
#title('my plot')
tick_params(axis='both', which='major', labelsize=18)
ylabel('Percentage of nodes (cumulative distribution)', fontsize=18)
xlabel('Bandwidth in kbps', fontsize=18)
legend(loc="lower right", prop={'size':18})
ylim(ymax=100, ymin=0.1)
xlim(xmax=700, xmin=450)
show()
#savefig('2_average_bandwidth.pdf')
#os.system("pdfcrop percentageNonJitteredRounds.pdf percentageNonJitteredRounds.pdf")
```
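For readers untangling `x_cdf`, its binning loop amounts (up to the original's integer division) to the vectorised cumulative-distribution computation below; the bandwidth values are invented stand-ins for `avg_list`.
```python
import numpy as np

# Invented per-node average bandwidths in kbps (stand-ins for avg_list).
avgs = np.array([480.0, 502.5, 510.0, 498.2, 530.7])

precision, min_bdw, max_bdw = 0.1, 0.0, 1400.0
x = np.arange(min_bdw, max_bdw, precision)

# Percentage of nodes whose average bandwidth is <= x: the cumulative
# distribution the script plots on the y axis.
y = 100.0 * (avgs[None, :] <= x[:, None]).mean(axis=1)
```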
|
{
"source": "jdede/tools",
"score": 2
}
|
#### File: tools/misc/multiRun.py
```python
import argparse
import threading
import Queue
import subprocess
import datetime
import os
import sys
import shutil
import time
class executeThread(threading.Thread):
## \brief Worker thread
#
# Execute jar file in separate thread.
commands = Queue.Queue()
printLock = threading.Lock()
breakSimulationLock = threading.Lock()
breakSimulation = False
def run(self):
while True:
# Check if we should break all simulation runs
breakLoop = False
executeThread.breakSimulationLock.acquire()
breakLoop = executeThread.breakSimulation
executeThread.breakSimulationLock.release()
if breakLoop:
break
# Get new command from the queue, execute and mark as done
command = executeThread.commands.get()
self.executeCommand(command)
executeThread.commands.task_done()
def executeCommand(self, cmd):
## \brief Execute command
#
# Execute jar file, into stdout and stderr into files inside working
# dir. Use command "java -jar <cmd>"
#
# \param cmd Dict with command, working dir, ...
try:
# If breakSimulation set: Skip remaining simulation runs
executeThread.breakSimulationLock.acquire()
if executeThread.breakSimulation:
executeThread.breakSimulationLock.release()
return
executeThread.breakSimulationLock.release()
# Output files for stdout and stderr output of executed file
stdoutFileName = os.path.join(cmd["path"], "stdout.log")
stderrFileName = os.path.join(cmd["path"], "stderr.log")
stdoutFile = open(stdoutFileName, "w")
stderrFile = open(stderrFileName, "w")
self.log("Starting execution of \"" + cmd['cmd'] + \
"\" in path \"" + cmd['path'] + "\"")
# Start process
proc = subprocess.Popen(['java','-Djava.awt.headless=true', '-jar', cmd['cmd']],
stdout=stdoutFile, stderr=stderrFile, cwd=cmd['path'])
proc.wait()
ret = proc.returncode
# Remove simulation file. Copy available in the base directory
os.remove(os.path.join(cmd['path'], cmd['cmd']))
# Return value indicated error and user marked that this breaks
# execution of remaining simulations
if ret and cmd["break"]:
executeThread.breakSimulationLock.acquire()
executeThread.breakSimulation = True
executeThread.breakSimulationLock.release()
self.log("Path \"" + cmd["path"] + "\" done, return code: " + \
str(ret))
except KeyboardInterrupt:
# Inform others that a problem occured
executeThread.breakSimulationLock.acquire()
executeThread.breakSimulation = True
executeThread.breakSimulationLock.release()
finally:
try:
# Release lock, we could have
executeThread.breakSimulationLock.release()
executeThread.printLock.release()
except threading.ThreadError:
# Catch errors caused by not having a lock
pass
stdoutFile.close()
stderrFile.close()
def log(self, msg):
## \brief Log function
#
# Convenience function for logging
#
# \param msg Log message to print
# Ensure only one threat at time prints to screen using lock
executeThread.printLock.acquire()
print "(" + threading.current_thread().name + ") " + \
datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S") + \
": " + str(msg)
executeThread.printLock.release()
##
# Main App
##
parser = argparse.ArgumentParser(
description='Run multiple jar files in separate directories and ' +\
'threads using the command "java -jar <progname>"')
parser.add_argument('progname', help='Name of jar file')
parser.add_argument('--max_threads', type=int, default=4, dest='max_threads',
help='Number of maximum concurrent threads')
parser.add_argument('--runs', type=int, default=10, dest='runs', metavar="N",
help='Run given jar file RUNS times (montecarlo simulation)')
parser.add_argument('--break', dest='break_runs', action='store_const',
const=True, default=False,
help='Break remaining runs in case of return value is not 0')
parser.add_argument('--execute', dest='exe', metavar="<program name>",
default=None, help="Execute command when script has finished")
args = parser.parse_args()
print "Number of runs: ", args.runs
print "Number of threads:", args.max_threads
if args.exe:
print "Executing \"" + args.exe + "\" when done"
if not os.path.isfile(args.progname):
print "File not found:", args.progname
sys.exit(1)
# Place each run in separate folder including date
simulationFolderName=datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
if os.path.exists(simulationFolderName):
print "Simulation folder exists! Aborting"
sys.exit(1)
os.makedirs(simulationFolderName)
print "Storing results into", simulationFolderName
# Start threads
threads = [executeThread() for i in range(args.max_threads)]
for thread in threads:
thread.setDaemon(True)
thread.start()
cmds = []
# Copy one version of the app to the base directory. The other copies can be
# removed later
shutil.copy(args.progname, os.path.join(os.path.abspath("."), simulationFolderName))
# Generate a separate command for each simulation run. Create corresponding
# folder, execute output inside this folder
for i in range(args.runs):
progpath = os.path.join(os.path.abspath("."), simulationFolderName, "run_" + str(i+1))
os.makedirs(progpath)
shutil.copy(args.progname, progpath)
metainfo = {}
metainfo["cmd"] = args.progname
metainfo["path"] = progpath
metainfo["break"] = args.break_runs
cmds.append(metainfo)
# Enqueue commands. Worker thread will take commands out of this queue
for cmd in cmds:
executeThread.commands.put(cmd)
while (True):
time.sleep(1)
# Check if we should break the simulation
breakEverything = False
executeThread.breakSimulationLock.acquire()
breakEverything = executeThread.breakSimulation
executeThread.breakSimulationLock.release()
if executeThread.commands.empty():
# Wait until all threads are done
executeThread.commands.join()
break
if breakEverything:
break
if args.exe != None:
print "Executing \"" + args.exe + "\""
os.system(args.exe)
```
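An example invocation showing how the argparse options above combine; the jar name is invented and, since the script uses Python 2 constructs (`Queue`, print statements), a Python 2 interpreter is assumed.
```python
# Equivalent to the shell command:
#   python2 multiRun.py simulation.jar --runs 20 --max_threads 8 --break
import subprocess

subprocess.call([
    "python2", "multiRun.py", "simulation.jar",
    "--runs", "20", "--max_threads", "8", "--break",
])
```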
|
{
"source": "JDeepD/RailMgmtSys",
"score": 3
}
|
#### File: RailMgmtSys/railsys/__main__.py
```python
from __future__ import (absolute_import, print_function, with_statement)
import animations.intro as intro # pylint: disable=import-error,unused-import
import core.prompts as prmpt # pylint: disable=import-error,no-name-in-module
import core.fmgmt as dbase # pylint: disable=import-error,no-name-in-module
# o = prmpt.prompt()
# o.intro_prompt()
class Ask:
"""This will be the main class interacting with the user"""
def __init__(self):
self.ask = prmpt.prompt()
def __str__(self):
print("This is the Ask class. railsys/__main__.")
def intro_ask(self):
"""Asks the questions when the program is first launched"""
self.ask.intro_prompt()
inp = input("Enter your choice : ")
return inp
def train_ask(self):
""" To be written """
self.ask.train_book_prompt()
inp = input("Enter your choice : ")
return inp
def class_ask(self):
""" To be written """
self.ask.train_opts_prompt()
inp = input("Enter your choice : ")
return inp
def date_ask(self):
"""To be written"""
class Admin:
"This class contains methods that allows\
adding new Train schedules to the database"
def __init__(self):
self.db = dbase.MakeDb("TrainInfo") # pylint: disable=invalid-name
def add_info(self, trainid, train_name, journey, timing):
"""To be written"""
self.db.store_values(trainid, train_name, journey, timing)
def del_info(self, trainid, train_name, journey, timing):
"""To be written"""
self.db.delete_data(trainid, train_name, journey, timing)
```
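A rough usage sketch of the two classes above, assuming they were importable as a module (in the source they live in `railsys/__main__.py`) and that `core.fmgmt.MakeDb` persists whatever `Admin` hands it; the train details and the meaning of menu choice "1" are invented.
```python
# Hypothetical session built on the classes defined above.
admin = Admin()
admin.add_info("12951", "Mumbai Rajdhani", "NDLS-BCT", "16:25")  # invented train

ask = Ask()
choice = ask.intro_ask()   # prints the intro prompt and returns the raw input
if choice == "1":          # what "1" maps to depends on core.prompts.prompt()
    ask.train_ask()
```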
|
{
"source": "jdeepe/Dynamodb-ORM-Demo",
"score": 2
}
|
#### File: api/dataset/views.py
```python
from flask import Blueprint, Response, request
from settings import Config
from .models import Dataset
blueprint = Blueprint("dataset", __name__)
@blueprint.route("/", methods=["GET"])
def get_dataset():
kwargs = request.json
data_schema = Dataset.get(kwargs["name"])
return dict(data_schema)
@blueprint.route("/", methods=["POST"])
def update_dataset():
kwargs = request.json
name = kwargs.pop("name")
dataset = Dataset(name)
actions = []
try:
for key in kwargs:
if kwargs[key]:
actions.append(getattr(Dataset, key).set(kwargs[key]))
else:
actions.append(getattr(Dataset, key).remove())
except Exception as err:
return Response(str(err), status=400)
response = dataset.update(actions=actions)
return {"data": {"updateDataSet": response}}
@blueprint.route("/delete", methods=["POST"])
def delete_dataset():
kwargs = request.json
data_schema = Dataset.get(kwargs["name"])
data_schema.delete()
return {"data": {"deleteDataSet": "Delete successful"}}
@blueprint.route("/list", methods=["POST"])
def list_dataset():
kwargs = request.json
filter_condition = kwargs["filter"] if kwargs and "filter" in kwargs else None
limit = (
kwargs["limit"] if kwargs and "limit" in kwargs else Config.DATASET_MAX_LIMIT
)
last_evaluated_key = (
kwargs["nextToken"] if kwargs and "nextToken" in kwargs else None
)
response = {"listDataSets": {"items": [], "nextToken": None}}
scan = Dataset.scan(
limit=limit,
filter_condition=filter_condition,
last_evaluated_key=last_evaluated_key,
)
for data_schema in scan:
response["listDataSets"]["items"].append(dict(data_schema))
response["listDataSets"]["nextToken"] = scan.last_evaluated_key
return {"data": response}
@blueprint.route("/count", methods=["POST"])
def count_dataset():
scan = Dataset.count()
response = {"countDataSets": scan}
return {"data": response}
```
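A client-side sketch of the routes above using `requests`; the host, port and the `/dataset` prefix under which the blueprint is registered are assumptions, and the dataset name/owner values are placeholders rather than rows from a live table.
```python
import requests

BASE = "http://localhost:5000/dataset"  # host/port and blueprint prefix assumed

# List datasets; filter/limit/nextToken are all optional in list_dataset().
resp = requests.post(f"{BASE}/list", json={"limit": 10})
print(resp.json()["data"]["listDataSets"]["items"])

# Update a dataset; posting an empty value would trigger attribute removal.
requests.post(f"{BASE}/", json={"name": "Forecast_v1", "owner": "Forecast team"})
```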
#### File: Dynamodb-ORM-Demo/api/health.py
```python
from flask import Blueprint
blueprint = Blueprint("health", __name__)
@blueprint.route("/", methods=["GET"])
def health():
return "ok"
```
#### File: Dynamodb-ORM-Demo/tests/__init__.py
```python
from api.dataset.models import DataSchema, Dataset
def verify_settings(model, p_key, settings):
details = eval(model).get(p_key)
for key, setting in settings.items():
print(getattr(details, key), setting)
setting = setting if setting else None
assert getattr(details, key) == setting
def load_data():
load_dataschema_data()
load_dataset()
def load_dataschema_data():
DataSchema.create_table(read_capacity_units=1, write_capacity_units=1, wait=True)
for i in range(50):
name = "WorkOrder_v%d" % i
data_schema = DataSchema(name)
data = {
"dataset": name,
"status": "RETIRED",
"description": "Schema for %s" % name,
"creation_timestamp": "2017-04-24T11:38:41.164Z",
"last_updated_timestamp": "2017-12-24T22:38:47.346Z",
}
for key in data:
setattr(data_schema, key, data[key])
data_schema.save()
def load_dataset():
for i in range(50):
data = {
"name": "Forecast_v%d" % i,
"description": "Providing a demand forecast",
"status": "ACTIVE",
"type": "TRANSACTIONAL",
"frequency": "DAILY",
"classification": "Orange",
"owner": "Forecast team",
"owner_contact": "forecast@",
"service_arn": "arn:aws:s3:::org.ent-data-lake",
"location_pointer": "my.org/Forecasting/Forecast_v1",
"creation_timestamp": "2017-01-12T11:39:43.164Z",
"derived_from": None,
"replaced_by": None,
"from_date": "2017-01-03",
"to_date": None,
"schema_name": "Forecast_schema_v1",
"schema_location": None,
"data_lineage": None,
"compliance": None,
"enforce_sla": None,
"sla_cron": "0 6 * * *",
"tags": [
{"key": "org", "value": "CRM"},
{"key": "cost", "value": "SupplyChain"},
],
"retention": None,
"encryption": None,
"encryption_type": None,
"encryption_kms_id": None,
"cross_region_replication": None,
"config_crr": None,
"password": None,
"s3_logging_enabled": None,
"config_s3_logging": None,
"requestor_pays_enabled": None,
}
name = "Forecast_v%d" % i
dataset = Dataset(name)
for key in data:
setattr(dataset, key, data[key])
dataset.save()
```
|
{
"source": "jdeerhake/bacon-finder-py",
"score": 3
}
|
#### File: jdeerhake/bacon-finder-py/data.py
```python
from actor import Actor
from movie import Movie
from performance import Performance
def format_data(str):
return str.rstrip().split('|')
def get_actors():
actor_data = map(format_data, open('data/actors.txt', 'r').readlines())
actors = map(lambda data: Actor(data[0], data[1]), actor_data)
return dict([(actor.id, actor) for actor in actors])
def get_movies():
movie_data = map(format_data, open('data/movies.txt', 'r').readlines())
movies = map(lambda data: Movie(data[0], data[1]), movie_data)
return dict([(movie.id, movie) for movie in movies])
def get_performances(movies, actors):
perf_data = map(format_data, open('data/movie-actors.txt', 'r').readlines())
perfs = map(lambda data: Performance(movies[int(data[0])], actors[int(data[1])]), perf_data)
map(lambda perf: perf.actor.add_performance(perf), perfs)
map(lambda perf: perf.movie.add_performance(perf), perfs)
return perfs
```
#### File: jdeerhake/bacon-finder-py/movie.py
```python
class Movie:
def __init__(self, id, name):
self.name = name
self.id = int(id)
self.performances = []
def add_performance(self, perf):
self.performances.append(perf)
def actors(self):
return [perf.actor for perf in self.performances]
def has_actor(self, actor):
return actor in self.actors()
```
|
{
"source": "jdee-smith/PyForePa",
"score": 3
}
|
#### File: PyForePa/postprocess/plot.py
```python
import matplotlib.pyplot as plt
import numpy as np
def plot_forecast(
self,
title="Forecast",
x_lab="Index",
y_lab="Y",
add_series=True,
vline=True,
ci=True,
time_index=True,
x_rotation=45,
**kwargs
):
"""
    Plots forecast. Note: this implementation is rough and due for a rewrite.
"""
series_len = len(self.series)
h = len(self.forecasts)
series_begin = self.series["index"][0]
series_end = self.series["index"][-1]
forecast_end = series_end + (h + 1)
dtype = self.series["index"].dtype
if add_series is True:
if time_index is True:
x = np.arange(series_begin, forecast_end, dtype=dtype).astype("O")
else:
x = np.arange(1, series_len + h + 1, 1)
y = np.concatenate((self.series["X"], self.forecasts["point"]))
else:
if time_index is True:
            x = np.arange(series_end + 1, forecast_end, dtype=dtype).astype("O")
else:
x = np.arange(1, h + 1, 1)
y = self.forecasts["point"]
plt.plot(x, y, **kwargs)
if add_series is True and vline is True:
if time_index is True:
plt.axvline(x=series_end.astype("O"), linestyle="dotted")
else:
plt.axvline(x=series_len, linestyle="dotted")
plt.title(title)
plt.ylabel(y_lab)
plt.xlabel(x_lab)
plt.xticks(rotation=x_rotation)
plt.tight_layout()
```
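A minimal sketch of the inputs `plot_forecast` expects: structured arrays carrying `index`/`X` for the observed series and `point` for the forecasts. The import path and the stand-in model object are assumptions, and the numbers are random.
```python
import numpy as np
from types import SimpleNamespace

from PyForePa.postprocess.plot import plot_forecast  # import path assumed

series = np.zeros(12, dtype=[("index", "int64"), ("X", "float64")])
series["index"] = np.arange(1, 13)
series["X"] = np.random.randn(12).cumsum()

forecasts = np.zeros(4, dtype=[("point", "float64")])
forecasts["point"] = series["X"][-1]  # flat forecast, purely illustrative

model = SimpleNamespace(series=series, forecasts=forecasts)
plot_forecast(model, title="Toy forecast", time_index=False)
```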
|
{
"source": "JDeeth/SimSigCollate",
"score": 2
}
|
#### File: SimSigCollate/tests/conftest.py
```python
import pytest
from app import create_app, db
from config import Config
# pylint: disable=redefined-outer-name
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
class TestConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = "sqlite:///:memory:"
@pytest.fixture
def app():
yield create_app(TestConfig)
@pytest.fixture
def client(app):
with app.app_context():
db.create_all()
yield app.test_client()
```
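A sketch of a test module that would consume the `client` fixture above; the route and the accepted status codes are assumptions about the application, not taken from the source.
```python
# tests/test_smoke.py (hypothetical)
def test_app_responds(client):
    response = client.get("/")
    # Without knowing the registered routes, only assert that we got a response.
    assert response.status_code in (200, 302, 404)
```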
|
{
"source": "jdeferio/healthdbModels",
"score": 3
}
|
#### File: healthdbModels/healthdbModels/field_types.py
```python
import re
import uuid
import pandas as pd
import sqlalchemy.types as types
from sqlalchemy.dialects.postgresql import UUID as psqlUUID
class UUID(types.TypeDecorator):
"""Converts input values to psqlUUID."""
impl = psqlUUID
def process_bind_param(self, value, dialect):
if pd.isnull(value):
return None
if not isinstance(value, uuid.UUID):
if isinstance(value, bytes):
value = uuid.UUID(bytes=value)
elif isinstance(value, int):
value = uuid.UUID(int=value)
elif isinstance(value, str):
value = uuid.UUID(value)
return str(value)
def process_result_value(self, value, dialect):
return value
class Integer(types.TypeDecorator):
"""Converts input values to Integer type, accepts np.nan as null value."""
impl = types.Integer
def process_bind_param(self, value, dialect):
if pd.isnull(value):
return None
return int(value)
def process_result_value(self, value, dialect):
return value
class Telephone(types.TypeDecorator):
"""Converts input values to 10-digit string Ensures that input has exactly
10 digits If string strips all non-digit characters."""
impl = types.String
def process_bind_param(self, value, dialect):
if pd.isnull(value):
return None
if isinstance(value, str):
value = re.sub("[^\d]", "", value)
elif isinstance(value, int):
value = str(value)
if len(value) == 11 and value[0] == "1":
value = value[1:]
        if len(value) != 10:
            raise ValueError("bad contact field formatting")
return value
def process_result_value(self, value, dialect):
return value
```
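A sketch of how these decorators might be attached to a declarative model. The table and column names are invented, and the SQLAlchemy 1.4+ location of `declarative_base` is an assumption.
```python
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base  # SQLAlchemy 1.4+ location assumed

from healthdbModels.field_types import UUID, Integer, Telephone

Base = declarative_base()


class Patient(Base):  # hypothetical model
    __tablename__ = "patient"

    id = sa.Column(UUID, primary_key=True)  # accepts uuid.UUID, str, int, bytes
    visit_count = sa.Column(Integer)        # treats NaN as NULL
    phone = sa.Column(Telephone(10))        # normalized to 10 digits on bind
```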
|
{
"source": "jdeffo/Useful-Python-Scripts",
"score": 3
}
|
#### File: Useful-Python-Scripts/EmailScript/TenEmails.py
```python
from __future__ import print_function
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
#Additional Imports
from email.mime.text import MIMEText
import base64
from apiclient import errors
import random
import time
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
#Scopes
SCOPES = 'https://www.googleapis.com/auth/gmail.send'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Python Gmail API'
#Get the credentials
def get_credentials():
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gmail-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
#Create Message
def create_message(sender, to, subject, message_text):
message = MIMEText(message_text)
message['to'] = to
message['from'] = sender
message['subject'] = subject
return {'raw': base64.urlsafe_b64encode(message.as_string())}
#Send message
def send_message(service, user_id, message):
try:
message = (service.users().messages().send(userId=user_id, body=message).execute())
print('Message Id: %s' % message['id'])
return message
except errors.HttpError as error:
print('An error occurred: %s' % error)
#Create a random subject
def create_subject():
words = ['Once', 'you', 'have', 'the', 'domain', 'name', 'and', 'port', 'information', 'for', 'your', 'email', 'provider', 'tuple', 'contains', 'information', 'about', 'a', 'single']
subject_len = random.randrange(1, 5)
subject = ""
for i in range(subject_len):
subject += " "+words[random.randrange(0, len(words))]
return subject.strip()
#Create a random body
def create_body():
words = ['Once', 'you', 'have', 'the', 'domain', 'name', 'and', 'port', 'information', 'for', 'your', 'email', 'provider', 'tuple', 'contains', 'information', 'about', 'a', 'single']
body_len = random.randrange(1, 10)
body = ""
for i in range(body_len):
body += " "+words[random.randrange(0, len(words))]
return body.strip()
#Main
def main():
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
#Message sender and receiver
sender = "<EMAIL>"
to = ""
#Send messsage on random intervals
for i in range(10):
waiting = random.randrange(1, 10)
print("waiting for {} seconds".format(waiting))
time.sleep(waiting)
subject = create_subject()
message_text = create_body()
msg = create_message(sender, to, subject, message_text)
send_message(service, "me", msg)
print("done")
if __name__ == '__main__':
main()
```
#### File: Useful-Python-Scripts/MonitorStockPrices/alerts_driver.py
```python
import time
from processes import *
#Lists
portfolioList = {}
watchList = {}
#Get Thresholds
def getThresholds():
global portfolioList
global watchList
#On Server: '/root/Scripts/Stocks/Lists/portfolio.txt'
with open("Lists/portfolio.txt") as _portfolio_file:
for line in _portfolio_file:
_temp_line = line.rstrip('\n')
if(_temp_line[0] != "#" ):
ticker, holdings, upperThreshold, lowerThreshold = _temp_line.split('|')
if(ticker != 'Portfolio' and ticker != 'Cash'):
upperThreshold = float(upperThreshold)
lowerThreshold = float(lowerThreshold)
portfolioList[ticker] = {"Holdings": holdings, "UpperThreshold": upperThreshold,
"LowerThreshold": lowerThreshold}
_portfolio_file.close()
#On Server: '/root/Scripts/Stocks/Lists/watchlist.txt'
with open("Lists/watchlist.txt") as _watchlist_file:
for line in _watchlist_file:
_temp_line = line.rstrip('\n')
if(_temp_line[0] != "#"):
ticker, upperThreshold, lowerThreshold = _temp_line.split('|')
upperThreshold = float(upperThreshold)
lowerThreshold = float(lowerThreshold)
watchList[ticker] = {"UpperThreshold": upperThreshold, "LowerThreshold": lowerThreshold}
_watchlist_file.close()
#Update Thresholds
def updateThresholds():
global portfolioList
global watchList
#Portfolio List:
#On Server: '/root/Scripts/Stocks/Lists/portfolio.txt'
_portfolioList_writer = open('Lists/portfolio.txt', 'w')
_portfolioList_writer.write('#Ticker|Holding Qty|Upper Threshold|Lower Threshold\n')
for ticker, values in portfolioList.items():
_portfolioList_writer.write(ticker + '|' + str(values["Holdings"]) + '|' +
str(values["UpperThreshold"]) + '|' + str(values["LowerThreshold"]) + '\n')
_portfolioList_writer.close()
#Watch List:
#On Server: '/root/Scripts/Stocks/Lists/watchlist.txt'
_watchList_writer = open('Lists/watchlist.txt', 'w')
_watchList_writer.write('#Ticker|Upper Threshold|Lower Threshold\n')
for ticker, values in watchList.items():
_watchList_writer.write(ticker + '|' + str(values["UpperThreshold"]) + '|' +
str(values["LowerThreshold"]) + '\n')
_watchList_writer.close()
#Main
def main():
global portfolioList
global watchList
getThresholds()
#Upper
for ticker, thresholds in portfolioList.items():
if(ticker != 'Portfolio' and ticker != 'Cash'):
res = upper_threshold_alert(ticker, thresholds)
if(res != False):
portfolioList[ticker] = {"UpperThreshold": res['upperThreshold'],
"LowerThreshold": res['lowerThreshold']}
for ticker, thresholds in watchList.items():
if(ticker != 'Portfolio' and ticker != 'Cash'):
res = upper_threshold_alert(ticker, thresholds)
if(res != False):
watchList[ticker] = {"UpperThreshold": res['upperThreshold'],
"LowerThreshold": res['lowerThreshold']}
#Lower
for ticker, thresholds in portfolioList.items():
if(ticker != 'Portfolio' and ticker != 'Cash'):
res = lower_threshold_alert(ticker, thresholds)
if(res != False):
portfolioList[ticker] = {"UpperThreshold": res['upperThreshold'],
"LowerThreshold": res['lowerThreshold']}
for ticker, thresholds in watchList.items():
if(ticker != 'Portfolio' and ticker != 'Cash'):
res = lower_threshold_alert(ticker, thresholds)
if(res != False):
watchList[ticker] = {"UpperThreshold": res['upperThreshold'],
"LowerThreshold": res['lowerThreshold']}
updateThresholds()
if __name__ == '__main__':
main()
```
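Both input files are plain pipe-delimited text. Illustrative contents (tickers and numbers invented) matching the `split('|')` parsing above and the header lines written by `updateThresholds`:
```python
# Lists/portfolio.txt -- columns as read by getThresholds()/updateThresholds()
PORTFOLIO_TXT = """\
#Ticker|Holding Qty|Upper Threshold|Lower Threshold
Portfolio|10000.0|null|null
Cash|2500.0|null|null
AAPL|10|210.0|190.0
"""

# Lists/watchlist.txt
WATCHLIST_TXT = """\
#Ticker|Upper Threshold|Lower Threshold
MSFT|340.0|300.0
"""
```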
#### File: Useful-Python-Scripts/MonitorStockPrices/processes.py
```python
import json
import requests
import datetime
from emailScript import *
#Report Var
report = """"""
#Get Quote for Stock
def get_quote(ticker):
#Robinhood API
fund_link = "https://api.robinhood.com/fundamentals/" + ticker + "/"
quote_link = "https://api.robinhood.com/quotes/" + ticker + "/"
#Get Data
fund_rsp = requests.get(fund_link)
quote_rsp = requests.get(quote_link)
if fund_rsp.status_code in (200,):
fund_data = json.loads(fund_rsp.content.decode('unicode_escape'))
if quote_rsp.status_code in (200,):
quote_data = json.loads(quote_rsp.content.decode('unicode_escape'))
#Results Dictionary
results = {}
results['ticker'] = ticker
results['op'] = fund_data["open"]
results['cp'] = quote_data['last_trade_price']
return results
#Add Message to Report
def add_message_text(msg):
global report
report += msg
#Get a Single Report
def get_single_report(ticker):
quote = get_quote(ticker)
msg_txt = """
Ticker: {}
Opening Price: {}
Current Price: {}
Differential {} ({}%)
"""
cp = quote['cp'].replace(",", "")
op = quote['op'].replace(",", "")
diffNum = float(cp) - float(op)
diffPerc = (diffNum/float(cp)) * 100
diffPerc = "{0:.2f}".format(diffPerc)
diffStr = str(diffNum)
if(diffNum > 0):
diffStr = "+" + diffStr
add_message_text(msg_txt.format(ticker, quote['op'], quote['cp'], diffStr, diffPerc))
#Get Full Report
def get_full_report(portfolio, watchList):
global report
report = ""
portfolio_txt = """
Portfolio Open: {}
Portfolio Value: {}
Differential {} ({}%)
"""
portfolio_open = portfolio['Portfolio']
portfolio_value = portfolio['Cash']
#Determine Portoflio Value
for ticker, qty in portfolio.items():
if(ticker != 'Portfolio' and ticker != 'Cash'):
temp_report = get_quote(ticker)
value = float(temp_report['cp'].replace(",", "")) * float(qty)
portfolio_value += value
#Determine Differential in Portfolio Value
diffNum = float(portfolio_value) - float(portfolio_open)
diffPerc = (diffNum/float(portfolio_open)) * 100
diffPerc = "{0:.2f}".format(diffPerc)
diffStr = str(diffNum)
if(diffNum > 0):
diffStr = "+" + diffStr
add_message_text(portfolio_txt.format(portfolio_open, portfolio_value, diffStr, diffPerc))
#Holdings
add_message_text("\nHoldings:\n")
for ticker, holdings in portfolio.items():
if(ticker != 'Portfolio' and ticker != 'Cash'):
get_single_report(ticker)
#WatchList
add_message_text("\nWatch List:\n")
for ticker, holdings in watchList.items():
if(ticker != 'Portfolio' and ticker != 'Cash'):
get_single_report(ticker)
#Send report and return
send_report(report)
return portfolio_value
#Send report
def send_report(msg):
subj = "Stock Report: " + datetime.date.today().strftime("%m-%d-%Y")
msg_txt = msg
print(msg_txt)
send_stock_report(subj, msg_txt)
#Alert Stock
#Check quote and send alert if stock has reached upper threshold
def upper_threshold_alert(ticker, thresholds):
report = get_quote(ticker)
cp = report['cp'].replace(",", "")
price = float(cp)
if(thresholds["UpperThreshold"] >= price):
return False;
#Alert Threshold
msg_txt = """
Upper Threshold Reached
Ticker: {}
Current Price: {}
"""
subj = "{} Threshold Alert"
send_stock_report(subj.format(report['ticker']), msg_txt.format(report['ticker'], report['cp']))
upperThreshold = price * 1.05
lowerThreshold = price - (price * .05)
thresholds = {}
thresholds['upperThreshold'] = upperThreshold
thresholds['lowerThreshold'] = lowerThreshold
return thresholds;
#Check quote and send alert if stock has reached lower threshold
def lower_threshold_alert(ticker, thresholds):
report = get_quote(ticker)
cp = report['cp'].replace(",", "")
price = float(cp)
if(price >= thresholds["LowerThreshold"]):
return False;
#Alert Threshold
msg_txt = """
Lower Threshold Reached
Ticker: {}
Current Price: {}
"""
subj = "{} Threshold Alert"
send_stock_report(subj.format(report['ticker']), msg_txt.format(report['ticker'], report['cp']))
upperThreshold = price * 1.05
lowerThreshold = price - (price * .05)
thresholds = {}
thresholds['upperThreshold'] = upperThreshold
thresholds['lowerThreshold'] = lowerThreshold
return thresholds;
```
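An offline sketch of the data flow (quote values invented): `get_quote` returns the ticker plus `op`/`cp` price strings, and the alert helpers rebuild both thresholds at ±5% of the current price.
```python
# Stand-in for get_quote("AAPL"); real calls hit the Robinhood API endpoints.
quote = {"ticker": "AAPL", "op": "198.20", "cp": "201.35"}

price = float(quote["cp"].replace(",", ""))
new_thresholds = {
    "upperThreshold": price * 1.05,
    "lowerThreshold": price - price * 0.05,
}
print(new_thresholds)
```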
#### File: Useful-Python-Scripts/MonitorStockPrices/report_driver.py
```python
from processes import *
#Get Report List
def getReportList():
portfolioList = {}
watchList = {}
result = {}
#On Server: '/root/Scripts/Stocks/Lists/portfolio.txt'
with open("Lists/portfolio.txt") as _portfolio_file:
for line in _portfolio_file:
_temp_line = line.rstrip('\n')
if(_temp_line[0] != "#"):
ticker, holdings, upperThreshold, lowerThreshold = _temp_line.split('|')
holdings = float(holdings)
portfolioList[ticker] = holdings
_portfolio_file.close()
#On Server: '/root/Scripts/Stocks/Lists/watchlist.txt'
with open("Lists/watchlist.txt") as _watchlist_file:
for line in _watchlist_file:
_temp_line = line.rstrip('\n')
if(_temp_line[0] != "#"):
ticker, upperThreshold, lowerThreshold = _temp_line.split('|')
watchList[ticker] = ticker
_watchlist_file.close()
result["Portfolio"] = portfolioList
result["WatchList"] = watchList
return result
def update_portfolio(value):
#On Server: "/root/Scripts/Stocks/Lists/portfolio.txt"
with open("Lists/portfolio.txt", 'r') as _portfolio_file:
data = _portfolio_file.readlines()
i = 0
for line in data:
_temp_line = line.rstrip('\n')
ticker, holdings, upperThreshold, lowerThreshold = _temp_line.split('|')
if(ticker == "Portfolio"):
data[i] = "Portfolio|" + str(value) +"|null|null"
i += 1
#On Server: "/root/Scripts/Stocks/Lists/portfolio.txt"
with open("Lists/portfolio.txt", 'w') as _portfolio_file:
_portfolio_file.writelines(data)
#Main
def main():
result = getReportList()
value = get_full_report(result["Portfolio"], result["WatchList"])
update_portfolio(value)
if __name__ == '__main__':
main()
```
|
{
"source": "jdeflander/chunked",
"score": 3
}
|
#### File: jdeflander/chunked/httpchunked.py
```python
from io import DEFAULT_BUFFER_SIZE
from re import compile
from typing import BinaryIO
def encode(dst: BinaryIO, src: BinaryIO) -> None:
"""
Encodes from the given source and writes chunks to the given destination.
"""
while True:
chunk_data = src.read(DEFAULT_BUFFER_SIZE)
chunk_size = len(chunk_data)
args = chunk_size, chunk_data
chunk = b"%X\r\n%s\r\n" % args
dst.write(chunk)
if chunk_size == 0:
break
def decode(dst: BinaryIO, src: BinaryIO) -> None:
"""
Decodes from the given source and writes chunk contents to the given
destination.
"""
while True:
chunk = src.readline()
match = _CHUNK_PATTERN.fullmatch(chunk)
if match is None:
raise ValueError
chunk_size_str = match.group("chunk_size")
chunk_size = int(chunk_size_str, 16)
if chunk_size == 0:
break
while chunk_size > 0:
buf = src.read(chunk_size)
dst.write(buf)
chunk_size -= len(buf)
crlf = src.readline()
if _CRLF_PATTERN.fullmatch(crlf) is None:
raise ValueError
for line in src:
if _CRLF_PATTERN.fullmatch(line) is not None:
return
if _TRAILER_PART_PATTERN.fullmatch(line) is None:
raise ValueError
_CHUNK_PATTERN = compile(
br"(?P<chunk_size>[\dA-F]+)"
br"(?:"
br";[-!#$%&'*+.^`|~\w]+"
br"(?"
br":=[-!#$%&'*+.^`|~\w]+|\""
br"(?:[\t !#-\[\]-~\x80-\xFF]|\\[\t \x21-\x7E\x80-\xFF])"
br"\""
br")?"
br")*"
br"\r\n"
)
_CRLF_PATTERN = compile(b"\r\n")
_TRAILER_PART_PATTERN = compile(
br"[-!#$%&'*+.^`|~\w]+:[ \t]*"
br"(?:"
br"(?:[\x21-\x7E\x80-\xFF](?:[ \t]+[\x21-\x7E\x80-\xFF])?)|(?:\r\n[ \t]+)"
br")*"
br"[ \t]*\r\n"
)
```
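A round-trip check of the two functions with in-memory streams; only the standard library is needed, and the module name in the import is taken from the file path above.
```python
from io import BytesIO

from httpchunked import decode, encode

payload = b"hello, chunked world" * 3

encoded = BytesIO()
encode(encoded, BytesIO(payload))  # size-prefixed chunks plus a 0-size terminator

decoded = BytesIO()
encoded.seek(0)
decode(decoded, encoded)           # consumes chunks until the terminating chunk

assert decoded.getvalue() == payload
```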
|
{
"source": "jdegene/ArcGIS-scripts",
"score": 3
}
|
#### File: jdegene/ArcGIS-scripts/GPS_extract.py
```python
import os
import arcpy
from arcpy import env
workDir = ".../inFol/" #Input-Folder, contains all files from the GPS device
outDir = ".../Output/" #Ouput-Folder
outFile = outDir + "GPS.txt"
d = open(outFile,"a") #create Output-TxT-File
d.write("Lat,Lon,Alt,Dev" + "\n") #header for Output-TxT-File
def CoordCalc(c): # function to convert into decimal degrees
decsec = float(c[-6:])
decdeg = decsec/60
if c[0] == "N":
result = float(c[1:3]) + decdeg
resultStr = str(result)
return resultStr
elif c[0] == "E":
result = float(c[1]) + decdeg
resultStr = str(result)
return resultStr
# Loop over all input files
for txtfile in os.listdir(workDir):
o = open(workDir + txtfile, "r")
oR = o.read()
split = oR.splitlines()
# Loop over lines in each file
for i in range(5,len(split)-1):
if split[i][0:8] == "Waypoint" or split[i][0:10] == "Trackpoint":
x = split[i].find('\tN') # Find position of first N, N = North coordinate
xx = x+1
m = split[i].find('m') # Find position of first m, m = altitude in m
northC = CoordCalc(split[i][xx:xx+10]) # convert to decimal degrees
eastC = CoordCalc(split[i][xx+11:xx+20])
height = split[i][m-4:m-1]
d.write(northC + "," + eastC + "," + height + "," + txtfile + "\n") #write N,E and altitude into textfile
o.close()
d.close()
env.workspace = ".../workspace/"
# Convert created .txt file into point layer file
in_Table = outDir + "GPS.txt"
y_coords = "Lat"
x_coords = "Lon"
z_coords = "Alt"
out_Layer = "GPSpoints"
saved_Feature = outDir + "GPS"
# Specify coordinate system
spRef = r"Coordinate Systems\Geographic Coordinate Systems\World\WGS 1984.prj"
arcpy.MakeXYEventLayer_management(in_Table, x_coords, y_coords, out_Layer, spRef, z_coords)
print arcpy.GetCount_management(out_Layer)
arcpy.CopyFeatures_management(out_Layer, saved_Feature)
```
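A worked example of the conversion `CoordCalc` performs on the 10-character waypoint slices (the coordinate is invented): hemisphere letter, two-digit degrees, then decimal minutes in the last six characters.
```python
coord = "N52 07.123"                 # invented sample in the expected layout
minutes = float(coord[-6:])          # 7.123
decimal_degrees = float(coord[1:3]) + minutes / 60.0
print(decimal_degrees)               # ~52.1187
```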
|
{
"source": "jdegregorio/gym-custom-pygame",
"score": 4
}
|
#### File: envs/kuiper_escape/lidar.py
```python
import math
# 3rd party imports
import pygame
import numpy as np
class Beam(pygame.sprite.Sprite):
"""Beam Class
The beam class is a sub-component of the final virtual lidar sensing
system. It consists of a single point of "light" that propogates out in an
assigned direction from the origin.
The beam object is represented as a pygame sprite, so that it can
optinoally be inserted and visualized within the game environment.
The most important method is "step_out", which iteratively moves the beam
of light outward until it hits several stop criteria (i.e. off screen, hits
rock, or max distance)
"""
def __init__(self, x, y, angle, step, max_radius, screen_size):
super(Beam, self).__init__()
self.x_init = x
self.y_init = y
self.x = x
self.y = y
self.radius = 0
self.angle = angle
self.collide = 0
self.step = step
self.color_rock = (255, 211, 0)
self.color_wall = (102, 255, 0)
self.screen_size = screen_size
self.max_radius = max_radius
self.surf_size = 5
self.surf = pygame.Surface((self.surf_size, self.surf_size))
self.rect = self.surf.get_rect(center = (x, y))
# Iteratively step beam outward until collision or off-screen
def beam_out(self, collide_sprites):
done = False
collision = False
while not done:
self.step_out()
for sprite in collide_sprites:
if sprite.rect.collidepoint(self.x, self.y):
collision = True
break
if collision:
self.surf.fill(self.color_rock)
self.collide = 1
done = True
elif math.sqrt((self.x - self.x_init)**2 + (self.y - self.y_init)**2) > self.max_radius:
self.x = self.x_init + math.cos(self.angle) * self.max_radius
self.y = self.y_init + math.sin(self.angle) * self.max_radius
self.surf.fill(self.color_wall)
done = True
elif self.x < 0:
self.x = 0
self.rect.left = 0
self.surf.fill(self.color_wall)
done = True
elif self.x > self.screen_size:
self.x = self.screen_size
self.rect.right = self.screen_size
self.surf.fill(self.color_wall)
done = True
elif self.y < 0:
self.y = 0
self.rect.top = 0
self.surf.fill(self.color_wall)
done = True
elif self.y > self.screen_size:
self.y = self.screen_size
self.rect.bottom = self.screen_size
self.surf.fill(self.color_wall)
done = True
# Move lidar beam outward one step
def step_out(self):
self.x += self.step * math.cos(self.angle)
self.y += self.step * math.sin(self.angle)
self.rect.centerx = int(self.x)
self.rect.centery = int(self.y)
self.radius = math.sqrt((self.x - self.x_init)**2 + (self.y - self.y_init)**2)
def get_state(self):
return (self.radius, self.collide)
class Lidar:
"""Lidar Array Class This class is a set of lidar beams sent off in all
directions. The resulting state representation is a set of "lidar points"
giving a sense for what is surrounding the lidar array.
"""
def __init__(self, x, y, n_beams, step, max_radius, screen_size):
self.x = x
self.y = y
self.n_beams = n_beams
self.step = step
self.max_radius = max_radius
self.screen_size = screen_size
self.angles = np.linspace(0, 2* math.pi, num=n_beams, endpoint=False)
def sync_position(self, sprite):
self.x = sprite.rect.centerx
self.y = sprite.rect.centery
def scan(self, collide_sprites):
# Send out beams
self.ls_beams = []
for angle in self.angles:
beam = Beam(
x=self.x,
y=self.y,
angle=angle,
step=self.step,
max_radius=self.max_radius,
screen_size=self.screen_size
)
self.ls_beams.append(beam)
for beam in self.ls_beams:
beam.beam_out(
collide_sprites
)
# Summarize state
ls_radius = []
ls_collide = []
for beam in self.ls_beams:
radius, collide = beam.get_state()
ls_radius.append(radius)
ls_collide.append(collide)
return ls_radius, ls_collide
def get_beams(self):
return self.ls_beams
```
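A minimal scan sketch; the import path is inferred from the file header, and the screen size, beam count and single rock sprite are invented.
```python
import pygame

from envs.kuiper_escape.lidar import Lidar  # package path assumed

pygame.init()

rock = pygame.sprite.Sprite()
rock.rect = pygame.Rect(400, 320, 32, 32)  # one obstacle to the right of centre
rocks = pygame.sprite.Group(rock)

lidar = Lidar(x=320, y=320, n_beams=8, step=4, max_radius=300, screen_size=640)
radii, hits = lidar.scan(rocks)
print(list(zip(radii, hits)))  # per-beam distance and whether a rock was hit
```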
|
{
"source": "jdegrootlutzner/redial",
"score": 3
}
|
#### File: jdegrootlutzner/redial/tts.py
```python
import os
import random
# export=GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials
def synthesize_text(text, output_filename, output_dir, voice=None):
"""
Synthesizes speech from the input string of text.
Female voice = 0, Male voice = 1
output filename doesn't need extension
"""
from google.cloud import texttospeech_v1beta1 as texttospeech
client = texttospeech.TextToSpeechClient()
input_text = texttospeech.types.SynthesisInput(text=text)
genders = (texttospeech.enums.SsmlVoiceGender.FEMALE, texttospeech.enums.SsmlVoiceGender.MALE)
if not voice:
gender = genders[random.randrange(0, 2)]
else:
gender = genders[voice]
# Note: the voice can also be specified by name.
# Names of voices can be retrieved with client.list_voices().
voice = texttospeech.types.VoiceSelectionParams(
language_code='en-US',
ssml_gender=gender)
audio_config = texttospeech.types.AudioConfig(
audio_encoding=texttospeech.enums.AudioEncoding.MP3)
response = client.synthesize_speech(input_text, voice, audio_config)
# The response's audio_content is binary.
mp3_filepath = os.path.join(output_dir, "%s.mp3" % output_filename)
with open(mp3_filepath, 'wb') as out:
out.write(response.audio_content)
print('Audio content written to file %s' % mp3_filepath)
wav_name = os.path.join(output_dir, "%s.wav" % output_filename)
print('Audio content re-written to file %s' % wav_name)
os.system("mpg321 -w %s %s" % (wav_name, mp3_filepath))
print('Deleting mp3')
os.remove(mp3_filepath)
#synthesize_text('Welcome to the story collector. By dialing 8 you can record a new story that will be come a part of Redial. You will have one minute to record a first person story. Pretend that you are telling your story to a friend.', 'record', '/home/pi/Desktop/telephone-project/recordings/instructions')
synthesize_text('Welcome to Redial. This old phone houses personal stories that are told and retold by people, just like you! You will hear a story and then retell the story in your own words back to the phone. You can see how the stories change overtime by visiting retellproject.com. To get started, dial a number from one to eight to hear and retell a story. Dial nine to record your own story or leave a comment. Dial zero to here these instructions again.', 'operator', '/home/pi/Desktop/telephone-project/recordings/instructions')
#synthesize_text('Now retell the story in your own words while mantaining the first person perspective. Focus more on how the story teller felt and less on the specific words. Recording will start in 3, 2, 1.', 'retell', '/home/pi/Desktop/telephone-project/recordings/instructions')
#synthesize_text('You will hear a story and then retell the story. Story will start in 3, 2, 1.', 'listen', '/home/pi/Desktop/telephone-project/recordings/instructions')
```
|
{
"source": "jdegrootlutzner/tic-tac-toe",
"score": 4
}
|
#### File: jdegrootlutzner/tic-tac-toe/tic-tac-toe.py
```python
import random
import time
class Board:
def __init__(self, dimension):
self.dimension = dimension
self.locations = [*range(1, self.size()+1)]
self.valid_moves = [*range(1, self.size()+1)]
self.values = [0]*9
self.visuals = [" "]*9
def row_as_str(self, row_number, list_type):
start = (row_number-1)*self.dimension
return(" | ".join(map(str,list_type[start:start+self.dimension])))
def print(self,list_type):
for i in range(1,self.dimension):
print(self.row_as_str(i,list_type))
print("---------")
print(self.row_as_str(self.dimension,list_type))
def check_win(self):
for i in range(0,self.dimension):
# check rows
if abs(sum(self.values[(i*self.dimension):(i+ self.dimension)])) == self.dimension:
return True
# check cols
elif abs(sum(self.values[i:(self.size()):self.dimension])) == self.dimension:
return True
        # check diagonals (written against self.dimension so it matches any board size)
        return((abs(sum(self.values[0::self.dimension + 1])) == self.dimension) |
               (abs(sum(self.values[self.dimension - 1:self.size() - 1:self.dimension - 1])) == self.dimension))
def is_legal_move(self, location):
        # Squares are numbered 1..size(), so 0 and negative inputs are rejected here.
        return((location is not None) and
               (location >= 1) and
               (location <= self.size()) and
               (self.values[location-1] == 0))
def add_move(self, location, move, num_moves):
self.values[location-1] = (move == "X") - (move == "O")
self.locations[location-1] = move
self.visuals[location-1] = move
self.valid_moves.remove(location)
def size(self):
return( self.dimension * self.dimension)
def min_winning_moves(self):
return (self.dimension + self.dimension - 1)
def random_move(self):
return( self.valid_moves[random.randint(0,len(self.valid_moves)-1)])
def user_pick_move(board):
choice = None
attempts = 0
    while not board.is_legal_move(choice):
attempts = attempts + 1
if(attempts > 1):
print("Available Moves:")
board.print(board.locations)
try:
choice = int(input("Which square" + str(board.valid_moves) + "?\n"))
except ValueError:
pass
return choice
def computer_pick_move(board):
return(board.random_move())
def alternate_move(last_move):
if last_move == "O":
return "X"
else:
return "O"
def play_game(board, user_move, computer_move):
last_move = "O"
num_moves = 0
board.print(board.locations)
    while num_moves < board.size():
if last_move == computer_move:
# if computer went last or its first turn, user goes
board.add_move(user_pick_move(board), user_move, num_moves)
else:
print("Computer thinking...")
time.sleep(1)
board.add_move(computer_pick_move(board), computer_move, num_moves)
last_move = alternate_move(last_move)
num_moves = num_moves + 1
board.print(board.visuals)
if (num_moves>=board.min_winning_moves() and board.check_win()):
if(last_move == computer_move):
print("You lost!")
return -1
else:
print("You won!")
return 1
print("Tie!")
return 0
if __name__ == "__main__":
dimension = 3
board = Board(dimension)
choice = None
while choice not in ["X","O"]:
try:
choice = str(input("Do you want to be \'X\' or \'O\' ? \n")).upper()
except ValueError:
pass
if choice == "X":
play_game(board, "X","O")
else:
play_game(board, "O","X")
```
|
{
"source": "jdehaan/borg",
"score": 3
}
|
#### File: borg/crypto/file_integrity.py
```python
import hashlib
import io
import json
import os
from hmac import compare_digest
from ..helpers import IntegrityError
from ..logger import create_logger
from ..checksums import StreamingXXH64
logger = create_logger()
class FileLikeWrapper:
def __enter__(self):
self.fd.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.fd.__exit__(exc_type, exc_val, exc_tb)
def tell(self):
return self.fd.tell()
def seek(self, offset, whence=io.SEEK_SET):
return self.fd.seek(offset, whence)
def write(self, data):
return self.fd.write(data)
def read(self, n=None):
return self.fd.read(n)
def flush(self):
self.fd.flush()
def fileno(self):
return self.fd.fileno()
class FileHashingWrapper(FileLikeWrapper):
"""
Wrapper for file-like objects that computes a hash on-the-fly while reading/writing.
WARNING: Seeks should only be used to query the size of the file, not
    to skip data, because skipped data is not read and therefore not hashed into the digest.
    Similarly, skipping while writing to create sparse files is not supported either.
Data has to be read/written in a symmetric fashion, otherwise different
digests will be generated.
Note: When used as a context manager read/write operations outside the enclosed scope
are illegal.
"""
ALGORITHM = None
FACTORY = None
def __init__(self, backing_fd, write):
self.fd = backing_fd
self.writing = write
self.hash = self.FACTORY()
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.hash_length()
super().__exit__(exc_type, exc_val, exc_tb)
def write(self, data):
"""
Write *data* to backing file and update internal state.
"""
n = super().write(data)
self.hash.update(data)
return n
def read(self, n=None):
"""
Read *data* from backing file (*n* has the usual meaning) and update internal state.
"""
data = super().read(n)
self.hash.update(data)
return data
def hexdigest(self):
"""
Return current digest bytes as hex-string.
Note: this can be called multiple times.
"""
return self.hash.hexdigest()
def update(self, data: bytes):
self.hash.update(data)
def hash_length(self, seek_to_end=False):
if seek_to_end:
# Add length of file to the hash to avoid problems if only a prefix is read.
self.seek(0, io.SEEK_END)
self.hash.update(str(self.tell()).encode())
class SHA512FileHashingWrapper(FileHashingWrapper):
ALGORITHM = 'SHA512'
FACTORY = hashlib.sha512
class XXH64FileHashingWrapper(FileHashingWrapper):
ALGORITHM = 'XXH64'
FACTORY = StreamingXXH64
SUPPORTED_ALGORITHMS = {
SHA512FileHashingWrapper.ALGORITHM: SHA512FileHashingWrapper,
XXH64FileHashingWrapper.ALGORITHM: XXH64FileHashingWrapper,
}
class FileIntegrityError(IntegrityError):
"""File failed integrity check: {}"""
class IntegrityCheckedFile(FileLikeWrapper):
def __init__(self, path, write, filename=None, override_fd=None, integrity_data=None):
self.path = path
self.writing = write
mode = 'wb' if write else 'rb'
self.file_fd = override_fd or open(path, mode)
self.digests = {}
hash_cls = XXH64FileHashingWrapper
if not write:
algorithm_and_digests = self.load_integrity_data(path, integrity_data)
if algorithm_and_digests:
algorithm, self.digests = algorithm_and_digests
hash_cls = SUPPORTED_ALGORITHMS[algorithm]
# TODO: When we're reading but don't have any digests, i.e. no integrity file existed,
# TODO: then we could just short-circuit.
self.fd = self.hasher = hash_cls(backing_fd=self.file_fd, write=write)
self.hash_filename(filename)
def load_integrity_data(self, path, integrity_data):
if integrity_data is not None:
return self.parse_integrity_data(path, integrity_data)
def hash_filename(self, filename=None):
        # Hash the name of the file, but only the basename, i.e. not the path.
        # In Borg the name itself encodes the context (e.g. index.N, cache, files),
# while the path doesn't matter, and moving e.g. a repository or cache directory is supported.
# Changing the name however imbues a change of context that is not permissible.
# While Borg does not use anything except ASCII in these file names, it's important to use
# the same encoding everywhere for portability. Using os.fsencode() would be wrong.
filename = os.path.basename(filename or self.path)
self.hasher.update(('%10d' % len(filename)).encode())
self.hasher.update(filename.encode())
@classmethod
def parse_integrity_data(cls, path: str, data: str):
try:
integrity_data = json.loads(data)
# Provisions for agility now, implementation later, but make sure the on-disk joint is oiled.
algorithm = integrity_data['algorithm']
if algorithm not in SUPPORTED_ALGORITHMS:
logger.warning('Cannot verify integrity of %s: Unknown algorithm %r', path, algorithm)
return
digests = integrity_data['digests']
# Require at least presence of the final digest
digests['final']
return algorithm, digests
except (ValueError, TypeError, KeyError) as e:
logger.warning('Could not parse integrity data for %s: %s', path, e)
raise FileIntegrityError(path)
def hash_part(self, partname, is_final=False):
if not self.writing and not self.digests:
return
self.hasher.update(('%10d' % len(partname)).encode())
self.hasher.update(partname.encode())
self.hasher.hash_length(seek_to_end=is_final)
digest = self.hasher.hexdigest()
if self.writing:
self.digests[partname] = digest
elif self.digests and not compare_digest(self.digests.get(partname, ''), digest):
raise FileIntegrityError(self.path)
def __exit__(self, exc_type, exc_val, exc_tb):
exception = exc_type is not None
if not exception:
self.hash_part('final', is_final=True)
self.hasher.__exit__(exc_type, exc_val, exc_tb)
if exception:
return
if self.writing:
self.store_integrity_data(json.dumps({
'algorithm': self.hasher.ALGORITHM,
'digests': self.digests,
}))
elif self.digests:
logger.debug('Verified integrity of %s', self.path)
def store_integrity_data(self, data: str):
self.integrity_data = data
class DetachedIntegrityCheckedFile(IntegrityCheckedFile):
def __init__(self, path, write, filename=None, override_fd=None):
super().__init__(path, write, filename, override_fd)
filename = filename or os.path.basename(path)
output_dir = os.path.dirname(path)
self.output_integrity_file = self.integrity_file_path(os.path.join(output_dir, filename))
def load_integrity_data(self, path, integrity_data):
assert not integrity_data, 'Cannot pass explicit integrity_data to DetachedIntegrityCheckedFile'
return self.read_integrity_file(self.path)
@staticmethod
def integrity_file_path(path):
return path + '.integrity'
@classmethod
def read_integrity_file(cls, path):
try:
with open(cls.integrity_file_path(path)) as fd:
return cls.parse_integrity_data(path, fd.read())
except FileNotFoundError:
logger.info('No integrity file found for %s', path)
except OSError as e:
logger.warning('Could not read integrity file for %s: %s', path, e)
raise FileIntegrityError(path)
def store_integrity_data(self, data: str):
with open(self.output_integrity_file, 'w') as fd:
fd.write(data)
```
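The wrappers above are easiest to follow end to end. Below is a minimal sketch, assuming the `borgbackup` package is importable, of writing a file with a detached `.integrity` sidecar and verifying it on the next read; the path is a placeholder:
```python
from borg.crypto.file_integrity import DetachedIntegrityCheckedFile, FileIntegrityError

path = '/tmp/example-index'  # illustrative path

# Writing: on exit the digest (over the data, the basename and the length) is
# stored in '/tmp/example-index.integrity'.
with DetachedIntegrityCheckedFile(path, write=True) as fd:
    fd.write(b'some payload')

# Reading: the sidecar is loaded, and a mismatching 'final' digest raises
# FileIntegrityError when the context manager exits.
try:
    with DetachedIntegrityCheckedFile(path, write=False) as fd:
        data = fd.read()
except FileIntegrityError as exc:
    print('integrity check failed:', exc)
```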
#### File: borg/helpers/checks.py
```python
import os
import sys
from .errors import Error
from ..platformflags import is_win32, is_linux, is_freebsd, is_darwin
class PythonLibcTooOld(Error):
"""FATAL: this Python was compiled for a too old (g)libc and misses required functionality."""
def check_python():
if is_win32:
required_funcs = {os.stat}
else:
required_funcs = {os.stat, os.utime, os.chown}
if not os.supports_follow_symlinks.issuperset(required_funcs):
raise PythonLibcTooOld
class ExtensionModuleError(Error):
"""The Borg binary extension modules do not seem to be properly installed."""
def check_extension_modules():
from .. import platform, compress, crypto, item, chunker, hashindex
if hashindex.API_VERSION != '1.2_01':
raise ExtensionModuleError
if chunker.API_VERSION != '1.2_01':
raise ExtensionModuleError
if compress.API_VERSION != '1.2_02':
raise ExtensionModuleError
if crypto.low_level.API_VERSION != '1.3_01':
raise ExtensionModuleError
if item.API_VERSION != '1.2_01':
raise ExtensionModuleError
if platform.API_VERSION != platform.OS_API_VERSION or platform.API_VERSION != '1.2_05':
raise ExtensionModuleError
```
#### File: borg/testsuite/remote.py
```python
import errno
import os
import io
import time
from unittest.mock import patch
import pytest
from ..remote import SleepingBandwidthLimiter, RepositoryCache, cache_if_remote
from ..repository import Repository
from ..crypto.key import PlaintextKey
from ..compress import CompressionSpec
from ..helpers import IntegrityError
from .hashindex import H
from .key import TestKey
class TestSleepingBandwidthLimiter:
def expect_write(self, fd, data):
self.expected_fd = fd
self.expected_data = data
def check_write(self, fd, data):
assert fd == self.expected_fd
assert data == self.expected_data
return len(data)
def test_write_unlimited(self, monkeypatch):
monkeypatch.setattr(os, "write", self.check_write)
it = SleepingBandwidthLimiter(0)
self.expect_write(5, b"test")
it.write(5, b"test")
def test_write(self, monkeypatch):
monkeypatch.setattr(os, "write", self.check_write)
monkeypatch.setattr(time, "monotonic", lambda: now)
monkeypatch.setattr(time, "sleep", lambda x: None)
now = 100
it = SleepingBandwidthLimiter(100)
# all fits
self.expect_write(5, b"test")
it.write(5, b"test")
# only partial write
self.expect_write(5, b"123456")
it.write(5, b"1234567890")
# sleeps
self.expect_write(5, b"123456")
it.write(5, b"123456")
# long time interval between writes
now += 10
self.expect_write(5, b"1")
it.write(5, b"1")
# long time interval between writes, filling up quota
now += 10
self.expect_write(5, b"1")
it.write(5, b"1")
# long time interval between writes, filling up quota to clip to maximum
now += 10
self.expect_write(5, b"1")
it.write(5, b"1")
class TestRepositoryCache:
@pytest.fixture
def repository(self, tmpdir):
self.repository_location = os.path.join(str(tmpdir), 'repository')
with Repository(self.repository_location, exclusive=True, create=True) as repository:
repository.put(H(1), b'1234')
repository.put(H(2), b'5678')
repository.put(H(3), bytes(100))
yield repository
@pytest.fixture
def cache(self, repository):
return RepositoryCache(repository)
def test_simple(self, cache: RepositoryCache):
# Single get()s are not cached, since they are used for unique objects like archives.
assert cache.get(H(1)) == b'1234'
assert cache.misses == 1
assert cache.hits == 0
assert list(cache.get_many([H(1)])) == [b'1234']
assert cache.misses == 2
assert cache.hits == 0
assert list(cache.get_many([H(1)])) == [b'1234']
assert cache.misses == 2
assert cache.hits == 1
assert cache.get(H(1)) == b'1234'
assert cache.misses == 2
assert cache.hits == 2
def test_backoff(self, cache: RepositoryCache):
def query_size_limit():
cache.size_limit = 0
assert list(cache.get_many([H(1), H(2)])) == [b'1234', b'5678']
assert cache.misses == 2
assert cache.evictions == 0
iterator = cache.get_many([H(1), H(3), H(2)])
assert next(iterator) == b'1234'
# Force cache to back off
qsl = cache.query_size_limit
cache.query_size_limit = query_size_limit
cache.backoff()
cache.query_size_limit = qsl
# Evicted H(1) and H(2)
assert cache.evictions == 2
assert H(1) not in cache.cache
assert H(2) not in cache.cache
assert next(iterator) == bytes(100)
assert cache.slow_misses == 0
# Since H(2) was in the cache when we called get_many(), but has
# been evicted during iterating the generator, it will be a slow miss.
assert next(iterator) == b'5678'
assert cache.slow_misses == 1
def test_enospc(self, cache: RepositoryCache):
class enospc_open:
def __init__(self, *args):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def write(self, data):
raise OSError(errno.ENOSPC, 'foo')
def truncate(self, n=None):
pass
iterator = cache.get_many([H(1), H(2), H(3)])
assert next(iterator) == b'1234'
with patch('builtins.open', enospc_open):
assert next(iterator) == b'5678'
assert cache.enospc == 1
# We didn't patch query_size_limit which would set size_limit to some low
# value, so nothing was actually evicted.
assert cache.evictions == 0
assert next(iterator) == bytes(100)
@pytest.fixture
def key(self, repository, monkeypatch):
monkeypatch.setenv('BORG_PASSPHRASE', 'test')
key = PlaintextKey.create(repository, TestKey.MockArgs())
key.compressor = CompressionSpec('none').compressor
return key
def _put_encrypted_object(self, key, repository, data):
id_ = key.id_hash(data)
repository.put(id_, key.encrypt(id_, data))
return id_
@pytest.fixture
def H1(self, key, repository):
return self._put_encrypted_object(key, repository, b'1234')
@pytest.fixture
def H2(self, key, repository):
return self._put_encrypted_object(key, repository, b'5678')
@pytest.fixture
def H3(self, key, repository):
return self._put_encrypted_object(key, repository, bytes(100))
@pytest.fixture
def decrypted_cache(self, key, repository):
return cache_if_remote(repository, decrypted_cache=key, force_cache=True)
def test_cache_corruption(self, decrypted_cache: RepositoryCache, H1, H2, H3):
list(decrypted_cache.get_many([H1, H2, H3]))
iterator = decrypted_cache.get_many([H1, H2, H3])
assert next(iterator) == (7, b'1234')
with open(decrypted_cache.key_filename(H2), 'a+b') as fd:
fd.seek(-1, io.SEEK_END)
corrupted = (int.from_bytes(fd.read(), 'little') ^ 2).to_bytes(1, 'little')
fd.seek(-1, io.SEEK_END)
fd.write(corrupted)
fd.truncate()
with pytest.raises(IntegrityError):
assert next(iterator) == (7, b'5678')
```
|
{
"source": "jdehaan/Funani",
"score": 3
}
|
#### File: attic/test/hashdeep.py
```python
__author__ = '<NAME> <<EMAIL>>'
__license__ = 'Public Domain'
__version__ = '1.0'
import os
import os.path as osp
import hashlib
# --- helpers ---
def write(text):
""" helper for writing output, as a single point for replacement """
print(text)
def filehash(filepath):
blocksize = 64*1024
sha = hashlib.sha1()
with open(filepath, 'rb') as fp:
while True:
data = fp.read(blocksize)
if not data:
break
sha.update(data)
return sha.hexdigest()
# --- /helpers ---
write("""\
%%%% HASHDEEP-1.0
%%%% size,sha1,filename
##
## $ hashdeep.py
##""")
ROOT = '.'
for root, dirs, files in os.walk(ROOT):
for fpath in [osp.join(root, f) for f in files]:
size = osp.getsize(fpath)
sha = filehash(fpath)
name = osp.relpath(fpath, ROOT)
write('%s,%s,%s' % (size, sha, name))
#for ignored in ['.hg', '.svn', 'git']:
# if ignored in dirs:
# dirs.remove(ignored)
```
#### File: cas/src/funanidb.py
```python
import logging
import os
from metadb import MetadataDatabase
from mediadb import MediaDatabase
from address import hash_file, shard
LOGGER = logging.getLogger('funanidb')
EXTENSIONS_IMAGES = (
'.jpg', '.jpeg', '.png', '.dng',
'.tif', '.tiff', '.pnm', '.cr2', '.cr3', '.bmp',
'.xcf', '.gif')
EXTENSIONS_VIDEO = ('.mts', '.mp4', '.mov', '.avi', '.mpg', '.3gp', '.wmv')
EXTENSIONS_AUDIO = ('.aac', '.m4a', '.mp3', '.opus')
EXTENSIONS_ALL = EXTENSIONS_IMAGES + EXTENSIONS_VIDEO + EXTENSIONS_AUDIO
# Files to fully ignore during processing
IGNORE_FILES = ('thumbs.db', '.nomedia')
class FunaniDatabase(object):
ROOT_PATH = ''
metadata_db = None
media_db = None
def __init__(self, section):
self.ROOT_PATH = section['path']
self.metadata_db = MetadataDatabase(self.ROOT_PATH)
self.media_db = MediaDatabase(self.ROOT_PATH, section['auto-reflink'])
LOGGER.debug("Initialized database at '%s'", self.ROOT_PATH)
def __str__(self):
return 'FUNANIDB:{}'.format(self.ROOT_PATH)
def verify_files(self, force, metadata):
if metadata:
# TODO: check the metadata & SQL DB
pass
else:
# verify the files (data scrubbing)
self.media_db.verify_files(force)
def meta_get(self, hash_values, fixdb):
for hash_value in hash_values:
self.metadata_db.dump(hash_value)
# TODO: check & upload the metadata in the SQL db too
def check_file(self, file_path):
srcfullpath = os.path.abspath(file_path)
srcfullpath = os.path.realpath(srcfullpath)
#TODO: Build a DB table with "root paths" (repetitive Media base paths)
# root paths shall not be substrings of each other
#TODO: Build a DB table with "root path id" | modtime | "relative path" | hash
#TODO: Try to find the modtime:/filepath in the DB -> if yes return that metadata
# otherwise fall back to this default behaviour
hash_value = hash_file(srcfullpath)
print("hash:", hash_value)
self.metadata_db.dump(hash_value)
def _traverse(self, directory_path, reflink):
LOGGER.info("recursing through '%s'", directory_path)
for root, dirs, files in os.walk(directory_path):
for name in files:
if name.lower().endswith(EXTENSIONS_ALL):
srcfullpath = os.path.join(root, name)
self.import_file(srcfullpath, reflink)
else:
if name.lower() not in (IGNORE_FILES): # avoid noise in output
LOGGER.warning("skipping '%s'", os.path.join(root, name))
def import_recursive(self, src, reflink):
"""Import media from a src directory recusively.
Args:
src (str): The path to import
reflink (bool): Use reflinks if the backend FS supports it
Returns:
Nothing.
"""
srcfullpath = os.path.abspath(src)
srcfullpath = os.path.realpath(src)
if os.path.isfile(srcfullpath):
self.import_file(srcfullpath, reflink)
else:
self._traverse(srcfullpath, reflink)
def import_file(self, src, reflink):
"""Import a single media file.
Args:
src (str): The path to the file to import
reflink (bool): Use a reflink if the backend FS supports it
Returns:
Nothing.
"""
srcfullpath = os.path.abspath(src)
srcfullpath = os.path.realpath(srcfullpath)
# compute the hash and relative path to the file
hash_value = hash_file(srcfullpath)
reldirname = shard(hash_value, 2, 2)
(dstfullpath, is_duplicate) = self.media_db.import_file(srcfullpath, reldirname, reflink)
self.metadata_db.import_file(srcfullpath, dstfullpath, reldirname)
```
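To show how the pieces fit together, a minimal usage sketch; it assumes the sibling `metadb`, `mediadb` and `address` modules are importable, and the paths and config values are placeholders:
```python
# Hypothetical config section; only 'path' and 'auto-reflink' are read by the constructor.
section = {'path': '/srv/funani-db', 'auto-reflink': ''}

db = FunaniDatabase(section)
# Import a whole directory tree of media files (plain copies, no reflinks).
db.import_recursive('/home/user/Pictures', reflink=False)
# Scrub the stored files against their content hashes.
db.verify_files(force=False, metadata=False)
```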
#### File: cas/src/mediadb.py
```python
from datetime import datetime
from random import shuffle
import json
import logging
import os
import subprocess
import sys
from address import shard, hash_file
LOGGER = logging.getLogger('mediadb')
DMODE = 0o700 # default directory creation mode
FMODE = 0o400 # default file creation mode
# Only copy file if it doesn't already exist.
def _copy_btrfs(src, dst):
if not os.path.isfile(dst):
is_duplicate = False
# Python 3.5
#subprocess.run(["cp", "--reflink=always", src, dst], check=True)
subprocess.check_call(["/bin/cp", "--reflink=always", src, dst])
subprocess.check_call(["/bin/chmod", "400", dst])
LOGGER.info("--> Imported '%s'", src)
else:
is_duplicate = True
return (dst, is_duplicate)
def _copy_stdfs(src, dst):
if not os.path.isfile(dst):
is_duplicate = False
subprocess.check_call(["/bin/cp", src, dst])
subprocess.check_call(["/bin/chmod", "400", dst])
LOGGER.info("--> Imported '%s'", src)
else:
is_duplicate = True
return (dst, is_duplicate)
class MediaDatabase(object):
ROOT_PATH = ''
AUTO_REFLINK_ROOT = ''
def __init__(self, root, reflink_root):
self.ROOT_PATH = os.path.join(root, '.media')
self.AUTO_REFLINK_ROOT = reflink_root
os.makedirs(self.ROOT_PATH, DMODE, True)
LOGGER.debug("Initialized media database at '%s'", self.ROOT_PATH)
def __str__(self):
return 'MEDIADB:{}'.format(self.ROOT_PATH)
def _flush_verification_status(self, jsonfilepath, results):
with open(jsonfilepath, 'w') as jsonfile:
json.dump(results, jsonfile)
# Determine if we have to recheck the file
def _get_recheck_flag(self, force, results, name):
if force:
# in this case, always do so
return True
# assume we have to by default
recheck = True
if name in results:
lastcheck = results[name]
if lastcheck:
lastts = lastcheck['checked']
delta = datetime.now() - datetime.strptime(lastts, "%Y-%m-%dT%H:%M:%S.%f")
#TODO: make these 60 configurable (via config file verify-limit option)
if delta.days < 60:
recheck = False
LOGGER.debug('... skipping because was done already %i days ago',
delta.days)
return recheck
# verification of files integrity
# contains for each hash, timestamp of last hash check
def _verify_files_in_dir(self, reldirname, mediaabsdirname, force):
results = {}
jsonfilepath = os.path.join(mediaabsdirname, 'verify.json')
if os.path.isfile(jsonfilepath):
LOGGER.debug('Reading %s', jsonfilepath)
try:
with open(jsonfilepath, 'r') as jsonfile:
results = json.load(jsonfile)
#print(results)
except:
LOGGER.error("Error during reading json file %s", jsonfilepath)
raise
changed = False
# go through all files in directory and check hash
for root, dirs, files in os.walk(mediaabsdirname):
del dirs[:] # we do not want to recurse
for name in files:
if not name.lower().endswith('.json'):
try:
recheck = self._get_recheck_flag(force, results, name)
except:
LOGGER.error("Error during _get_recheck_flag for file %s/%s", (root, name))
raise
if recheck:
file_to_verify = os.path.join(root, name)
LOGGER.debug("Verifying '%s'", file_to_verify)
actual_hash_value = hash_file(file_to_verify)
expected_hash_value = '{}{}{}'.format(reldirname[0], reldirname[1], name)
status = 'OK' if expected_hash_value == actual_hash_value else 'FAILED'
timestamp = datetime.now().isoformat()
changed = True
results[name] = {}
results[name]['status'] = status
results[name]['checked'] = timestamp
if status != 'OK':
LOGGER.error("Mismatching hash for file %s/%s", (root, name))
else:
LOGGER.info("OK - %s", actual_hash_value)
for name in sorted(results.keys()):
if results[name]['status'] != 'OK':
LOGGER.error("Mismatching hash for file %s%s", reldirname, name)
if changed:
self._flush_verification_status(jsonfilepath, results)
def verify_files(self, force):
        # randomize the order of processing so that for lengthy
        # operations all files have a chance to be checked if the process has
        # to be aborted and is restarted later
parentdirs = list(range(0x0000, 0xffff))
shuffle(parentdirs)
index = 0
for start_hash in parentdirs:
sys.stdout.write("%d%% (%d) \r" % (index*100>>16, index))
sys.stdout.flush()
index = index + 1
hash_value = '{:04x}'.format(start_hash)
reldirname = shard(hash_value, 2, 2)
mediaabsdirname = os.path.join(self.ROOT_PATH, *reldirname)
if os.path.isdir(mediaabsdirname):
LOGGER.debug('Verifying files in %s', mediaabsdirname)
self._verify_files_in_dir(reldirname, mediaabsdirname, force)
def import_file(self, srcfullpath, reldirname, reflink):
mediaabsdirname = os.path.join(self.ROOT_PATH, *reldirname[:-1])
mediafullpath = os.path.join(mediaabsdirname, reldirname[-1])
os.makedirs(mediaabsdirname, DMODE, True)
# automatically use reflink if the root path is the same as specified
if self.AUTO_REFLINK_ROOT and srcfullpath.startswith(self.AUTO_REFLINK_ROOT):
reflink = True
if reflink:
return _copy_btrfs(srcfullpath, mediafullpath)
else:
return _copy_stdfs(srcfullpath, mediafullpath)
#TODO: find out when the optimization with btrfs can be used
#def find_mount_point(path):
# path = os.path.abspath(path)
# while not os.path.ismount(path):
# path = os.path.dirname(path)
# return path
#path = sys.argv[1]
#path = os.path.realpath(path)
#print("real path: ", path)
#mount_point = find_mount_point(path)
#print("mount point: ", mount_point)
#print("lstat: ", os.lstat(path))
#print("lstat /home/data/test.jpg: ", os.lstat("/home/data/test.jpg"))
#print(os.path.ismount(path))
#(drive, tail) = os.path.splitdrive(path)
#print((drive, tail))
```
|
{
"source": "jdehning/pymc4",
"score": 2
}
|
#### File: pymc4/tests/conftest.py
```python
import pytest
import pymc4 as pm
import numpy as np
import tensorflow as tf
import itertools
# Tensor shapes on which the GP model will be tested
BATCH_AND_FEATURE_SHAPES = [
(1,),
(2,),
(2, 2,),
]
SAMPLE_SHAPE = [(1,), (3,)]
@pytest.fixture(scope="function", autouse=True)
def tf_seed():
tf.random.set_seed(37208) # random.org
yield
@pytest.fixture(scope="function")
def simple_model():
@pm.model()
def simple_model():
norm = yield pm.Normal("norm", 0, 1)
return norm
return simple_model
@pytest.fixture(scope="function")
def simple_model_with_deterministic(simple_model):
@pm.model()
def simple_model_with_deterministic():
norm = yield simple_model()
determ = yield pm.Deterministic("determ", norm * 2)
return determ
return simple_model_with_deterministic
@pytest.fixture(scope="function")
def simple_model_no_free_rvs():
@pm.model()
def simple_model_no_free_rvs():
norm = yield pm.Normal("norm", 0, 1, observed=1)
return norm
return simple_model_no_free_rvs
@pytest.fixture(
scope="function",
params=itertools.product(
[(), (3,), (3, 2)], [(), (2,), (4,), (5, 4)], [(), (1,), (10,), (10, 10)]
),
ids=str,
)
def unvectorized_model(request):
norm_shape, observed_shape, batch_size = request.param
observed = np.ones(observed_shape)
@pm.model()
def unvectorized_model():
norm = yield pm.Normal("norm", 0, 1, batch_stack=norm_shape)
determ = yield pm.Deterministic("determ", tf.reduce_max(norm))
output = yield pm.Normal("output", determ, 1, observed=observed)
return unvectorized_model, norm_shape, observed, batch_size
@pytest.fixture(scope="module", params=["XLA", "noXLA"], ids=str)
def xla_fixture(request):
return request.param == "XLA"
@pytest.fixture(scope="function")
def deterministics_in_nested_models():
@pm.model
def nested_model(cond):
x = yield pm.Normal("x", cond, 1)
dx = yield pm.Deterministic("dx", x + 1)
return dx
@pm.model
def outer_model():
cond = yield pm.HalfNormal("cond", 1)
dcond = yield pm.Deterministic("dcond", cond * 2)
dx = yield nested_model(dcond)
ddx = yield pm.Deterministic("ddx", dx)
return ddx
expected_untransformed = {
"outer_model",
"outer_model/cond",
"outer_model/nested_model",
"outer_model/nested_model/x",
}
expected_transformed = {"outer_model/__log_cond"}
expected_deterministics = {
"outer_model/dcond",
"outer_model/ddx",
"outer_model/nested_model/dx",
}
deterministic_mapping = {
"outer_model/dcond": (["outer_model/__log_cond"], lambda x: np.exp(x) * 2),
"outer_model/ddx": (["outer_model/nested_model/dx"], lambda x: x),
"outer_model/nested_model/dx": (["outer_model/nested_model/x"], lambda x: x + 1,),
}
return (
outer_model,
expected_untransformed,
expected_transformed,
expected_deterministics,
deterministic_mapping,
)
@pytest.fixture(scope="module", params=["auto_batch", "trust_manual_batching"], ids=str)
def use_auto_batching_fixture(request):
return request.param == "auto_batch"
@pytest.fixture(scope="function", params=["unvectorized_model", "vectorized_model"], ids=str)
def vectorized_model_fixture(request):
is_vectorized_model = request.param == "vectorized_model"
observed = np.zeros((5, 4), dtype="float32")
core_shapes = {
"model/mu": (4,),
"model/__log_scale": (),
}
if is_vectorized_model:
# A model where we pay great attention to making each distribution
# have exactly the right event_shape, and assure that when we sample
# from its prior, the requested `sample_shape` gets sent to the
# conditionally independent variables, and expect that shape to go
# through the conditionally dependent variables as batch_shapes
@pm.model
def model():
mu = yield pm.Normal(
"mu", tf.zeros(4), 1, conditionally_independent=True, reinterpreted_batch_ndims=1,
)
scale = yield pm.HalfNormal("scale", 1, conditionally_independent=True)
x = yield pm.Normal(
"x",
mu,
scale[..., None],
observed=observed,
reinterpreted_batch_ndims=1,
event_stack=5,
)
else:
@pm.model
def model():
mu = yield pm.Normal("mu", tf.zeros(4), 1)
scale = yield pm.HalfNormal("scale", 1)
x = yield pm.Normal("x", mu, scale, batch_stack=5, observed=observed)
return model, is_vectorized_model, core_shapes
@pytest.fixture(scope="module", params=BATCH_AND_FEATURE_SHAPES, ids=str)
def get_batch_shape(request):
return request.param
@pytest.fixture(scope="module", params=SAMPLE_SHAPE, ids=str)
def get_sample_shape(request):
return request.param
@pytest.fixture(scope="module", params=BATCH_AND_FEATURE_SHAPES, ids=str)
def get_feature_shape(request):
return request.param
@pytest.fixture(scope="module")
def get_data(get_batch_shape, get_sample_shape, get_feature_shape):
X = tf.random.normal(get_batch_shape + get_sample_shape + get_feature_shape)
return get_batch_shape, get_sample_shape, get_feature_shape, X
```
|
{
"source": "jdehorty/image-quality-assessment",
"score": 2
}
|
#### File: src/evaluater/predict.py
```python
import argparse
import glob
import importlib
import json
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dropout, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import pandas as pd
def earth_movers_distance(y_true, y_pred):
cdf_true = K.cumsum(y_true, axis=-1)
cdf_pred = K.cumsum(y_pred, axis=-1)
emd = K.sqrt(K.mean(K.square(cdf_true - cdf_pred), axis=-1))
return K.mean(emd)
class TrainDataGenerator(tf.keras.utils.Sequence):
"""inherits from Keras Sequence base object, allows to use multiprocessing in .fit_generator"""
def __init__(self, samples, img_dir, batch_size, n_classes, basenet_preprocess, img_format,
img_load_dims=(256, 256), img_crop_dims=(224, 224), shuffle=True):
self.samples = samples
self.img_dir = img_dir
self.batch_size = batch_size
self.n_classes = n_classes
self.basenet_preprocess = basenet_preprocess # Keras basenet specific preprocessing function
self.img_load_dims = img_load_dims # dimensions that images get resized into when loaded
self.img_crop_dims = img_crop_dims # dimensions that images get randomly cropped to
self.shuffle = shuffle
self.img_format = img_format
self.on_epoch_end() # call ensures that samples are shuffled in first epoch if shuffle is set to True
def __len__(self):
return int(np.ceil(len(self.samples) / self.batch_size)) # number of batches per epoch
def __getitem__(self, index):
batch_indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size] # get batch indexes
batch_samples = [self.samples[i] for i in batch_indexes] # get batch samples
X, y = self.__data_generator(batch_samples)
return X, y
def on_epoch_end(self):
self.indexes = np.arange(len(self.samples))
if self.shuffle is True:
np.random.shuffle(self.indexes)
def __data_generator(self, batch_samples):
# initialize images and labels tensors for faster processing
X = np.empty((len(batch_samples), *self.img_crop_dims, 3))
y = np.empty((len(batch_samples), self.n_classes))
for i, sample in enumerate(batch_samples):
# load and randomly augment image
img_file = os.path.join(self.img_dir, '{}.{}'.format(sample['image_id'], self.img_format))
            img = load_image(img_file, self.img_load_dims)  # helper functions are defined later in this module
            if img is not None:
                img = random_crop(img, self.img_crop_dims)
                img = random_horizontal_flip(img)
                X[i,] = img
            # normalize labels
            y[i,] = normalize_labels(sample['label'])
# apply basenet specific preprocessing
# input is 4D numpy array of RGB values within [0, 255]
X = self.basenet_preprocess(X)
return X, y
class TestDataGenerator(tf.keras.utils.Sequence):
'''inherits from Keras Sequence base object, allows to use multiprocessing in .fit_generator'''
def __init__(self, samples, img_dir, batch_size, n_classes, basenet_preprocess, img_format,
img_load_dims=(224, 224)):
self.samples = samples
self.img_dir = img_dir
self.batch_size = batch_size
self.n_classes = n_classes
self.basenet_preprocess = basenet_preprocess # Keras basenet specific preprocessing function
self.img_load_dims = img_load_dims # dimensions that images get resized into when loaded
self.img_format = img_format
self.on_epoch_end() # call ensures that samples are shuffled in first epoch if shuffle is set to True
def __len__(self):
return int(np.ceil(len(self.samples) / self.batch_size)) # number of batches per epoch
def __getitem__(self, index):
batch_indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size] # get batch indexes
batch_samples = [self.samples[i] for i in batch_indexes] # get batch samples
X, y = self.__data_generator(batch_samples)
return X, y
def on_epoch_end(self):
self.indexes = np.arange(len(self.samples))
def __data_generator(self, batch_samples):
# initialize images and labels tensors for faster processing
X = np.empty((len(batch_samples), *self.img_load_dims, 3))
y = np.empty((len(batch_samples), self.n_classes))
for i, sample in enumerate(batch_samples):
# load and randomly augment image
img_file = os.path.join(self.img_dir, '{}.{}'.format(sample['image_id'], self.img_format))
img = load_image(img_file, self.img_load_dims)
if img is not None:
X[i,] = img
# normalize labels
if sample.get('label') is not None:
y[i,] = normalize_labels(sample['label'])
# apply basenet specific preprocessing
# input is 4D numpy array of RGB values within [0, 255]
X = self.basenet_preprocess(X)
return X, y
class Nima:
def __init__(self, base_model_name, n_classes=10, learning_rate=0.001, dropout_rate=0, loss=earth_movers_distance,
decay=0, weights='imagenet'):
self.n_classes = n_classes
self.base_model_name = base_model_name
self.learning_rate = learning_rate
self.dropout_rate = dropout_rate
self.loss = loss
self.decay = decay
self.weights = weights
self._get_base_module()
def _get_base_module(self):
# import Keras base model module
if self.base_model_name == 'InceptionV3':
self.base_module = importlib.import_module('tensorflow.keras.applications.inception_v3')
elif self.base_model_name == 'InceptionResNetV2':
self.base_module = importlib.import_module('tensorflow.keras.applications.inception_resnet_v2')
else:
self.base_module = importlib.import_module('tensorflow.keras.applications.' + self.base_model_name.lower())
def build(self):
# get base model class
BaseCnn = getattr(self.base_module, self.base_model_name)
# load pre-trained model
self.base_model = BaseCnn(input_shape=(224, 224, 3), weights=self.weights, include_top=False, pooling='avg')
# add dropout and dense layer
x = Dropout(self.dropout_rate)(self.base_model.output)
x = Dense(units=self.n_classes, activation='softmax')(x)
self.nima_model = Model(self.base_model.inputs, x)
def compile(self):
self.nima_model.compile(optimizer=Adam(lr=self.learning_rate, decay=self.decay), loss=self.loss)
def preprocessing_function(self):
return self.base_module.preprocess_input
def load_json(file_path):
with open(file_path, 'r') as f:
return json.load(f)
def save_json(data, target_file):
with open(target_file, 'w') as f:
json.dump(data, f, indent=2, sort_keys=True)
def random_crop(img, crop_dims):
h, w = img.shape[0], img.shape[1]
ch, cw = crop_dims[0], crop_dims[1]
assert h >= ch, 'image height is less than crop height'
assert w >= cw, 'image width is less than crop width'
x = np.random.randint(0, w - cw + 1)
y = np.random.randint(0, h - ch + 1)
return img[y:(y + ch), x:(x + cw), :]
def random_horizontal_flip(img):
assert len(img.shape) == 3, 'input tensor must have 3 dimensions (height, width, channels)'
assert img.shape[2] == 3, 'image not in channels last format'
if np.random.random() < 0.5:
img = img.swapaxes(1, 0)
img = img[::-1, ...]
img = img.swapaxes(0, 1)
return img
def load_image(img_file, target_size):
return np.asarray(tf.keras.preprocessing.image.load_img(img_file, target_size=target_size))
def normalize_labels(labels):
labels_np = np.array(labels)
return labels_np / labels_np.sum()
def calc_mean_score(score_dist):
score_dist = normalize_labels(score_dist)
return (score_dist * np.arange(1, 11)).sum()
def ensure_dir_exists(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def image_file_to_json(img_path):
img_dir = os.path.dirname(img_path)
img_id = os.path.basename(img_path).split('.')[0]
return img_dir, [{'image_id': img_id}]
def image_dir_to_json(img_dir, img_type='jpg'):
img_paths = glob.glob(os.path.join(img_dir, '*.' + img_type))
samples = []
for img_path in img_paths:
img_id = os.path.basename(img_path).split('.')[0]
samples.append({'image_id': img_id})
return samples
def predict(model, data_generator):
return model.predict_generator(data_generator, workers=8, use_multiprocessing=True, verbose=1)
def main(base_model_name, weights_file, image_source, predictions_file, img_format='jpg'):
# load samples
if os.path.isfile(image_source):
image_dir, samples = image_file_to_json(image_source)
else:
image_dir = image_source
        samples = image_dir_to_json(image_dir, img_type=img_format)
# build model and load weights
nima = Nima(base_model_name, weights=None)
nima.build()
nima.nima_model.load_weights(weights_file)
# initialize data generator
data_generator = TestDataGenerator(samples, image_dir, 64, 10, nima.preprocessing_function(),
img_format=img_format)
# get predictions
predictions = predict(nima.nima_model, data_generator)
# calc mean scores and add to samples
for i, sample in enumerate(samples):
sample['mean_score_prediction'] = calc_mean_score(predictions[i])
print(json.dumps(samples, indent=2))
# converts to CSV
pd.read_json(json.dumps(samples)).to_csv(f'scores_{weights_file.split("_")[-2]}.csv', index=False)
if predictions_file is not None:
save_json(samples, predictions_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--base-model-name', help='CNN base model name', required=True)
parser.add_argument('-w', '--weights-file', help='path of weights file', required=True)
parser.add_argument('-is', '--image-source', help='image directory or file', required=True)
parser.add_argument('-pf', '--predictions-file', help='file with predictions', required=False, default=None)
args = parser.parse_args()
main(**args.__dict__)
```
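The script is normally driven through its argparse CLI; as a hedged sketch, the equivalent programmatic call looks like the following, where the weights and image paths are placeholders and `MobileNet` is just one base model name that `Nima._get_base_module` can resolve:
```python
# Placeholder paths; the weights file must match the chosen base model.
main(
    base_model_name='MobileNet',
    weights_file='/path/to/weights_mobilenet_technical_0.11.hdf5',
    image_source='/path/to/images',                 # directory of *.jpg files or a single image
    predictions_file='/path/to/predictions.json',
)
```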
|
{
"source": "jdehorty/python-sample-vscode-flask-tutorial",
"score": 3
}
|
#### File: handlers/search/brute_force_cython.py
```python
from typing import Callable, Dict
# The compiled Cython extension must be importable, otherwise search() raises NameError.
import brute_force_cython_ext
class BruteForceCython:
"""
Class to perform search using a Brute force.
"""
def __init__(self, hash_dict: Dict, distance_function: Callable) -> None:
"""
Initialize a dictionary for mapping file names and corresponding hashes and a distance function to be used for
getting distance between two hash strings.
Args:
hash_dict: Dictionary mapping file names to corresponding hash strings {filename: hash}
distance_function: A function for calculating distance between the hashes.
"""
self.distance_function = distance_function
self.hash_dict = hash_dict # database
def search(self, query: str, tol: int = 10) -> Dict[str, int]:
"""
Function for searching using brute force.
Args:
query: hash string for which brute force needs to work.
            tol: distance up to which a retrieval counts as a valid duplicate.
Returns:
List of tuples of the form [(valid_retrieval_filename1: distance), (valid_retrieval_filename2: distance)]
"""
filenames = []
hash_vals = []
for filename, hash_val in self.hash_dict.items():
filenames.append(filename.encode('utf-8'))
hash_vals.append(int(hash_val, 16))
return brute_force_cython_ext.query(
filenames, hash_vals, int(query, 16), tol
) # cast hex hash_val to decimals for __builtin_popcountll function
```
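A minimal usage sketch, assuming the `brute_force_cython_ext` extension is built; the hashes and filenames are made up, and the plain-Python `hamming_distance` below merely stands in for whatever distance function a caller would normally supply (the Cython path computes popcounts internally):
```python
def hamming_distance(hash1: str, hash2: str) -> int:
    """Bit-level Hamming distance between two hex-encoded 64-bit hashes."""
    return bin(int(hash1, 16) ^ int(hash2, 16)).count('1')

# Made-up 16-hex-digit (64-bit) perceptual hashes keyed by filename.
hash_dict = {
    'cat_1.jpg': 'ffeea5a5a5a5b5b5',
    'cat_2.jpg': 'ffeea5a5a5a5b5b4',
    'car_1.jpg': '0123456789abcdef',
}

searcher = BruteForceCython(hash_dict, hamming_distance)
matches = searcher.search(query='ffeea5a5a5a5b5b5', tol=10)
print(matches)  # per the docstring: (filename, distance) pairs within distance 10
```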
#### File: Package/PackageTmp/nima.py
```python
import importlib
from tensorflow.python.keras import Model
from tensorflow.python.keras.layers import Dropout, Dense
from tensorflow.python.keras.optimizer_v2.adam import Adam
from app import earth_movers_distance
class Nima:
def __init__(self, base_model_name, n_classes=10, learning_rate=0.001, dropout_rate=0, loss=earth_movers_distance,
decay=0, weights='imagenet'):
self.n_classes = n_classes
self.base_model_name = base_model_name
self.learning_rate = learning_rate
self.dropout_rate = dropout_rate
self.loss = loss
self.decay = decay
self.weights = weights
self._get_base_module()
def _get_base_module(self):
# import Keras base model module
if self.base_model_name == 'InceptionV3':
self.base_module = importlib.import_module('tensorflow.keras.applications.inception_v3')
elif self.base_model_name == 'InceptionResNetV2':
self.base_module = importlib.import_module('tensorflow.keras.applications.inception_resnet_v2')
else:
self.base_module = importlib.import_module('tensorflow.keras.applications.' + self.base_model_name.lower())
def build(self):
# get base model class
BaseCnn = getattr(self.base_module, self.base_model_name)
# load pre-trained model
self.base_model = BaseCnn(input_shape=(224, 224, 3), weights=self.weights, include_top=False, pooling='avg')
# add dropout and dense layer
x = Dropout(self.dropout_rate)(self.base_model.output)
x = Dense(units=self.n_classes, activation='softmax')(x)
self.nima_model = Model(self.base_model.inputs, x)
def compile(self):
self.nima_model.compile(optimizer=Adam(lr=self.learning_rate, decay=self.decay), loss=self.loss)
def preprocessing_function(self):
return self.base_module.preprocess_input
```
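For reference, a short sketch of the intended call sequence; the weights path is a placeholder, and `weights=None` skips the ImageNet download:
```python
nima = Nima('InceptionResNetV2', weights=None)
nima.build()                                    # attaches dropout + 10-way softmax head
nima.nima_model.load_weights('/path/to/pretrained_weights.hdf5')  # placeholder path
nima.compile()                                  # Adam optimizer + earth movers distance loss
preprocess = nima.preprocessing_function()      # basenet-specific input preprocessing
```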
|
{
"source": "jdehorty/selective_copy_files",
"score": 3
}
|
#### File: selective_copy_files/selective_copy_files/selective_copy.py
```python
import os, shutil, sys, errno
class Copy ():
"""
Copy a specific file extension in the tree directory
"""
def __init__ (self, from_path, to_path, extension):
"""
Constructor of class. Get paths and extension. Generate file list
"""
self.from_path = from_path
self.to_path = to_path
self.extension = extension
self.files = []
self.__verify_paths ()
self.__find_files()
self.__copy_files()
def __verify_paths (self):
"""
        Verify that the "from" and "to" paths exist on the system
"""
# Verify the paths
if not os.path.exists (self.from_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), self.from_path)
if not os.path.exists (self.to_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), self.to_path)
def __find_files (self):
"""
Search all files inside the from folder, and save the
full path of the files that match the searched extension.
"""
# Check correct extension
if not self.extension.startswith('.'):
self.extension = '.' + self.extension #Add a dot
absPath = os.path.abspath(self.from_path)
# walk inside the origin tree
for folder_name, subfolder_name, file_names in os.walk(absPath):
            # if the file has the correct extension, save the complete path
for file in file_names:
if file.endswith(self.extension):
self.files.append(os.path.join(folder_name, file))
def __copy_files (self):
"""
        Loop over the list of files and copy each one to the destination
"""
# Check if exist files in file list
if self.files:
absPath = os.path.abspath(self.to_path)
for file in self.files:
print ('Copying "{}" to "{}" ...'.format (file, absPath))
shutil.copy(file, absPath)
else:
print ("No files found in from folder")
```
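Because the constructor runs the whole pipeline (verify, find, copy), usage is a single call; the paths and extension below are placeholders:
```python
# Copy every *.pdf found under the source tree into the (already existing) destination folder.
Copy('/path/to/source-tree', '/path/to/destination-folder', 'pdf')
```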
|
{
"source": "jdehotin/clarifaipy",
"score": 3
}
|
#### File: clarifai/rest/client.py
```python
import os
import time
import json
import copy
import base64
from pprint import pformat
import logging
import requests
import platform
from io import BytesIO
from configparser import ConfigParser
from posixpath import join as urljoin
from past.builtins import basestring
from future.moves.urllib.parse import urlparse
logger = logging.getLogger('clarifai')
logger.handlers = []
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
CLIENT_VERSION = '2.0.14'
DEFAULT_TAG_MODEL = 'general-v1.3'
class ClarifaiApp(object):
""" Clarifai Application Object
  This is the entry point of the Clarifai Client API.
With authentication to an application, you can access
all the models, concepts, inputs in this application through
the attributes of this class.
To access the models: use app.models
To access the inputs: use app.inputs
To access the concepts: use app.concepts
"""
def __init__(self, app_id=None, app_secret=None, base_url=None, quiet=True):
self.api = ApiClient(app_id=app_id, app_secret=app_secret, base_url=base_url, quiet=quiet)
self.auth = Auth(self.api)
self.concepts = Concepts(self.api)
self.inputs = Inputs(self.api)
self.models = Models(self.api)
"""
  Below are shortcut functions that ease the transition for v1 users.
  They are also convenience functions for tag-only users, so they do not have
  to learn the extra concepts of Inputs, Models, etc.
"""
def tag_urls(self, urls, model=DEFAULT_TAG_MODEL):
''' tag urls with user specified models
by default tagged by 'general-v1.3' model
Args:
urls: a list of URLs for tagging
            the max length of the list is 128, which is the max batch size
model: the model name to tag with
default model is general model for general tagging purpose
Returns:
the JSON string from the predict call
Examples:
>>> urls = ['https://samples.clarifai.com/metro-north.jpg', \
>>> 'https://samples.clarifai.com/dog2.jpeg']
>>> app.tag_urls(urls)
'''
# validate input
if not isinstance(urls, list) or (len(urls) > 1 and not isinstance(urls[0], basestring)):
raise UserError('urls must be a list of string urls')
if len(urls) > 128:
raise UserError('max batch size is 128')
images = [Image(url=url) for url in urls]
model = self.models.get(model)
res = model.predict(images)
return res
def tag_files(self, files, model=DEFAULT_TAG_MODEL):
''' tag files on disk with user specified models
by default tagged by 'general-v1.3' model
Args:
files: a list of local file names for tagging
            the max length of the list is 128, which is the max batch size
model: the model name to tag with
default model is general model for general tagging purpose
Returns:
the JSON string from the predict call
Examples:
>>> files = ['/tmp/metro-north.jpg', \
>>> '/tmp/dog2.jpeg']
      >>> app.tag_files(files)
'''
# validate input
if not isinstance(files, list) or (len(files) > 1 and not isinstance(files[0], basestring)):
raise UserError('files must be a list of string file names')
if len(files) > 128:
raise UserError('max batch size is 128')
images = [Image(filename=filename) for filename in files]
model = self.models.get(model)
res = model.predict(images)
return res
class Auth(object):
""" Clarifai Authentication
  This class is initialized as an attribute of the clarifai application object
with app.auth
"""
def __init__(self, api):
self.api = api
def get_token(self):
''' get token string
Returns:
The token as a string
'''
res = self.api.get_token()
token = res['access_token']
return token
class Input(object):
""" The Clarifai Input object
"""
def __init__(self, input_id=None, concepts=None, not_concepts=None, metadata=None):
''' Construct an Image/Video object. it must have one of url or file_obj set.
Args:
input_id: unique id to set for the image. If None then the server will create and return
one for you.
concepts: a list of concepts this asset associate with
not_concepts: a list of concepts this asset does not associate with
'''
self.input_id = input_id
if not isinstance(concepts, (list, tuple)) and concepts is not None:
raise UserError('concepts should be a list or tuple')
if not isinstance(not_concepts, (list, tuple)) and not_concepts is not None:
raise UserError('not_concepts should be a list or tuple')
self.concepts = concepts
self.not_concepts = not_concepts
self.metadata = metadata
def dict(self):
''' Return the data of the Input as a dict ready to be input to json.dumps. '''
data = {'data':{}}
if self.input_id is not None:
data['id'] = self.input_id
# fill the tags
if self.concepts is not None:
pos_terms = [(term, True) for term in self.concepts]
else:
pos_terms = []
if self.not_concepts is not None:
neg_terms = [(term, False) for term in self.not_concepts]
else:
neg_terms = []
terms = pos_terms + neg_terms
if terms:
data['data']['concepts'] = [{'id':name, 'value':value} for name, value in terms]
if self.metadata:
data['data']['metadata'] = self.metadata
return data
class Image(Input):
def __init__(self, url=None, file_obj=None, base64=None, filename=None, crop=None, \
image_id=None, concepts=None, not_concepts=None, \
metadata=None, allow_dup_url=False):
'''
url: the url to a publically accessible image.
file_obj: a file-like object in which read() will give you the bytes.
crop: a list of float in the range 0-1.0 in the order [top, left, bottom, right] to crop out
the asset before use.
'''
super(Image, self).__init__(image_id, concepts, not_concepts, metadata=metadata)
if crop is not None and (not isinstance(crop, list) or len(crop) != 4):
raise UserError("crop arg must be list of 4 floats or None")
self.url = url
self.filename = filename
self.file_obj = file_obj
self.base64 = base64
self.crop = crop
self.allow_dup_url = allow_dup_url
# override the filename with the fileobj as fileobj
if self.filename is not None:
if not os.path.exists(self.filename):
raise UserError("Invalid file path %s. Please check!")
elif not os.path.isfile(self.filename):
raise UserError("Not a regular file %s. Please check!")
self.file_obj = open(self.filename, 'rb')
self.filename = None
if self.file_obj is not None:
if not hasattr(self.file_obj, 'getvalue') and not hasattr(self.file_obj, 'read'):
raise UserError("Not sure how to read your file_obj")
if hasattr(self.file_obj, 'mode') and self.file_obj.mode != 'rb':
raise UserError(("If you're using open(), then you need to read bytes using the 'rb' mode. "
"For example: open(filename, 'rb')"))
def dict(self):
data = super(Image, self).dict()
image = {'image':{}}
if self.file_obj is not None:
# DO NOT put 'read' as first condition
# as io.BytesIO() has both read() and getvalue() and read() gives you an empty buffer...
if hasattr(self.file_obj, 'getvalue'):
base64_imgstr = base64.b64encode(self.file_obj.getvalue()).decode('UTF-8')
elif hasattr(self.file_obj, 'read'):
base64_imgstr = base64.b64encode(self.file_obj.read()).decode('UTF-8')
else:
raise UserError("Not sure how to read your file_obj")
image['image']['base64'] = base64_imgstr
elif self.base64 is not None:
image['image']['base64'] = self.base64.decode('UTF-8')
else:
image['image']['url'] = self.url
if self.crop is not None:
image['image']['crop'] = self.crop
image['image']['allow_duplicate_url'] = self.allow_dup_url
data['data'].update(image)
return data
class Video(Input):
def __init__(self):
raise Exception('Not supported yet.')
def dict(self):
pass
class SearchTerm(object):
"""
Clarifai search term interface
the base class for InputSearchTerm and OutputSearchTerm
It is used to build SearchQueryBuilder
"""
def __init__(self):
pass
def dict(self):
pass
class InputSearchTerm(SearchTerm):
"""
Clarifai Image Search Input search term
For input search, you can specify search term for url string match, input_id string match,
concept string match, and concept_id string match
value indicates whether the concept search is a NOT search
Examples:
>>> # search for url, string match
>>> InputSearchTerm(url='http://blabla')
>>> # search for input ID, string match
>>> InputSearchTerm(input_id='site1_bla')
>>> # search for annotated concept
>>> InputSearchTerm(concept='tag1')
>>> # search for not the annotated concept
>>> InputSearchTerm(concept='tag1', value=False)
>>> # search for metadata
>>> InputSearchTerm(metadata={'key':'value'})
"""
def __init__(self, url=None, input_id=None, concept=None, concept_id=None, value=True, \
metadata=None):
self.url = url
self.input_id = input_id
self.concept = concept
self.concept_id = concept_id
self.value = value
self.metadata = metadata
def dict(self):
if self.url:
obj = { "input": {
"data": {
"image": {
"url": self.url
}
}
}
}
elif self.input_id:
obj = { "input": {
"id": self.input_id,
"data": {
"image": {}
}
}
}
elif self.concept:
obj = { "input": {
"data": {
"concepts": [ {"name":self.concept, "value":self.value} ]
}
}
}
elif self.concept_id:
obj = { "input": {
"data": {
"concepts": [ {"id":self.concept_id, "value":self.value} ]
}
}
}
elif self.metadata:
obj = { "input": {
"data": {
"metadata": self.metadata
}
}
}
return obj
class OutputSearchTerm(SearchTerm):
"""
Clarifai Image Search Output search term
For output search, you can specify search term for url, base64, and input_id for
visual search,
or specify concept and concept_id for string match
value indicates whether the concept search is a NOT search
Examples:
>>> # search for visual similarity from url
>>> OutputSearchTerm(url='http://blabla')
>>> # search for visual similarity from base64 encoded image
>>> OutputSearchTerm(base64='sdfds')
>>> # search for visual similarity from input id
>>> OutputSearchTerm(input_id='site1_bla')
>>> # search for predicted concept
>>> OutputSearchTerm(concept='tag1')
>>> # search for not the predicted concept
>>> OutputSearchTerm(concept='tag1', value=False)
"""
def __init__(self, url=None, base64=None, input_id=None, concept=None, concept_id=None, \
value=True, crop=None):
self.url = url
self.base64 = base64
self.input_id = input_id
self.concept = concept
self.concept_id = concept_id
self.value = value
self.crop = crop
def dict(self):
if self.url:
obj = { "output": {
"input": {
"data": {
"image": {
"url": self.url
}
}
}
}
}
# add crop as needed
if self.crop:
obj['output']['input']['data']['image']['crop'] = self.crop
    elif self.base64:
obj = { "output": {
"input": {
"data": {
"image": {
"base64": self.base64
}
}
}
}
}
# add crop as needed
if self.crop:
obj['output']['input']['data']['image']['crop'] = self.crop
elif self.input_id:
obj = { "output": {
"input": {
"id": self.input_id,
"data": {
"image": {
}
}
}
}
}
# add crop as needed
if self.crop:
obj['output']['input']['data']['image']['crop'] = self.crop
elif self.concept:
obj = { "output": {
"data": {
"concepts": [
{"name": self.concept, "value":self.value}
]
}
}
}
elif self.concept_id:
obj = { "output": {
"data": {
"concepts": [
{"id": self.concept_id, "value":self.value}
]
}
}
}
return obj
class SearchQueryBuilder(object):
"""
Clarifai Image Search Query Builder
This builder is for advanced search use ONLY.
If you are looking for simple concept search, or simple image similarity search,
you should use the existing function search_by_annotated_concepts, search_by_predicted_concepts,
or search_by_image and search_by_metadata
Currently the query builder only supports a list of query terms with AND
InputSearchTerm and OutputSearchTerm are the only term supported by the query builder
Examples:
>>> qb = SearchQueryBuilder()
>>> qb.add_term(term1)
>>> qb.add_term(term2)
>>>
>>> app.inputs.search(qb)
"""
def __init__(self):
self.terms = []
def add_term(self, term):
''' add search term to the query
this could be search by input or output
construct the search_term with InputSearchTerm()
and OutputSearchTerm()
'''
if not isinstance(term, InputSearchTerm) and \
not isinstance(term, OutputSearchTerm):
raise UserError('first level search term could be only InputSearchTerm, OutputSearchTerm')
self.terms.append(term)
def dict(self):
''' construct the raw query for the RESTful API '''
query = { "ands":
[term.dict() for term in self.terms]
}
return query
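# A concrete sketch of the builder in use (values are illustrative only):
#
#   qb = SearchQueryBuilder()
#   qb.add_term(InputSearchTerm(concept='dog'))
#   qb.add_term(OutputSearchTerm(url='https://samples.clarifai.com/dog2.jpeg'))
#   app.inputs.search(qb)
#
# qb.dict() then yields the raw {"ands": [...]} query that is sent to the REST API.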
class Models(object):
def __init__(self, api):
self.api = api
# the cache of the model name -> model id mapping
# to avoid an extra model query on every prediction by model name
self.model_id_cache = {}
def clear_model_cache(self):
''' clear model_name -> model_id cache
    WARNING: This is an internal function; users should not call it.
    We cache the model_name -> model_id mapping for API efficiency.
    The first time you call models.get() by name, the name-to-ID mapping
    is saved, so later calls do not need an extra query for the model ID.
Returns:
There is no return result for this call
'''
self.model_id_cache = {}
def create(self, model_id, model_name=None, concepts=None, \
concepts_mutually_exclusive=False, \
closed_environment=False):
''' create a new model
Args:
model_id: ID of the model
model_name: optional name of the model
concepts: optional concepts to associated with this model
concepts_mutually_exclusive: True or False, whether concepts are mutually exclusive
closed_environment: True or False, whether to use negatives for prediction
Returns:
Model object
Examples:
>>> # create a model with no concepts
>>> app.models.create('my_model1')
>>> # create a model with a few concepts
>>> app.models.create('my_model2', concepts=['bird', 'fish'])
'''
if not model_name:
model_name = model_id
res = self.api.create_model(model_id, model_name, concepts, \
concepts_mutually_exclusive, closed_environment)
if res.get('model'):
model = self._to_obj(res['model'])
elif res.get('status'):
status = res['status']
raise UserError('code: %d, desc: %s, details: %s' % \
(status['code'], status['description'], status['details']))
return model
def get_all(self):
''' get all models in the application
Returns:
a generator that yields Model object
Examples:
>>> for model in app.models.get_all():
>>> print model.model_name
'''
page = 1
per_page = 20
while True:
res = self.api.get_models(page, per_page)
if not res['models']:
break
for one in res['models']:
model = self._to_obj(one)
yield model
page += 1
def get_by_page(self, page=1, per_page=20):
''' get paginated models from the application
When the number of models gets large, you may want to get
the results in paginated batches rather than all at once
Args:
page: page number
per_page: number of models returned in one page
Returns:
a list of Model objects
Examples:
>>> models = app.models.get_by_page(2, 20)
'''
res = self.api.get_models(page, per_page)
results = [self._to_obj(one) for one in res['models']]
return results
def delete(self, model_id, version_id=None):
''' delete the model, or a specific version of the model
If no model version id is specified, the whole model is deleted,
along with all the versions associated with it.
If a model version id is specified, only that particular
version is deleted from the model
Args:
model_id: the unique ID of the model
version_id: the unique ID of the model version
Returns:
the raw JSON response from the server
Examples:
>>> # delete a model
>>> app.models.delete('model_id1')
>>> # delete a model version
>>> app.models.delete('model_id1', version_id='version1')
'''
if version_id is None:
res = self.api.delete_model(model_id)
else:
res = self.api.delete_model_version(model_id, version_id)
return res
def delete_all(self):
''' delete all models and the versions associated with each one
After this operation, you will have no model in the
application
Returns:
the raw JSON response from the server
Examples:
>>> app.models.delete_all()
'''
res = self.api.delete_all_models()
return res
def get(self, model_id, model_type='concept'):
''' get a model, by ID or name
Args:
model_id: unique identifier of a model
model_type: type of the model
Returns:
the Model object
Examples:
>>> # get general-v1.3 model
>>> app.models.get('general-v1.3')
'''
if self.model_id_cache.get(model_id):
model_id = self.model_id_cache[model_id]
try:
res = self.api.get_model(model_id)
model = self._to_obj(res['model'])
except ApiError as e:
model_name = model_id
if e.response.status_code == 404:
res = self.search(model_name, model_type)
if res is None or len(res) > 1:
raise e
else:
model = res[0]
model_id = model.model_id
self.model_id_cache.update({model_name:model_id})
return model
def search(self, model_name, model_type='concept'):
''' search model by name and type
search for models by name; the default is to search concept models
only. All custom models trained by users are concept models.
Args:
model_name: name of the model. name is not unique.
model_type: default to 'concept'
Returns:
a list of Model objects or None
Examples:
>>> # search for general-v1.3 concept model
>>> app.models.search('general-v1.3')
>>>
>>> # search for color model
>>> app.models.search('color', model_type='color')
'''
res = self.api.search_models(model_name, model_type)
if res.get('models'):
results = [self._to_obj(one) for one in res['models']]
else:
results = None
return results
def _to_obj(self, item):
''' convert a model json object to Model object '''
return Model(self.api, item)
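# A minimal usage sketch for Models (model ID and concepts are hypothetical; `app` is
# the application object used in the docstring examples):
#
#   model = app.models.create('pets', concepts=['cat', 'dog'])
#   model = app.models.get('pets')   # get by ID or name; name -> ID lookups are cached
#   app.models.delete('pets')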
class Inputs(object):
def __init__(self, api):
self.api = api
def create_image(self, image):
''' create an image from Image object
Args:
image: a Clarifai Image object
Returns:
the Image object that was just created and uploaded
Examples::
>>> app.inputs.create_image(Image(url='https://samples.clarifai.com/metro-north.jpg'))
'''
ret = self.api.add_inputs([image])
img = self._to_obj(ret['inputs'][0])
return img
def create_image_from_url(self, url, image_id=None, concepts=None, not_concepts=None, crop=None, \
metadata=None, allow_duplicate_url=False):
''' create an image from Image url
Args:
url: image url
image_id: ID of the image
concepts: a list of concepts
not_concepts: a list of concepts
crop: crop information, with four corner coordinates
metadata: meta data with a dictionary
allow_duplicate_url: True or False, the flag to allow a duplicate url to be imported
Returns:
the Image object that was just created and uploaded
Examples::
>>> app.inputs.create_image_from_url(url='https://samples.clarifai.com/metro-north.jpg')
'''
image = Image(url=url, image_id=image_id, concepts=concepts, not_concepts=not_concepts, \
crop=crop, metadata=metadata, allow_dup_url=allow_duplicate_url)
return self.create_image(image)
def create_image_from_filename(self, filename, image_id=None, concepts=None, not_concepts=None, \
crop=None, metadata=None, allow_duplicate_url=False):
''' create an image by local filename
Args:
filename: local filename
image_id: ID of the image
concepts: a list of concepts
not_concepts: a list of concepts
crop: crop information, with four corner coordinates
metadata: meta data with a dictionary
allow_duplicate_url: True or False, the flag to allow a duplicate url to be imported
Returns:
the Image object that was just created and uploaded
Examples::
>>> app.inputs.create_image_from_filename(filename="a.jpeg")
'''
fileio = open(filename, 'rb')
image = Image(file_obj=fileio, image_id=image_id, concepts=concepts, \
not_concepts=not_concepts, crop=crop, metadata=metadata, \
allow_dup_url=allow_duplicate_url)
return self.create_image(image)
def create_image_from_bytes(self, img_bytes, image_id=None, concepts=None, not_concepts=None, \
crop=None, metadata=None, allow_duplicate_url=False):
''' create an image by image bytes
Args:
img_bytes: raw bytes of an image
image_id: ID of the image
concepts: a list of concepts
not_concepts: a list of concepts
crop: crop information, with four corner coordinates
metadata: meta data with a dictionary
allow_duplicate_url: True or False, the flag to allow a duplicate url to be imported
Returns:
the Image object that was just created and uploaded
Examples::
>>> app.inputs.create_image_from_bytes(img_bytes="raw image bytes...")
'''
fileio = BytesIO(img_bytes)
image = Image(file_obj=fileio, image_id=image_id, concepts=concepts, \
not_concepts=not_concepts, crop=crop, metadata=metadata, \
allow_dup_url=allow_duplicate_url)
return self.create_image(image)
def create_image_from_base64(self, base64_bytes, image_id=None, concepts=None, \
not_concepts=None, crop=None, metadata=None, \
allow_duplicate_url=False):
''' create an image by base64 bytes
Args:
base64_bytes: base64 encoded image bytes
image_id: ID of the image
concepts: a list of concepts
not_concepts: a list of concepts
crop: crop information, with four corner coordinates
metadata: meta data with a dictionary
allow_duplicate_url: True or False, the flag to allow a duplicate url to be imported
Returns:
the Image object that was just created and uploaded
Examples::
>>> app.inputs.create_image_from_base64(base64_bytes="base64 encoded image bytes...")
'''
image = Image(base64=base64_bytes, image_id=image_id, concepts=concepts, \
not_concepts=not_concepts, crop=crop, metadata=metadata, \
allow_dup_url=allow_duplicate_url)
return self.create_image(image)
def bulk_create_images(self, images):
''' bulk create images
Args:
images: a list of Image object
Returns:
a list of Image object just got created
Examples:
>>> img1 = Image(url="", concepts=['cat', 'kitty'])
>>> img2 = Image(url="", concepts=['dog'], not_concepts=['cat'])
>>> app.inputs.bulk_create_images([img1, img2])
'''
lens = len(images)
if lens > 128:
raise UserError('the maximum number of inputs in a batch is 128')
res = self.api.add_inputs(images)
images = [self._to_obj(one) for one in res['inputs']]
return images
def check_status(self):
''' check the input upload status
Args:
Void
Returns:
InputCounts object
Examples:
>>> status = app.inputs.check_status()
>>> print status.code
>>> print status.description
'''
ret = self.api.get_inputs_status()
counts = InputCounts(ret)
return counts
def get_all(self):
''' get all inputs in a generator
Args:
Void
Returns:
a generator that yields Input object
Examples:
>>> for image in app.inputs.get_all():
>>> print image.input_id
'''
page = 1
per_page = 20
while True:
res = self.api.get_inputs(page, per_page)
if not res['inputs']:
break
for one in res['inputs']:
yield self._to_obj(one)
page += 1
def get_by_page(self, page=1, per_page=20):
''' get input with pagination
Args:
page: page number
per_page: number of inputs to retrieve per page
Returns:
a list of Input object
Examples:
>>> for image in app.inputs.get_by_page(2, 10):
>>> print image.input_id
'''
res = self.api.get_inputs(page, per_page)
results = [self._to_obj(one) for one in res['inputs']]
return results
def delete(self, input_id):
''' delete an input with input ID
Args:
input_id: the unique input ID
Returns:
ApiStatus object
Examples:
>>> ret = app.inputs.delete('id1')
>>> print ret.code
'''
if isinstance(input_id, list):
res = self.api.delete_inputs(input_id)
else:
res = self.api.delete_input(input_id)
return ApiStatus(res['status'])
def delete_all(self):
''' delete all inputs from the application
'''
res = self.api.delete_all_inputs()
return ApiStatus(res['status'])
def get(self, input_id):
''' get an Input object by input ID
Args:
input_id: the unique identifier of the input
Returns:
an Image/Input object
Examples:
>>> image = app.inputs.get('id1')
>>> print image.input_id
'''
res = self.api.get_input(input_id)
one = res['input']
return self._to_obj(one)
def search(self, qb, page=1, per_page=20):
''' search with a clarifai image query builder
WARNING: this is the advanced search function. You will need to build a query builder
in order to use this.
There are a few simple search functions:
search_by_annotated_concepts()
search_by_predicted_concepts()
search_by_image()
search_by_metadata()
Args:
qb: clarifai query builder
Returns:
a list of Input/Image object
'''
res = self.api.search_inputs(qb.dict(), page, per_page)
hits = [self._to_obj(one['input']) for one in res['hits']]
return hits
def search_by_image(self, image_id=None, \
image=None, url=None, \
imgbytes=None, base64bytes=None, \
fileobj=None, filename=None, \
crop=None, page=1, per_page=20):
''' search for visually similar images
By passing image_id, raw image bytes, base64 encoded bytes, image file io stream,
image filename, or Clarifai Image object, you can use the visual search power of
the Clarifai API.
You can also specify a crop of the image to search over
Args:
image_id: unique ID of the image for search
image: Image object for search
imgbytes: raw image bytes for search
base64bytes: base64 encoded image bytes
fileobj: file io stream, like open(file)
filename: filename on local filesystem
crop: crop of the image
page: page number
per_page: number of images returned per page
Returns:
a list of Image object
Examples:
>>> # search by image url
>>> app.inputs.search_by_image(url='http://blabla')
>>> # search by local filename
>>> app.inputs.search_by_image(filename='bla')
>>> # search by raw image bytes
>>> app.inputs.search_by_image(imgbytes='data')
>>> # search by base64 encoded image bytes
>>> app.inputs.search_by_image(base64bytes='data')
>>> # search by file stream io
>>> app.inputs.search_by_image(fileobj=open('file'))
'''
not_nones = [x for x in [image_id, image, url, imgbytes, base64bytes, fileobj, filename] if x is not None]
if len(not_nones) != 1:
raise UserError('Unable to construct an image')
if image_id:
qb = SearchQueryBuilder()
term = OutputSearchTerm(input_id=image_id)
qb.add_term(term)
res = self.search(qb, page, per_page)
elif image:
qb = SearchQueryBuilder()
if image.url:
term = OutputSearchTerm(url=image.url)
elif image.base64:
term = OutputSearchTerm(base64=image.base64.decode('UTF-8'))
elif image.file_obj:
imgbytes = image.file_obj.read()
base64_bytes = base64.b64encode(imgbytes).decode('UTF-8')
term = OutputSearchTerm(base64=base64_bytes)
qb.add_term(term)
res = self.search(qb, page, per_page)
elif url:
img = Image(url=url, crop=crop)
res = self.search_by_image(image=img, page=page, per_page=per_page)
elif fileobj:
img = Image(file_obj=fileobj, crop=crop)
res = self.search_by_image(image=img, page=page, per_page=per_page)
elif imgbytes:
fileio = BytesIO(imgbytes)
img = Image(file_obj=fileio, crop=crop)
res = self.search_by_image(image=img, page=page, per_page=per_page)
elif filename:
fileio = open(filename, 'rb')
img = Image(file_obj=fileio, crop=crop)
res = self.search_by_image(image=img, page=page, per_page=per_page)
elif base64bytes:
img = Image(base64=base64bytes, crop=crop)
res = self.search_by_image(image=img, page=page, per_page=per_page)
return res
def search_by_original_url(self, url, page=1, per_page=20):
''' search by original url of the imported images
Args:
url: url of the image
page: page number
per_page: the number of images to return per page
Returns:
a list of Image object
Examples:
>>> app.inputs.search_by_original_url(url='http://bla')
'''
qb = SearchQueryBuilder()
term = InputSearchTerm(url=url)
qb.add_term(term)
res = self.search(qb, page, per_page)
return res
def search_by_metadata(self, metadata, page=1, per_page=20):
''' search by other metadata of the image rather than by concept
Args:
metadata: a dictionary for metadata search.
The dictionary could be a simple one with only one key and value,
or a nested dictionary with multiple levels.
page: page number
per_page: the number of images to return per page
Returns:
a list of Image object
Examples:
>>> app.inputs.search_by_metadata(metadata={'name':'bla'})
>>> app.inputs.search_by_metadata(metadata={'my_class1': { 'name' : 'bla' }})
'''
if isinstance(metadata, dict):
qb = SearchQueryBuilder()
term = InputSearchTerm(metadata=metadata)
qb.add_term(term)
res = self.search(qb, page, per_page)
else:
raise UserError('Metadata must be a valid dictionary. Please double check.')
return res
def search_by_annotated_concepts(self, concept=None, concepts=None, \
value=True, values=None, \
concept_id=None, concept_ids=None, \
page=1, per_page=20):
''' search over the user annotated concepts
Args:
concept: concept name to search
concepts: a list of concept name to search
concept_id: concept id to search
concept_ids: a list of concept id to search
value: whether the concept should exist or NOT
values: the list of values corresponding to the concepts
page: page number
per_page: number of images to return per page
Returns:
a list of Image object
Examples:
>>> app.inputs.search_by_annotated_concepts(concept='cat')
'''
if not concept and not concepts and not concept_id and not concept_ids:
raise UserError('concept could not be null.')
if concept or concepts:
if concept and not isinstance(concept, basestring):
raise UserError('concept should be a string')
elif concepts and not isinstance(concepts, list):
raise UserError('concepts must be a list')
elif concepts and not all([isinstance(one, basestring) for one in concepts]):
raise UserError('concepts must be a list of strings')
if concept and concepts:
raise UserError('you can either search by concept or concepts but not both')
if concept:
concepts = [concept]
if not values:
values = [value]
qb = SearchQueryBuilder()
for concept, value in zip(concepts, values):
term = InputSearchTerm(concept=concept, value=value)
qb.add_term(term)
else:
if concept_id and not isinstance(concept_id, basestring):
raise UserError('concept_id should be a string')
elif concept_ids and not isinstance(concept_ids, list):
raise UserError('concept_ids must be a list')
elif concept_ids and not all([isinstance(one, basestring) for one in concept_ids]):
raise UserError('concept_ids must be a list of strings')
if concept_id and concept_ids:
raise UserError('you can either search by concept_id or concept_ids but not both')
if concept_id:
concept_ids = [concept_id]
if not values:
values = [value]
qb = SearchQueryBuilder()
for concept_id, value in zip(concept_ids, values):
term = InputSearchTerm(concept_id=concept_id, value=value)
qb.add_term(term)
return self.search(qb, page, per_page)
def search_by_predicted_concepts(self, concept=None, concepts=None, \
value=True, values=None,\
concept_id=None, concept_ids=None, \
page=1, per_page=20):
''' search over the predicted concepts
Args:
concept: concept name to search
concepts: a list of concept name to search
concept_id: concept id to search
concept_ids: a list of concept id to search
value: whether the concept should exist or NOT
values: the list of values corresponding to the concepts
page: page number
per_page: number of images to return per page
Returns:
a list of Image object
Examples:
>>> app.inputs.search_by_predicted_concepts(concept='cat')
'''
if not concept and not concepts and not concept_id and not concept_ids:
raise UserError('concept could not be null.')
if concept and not isinstance(concept, basestring):
raise UserError('concept should be a string')
elif concepts and not isinstance(concepts, list):
raise UserError('concepts must be a list')
elif concepts and not all([isinstance(one, basestring) for one in concepts]):
raise UserError('concepts must be a list of strings')
if concept or concepts:
if concept and concepts:
raise UserError('you can either search by concept or concepts but not both')
if concept:
concepts = [concept]
if not values:
values = [value]
qb = SearchQueryBuilder()
for concept, value in zip(concepts, values):
term = OutputSearchTerm(concept=concept, value=value)
qb.add_term(term)
else:
if concept_id and concept_ids:
raise UserError('you can either search by concept_id or concept_ids but not both')
if concept_id:
concept_ids = [concept_id]
if not values:
values = [value]
qb = SearchQueryBuilder()
for concept_id, value in zip(concept_ids, values):
term = OutputSearchTerm(concept_id=concept_id, value=value)
qb.add_term(term)
return self.search(qb, page, per_page)
def update(self, image, action='merge'):
''' update the input
update the information of an input/image
Args:
image: an Image() object that has concepts, metadata, etc.
action: one of ['merge', 'overwrite']
'merge' is to merge the new info into the existing info, for either concepts or metadata
'overwrite' is to overwrite the existing metadata and concepts with the new ones
Returns:
an Image object
Examples:
>>> new_img = Image(image_id="abc", concepts=['c1', 'c2'], not_concepts=['c3'], metadata={'key':'val'})
>>> app.inputs.update(new_img, action='overwrite')
'''
res = self.api.patch_inputs(action=action, inputs=[image])
one = res['inputs'][0]
return self._to_obj(one)
def bulk_update(self, images, action='merge'):
''' update the input
update the information of an input/image
Args:
images: a list of Image() objects that have concepts, metadata, etc.
action: one of ['merge', 'overwrite']
'merge' is to merge the new info into the existing info, for either concepts or metadata
'overwrite' is to overwrite the existing metadata and concepts with the new ones
Returns:
a list of Image objects
Examples:
>>> new_img1 = Image(image_id="abc1", concepts=['c1', 'c2'], not_concepts=['c3'], metadata={'key':'val'})
>>> new_img2 = Image(image_id="abc2", concepts=['c1', 'c2'], not_concepts=['c3'], metadata={'key':'val'})
>>> app.inputs.bulk_update([new_img1, new_img2], action='overwrite')
'''
ret = self.api.patch_inputs(action=action, inputs=images)
objs = [self._to_obj(item) for item in ret['inputs']]
return objs
def delete_concepts(self, input_id, concepts):
''' delete concepts from an input/image
Args:
input_id: unique ID of the input
concepts: a list of concept name
Returns:
an Image object
'''
res = self.update(Image(image_id=input_id, concepts=concepts), action='remove')
return res
def bulk_merge_concepts(self, input_ids, concept_lists):
''' bulk merge concepts from a list of input ids
Args:
input_ids: a list of input IDs
concept_lists: a list of concept list
Returns:
a list of Input objects
Examples:
>>> app.inputs.bulk_merge_concepts(['id'], [[('cat',True), ('dog',False)]])
'''
if len(input_ids) != len(concept_lists):
raise UserError('the number of input_ids and concept_lists must be the same')
inputs = []
for input_id, concept_list in zip(input_ids, concept_lists):
concepts = []
not_concepts = []
for concept_id, value in concept_list:
if value is True:
concepts.append(concept_id)
else:
not_concepts.append(concept_id)
image = Image(image_id=input_id, concepts=concepts, not_concepts=not_concepts)
inputs.append(image)
res = self.bulk_update(inputs, action='merge')
return res
def bulk_delete_concepts(self, input_ids, concept_lists):
''' bulk delete concepts from a list of input ids
Args:
input_ids: a list of input IDs
concept_lists: a list of concept list
Returns:
a list of Input objects
Examples:
>>> app.inputs.bulk_delete_concepts(['id'], [['cat', 'dog']])
'''
# the reason list comprehension is not used is it breaks the 100 chars width
inputs = []
for input_id, concepts in zip(input_ids, concept_lists):
one_input = Image(image_id=input_id, concepts=concepts)
inputs.append(one_input)
res = self.bulk_update(inputs, action='remove')
return res
def merge_concepts(self, input_id, concepts, not_concepts, overwrite=False):
''' merge concepts for one input
Args:
input_id: the unique ID of the input
concepts: the list of concepts
not_concepts: the list of negative concepts
Returns:
an Input object
Examples:
>>> app.inputs.merge_concepts('id', ['cat', 'kitty'], ['dog'])
'''
image = Image(image_id=input_id, concepts=concepts, not_concepts=not_concepts)
if overwrite is True:
action='overwrite'
else:
action='merge'
res = self.update(image, action=action)
return res
def add_concepts(self, input_id, concepts, not_concepts):
''' add concepts for one input
This is just an alias of `merge_concepts` for easier understanding
when you try to add some new concepts to an image
Args:
input_id: the unique ID of the input
concepts: the list of concepts
not_concepts: the list of negative concepts
Returns:
an Input object
Examples:
>>> app.inputs.add_concepts('id', ['cat', 'kitty'], ['dog'])
'''
return self.merge_concepts(input_id, concepts, not_concepts)
def merge_metadata(self, input_id, metadata):
''' merge metadata for the image
This is to merge/update the metadata of the given image
Args:
input_id: the unique ID of the input
metadata: the metadata dictionary
Examples:
>>> # merge the metadata
>>> # metadata will be merged along with the existing key/value
>>> app.inputs.merge_metadata('id', {'key1':'value1', 'key2':'value2'})
'''
image = Image(image_id=input_id, metadata=metadata)
action = 'merge'
res = self.update(image, action=action)
return res
def _to_obj(self, one):
# get concepts
concepts = []
not_concepts = []
if one['data'].get('concepts'):
for concept in one['data']['concepts']:
if concept['value'] == 1:
concepts.append(concept['name'])
else:
not_concepts.append(concept['name'])
if not concepts:
concepts = None
if not not_concepts:
not_concepts = None
# get metadata
metadata=one['data'].get('metadata', None)
input_id = one['id']
if one['data'].get('image'):
if one['data']['image'].get('url'):
if one['data']['image'].get('crop'):
crop = one['data']['image']['crop']
one_input = Image(image_id=input_id, url=one['data']['image']['url'], \
concepts=concepts, not_concepts=not_concepts, crop=crop, \
metadata=metadata)
else:
one_input = Image(image_id=input_id, url=one['data']['image']['url'], \
concepts=concepts, not_concepts=not_concepts, metadata=metadata)
elif one['data']['image'].get('base64'):
if one['data']['image'].get('crop'):
crop = one['data']['image']['crop']
one_input = Image(image_id=input_id, base64=one['data']['image']['base64'], \
concepts=concepts, not_concepts=not_concepts, crop=crop, \
metadata=metadata)
else:
one_input = Image(image_id=input_id, base64=one['data']['image']['base64'], \
concepts=concepts, not_concepts=not_concepts, metadata=metadata)
elif one['data'].get('video'):
raise UserError('Not supported yet')
else:
raise UserError('Unknown input type')
return one_input
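# A minimal usage sketch for Inputs (the concept is hypothetical; the sample URL is the
# one used in the docstring examples above):
#
#   img = app.inputs.create_image_from_url(
#       url='https://samples.clarifai.com/metro-north.jpg', concepts=['train'])
#   hits = app.inputs.search_by_annotated_concepts(concept='train')
#   app.inputs.delete(img.input_id)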
class Concepts(object):
def __init__(self, api):
self.api = api
def get_all(self):
''' get all concepts in a generator
Args:
void
Returns:
all concepts in a generator
'''
page = 1
per_page = 20
while True:
res = self.api.get_concepts(page, per_page)
if not res['concepts']:
break
for one in res['concepts']:
yield self._to_obj(one)
page += 1
def get(self, concept_id):
''' get a concept by id
Args:
concept_id: concept ID, the unique identifier of the concept
Returns:
If found, return the Concept object
Otherwise, return None
Examples:
>>> app.concepts.get('id')
'''
res = self.api.get_concept(concept_id)
if res.get('concept'):
concept = self._to_obj(res['concept'])
else:
concept = None
return concept
def search(self, term):
''' search concepts by concept name with wildcards
Args:
term: search term with wildcards
Returns:
concepts in a generator
Examples:
>>> app.concepts.search('cat')
'''
page = 1
per_page = 20
while True:
res = self.api.search_concepts(term, page, per_page)
if not res['concepts']:
break
for one in res['concepts']:
yield self._to_obj(one)
page += 1
def create(self, concept_id, concept_name=None):
''' create a new concept
Args:
concept_id: concept ID, the unique identifier of the concept
concept_name: name of the concept
If name is not specified, it will be set to the same as concept ID
Returns:
the new Concept object
'''
res = self.api.add_concepts([concept_id], [concept_name])
concept = self._to_obj(res['concepts'][0])
return concept
def bulk_create(self, concept_ids, concept_names=None):
''' bulk create concepts
When the concept name is not set, it will be set as the same as concept ID.
Args:
concept_ids: a list of concept IDs
concept_names: a list of concept name
Returns:
A list of Concept() object
Examples::
>>> bulk_create(['id1', 'id2'], ['cute cat', 'cute dog'])
'''
res = self.api.add_concepts(concept_ids, concept_names)
concepts = [self._to_obj(one) for one in res['concepts']]
return concepts
def _to_obj(self, item):
concept_id = item['id']
concept_name = item['name']
app_id = item['app_id']
created_at = item['created_at']
return Concept(concept_name, concept_id, app_id, \
created_at)
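# A minimal usage sketch for Concepts (IDs and names are hypothetical):
#
#   concept = app.concepts.create('cat-1', 'cute cat')
#   fetched = app.concepts.get('cat-1')
#   for c in app.concepts.search('cat*'):
#       print c.concept_name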
class Model(object):
def __init__(self, api, item=None):
self.api = api
if item:
self.model_id = item['id']
self.model_name = item['name']
self.created_at = item['created_at']
self.app_id = item['app_id']
self.model_version = item['model_version']['id']
self.model_status_code = item['model_version']['status']['code']
self.output_info = item.get('output_info', {})
self.concepts = []
if self.output_info.get('output_config'):
output_config = self.output_info['output_config']
self.concepts_mutually_exclusive = output_config['concepts_mutually_exclusive']
self.closed_environment = output_config['closed_environment']
else:
self.concepts_mutually_exclusive = False
self.closed_environment = False
if self.output_info.get('data', {}).get('concepts'):
for concept in self.output_info['data']['concepts']:
concept = Concept(concept_name=concept['name'], concept_id=concept['id'], \
app_id=concept['app_id'], created_at=concept['created_at'])
self.concepts.append(concept)
def get_info(self, verbose=False):
''' get model info, with or without concepts info
Args:
verbose: default is False. True will yield output_info, with concepts of the model
Returns:
raw json of the response
Examples:
>>> # with basic model info
>>> model.get_info()
>>> # model info with concepts
>>> model.get_info(verbose=True)
'''
if verbose is False:
ret = self.api.get_model(self.model_id)
else:
ret = self.api.get_model_output_info(self.model_id)
return ret
def get_concept_ids(self):
''' get concepts IDs associated with the model
Args:
Void
Returns:
a list of concept IDs
Examples:
>>> ids = model.get_concept_ids()
'''
if self.concepts:
concepts = [c.dict() for c in self.concepts]
else:
res = self.get_info(verbose=True)
concepts = res['model']['output_info'].get('data', {}).get('concepts', [])
return [c['id'] for c in concepts]
def dict(self):
data = {
"model": {
"name": self.model_name,
"output_info": {
"output_config": {
"concepts_mutually_exclusive": self.concepts_mutually_exclusive,
"closed_environment": self.closed_environment
}
}
}
}
if self.model_id:
data['model']['id'] = self.model_id
if self.concepts:
ids = [{"id": concept_id} for concept_id in self.concepts]
data['model']['output_info']['data'] = { "concepts": ids }
return data
def train(self, sync=True, timeout=60):
''' train a model
train the model in synchronous or asynchronous mode
Args:
sync: indicating synchronous or asynchronous, default is True
timeout: the maximum number of seconds to wait for synchronous training before returning
Returns:
the Model object
'''
res = self.api.create_model_version(self.model_id)
status = res['status']
if status['code'] == 10000:
model_id = res['model']['id']
model_version = res['model']['model_version']['id']
model_status_code = res['model']['model_version']['status']['code']
else:
return res
if sync is False:
return res
# train synchronously even though the RESTful API is always async
# will loop until the model is trained
# 21103: queued for training
# 21101: being trained
wait_interval = 1
time_start = time.time()
while model_status_code == 21103 or model_status_code == 21101:
elapsed = time.time() - time_start
if elapsed > timeout:
break
time.sleep(wait_interval)
res_ver = self.api.get_model_version(model_id, model_version)
model_status_code = res_ver['model_version']['status']['code']
res['model']['model_version'] = res_ver['model_version']
model = self._to_obj(res['model'])
return model
def predict_by_url(self, url):
''' predict a model with url
Args:
url: url of an image
Returns:
the prediction of the model in JSON format
'''
image = Image(url=url)
res = self.predict([image])
return res
def predict_by_filename(self, filename):
''' predict a model with a local filename
Args:
filename: filename on local filesystem
Returns:
the prediction of the model in JSON format
'''
fileio = open(filename, 'rb')
image = Image(file_obj=fileio)
res = self.predict([image])
return res
def predict_by_bytes(self, raw_bytes):
''' predict a model with image raw bytes
Args:
raw_bytes: raw bytes of an image
Returns:
the prediction of the model in JSON format
'''
base64_bytes = base64.b64encode(raw_bytes)
image = Image(base64=base64_bytes)
res = self.predict([image])
return res
def predict_by_base64(self, base64_bytes):
''' predict a model with base64 encoded image bytes
Args:
base64_bytes: base64 encoded image bytes
Returns:
the prediction of the model in JSON format
'''
image = Image(base64=base64_bytes)
res = self.predict([image])
return res
def predict(self, inputs):
''' predict with multiple images
Args:
inputs: a list of Image object
Returns:
the prediction of the model in JSON format
'''
res = self.api.predict_model(self.model_id, inputs, self.model_version)
return res
def merge_concepts(self, concept_ids, overwrite=False):
''' merge concepts in a model
If the concept does not exist in the model, it will be appended,
otherwise, the original one will be kept
Args:
concept_ids: a list of concept id
overwrite: True or False. If True, the concepts will be overwritten
Returns:
the Model object
'''
if overwrite is True:
action = 'overwrite'
else:
action = 'merge'
model = self.update(action=action, concept_ids=concept_ids)
return model
def add_concepts(self, concept_ids):
''' merge concepts in a model
This is just an alias of `merge_concepts`, for easier understanding when adding new concepts
to the model without overwriting the existing ones
Args:
concept_ids: a list of concept id
Returns:
the Model object
Examples:
>>> model = self.app.models.get('model_id')
>>> model.add_concepts(['cat', 'dog'])
'''
return self.merge_concepts(concept_ids)
def update(self, action='merge', model_name=None, concepts_mutually_exclusive=None, \
closed_environment=None, concept_ids=None):
''' update the model attributes
This updates the model attributes. The name of the model and the list of concepts can be
changed, as can the training attributes concepts_mutually_exclusive and closed_environment.
Note this is an overwriting change. For a valid call, at least one attribute should be
specified; otherwise the call will just be skipped without error.
Args:
action: the way to patch the model: ['merge', 'remove', 'overwrite']
model_name: name of the model
concepts_mutually_exclusive: whether it's a mutually exclusive model
closed_environment: whether it's closed environment training
concept_ids: a list of concept ids
Returns:
the Model object
Examples:
>>> model = self.app.models.get('model_id')
>>> model.update(model_name="new_model_name")
>>> model.update(concepts_mutually_exclusive=False)
>>> model.update(closed_environment=True)
>>> model.update(concept_ids=["bird", "hurd"])
>>> model.update(concepts_mutually_exclusive=True, concept_ids=["bird", "hurd"])
'''
args = [model_name, concepts_mutually_exclusive, closed_environment, concept_ids]
if not any(map(lambda x: x is not None, args)):
return self
model = {"id": self.model_id,
"output_info": {
"output_config": {},
"data": {}
}
}
if model_name:
model["name"] = model_name
if concepts_mutually_exclusive is not None:
model["output_info"]["output_config"]["concepts_mutually_exclusive"] = concepts_mutually_exclusive
if closed_environment is not None:
model["output_info"]["output_config"]["closed_environment"] = closed_environment
if concept_ids is not None:
model["output_info"]["data"]["concepts"] = [{"id": concept_id} for concept_id in concept_ids]
res = self.api.patch_model(model, action)
model = res['models'][0]
return self._to_obj(model)
def delete_concepts(self, concept_ids):
''' delete concepts from a model
Args:
concept_ids: a list of concept id
Returns:
the Model object
Examples:
>>> model = self.app.models.get('model_id')
>>> model.delete_concepts(['cat', 'dog'])
'''
model = self.update(action='remove', concept_ids=concept_ids)
return model
def list_versions(self):
''' list all model versions
Args:
void
Returns:
the JSON response
Examples:
>>> model = self.app.models.get('model_id')
>>> model.list_versions()
'''
res = self.api.get_model_versions(self.model_id)
return res
def get_version(self, version_id):
''' get model version info for a particular version
Args:
version_id: version id of the model version
Returns:
the JSON response
Examples:
>>> model = self.app.models.get('model_id')
>>> model.get_version('model_version_id')
'''
res = self.api.get_model_version(self.model_id, version_id)
return res
def delete_version(self, version_id):
''' delete model version by version_id
Args:
version_id: version id of the model version
Returns:
the JSON response
Examples:
>>> model = self.app.models.get('model_id')
>>> model.delete_version('model_version_id')
'''
res = self.api.delete_model_version(self.model_id, version_id)
return res
def create_version(self):
res = self.api.create_model_version(self.model_id)
return res
def get_inputs(self, version_id=None, page=1, per_page=20):
''' get all the inputs from the model or a specific model version
Without a model version id specified, this will return the inputs used by the latest model version
Args:
version_id: model version id
page: page number
per_page: number of inputs to return for each page
Returns:
the raw JSON response containing the inputs
'''
res = self.api.get_model_inputs(self.model_id, version_id, \
page, per_page)
return res
def _to_obj(self, item):
''' convert a model json object to Model object '''
return Model(self.api, item)
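# A minimal train-and-predict sketch for Model (the model ID and image URL are
# hypothetical; all calls are methods defined above):
#
#   model = app.models.get('pets')
#   model.add_concepts(['cat', 'dog'])
#   model = model.train(sync=True, timeout=60)
#   res = model.predict_by_url('https://samples.clarifai.com/metro-north.jpg')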
class Concept(object):
""" Clarifai Concept
"""
def __init__(self, concept_name, concept_id=None, app_id=None, created_at=None):
self.concept_name = concept_name
self.concept_id = concept_id
self.app_id = app_id
self.created_at = created_at
def dict(self):
c = {'id': self.concept_id,
'name': self.concept_name,
'created_at': self.created_at,
'app_id': self.app_id
}
return c
class ApiClient(object):
""" Handles auth and making requests for you.
The constructor for API access. You must sign up at developer.clarifai.com first and create an
application in order to generate your credentials for API access.
Args:
self: instance of ApiClient
app_id: the app_id for an application you've created in your Clarifai account.
app_secret: the app_secret for the same application.
base_url: Base URL of the API endpoints.
quiet: if True then silence debug prints.
"""
patch_actions = ['merge', 'remove', 'overwrite']
def __init__(self, app_id=None, app_secret=None, base_url=None, quiet=True):
homedir = os.environ['HOMEPATH'] if platform.system() == 'Windows' else os.environ['HOME']
CONF_FILE=os.path.join(homedir, '.clarifai', 'config')
if app_id is None:
if os.environ.get('CLARIFAI_APP_ID') and os.environ.get('CLARIFAI_APP_SECRET'):
logger.debug("Using env variables for id and secret")
app_id = os.environ['CLARIFAI_APP_ID']
app_secret = os.environ['CLARIFAI_APP_SECRET']
elif os.path.exists(CONF_FILE):
parser = ConfigParser()
parser.optionxform = str
with open(CONF_FILE, 'r') as fdr:
parser.readfp(fdr)
if parser.has_option('clarifai', 'CLARIFAI_APP_ID') and \
parser.has_option('clarifai', 'CLARIFAI_APP_SECRET'):
app_id = parser.get('clarifai', 'CLARIFAI_APP_ID')
app_secret = parser.get('clarifai', 'CLARIFAI_APP_SECRET')
else:
app_id = app_secret = ''
else:
app_id = app_secret = ''
if base_url is None:
if os.environ.get('CLARIFAI_API_BASE'):
base_url = os.environ.get('CLARIFAI_API_BASE')
elif os.path.exists(CONF_FILE):
parser = ConfigParser()
parser.optionxform = str
with open(CONF_FILE, 'r') as fdr:
parser.readfp(fdr)
if parser.has_option('clarifai', 'CLARIFAI_API_BASE'):
base_url = parser.get('clarifai', 'CLARIFAI_API_BASE')
else:
base_url = 'api.clarifai.com'
else:
base_url = 'api.clarifai.com'
self.app_id = app_id
self.app_secret = app_secret
if quiet:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.DEBUG)
parsed = urlparse(base_url)
scheme = 'https' if parsed.scheme == '' else parsed.scheme
base_url = parsed.path if not parsed.netloc else parsed.netloc
self.base_url = base_url
self.scheme = scheme
self.basev2 = urljoin(scheme + '://', base_url)
logger.debug("Base url: %s", self.basev2)
self.token = None
self.headers = None
# Make sure when you create a client, it's ready for requests.
self.get_token()
def get_token(self):
''' Get an access token using your app_id and app_secret.
You shouldn't need to call this method yourself. If there is no access token yet, this method
will be called when a request is made. If a token expires, this method will also automatically
be called to renew the token.
'''
data = {'grant_type': 'client_credentials'}
auth = (self.app_id, self.app_secret)
logger.debug("get_token: %s data: %s", self.basev2 + '/v2/token', data)
authurl = urljoin(self.scheme + '://', self.base_url, 'v2', 'token')
res = requests.post(authurl, auth=auth, data=data)
if res.status_code == 200:
logger.debug("Got V2 token: %s", res.json())
self.token = res.json()['access_token']
self.headers = {'Authorization': "Bearer %s" % self.token}
else:
raise TokenError("Could not get a new token for v2: %s", str(res.json()))
return res.json()
def set_token(self, token):
''' manually set the token to this client
You shouldn't need to call this unless you know what you are doing, because the client handles
the token generation and refresh for you. This is only intended for debugging purposes when you
want to verify a token obtained from somewhere else.
'''
self.token = token
def delete_token(self):
''' manually reset the token to empty
You shouldn't need to call this unless you know what you are doing, because the client handles
the token generation and refresh for you. This is only intended for debugging purposes when you
want to reset the token.
'''
self.token = None
def _check_token(self):
''' set the token when it is empty
This function is called at every API call to check if the token is set.
If it is not set, a token call will be issued and the token will be
refreshed.
'''
if self.token is None:
self.get_token()
def _requester(self, resource, params, method, version="v2", files=None):
''' Obtains info and verifies user via Token Decorator
Args:
resource:
params: parameters passed to the request
version: v1 or v2
method: GET or POST
Returns:
JSON from user request
'''
self._check_token()
url = urljoin(self.basev2, version, resource)
# only retry when status_code is not 200, while under the max number of attempts,
# and only under certain circumstances
status_code = 199
retry = True
attempts = 3
headers = {}
while status_code != 200 and attempts > 0 and retry is True:
logger.debug("=" * 100)
# mangle the base64 because it is too long
if params and params.get('inputs') and len(params['inputs']) > 0:
params_copy = copy.deepcopy(params)
for data in params_copy['inputs']:
data = data['data']
if data.get('image') and data['image'].get('base64'):
base64_bytes = data['image']['base64'][:10] + '......' + data['image']['base64'][-10:]
data['image']['base64'] = base64_bytes
else:
params_copy = params
# mangle the base64 because it is too long
logger.debug("%s %s\nHEADERS:\n%s\nPAYLOAD:\n%s",
method, url, pformat(headers), pformat(params_copy))
if method == 'GET':
headers = {'Content-Type': 'application/json',
'X-Clarifai-Client': 'python:%s' % CLIENT_VERSION,
'Authorization': self.headers['Authorization']}
res = requests.get(url, params=params, headers=headers)
elif method == "POST":
if files:
headers = {'Authorization': self.headers['Authorization'],
'X-Clarifai-Client': 'python:%s' % CLIENT_VERSION,
}
# Seek back to the start.
for f in files.itervalues():
f.seek(0)
res = requests.post(url, data=params, headers=headers, files=files)
else:
headers = {'Content-Type': 'application/json',
'X-Clarifai-Client': 'python:%s' % CLIENT_VERSION,
'Authorization': self.headers['Authorization']}
res = requests.post(url, data=json.dumps(params), headers=headers)
elif method == "DELETE":
headers = {'Content-Type': 'application/json',
'X-Clarifai-Client': 'python:%s' % CLIENT_VERSION,
'Authorization': self.headers['Authorization']}
res = requests.delete(url, data=json.dumps(params), headers=headers)
elif method == "PATCH":
headers = {'Content-Type': 'application/json',
'X-Clarifai-Client': 'python:%s' % CLIENT_VERSION,
'Authorization': self.headers['Authorization']}
res = requests.patch(url, data=json.dumps(params), headers=headers)
else:
raise UserError("Unsupported request type: '%s'" % method)
try:
js = res.json()
except Exception:
logger.exception("Could not get valid JSON from server response.")
logger.debug("\nRESULT:\n%s", str(res.content))
return res
logger.debug("\nRESULT:\n%s", pformat(json.loads(res.content.decode('utf-8'))))
status_code = res.status_code
attempts -= 1
# allow retry when token expires
if isinstance(js, dict) and js.get('status_code', None) == "TOKEN_EXPIRED":
self.get_token()
retry = True
continue
# handle Gateway Error, normally retry will solve the problem
if js['status']['code'] == 10020 and js['status']['description'] == 'Bad gateway':
retry = True
continue
# in other cases, error out without retrying
retry = False
if res.status_code != 200:
raise ApiError(resource, params, method, res)
return res.json()
def get(self, resource, params=None, version="v2"):
''' Authorized get from Clarifai's API. '''
return self._requester(resource, params, 'GET', version)
def post(self, resource, params=None, version="v2"):
''' Authorized post to Clarifai's API. '''
return self._requester(resource, params, 'POST', version)
def post_form(self, resource, params=None, version="v2"):
''' Authorized post to Clarifai's API. '''
return self._requester(resource, params=None, method='POST', version=version, files=params)
def delete(self, resource, params=None, version="v2"):
''' Authorized get from Clarifai's API. '''
return self._requester(resource, params, 'DELETE', version)
def patch(self, resource, params=None, version="v2"):
''' Authorized patch from Clarifai's API '''
return self._requester(resource, params, 'PATCH', version)
def add_inputs(self, objs):
''' Add a list of Images or Videos to an application.
Args:
obj: A list of Image or Video objects.
Returns:
raw JSON response from the API server, with a list of inputs and corresponding import status
'''
if not isinstance(objs, list):
raise UserError("objs must be a list")
if not isinstance(objs[0], (Image, Video)):
raise UserError("Not valid type of content to add. Must be Image or Video")
resource = "inputs"
data = {"inputs": [obj.dict() for obj in objs]}
res = self.post(resource, data)
return res
def search_inputs(self, query, page=1, per_page=20):
''' Search an application and get predictions (optional)
Args:
query: the JSON query object that complies with Clarifai RESTful API
page: the page of results to get, starts at 1.
per_page: number of results returned per page
Returns:
raw JSON response from the API server, with a list of inputs and corresponding ranking scores
'''
resource = "searches/"
# Similar image search and predictions
d = {'pagination': pagination(page, per_page).dict(),
'query': query
}
res = self.post(resource, d)
return res
def get_input(self, input_id):
''' Get a single image by its id.
Args:
input_id: the id of the Image.
Returns:
raw JSON response from the API server
HTTP code:
200 for Found
404 for Not Found
'''
resource = "inputs/%s" % input_id
res = self.get(resource)
return res
def get_inputs(self, page=1, per_page=20):
''' List all images for the Application, with pagination
Args:
page: the page of results to get, starts at 1.
per_page: number of results returned per page
Returns:
raw JSON response from the API server, with paginated list of inputs and corresponding status
'''
resource = "inputs"
d = {'page': page, 'per_page': per_page}
res = self.get(resource, d)
return res
def get_inputs_status(self):
''' Get counts of inputs in the Application.
Returns:
counts of the inputs, including processed, processing, etc. in JSON format.
'''
resource = "inputs/status"
res = self.get(resource)
return res
def delete_input(self, input_id):
''' Delete a single input by its id.
Args:
input_id: the id of the input
Returns:
status of the deletion, in JSON format.
'''
if not input_id:
raise UserError('cannot delete with empty input_id. \
use delete_all_inputs if you want to delete all')
resource = "inputs/%s" % input_id
res = self.delete(resource)
return res
def delete_inputs(self, input_ids):
''' bulk delete inputs with a list of input IDs
Args:
input_ids: the ids of the input, in a list
Returns:
status of the bulk deletion, in JSON format.
'''
resource = "inputs"
data = {"ids": [input_id for input_id in input_ids]}
res = self.delete(resource, data)
return res
def delete_all_inputs(self):
''' delete all inputs from the application
Returns:
status of the deletion, in JSON format.
'''
resource = "inputs"
data = {"delete_all":True}
res = self.delete(resource, data)
return res
def patch_inputs(self, action, inputs):
''' bulk update inputs, to delete or modify concepts
Args:
action: "merge" or "remove" or "overwrite"
inputs: a list of Image objects carrying the concept and/or metadata changes to apply
Returns:
the update status, in JSON format
'''
if action not in self.patch_actions:
raise UserError("action not supported.")
resource = "inputs"
data = {
"action": action,
"inputs": []
}
images = []
for img in inputs:
item = img.dict()
if not item.get('data'):
continue
new_item = copy.deepcopy(item)
for key in item['data'].keys():
if key not in ['concepts', 'metadata']:
del new_item['data'][key]
images.append(new_item)
data["inputs"] = images
res = self.patch(resource, data)
return res
def get_concept(self, concept_id):
''' Get a single concept by its id.
Args:
concept_id: unique id of the concept
Returns:
the concept in JSON format with HTTP 200 Status
or HTTP 404 with concept not found
'''
resource = "concepts/%s" % concept_id
res = self.get(resource)
return res
def get_concepts(self, page=1, per_page=20):
''' List all concepts for the Application.
Args:
page: the page of results to get, starts at 1.
per_page: number of results returned per page
Returns:
a list of concepts in JSON format
'''
resource = "concepts"
d = {'page': page, 'per_page': per_page}
res = self.get(resource, d)
return res
def add_concepts(self, concept_ids, concept_names):
''' Add a list of concepts
Args:
concept_ids: a list of concept id
concept_names: a list of concept name
Returns:
a list of concepts in JSON format along with the status code
'''
if not isinstance(concept_ids, list) or \
not isinstance(concept_names, list):
raise UserError('concept_ids and concept_names should both be lists')
if len(concept_ids) != len(concept_names):
raise UserError('length of concept id list should match length of the concept name list')
resource = "concepts"
d = {'concepts':[]}
for cid, cname in zip(concept_ids, concept_names):
if cname is None:
concept = {'id':cid}
else:
concept = {'id':cid,'name':cname}
d['concepts'].append(concept)
res = self.post(resource, d)
return res
def search_concepts(self, term, page=1, per_page=20):
''' Search concepts
Args:
term: search term with wildcards
page: the page of results to get, starts at 1.
per_page: number of results returned per page
Returns:
a list of concepts in JSON format along with the status code
'''
resource = "concepts/searches/"
# Similar image search and predictions
d = {'pagination': pagination(page, per_page).dict()}
d.update({
"concept_query": {
"name":term
}
})
res = self.post(resource, d)
return res
def get_models(self, page=1, per_page=20):
''' get all models with pagination
Args:
page: page number
per_page: number of models to return per page
Returns:
a list of models in JSON format
'''
resource = "models"
params = {'page': page,
'per_page': per_page
}
res = self.get(resource, params)
return res
def get_model(self, model_id=None):
''' get model basic info by model id
Args:
model_id: the unique identifier of the model
Returns:
the model info in JSON format
'''
resource = "models/%s" % model_id
res = self.get(resource)
return res
def get_model_output_info(self, model_id=None):
''' get model output info by model id
Args:
model_id: the unique identifier of the model
Returns:
the model info with output_info in JSON format
'''
resource = "models/%s/output_info" % model_id
res = self.get(resource)
return res
def get_model_versions(self, model_id, page=1, per_page=20):
''' get model versions
Args:
model_id: the unique identifier of the model
page: page number
per_page: the number of versions to return per page
Returns:
a list of model versions in JSON format
'''
resource = "models/%s/versions" % model_id
params = {'page': page,
'per_page': per_page
}
res = self.get(resource, params)
return res
def get_model_version(self, model_id, version_id):
''' get model info for a specific model version
Args:
model_id: the unique identifier of a model
version_id: the model version id
'''
resource = "models/%s/versions/%s" % (model_id, version_id)
res = self.get(resource)
return res
def delete_model_version(self, model_id, model_version):
''' delete a model version '''
resource = "models/%s/versions/%s" % (model_id, model_version)
res = self.delete(resource)
return res
def delete_model(self, model_id):
''' delete a model '''
resource = "models/%s" % model_id
res = self.delete(resource)
return res
def delete_all_models(self):
''' delete all models '''
resource = "models"
data = {"delete_all":True}
res = self.delete(resource, data)
return res
def get_model_inputs(self, model_id, version_id=None, page=1, per_page=20):
''' get inputs for the latest model or a specific model version '''
if not version_id:
resource = "models/%s/inputs?page=%d&per_page=%d" % \
(model_id, page, per_page)
else:
resource = "models/%s/version/%s/inputs?page=%d&per_page=%d" % \
(model_id, version_id, page, per_page)
res = self.get(resource)
return res
def search_models(self, name=None, model_type=None):
''' search model by name and type '''
resource = "models/searches"
if name is not None and model_type is not None:
data = {"model_query": {
"name": name,
"type": model_type
}
}
elif name is None and model_type is not None:
data = {"model_query": {
"type": model_type
}
}
elif name is not None and model_type is None:
data = {"model_query": {
"name": name
}
}
else:
data = {}
res = self.post(resource, data)
return res
def create_model(self, model_id, model_name=None, concepts=None, \
concepts_mutually_exclusive=False, \
closed_environment=False):
''' create custom model '''
if not model_name:
model_name = model_id
resource = "models"
data = {
"model": {
"id": model_id,
"name": model_name,
"output_info": {
"output_config": {
"concepts_mutually_exclusive": concepts_mutually_exclusive,
"closed_environment": closed_environment
}
}
}
}
if concepts:
data['model']['output_info']['data'] = { "concepts":
[{"id": concept} for concept in concepts]
}
res = self.post(resource, data)
return res
def patch_model(self, model, action='merge'):
if action not in self.patch_actions:
raise UserError("action not supported.")
resource = "models"
data = {
"action": action,
"models": [model]
}
res = self.patch(resource, data)
return res
def create_model_version(self, model_id):
''' train for a model '''
resource = "models/%s/versions" % model_id
res = self.post(resource)
return res
def predict_model(self, model_id, objs, version_id=None):
if version_id is None:
resource = "models/%s/outputs" % model_id
else:
resource = "models/%s/versions/%s/outputs" % (model_id, version_id)
if not isinstance(objs, list):
raise UserError("objs must be a list")
if not isinstance(objs[0], (Image, Video)):
raise UserError("Not valid type of content to add. Must be Image or Video")
data = {"inputs": [obj.dict() for obj in objs]}
res = self.post(resource, data)
return res
def predict_concepts(self, objs):
models = self.search_models(name='general-v1.3', model_type='concept')
model = models['models'][0]
model_id = model['id']
return self.predict_model(model_id, objs)
def predict_colors(self, objs):
models = self.search_models(name='color', model_type='color')
model = models['models'][0]
model_id = model['id']
return self.predict_model(model_id, objs)
def predict_embed(self, objs, model='general-v1.3'):
models = self.search_models(name=model, model_type='embed')
model = models['models'][0]
model_id = model['id']
return self.predict_model(model_id, objs)
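# A minimal sketch of using ApiClient directly (credentials are placeholders; most users
# should go through the higher-level Models/Inputs/Concepts wrappers instead):
#
#   api = ApiClient(app_id='YOUR_APP_ID', app_secret='YOUR_APP_SECRET')
#   api.get_models(page=1, per_page=20)
#   api.predict_concepts([Image(url='https://samples.clarifai.com/metro-north.jpg')])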
class pagination(object):
def __init__(self, page=1, per_page=20):
self.page = page
self.per_page = per_page
def dict(self):
return {'page': self.page, 'per_page': self.per_page}
class TokenError(Exception):
pass
class ApiError(Exception):
""" API Server error """
def __init__(self, resource, params, method, response):
self.resource = resource
self.params = params
self.method = method
self.response = response
msg = "%s %s FAILED. code: %d, reason: %s, response:%s" % (
method, resource, response.status_code, response.reason,
str(response))
super(ApiError, self).__init__(msg)
# def __str__(self):
# parent_str = super(ApiError, self).__str__()
# return parent_str + str(self.json)
class ApiClientError(Exception):
""" API Client Error """
pass
class UserError(Exception):
""" User Error """
pass
class ApiStatus(object):
""" Clarifai API Status Code """
def __init__(self, item):
self.code = item['code']
self.description = item['description']
def dict(self):
d = {'status': { 'code': self.code,
'description': self.description
}
}
return d
class ApiResponse(object):
""" Clarifai API Response """
def __init__(self):
self.status = None
class InputCounts(object):
""" input counts for upload status """
def __init__(self, item):
if not item.get('counts'):
raise ApiClientError('unable to initialize. need a dict with key=counts')
counts = item['counts']
self.processed = counts['processed']
self.to_process = counts['to_process']
self.errors = counts['errors']
def dict(self):
d = { 'counts': {
'processed': self.processed,
'to_process': self.to_process,
'errors': self.errors
}
}
return d
```
|
{
"source": "jdehotin/Clockworkfordynamo",
"score": 2
}
|
#### File: 0.9.x/python/Clipboard.SendTo.py
```python
import clr
import System
from System.Threading import Thread, ThreadStart
clr.AddReference("System.Windows.Forms")
def SetText(text):
def thread_proc():
System.Windows.Forms.Clipboard.SetText(text)
t = Thread(ThreadStart(thread_proc))
t.ApartmentState = System.Threading.ApartmentState.STA
t.Start()
try:
SetText(IN[0])
OUT = IN[0]
except:
OUT = 'Data could not be copied to clipboard'
```
#### File: 1.x/python/Buckyball.ByOriginAndRadius.py
```python
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
points = IN[0]
almostzero = IN[1]
struts = list()
# this function recursively finds all the pairs of points of the buckyball struts
def BuckyballStruts(points,struts):
firstpoint = points[0]
restofpoints = points[1:]
# measure distance between first point and rest of points
distances = [firstpoint.DistanceTo(x) for x in restofpoints]
# filter out all points that do not have a distance of 2 to the first point
strutpoints = list()
strutpointpairs = list()
i = 0
for dist in distances:
# use a little tolerance so we catch all struts
if dist > 2 - almostzero and dist < 2 + almostzero:
strutpoints.append(restofpoints[i])
strutpointpairs.append((firstpoint,restofpoints[i]))
i += 1
# add strutpointpairs to struts
if len(strutpointpairs) > 0: struts.extend(strutpointpairs)
# Continue processing the list recursively until there's only one point left. By always removing the first point from the list, we ensure that no duplicate struts are computed.
if len(restofpoints) > 1:
return BuckyballStruts(restofpoints,struts)
else: return (restofpoints,struts)
OUT = BuckyballStruts(points,struts)[1]
##### NEXT PYTHON NODE #####
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
struts = IN[0]
points = IN[1]
almostzero = IN[2]
def BuckyballFaces(struts,points,planes,almostzero,vertices):
firststrut = struts[0]
struts.pop(0)
# find the two adjacent struts
adjacent = list()
for strut in struts:
for point in strut:
if point.IsAlmostEqualTo(firststrut[0]):
adjacent.append(strut)
break
if len(adjacent) == 2:
break
# identify planes and find all vertices on planes
vlist = list()
for item in adjacent:
triangle = (firststrut[1],item[0],item[1])
pl = Plane.ByBestFitThroughPoints(triangle)
vlist = list()
for point in points:
dist = pl.DistanceTo(point)
if dist < almostzero and dist > -almostzero:
vlist.append(point)
newplane = (Plane.ByBestFitThroughPoints(vlist))
append_vertices = True
for pl in planes:
if newplane.IsAlmostEqualTo(pl):
append_vertices = False
if append_vertices:
vertices.append(vlist)
planes.append(newplane)
# let this function recursively call itself until it finds all planes
if len(planes) < 32:
return BuckyballFaces(struts,points,planes,almostzero,vertices)
else:
return (struts,points,planes,almostzero,vertices)
def OrderFaceIndices(p_ordered,p_unordered,almostzero):
i = 0;
for p in p_unordered:
dist = p_ordered[(len(p_ordered)-1)].DistanceTo(p)
if dist > 2-almostzero and dist < 2+almostzero:
p_ordered.append(p)
p_unordered.pop(i)
break
i += 1
if len(p_unordered) > 0:
return OrderFaceIndices(p_ordered,p_unordered,almostzero)
else:
return (p_ordered,p_unordered,almostzero)
vlist_unordered = BuckyballFaces(struts,points,list(),almostzero,list())[4]
vset_ordered = list()
for vset in vlist_unordered:
p_ordered = [vset[0]]
vset.pop(0)
vset_ordered.append(OrderFaceIndices(p_ordered,vset,almostzero))
vset_out = list()
for vset in vset_ordered:
vset_out.append(vset[0])
OUT = vset_out
```
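The comments in Buckyball.ByOriginAndRadius.py above rest on one geometric fact: two buckyball vertices are joined by a strut exactly when their distance equals the edge length 2, checked with a small tolerance. Below is a minimal standalone sketch of that distance-tolerance pairing on plain coordinate tuples (Python 3.8+ for math.dist), with no Dynamo or ProtoGeometry dependency; the sample points are made up.
```python
import math

def find_struts(points, target=2.0, tol=1e-6):
    """Return index pairs (i, j) whose Euclidean distance is within tol of target."""
    pairs = []
    for i in range(len(points)):
        for j in range(i + 1, len(points)):
            if abs(math.dist(points[i], points[j]) - target) < tol:
                pairs.append((i, j))
    return pairs

# Four made-up points; only two pairs sit exactly 2 units apart.
pts = [(0.0, 0.0, 0.0), (2.0, 0.0, 0.0), (0.0, 2.0, 0.0), (1.0, 1.0, 0.0)]
print(find_struts(pts))  # [(0, 1), (0, 2)]
```
The recursion in the original script reaches the same i < j de-duplication differently: it always removes the first point from the list before recursing, so no pair is ever visited twice.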
#### File: 1.x/python/Element.Geometry+.py
```python
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.ImportExtensions(Revit.GeometryConversion)
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
# this function takes care of exploding
# GeometryInstance objects. GI objects typically
# represent family instance geometry.
# in order to also catch possible nested families
# the function calls itself recursively.
def convert_geometry_instance(geo, elementlist):
for g in geo:
if str(g.GetType()) == 'Autodesk.Revit.DB.GeometryInstance':
elementlist = convert_geometry_instance(g.GetInstanceGeometry(), elementlist)
else:
try:
if g.Volume != 0:
elementlist.append(g)
except:
pass
return elementlist
doc = DocumentManager.Instance.CurrentDBDocument
items = UnwrapElement(IN[0])
if IN[1] == "Coarse": detail_lvl = ViewDetailLevel.Coarse
elif IN[1] == "Fine": detail_lvl = ViewDetailLevel.Fine
else: detail_lvl = ViewDetailLevel.Medium
inc_invis = IN[2]
view = UnwrapElement(IN[3])
inserts = UnwrapElement(IN[4])
remove_inserts = IN[5]
revitlist = list()
dynlist = list()
catlist = list()
# we might need a transaction in order to
# temporarily delete all inserts and retrieve gross wall areas
TransactionManager.Instance.EnsureInTransaction(doc)
trans = SubTransaction(doc)
trans.Start()
i = 0
for item in items:
if remove_inserts == True:
for insert in inserts[i]:
doc.Delete(insert.Id)
doc.Regenerate()
geo_options = Options()
if view == None: geo_options.DetailLevel = detail_lvl
geo_options.IncludeNonVisibleObjects = inc_invis
if view != None: geo_options.View = view
revitGeo = item.Geometry[geo_options]
try:
revit_geos = convert_geometry_instance(revitGeo, list())
revitlist.append(revit_geos)
dyn_geos = list()
cats = list()
for geo in revit_geos:
try:
dyn_geos.append(geo.ToProtoType())
except:
dyn_geos.append(None)
try:
graphstyle = doc.GetElement(geo.GraphicsStyleId)
if graphstyle != None:
cats.append(Revit.Elements.Category.ById(graphstyle.GraphicsStyleCategory.Id.IntegerValue))
else:
cats.append(None)
except:
cats.append(None)
dynlist.append(dyn_geos)
catlist.append(cats)
except:
revitlist.append(list())
dynlist.append(list())
catlist.append(list())
i += 1
trans.RollBack()
TransactionManager.Instance.TransactionTaskDone()
OUT = (dynlist,revitlist,catlist)
```
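convert_geometry_instance above is a depth-first flatten: GeometryInstance nodes are expanded recursively (to catch nested families) and only leaf geometry with a non-zero volume is kept. The Revit API is not available outside Dynamo, so the following is only a plain-Python analogue of that traversal shape, with nested lists standing in for GeometryInstance containers and made-up leaf values.
```python
def flatten_geometry(geo, collected=None):
    """Depth-first collection of leaf items from an arbitrarily nested structure."""
    if collected is None:
        collected = []
    for g in geo:
        if isinstance(g, list):   # stands in for a GeometryInstance to expand
            flatten_geometry(g, collected)
        elif g:                   # stands in for a solid with non-zero volume
            collected.append(g)
    return collected

print(flatten_geometry(['wall solid', ['door solid', ['handle solid']], '', 'window solid']))
# ['wall solid', 'door solid', 'handle solid', 'window solid']
```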
|
{
"source": "jdehotin/IFC4.3.x-development",
"score": 2
}
|
#### File: IFC4.3.x-development/scripts/to_bsdd.py
```python
import os
import re
import sys
import html
import json
from collections import defaultdict
from xmi_document import xmi_document
try:
fn = sys.argv[1]
try:
OUTPUT = open(sys.argv[2], "w", encoding='utf-8')
except IndexError as e:
OUTPUT = sys.stdout
except:
print("Usage: python to_po.py <schema.xml>", file=sys.stderr)
exit()
xmi_doc = xmi_document(fn)
bfn = os.path.basename(fn)
schema_name = xmi_doc.xmi.by_tag_and_type["packagedElement"]['uml:Package'][1].name.replace("exp", "").upper()
schema_name = "".join(["_", c][c.isalnum()] for c in schema_name)
schema_name = re.sub(r"_+", "_", schema_name)
schema_name = schema_name.strip('_')
structure = {
'Domain': {
'Name': 'IFC',
'Version': schema_name,
'Classifications': None
}
}
def yield_parents(node):
yield node
if node.parentNode:
yield from yield_parents(node.parentNode)
def get_path(xmi_node):
nodes = list(yield_parents(xmi_node.xml))
def get_name(n):
if n.attributes:
v = n.attributes.get('name')
if v: return v.value
node_names = [get_name(n) for n in nodes]
return node_names[::-1]
included_packages = set(("IFC 4.2 schema (13.11.2019)", "Common Schema", "IFC Ports and Waterways", "IFC Road", "IFC Rail - PSM"))
def skip_by_package(element):
return not (set(get_path(xmi_doc.by_id[element.idref])) & included_packages)
HTML_TAG_PATTERN = re.compile('<.*?>')
MULTIPLE_SPACE_PATTERN = re.compile(r'\s+')
def strip_html(s):
S = html.unescape(s or '')
i = S.find('\n')
return re.sub(HTML_TAG_PATTERN, '', S)
def format(s):
return re.sub(MULTIPLE_SPACE_PATTERN, ' ', ''.join([' ', c][c.isalnum() or c in '.,'] for c in s)).strip()
def generalization(pe):
try:
P = xmi_doc.xmi.by_id[(pe|"generalization").general]
except:
P = None
if P: return generalization(P)
else: return pe
def generate_definitions():
"""
A generator that yields tuples of <a, b> with
a: location in file
a: a fully qualifying key as tuple
b: the documentation string
"""
make_defaultdict = lambda: defaultdict(make_defaultdict)
classifications = defaultdict(make_defaultdict)
def get_parent_of_pt(enum_type):
enum_id = enum_type.idref
type_refs = []
for assoc in xmi_doc.xmi.by_tag_and_type["packagedElement"]["uml:Association"]:
try:
c1, c2 = assoc/'ownedEnd'
except ValueError as e:
# print("encountered exception `%s' on %s" % (e, assoc))
continue
assoc_type_refs = set(map(lambda c: (c|"type").idref, (c1, c2)))
if enum_id in assoc_type_refs:
other_idref = list(assoc_type_refs - {enum_id})[0]
type_refs.append(xmi_doc.xmi.by_id[other_idref].name)
# @todo filter this based on inheritance hierarchy
type_refs_without_type = [s for s in type_refs if 'Type' not in s]
if len(type_refs_without_type) != 1:
print("WARNING:", len(type_refs_without_type), "type associations on", enum_type.name, file=sys.stderr)
return type_refs_without_type[0] if type_refs_without_type else None
class_name_to_node = {}
by_id = {}
# psets are deferred to the end so that all ids are resolved
psets = []
for item in xmi_doc:
if item.type == "ENUM" and item.stereotype == "PREDEFINED_TYPE":
p = get_parent_of_pt(item.node)
if p:
for c in item.children:
by_id[c.id] = di = classifications[p + "." + c.name]
di["Parent"] = p
di['Description'] = format(strip_html(c.documentation))
elif item.type == "PSET":
psets.append(item)
elif item.type == "ENTITY":
by_id[item.id] = di = classifications[item.name]
st = item.meta.get('supertypes', [])
if st:
di['Parent'] = st[0]
di['Description'] = format(strip_html(item.documentation))
for item in psets:
refs = item.meta.get('refs', [])
for id in refs:
di = by_id.get(id)
if di is None:
try:
log_attr_2 = xmi_doc.xmi.by_id[id].name
except KeyError as e:
log_attr_2 = id
print("WARNING: id %s not found" % id)
pass
print("WARNING: for %s entity %s not emitted" % (item.name, log_attr_2))
continue
for a in item.children:
type_name = "PEnum_" + a.name
# @todo why is this lookup by name?
enum_types_by_name = [c for c in xmi_doc.xmi.by_tag_and_type["packagedElement"]["uml:Class"] if c.name == type_name]
if len(enum_types_by_name) == 1:
type_values = [x.name for x in enum_types_by_name[0]/"ownedLiteral"]
else:
type_values = None
try:
pe_type = xmi_doc.xmi.by_id[(xmi_doc.xmi.by_id[a.node.idref]|"type").idref]
pe_type_name = pe_type.name
root_generalization = generalization(pe_type)
type_name = root_generalization.name.lower()
except ValueError as e:
print("WARNING:", a.name, "has no associated type", file=sys.stderr)
type_name = 'any'
continue
di["Psets"][item.name]["Properties"][a.name]['type'] = type_name
di["Psets"][item.name]["Properties"][a.name]["Description"] = format(strip_html(a.documentation))
type_to_values = {
'boolean': ['TRUE','FALSE'],
'logical': ['TRUE','FALSE','UNKNOWN'],
}
if type_values is None:
type_values = type_to_values.get(type_name)
if type_values:
di["Psets"][item.name]["Properties"][a.name]['values'] = type_values
return classifications
def filter_definition(di):
children = defaultdict(list)
for k, v in di.items():
if v.get("Parent"):
children[v.get("Parent")].append(k)
def parents(k):
yield k
v = di.get(k)
if v and v.get('Parent'):
yield from parents(v.get('Parent'))
def child_or_self_has_psets(k):
if di.get(k, {}).get("Psets"):
return True
for c in children[k]:
if child_or_self_has_psets(c):
return True
return False
def has_child(k):
def has_child_(k2):
if k2 == k: return True
if not children[k2]: return False
return any(has_child_(c) for c in children[k2])
return has_child_
def should_include(k, v):
return ("IfcProduct" in parents(k)) or has_child("IfcProduct")(k) or child_or_self_has_psets(k)
return {k: v for k, v in di.items() if should_include(k, v)}
def embed_in_structure(di):
d = {}
d.update(structure)
d["Domain"]['Classifications'] = di
return d
json.dump(embed_in_structure(filter_definition(generate_definitions())), OUTPUT, indent=2)
```
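filter_definition above walks the inheritance graph in both directions: an entity is kept when IfcProduct is among its ancestors, when IfcProduct appears among its descendants, or when property sets occur anywhere in its subtree. The toy sketch below reproduces only the ancestor and psets-in-subtree checks on a hand-made dictionary; the entries and the Pset name are invented for illustration.
```python
from collections import defaultdict

di = {
    'IfcRoot':    {},
    'IfcProduct': {'Parent': 'IfcRoot'},
    'IfcWall':    {'Parent': 'IfcProduct', 'Psets': {'Pset_WallCommon': {}}},
    'IfcActor':   {'Parent': 'IfcRoot'},
}
children = defaultdict(list)
for k, v in di.items():
    if v.get('Parent'):
        children[v['Parent']].append(k)

def ancestors_and_self(k):
    while k:
        yield k
        k = di.get(k, {}).get('Parent')

def subtree_has_psets(k):
    if di.get(k, {}).get('Psets'):
        return True
    return any(subtree_has_psets(c) for c in children[k])

kept = {k for k in di if 'IfcProduct' in ancestors_and_self(k) or subtree_has_psets(k)}
print(sorted(kept))  # ['IfcProduct', 'IfcRoot', 'IfcWall']
```
Note that IfcRoot survives only because a descendant (IfcWall) carries property sets, while IfcActor, meeting neither condition, is dropped.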
#### File: IFC4.3.x-development/scripts/UML_utils.py
```python
import os
import subprocess
import json
import sys
class tex_ulm_object():
def __init__(self,texfilename):
self.data = {}
self.tex_file = open(texfilename+".tex", "x")
self.tex_file_name = texfilename
self.tex_content = r''' \documentclass{article}
\usepackage{tikz-uml}
\usetikzlibrary{positioning}
\begin{document}
\hoffset=-1in
\voffset=-1in
\setbox0\hbox{\begin{tabular}{cccc}
\begin{tikzpicture} '''
self.tex_meta = set()
def get_data(self,entity_name,schema):
# Get data
data = {}
for d in schema:
if d['IFCtype'] == 'ENTITY':
if d['name'] == entity_name:
data['attributes'] = d['attributes']
data['subtypes'] = d['subtypes']
data['supertypes'] = d['supertypes']
data['is_abstract'] = d['is_abstract']
if d['IFCtype'] == 'ENUM':
if d['name'] == entity_name:
data['name'] = d['name']
data['values'] = d['values']
if d['IFCtype'] == 'TYPE':
if d['name'] == entity_name:
data['super'] = d['super']
data['name'] = d['name']
return data
def make_connection(self,source,target,connection_type="assoc",stereo="vec"):
if connection_type == 'aggreg':
tex_content = r''' \umluniaggreg[arg2=a, mult2=1, pos2=0.9]{%s}{%s}'''%(source,target)
if connection_type == 'assoc':
tex_content = r''' \umluniassoc[geometry=-|, arg1=x, mult1=1, pos1=1.9, arg2=y, mult2=*, pos2=0.2,stereo=%s,pos stereo=1.4]{%s}{%s} '''%(stereo,source,target)
if connection_type == 'uni':
tex_content = r''' \umlunicompo[arg=z, mult=1..*, pos=0.8, angle1=-90, angle2=-140, loopsize=2cm]{%s}{%s}'''%(source,target)
self.tex_content += tex_content
def write_type_class(self,type_name,schema,xpos=2,ypos=2,make_connections=True):
if ' ' in type_name:
type_name = type_name.split(' ')
type_name = type_name[-1]
type_name = type_name[:-1]
# Build content
if type_name not in self.tex_meta:
tex_content = r''' \umlsimpleclass[x=%s,y=%s,type=typedef]{%s}'''%(xpos,ypos,type_name)
self.tex_content += tex_content
self.tex_meta.add(type_name)
if make_connections:
x=5
y=7
# Get data
data = self.get_data(type_name,schema)
supertype = data['super']
self.write_type_class(supertype,schema,make_connections=False,xpos=x,ypos=y)
self.make_connection(type_name,supertype)
def write_enum_class(self,enum_name,schema,xpos=2,ypos=2):
# Get data
if ' ' in enum_name:
enum_name = enum_name.split(' ')
enum_name = enum_name[-1][:-1]
data = self.get_data(enum_name,schema)
values = data['values']
# Build content
attribute_content = ""
i = 0
for value in values:
value = value.replace('_',' ')
if i != len(values) - 1:
attribute_content += value + r'\\'
else:
attribute_content += value
i += 1
print('X',xpos,'\n')
print('Y',ypos,'\n')
print('enum_name',enum_name,'\n')
print('ac',attribute_content,'\n')
tex_content = r''' \umlclass[x=%s, y=%s, width=15ex, anchor=north,type=enum]{%s}{%s}{} '''%(xpos,ypos,enum_name,attribute_content)
self.tex_content += tex_content
self.tex_meta.add(enum_name)
def write_class(self,entity_name,schema,make_connections=True,xpos=2,ypos=2,relativepos=None,previous_class=None):
        # Register class name
# self.tex_meta[entity_name] = []
# Get data
data = self.get_data(entity_name,schema)
attributes = data['attributes']
subtypes = data['subtypes']
supertypes = data['supertypes']
is_abstract = data['is_abstract']
# Determine tex properties
# xpos = 2
# ypos = 2
anchor = 'north'
other_properties =[]
# Register tex class
entity_dict = {}
entity_dict['type'] = 'ENTITY'
entity_dict['classname'] = entity_name
entity_dict['x'] = xpos
entity_dict['y'] = ypos
# Build content
# todo: derived attributes!
attribute_content = ""
i = 0
y = 5
x = 5
for attribute in attributes:
y += 5
x += 5
if 'Enum' in attribute:
self.write_enum_class(attribute,schema,xpos=x,ypos=y)
else:
self.write_type_class(attribute,schema,make_connections=False,xpos=x,ypos=y)
if i != len(attributes) - 1:
attribute_content += attribute + r'\\'
else:
attribute_content += attribute
i += 1
if is_abstract:
if relativepos!=None:
command = "right="+str(relativepos)+"cm of "+previous_class+".north"
tex_content = r''' \umlclass[%s, width=15ex, anchor=north,type=abstract]{%s}{%s}{} '''%(command,entity_name,attribute_content)
else:
tex_content = r''' \umlclass[x=%s, y=%s, width=15ex, anchor=north,type=abstract]{%s}{%s}{} '''%(xpos,ypos,entity_name,attribute_content)
else:
if relativepos!=None:
command = "right="+str(relativepos)+"cm of "+previous_class+".north"
tex_content = r''' \umlclass[%s, width=15ex, anchor=north]{%s}{%s}{} '''%(command,entity_name,attribute_content)
else:
tex_content = r''' \umlclass[x=%s, y=%s, width=15ex, anchor=north]{%s}{%s}{} '''%(xpos,ypos,entity_name,attribute_content)
self.tex_content += tex_content
self.tex_meta.add(entity_name)
if make_connections:
# Children classes
y = -4
x = -15
j = 0
prev_class = subtypes[j]
for supertype in supertypes:
j += 1
# y -= 5
# x += 5
if j != 1:
prev_class = supertypes[j-2]
self.write_class(supertype,schema,make_connections=False,relativepos=5,previous_class=prev_class)
else:
self.write_class(supertype,schema,make_connections=False,xpos=x,ypos=y)
self.make_connection(entity_name,supertype)
# Parent class
y = 8
#x = -3
for subtype in subtypes:
self.write_class(subtype,schema,make_connections=False,xpos=x,ypos=y)
self.make_connection(subtype,entity_name)
for attribute in attributes:
attribute = attribute.split(' ')
stereo = attribute[0]
attribute = attribute[-1][:-1]
self.make_connection(entity_name,attribute,stereo=stereo)
def generate_pdf(self):
self.tex_content += r''' \end{tikzpicture}
\end{tabular}}
\pdfpageheight=\dimexpr\ht0+\dp0\relax
\pdfpagewidth=\wd0
\shipout\box0
\stop
\end{document}'''
self.tex_file.write(self.tex_content)
self.tex_file.close()
subprocess.call(['pdflatex',self.tex_file_name+'.tex'])
######TEST######
# print(sys.argv[1])
name_tex_file = sys.argv[1]
with open('ifcschema2.json', 'r') as fp:
data_schema = json.load(fp)
tex_object = tex_ulm_object(name_tex_file)
tex_object.write_class('IfcWindow',data_schema)
#tex_object.write_enum_class('IfcWindowTypePartitioningEnum',data_schema)
#tex_object.write_type_class('IfcLuminousFluxMeasure',data_schema)
tex_object.generate_pdf()
```
|
{
"source": "jdehotin/TensorFlow",
"score": 2
}
|
#### File: python/kernel_tests/bijector_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops.bijector import _Exp
from tensorflow.contrib.distributions.python.ops.bijector import _Identity
from tensorflow.contrib.distributions.python.ops.bijector import _ShiftAndScale
class IdentityBijectorTest(tf.test.TestCase):
"""Tests the correctness of the Y = g(X) = X transformation."""
def testBijector(self):
with self.test_session():
bijector = _Identity()
self.assertEqual("Identity", bijector.name)
x = [[[0.],
[1.]]]
self.assertAllEqual(x, bijector.forward(x).eval())
self.assertAllEqual(x, bijector.inverse(x).eval())
self.assertAllEqual(0., bijector.inverse_log_det_jacobian(x).eval())
rev, jac = bijector.inverse_and_inverse_log_det_jacobian(x)
self.assertAllEqual(x, rev.eval())
self.assertAllEqual(0., jac.eval())
class ExpBijectorTest(tf.test.TestCase):
"""Tests the correctness of the Y = g(X) = exp(X) transformation."""
def testBijector(self):
with self.test_session():
bijector = _Exp(event_ndims=1)
self.assertEqual("Exp", bijector.name)
x = [[[1.],
[2.]]]
self.assertAllClose(np.exp(x), bijector.forward(x).eval())
self.assertAllClose(np.log(x), bijector.inverse(x).eval())
self.assertAllClose([[0., -math.log(2.)]],
bijector.inverse_log_det_jacobian(x).eval())
rev, jac = bijector.inverse_and_inverse_log_det_jacobian(x)
self.assertAllClose(np.log(x), rev.eval())
self.assertAllClose([[0., -math.log(2.)]], jac.eval())
class _ShiftAndScaleBijectorTest(tf.test.TestCase):
def testProperties(self):
with self.test_session():
mu = -1.
sigma = 2.
bijector = _ShiftAndScale(loc=mu, scale=sigma)
self.assertEqual("ShiftAndScale", bijector.name)
def testNoBatchScalar(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = tf.placeholder(tf.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
sigma = 2. # Scalar.
bijector = _ShiftAndScale(loc=mu, scale=sigma)
self.assertEqual(0, bijector.shaper.batch_ndims.eval()) # "no batches"
self.assertEqual(0, bijector.shaper.event_ndims.eval()) # "is scalar"
x = [1., 2, 3] # Three scalar samples (no batches).
self.assertAllClose([1., 3, 5], run(bijector.forward, x))
self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
self.assertAllClose([-math.log(2.)],
run(bijector.inverse_log_det_jacobian, x))
def testWeirdSampleNoBatchScalar(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = tf.placeholder(tf.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
sigma = 2. # Scalar.
bijector = _ShiftAndScale(loc=mu, scale=sigma)
self.assertEqual(0, bijector.shaper.batch_ndims.eval()) # "no batches"
self.assertEqual(0, bijector.shaper.event_ndims.eval()) # "is scalar"
x = [[1., 2, 3],
[4, 5, 6]] # Weird sample shape.
self.assertAllClose([[1., 3, 5],
[7, 9, 11]],
run(bijector.forward, x))
self.assertAllClose([[1., 1.5, 2.],
[2.5, 3, 3.5]],
run(bijector.inverse, x))
self.assertAllClose([-math.log(2.)],
run(bijector.inverse_log_det_jacobian, x))
def testOneBatchScalar(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = tf.placeholder(tf.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1.]
sigma = [1.] # One batch, scalar.
bijector = _ShiftAndScale(loc=mu, scale=sigma)
self.assertEqual(
1, bijector.shaper.batch_ndims.eval()) # "one batch dim"
self.assertEqual(
0, bijector.shaper.event_ndims.eval()) # "is scalar"
x = [1.] # One sample from one batches.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.], run(bijector.inverse, x))
self.assertAllClose([0.],
run(bijector.inverse_log_det_jacobian, x))
def testTwoBatchScalar(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = tf.placeholder(tf.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
sigma = [1., 1] # Univariate, two batches.
bijector = _ShiftAndScale(loc=mu, scale=sigma)
self.assertEqual(
1, bijector.shaper.batch_ndims.eval()) # "one batch dim"
self.assertEqual(
0, bijector.shaper.event_ndims.eval()) # "is scalar"
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([2., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose([0., 0],
run(bijector.inverse_log_det_jacobian, x))
def testNoBatchMultivariate(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = tf.placeholder(tf.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
sigma = np.eye(2, dtype=np.float32)
bijector = _ShiftAndScale(loc=mu, scale=sigma, event_ndims=1)
self.assertEqual(0, bijector.shaper.batch_ndims.eval()) # "no batches"
self.assertEqual(1, bijector.shaper.event_ndims.eval()) # "is vector"
x = [1., 1]
self.assertAllClose([2., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose([0.], run(bijector.inverse_log_det_jacobian, x))
x = [[1., 1],
[-1., -1]]
self.assertAllClose([[2., 0],
[0, -2]],
run(bijector.forward, x))
self.assertAllClose([[0., 2],
[-2., 0]],
run(bijector.inverse, x))
self.assertAllClose([0.], run(bijector.inverse_log_det_jacobian, x))
# When mu is a scalar and x is multivariate then the location is
# broadcast.
for run in (static_run, dynamic_run):
mu = 1.
sigma = np.eye(2, dtype=np.float32)
bijector = _ShiftAndScale(loc=mu, scale=sigma, event_ndims=1)
self.assertEqual(0, bijector.shaper.batch_ndims.eval()) # "no batches"
self.assertEqual(1, bijector.shaper.event_ndims.eval()) # "is vector"
x = [1., 1]
self.assertAllClose([2., 2], run(bijector.forward, x))
self.assertAllClose([0., 0], run(bijector.inverse, x))
self.assertAllClose([0.], run(bijector.inverse_log_det_jacobian, x))
x = [[1., 1]]
self.assertAllClose([[2., 2]], run(bijector.forward, x))
self.assertAllClose([[0., 0]], run(bijector.inverse, x))
self.assertAllClose([0.], run(bijector.inverse_log_det_jacobian, x))
def testNoBatchMultivariateFullDynamic(self):
with self.test_session() as sess:
x = tf.placeholder(tf.float32, name="x")
mu = tf.placeholder(tf.float32, name="mu")
sigma = tf.placeholder(tf.float32, name="sigma")
event_ndims = tf.placeholder(tf.int32, name="event_ndims")
x_value = np.array([[1., 1]], dtype=np.float32)
mu_value = np.array([1., -1], dtype=np.float32)
sigma_value = np.eye(2, dtype=np.float32)
event_ndims_value = np.array(1, dtype=np.int32)
feed_dict = {x: x_value, mu: mu_value, sigma: sigma_value, event_ndims:
event_ndims_value}
bijector = _ShiftAndScale(loc=mu, scale=sigma, event_ndims=event_ndims)
self.assertEqual(0, sess.run(bijector.shaper.batch_ndims, feed_dict))
self.assertEqual(1, sess.run(bijector.shaper.event_ndims, feed_dict))
self.assertAllClose([[2., 0]], sess.run(bijector.forward(x), feed_dict))
self.assertAllClose([[0., 2]], sess.run(bijector.inverse(x), feed_dict))
self.assertAllClose(
[0.], sess.run(bijector.inverse_log_det_jacobian(x), feed_dict))
def testBatchMultivariate(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value, dtype=np.float32)
x = tf.placeholder(tf.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [[1., -1]]
sigma = np.array([np.eye(2, dtype=np.float32)])
bijector = _ShiftAndScale(loc=mu, scale=sigma, event_ndims=1)
self.assertEqual(
1, bijector.shaper.batch_ndims.eval()) # "one batch dim"
self.assertEqual(
1, bijector.shaper.event_ndims.eval()) # "is vector"
x = [[[1., 1]]]
self.assertAllClose([[[2., 0]]], run(bijector.forward, x))
self.assertAllClose([[[0., 2]]], run(bijector.inverse, x))
self.assertAllClose([0.], run(bijector.inverse_log_det_jacobian, x))
def testBatchMultivariateFullDynamic(self):
with self.test_session() as sess:
x = tf.placeholder(tf.float32, name="x")
mu = tf.placeholder(tf.float32, name="mu")
sigma = tf.placeholder(tf.float32, name="sigma")
event_ndims = tf.placeholder(tf.int32, name="event_ndims")
x_value = np.array([[[1., 1]]], dtype=np.float32)
mu_value = np.array([[1., -1]], dtype=np.float32)
sigma_value = np.array([np.eye(2, dtype=np.float32)])
event_ndims_value = np.array(1, dtype=np.int32)
feed_dict = {x: x_value, mu: mu_value, sigma: sigma_value,
event_ndims: event_ndims_value}
bijector = _ShiftAndScale(loc=mu, scale=sigma, event_ndims=event_ndims)
self.assertEqual(1, sess.run(bijector.shaper.batch_ndims, feed_dict))
self.assertEqual(1, sess.run(bijector.shaper.event_ndims, feed_dict))
self.assertAllClose([[[2., 0]]], sess.run(bijector.forward(x), feed_dict))
self.assertAllClose([[[0., 2]]], sess.run(bijector.inverse(x), feed_dict))
self.assertAllClose(
[0.], sess.run(bijector.inverse_log_det_jacobian(x), feed_dict))
if __name__ == "__main__":
tf.test.main()
```
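Every expected value in the _ShiftAndScale tests above follows from the affine map y = mu + sigma * x, its inverse x = (y - mu) / sigma, and an inverse log-det-Jacobian of -log|sigma| per event dimension. Here is a plain NumPy re-check of the scalar case from testNoBatchScalar; it reproduces only the arithmetic, not the TensorFlow op.
```python
import numpy as np

mu, sigma = -1.0, 2.0
x = np.array([1.0, 2.0, 3.0])      # the three scalar samples from the test

forward = mu + sigma * x           # [1., 3., 5.]  matches run(bijector.forward, x)
inverse = (x - mu) / sigma         # [1., 1.5, 2.] matches run(bijector.inverse, x)
ildj = -np.log(np.abs(sigma))      # -log(2)       matches inverse_log_det_jacobian

print(forward, inverse, ildj)
```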
#### File: python/kernel_tests/kullback_leibler_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class KLTest(tf.test.TestCase):
def testRegistration(self):
class MyDist(tf.contrib.distributions.Normal):
pass
# Register KL to a lambda that spits out the name parameter
@tf.contrib.distributions.RegisterKL(MyDist, MyDist)
def _kl(unused_a, unused_b, name=None): # pylint: disable=unused-variable
return name
a = MyDist(mu=0.0, sigma=1.0)
# Run kl() with allow_nan=True because strings can't go through is_nan.
self.assertEqual(
"OK", tf.contrib.distributions.kl(a, a, allow_nan=True, name="OK"))
def testDomainErrorExceptions(self):
class MyDistException(tf.contrib.distributions.Normal):
pass
# Register KL to a lambda that spits out the name parameter
@tf.contrib.distributions.RegisterKL(MyDistException, MyDistException)
# pylint: disable=unused-variable
def _kl(unused_a, unused_b, name=None): # pylint: disable=unused-argument
return tf.identity([float("nan")])
# pylint: disable=unused-variable
with self.test_session():
a = MyDistException(mu=0.0, sigma=1.0)
kl = tf.contrib.distributions.kl(a, a)
with self.assertRaisesOpError(
"KL calculation between .* and .* returned NaN values"):
kl.eval()
kl_ok = tf.contrib.distributions.kl(a, a, allow_nan=True)
self.assertAllEqual([float("nan")], kl_ok.eval())
def testRegistrationFailures(self):
with self.assertRaisesRegexp(TypeError, "is not a subclass of"):
tf.contrib.distributions.RegisterKL(
tf.contrib.distributions.Normal, object)(lambda x: x)
with self.assertRaisesRegexp(TypeError, "is not a subclass of"):
tf.contrib.distributions.RegisterKL(
object, tf.contrib.distributions.Normal)(lambda x: x)
class MyDist(tf.contrib.distributions.Normal):
pass
with self.assertRaisesRegexp(TypeError, "must be callable"):
tf.contrib.distributions.RegisterKL(MyDist, MyDist)("blah")
# First registration is OK
tf.contrib.distributions.RegisterKL(MyDist, MyDist)(lambda a, b: None)
# Second registration fails
with self.assertRaisesRegexp(ValueError, "has already been registered"):
tf.contrib.distributions.RegisterKL(MyDist, MyDist)(lambda a, b: None)
if __name__ == "__main__":
tf.test.main()
```
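RegisterKL, as exercised above, is a registration decorator: it files a KL routine under a (distribution class, distribution class) key, rejects non-callables, and refuses duplicate registrations. Below is a stripped-down sketch of that pattern in plain Python; the class names and error messages are simplified stand-ins, not the TensorFlow implementation.
```python
_KL_REGISTRY = {}

class RegisterKL(object):
    def __init__(self, dist_a, dist_b):
        self._key = (dist_a, dist_b)
    def __call__(self, kl_fn):
        if not callable(kl_fn):
            raise TypeError("kl_fn must be callable")
        if self._key in _KL_REGISTRY:
            raise ValueError("%s has already been registered" % (self._key,))
        _KL_REGISTRY[self._key] = kl_fn
        return kl_fn

class Normal(object):
    pass

@RegisterKL(Normal, Normal)
def _kl_normal_normal(a, b):
    return "kl(Normal, Normal)"

print(_KL_REGISTRY[(Normal, Normal)](None, None))  # kl(Normal, Normal)
try:
    RegisterKL(Normal, Normal)(lambda a, b: None)  # second registration fails
except ValueError as err:
    print(err)
```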
#### File: python/kernel_tests/operator_pd_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# For private members.
from tensorflow.contrib.distributions.python.ops import operator_pd
distributions = tf.contrib.distributions
class OperatorShape(operator_pd.OperatorPDBase):
"""Operator implements the ABC method ._shape."""
def __init__(self, shape):
self._stored_shape = shape
@property
def verify_pd(self):
return True
def get_shape(self):
return tf.TensorShape(self._stored_shape)
def _shape(self):
return tf.shape(np.random.rand(*self._stored_shape))
@property
def name(self):
return "OperatorShape"
def dtype(self):
return tf.int32
@property
def inputs(self):
return []
class OperatorSqrtSolve(OperatorShape):
"""Operator implements .sqrt_solve."""
def __init__(self, chol_array):
self._chol = tf.convert_to_tensor(chol_array)
super(OperatorSqrtSolve, self).__init__(chol_array.shape)
def _sqrt_solve(self, rhs):
return tf.matrix_triangular_solve(self._chol, rhs, lower=True)
def _batch_sqrt_solve(self, rhs):
return tf.matrix_triangular_solve(self._chol, rhs, lower=True)
def _inv_quadratic_form_on_vectors(self, x):
return self._iqfov_via_sqrt_solve(x)
class OperatorSolve(OperatorShape):
"""Operator implements .solve."""
def __init__(self, chol):
self._pos_def_matrix = tf.batch_matmul(chol, chol, adj_y=True)
super(OperatorSolve, self).__init__(chol.shape)
def _solve(self, rhs):
return tf.matrix_solve(self._pos_def_matrix, rhs)
def _batch_solve(self, rhs):
return tf.matrix_solve(self._pos_def_matrix, rhs)
def _inv_quadratic_form_on_vectors(self, x):
return self._iqfov_via_solve(x)
class OperatorPDBaseTest(tf.test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def _random_cholesky_array(self, shape):
mat = self._rng.rand(*shape)
chol = distributions.matrix_diag_transform(mat, transform=tf.nn.softplus)
# Zero the upper triangle because we're using this as a true Cholesky factor
# in our tests.
return tf.matrix_band_part(chol, -1, 0).eval()
def _numpy_inv_quadratic_form_on_vectors(self, chol, x):
# Numpy works with batches now (calls them "stacks").
x_expanded = np.expand_dims(x, -1)
whitened = np.linalg.solve(chol, x_expanded)
return (whitened**2).sum(axis=-1).sum(axis=-1)
def test_all_shapes_methods_defined_by_the_one_abstractproperty_shape(self):
shape = (1, 2, 3, 3)
with self.test_session():
operator = OperatorShape(shape)
self.assertAllEqual(shape, operator.shape().eval())
self.assertAllEqual(4, operator.rank().eval())
self.assertAllEqual((1, 2), operator.batch_shape().eval())
self.assertAllEqual((1, 2, 3), operator.vector_shape().eval())
self.assertAllEqual(3, operator.vector_space_dimension().eval())
self.assertEqual(shape, operator.get_shape())
self.assertEqual((1, 2), operator.get_batch_shape())
self.assertEqual((1, 2, 3), operator.get_vector_shape())
def test_iqfov_x_rank_same_as_broadcast_rank_using_sqrt_solve(self):
with self.test_session():
for batch_shape in [(), (2,)]:
for k in [1, 3]:
x_shape = batch_shape + (k,)
x = self._rng.randn(*x_shape)
chol_shape = batch_shape + (k, k)
chol = self._random_cholesky_array(chol_shape)
operator = OperatorSqrtSolve(chol)
qf = operator.inv_quadratic_form_on_vectors(x)
self.assertEqual(batch_shape, qf.get_shape())
numpy_qf = self._numpy_inv_quadratic_form_on_vectors(chol, x)
self.assertAllClose(numpy_qf, qf.eval())
def test_iqfov_x_rank_greater_than_broadcast_rank_using_sqrt_solve(self):
with self.test_session():
for batch_shape in [(), (2,), (2, 3)]:
for k in [1, 4]:
x_shape = batch_shape + (k,)
x = self._rng.randn(*x_shape)
# chol will not have the leading dimension.
chol_shape = batch_shape[1:] + (k, k)
chol = self._random_cholesky_array(chol_shape)
operator = OperatorSqrtSolve(chol)
qf = operator.inv_quadratic_form_on_vectors(x)
numpy_qf = self._numpy_inv_quadratic_form_on_vectors(chol, x)
self.assertEqual(batch_shape, qf.get_shape())
self.assertAllClose(numpy_qf, qf.eval())
def test_iqfov_x_rank_two_greater_than_broadcast_rank_using_sqrt_solve(self):
with self.test_session():
for batch_shape in [(2, 3), (2, 3, 4), (2, 3, 4, 5)]:
for k in [1, 4]:
x_shape = batch_shape + (k,)
x = self._rng.randn(*x_shape)
# chol will not have the leading two dimensions.
chol_shape = batch_shape[2:] + (k, k)
chol = self._random_cholesky_array(chol_shape)
operator = OperatorSqrtSolve(chol)
qf = operator.inv_quadratic_form_on_vectors(x)
numpy_qf = self._numpy_inv_quadratic_form_on_vectors(chol, x)
self.assertEqual(batch_shape, qf.get_shape())
self.assertAllClose(numpy_qf, qf.eval())
def test_iqfov_x_rank_same_as_broadcast_rank_using_solve(self):
with self.test_session():
for batch_shape in [(), (2,)]:
for k in [1, 3]:
x_shape = batch_shape + (k,)
x = self._rng.randn(*x_shape)
chol_shape = batch_shape + (k, k)
chol = self._random_cholesky_array(chol_shape)
operator = OperatorSolve(chol)
qf = operator.inv_quadratic_form_on_vectors(x)
self.assertEqual(batch_shape, qf.get_shape())
numpy_qf = self._numpy_inv_quadratic_form_on_vectors(chol, x)
self.assertAllClose(numpy_qf, qf.eval())
def test_iqfov_x_rank_greater_than_broadcast_rank_using_solve(self):
with self.test_session():
for batch_shape in [(2,), (2, 3)]:
for k in [1, 4]:
x_shape = batch_shape + (k,)
x = self._rng.randn(*x_shape)
# chol will not have the leading dimension.
chol_shape = batch_shape[1:] + (k, k)
chol = self._random_cholesky_array(chol_shape)
operator = OperatorSolve(chol)
qf = operator.inv_quadratic_form_on_vectors(x)
numpy_qf = self._numpy_inv_quadratic_form_on_vectors(chol, x)
self.assertEqual(batch_shape, qf.get_shape())
self.assertAllClose(numpy_qf, qf.eval())
def test_iqfov_x_rank_two_greater_than_broadcast_rank_using_solve(self):
with self.test_session():
for batch_shape in [(2, 3), (2, 3, 4), (2, 3, 4, 5)]:
for k in [1, 4]:
x_shape = batch_shape + (k,)
x = self._rng.randn(*x_shape)
# chol will not have the leading two dimensions.
chol_shape = batch_shape[2:] + (k, k)
chol = self._random_cholesky_array(chol_shape)
operator = OperatorSolve(chol)
qf = operator.inv_quadratic_form_on_vectors(x)
numpy_qf = self._numpy_inv_quadratic_form_on_vectors(chol, x)
self.assertEqual(batch_shape, qf.get_shape())
self.assertAllClose(numpy_qf, qf.eval())
class FlipMatrixToVectorTest(tf.test.TestCase):
def setUp(self):
self._rng = np.random.RandomState()
def test_matrix_and_vector_batch_shapes_the_same(self):
batch_shape = [6, 2, 3]
for static_batch_shape in [
tf.TensorShape(batch_shape), tf.TensorShape(None)]:
with self.test_session():
mat = self._rng.rand(2, 3, 4, 6)
vec = operator_pd.flip_matrix_to_vector(
mat, batch_shape, static_batch_shape)
vec_v = vec.eval()
self.assertAllEqual((6, 2, 3, 4), vec_v.shape)
self.assertAllEqual(mat[1, 2, 3, 4], vec_v[4, 1, 2, 3])
def test_matrix_and_vector_batch_shapes_same_rank_but_permuted(self):
batch_shape = [6, 3, 2]
for static_batch_shape in [
tf.TensorShape(batch_shape), tf.TensorShape(None)]:
with self.test_session():
mat = self._rng.rand(2, 3, 4, 6)
vec = operator_pd.flip_matrix_to_vector(
mat, batch_shape, static_batch_shape)
vec_v = vec.eval()
self.assertAllEqual((6, 3, 2, 4), vec_v.shape)
def test_vector_batch_shape_longer_than_matrix_batch_shape(self):
batch_shape = [2, 3, 2, 3]
for static_batch_shape in [
tf.TensorShape(batch_shape), tf.TensorShape(None)]:
with self.test_session():
mat = self._rng.rand(2, 3, 4, 6)
vec = operator_pd.flip_matrix_to_vector(
mat, batch_shape, static_batch_shape)
vec_v = vec.eval()
self.assertAllEqual((2, 3, 2, 3, 4), vec_v.shape)
def test_matrix_batch_shape_has_a_singleton_that_vec_batch_shape_doesnt(self):
batch_shape = [6, 3]
for static_batch_shape in [
tf.TensorShape(batch_shape), tf.TensorShape(None)]:
with self.test_session():
mat = self._rng.rand(1, 3, 4, 6)
vec = operator_pd.flip_matrix_to_vector(
mat, batch_shape, static_batch_shape)
vec_v = vec.eval()
self.assertAllEqual((6, 3, 4), vec_v.shape)
self.assertAllEqual(mat[0, 2, 3, 4], vec_v[4, 2, 3])
class FlipVectorToMatrixTest(tf.test.TestCase):
def setUp(self):
self._rng = np.random.RandomState()
def test_when_x_batch_rank_is_same_as_batch_rank_arg(self):
batch_shape = [4, 5]
x = self._rng.rand(4, 5, 6)
for static_batch_shape in [
tf.TensorShape(batch_shape), tf.TensorShape(None)]:
with self.test_session():
mat = operator_pd.flip_vector_to_matrix(
x, batch_shape, static_batch_shape)
mat_v = mat.eval()
expected_mat_v = x.reshape(x.shape + (1,))
self.assertAllEqual(expected_mat_v, mat_v)
def test_when_x_has_one_larger_larger_batch_rank_than_batch_rank_arg(self):
batch_shape = [4, 5]
x = self._rng.rand(3, 4, 5, 6)
for static_batch_shape in [
tf.TensorShape(batch_shape), tf.TensorShape(None)]:
with self.test_session():
mat = operator_pd.flip_vector_to_matrix(
x, batch_shape, static_batch_shape)
mat_v = mat.eval()
self.assertAllEqual((4, 5, 6, 3), mat_v.shape)
self.assertAllEqual(x[2, 2, 2, 1], mat_v[2, 2, 1, 2])
def test_when_batch_shape_requires_reshape_of_vector_batch_shape(self):
batch_shape = [5, 4]
x = self._rng.rand(3, 4, 5, 6) # Note x has (4,5) and batch_shape is (5, 4)
for static_batch_shape in [
tf.TensorShape(batch_shape), tf.TensorShape(None)]:
with self.test_session():
mat = operator_pd.flip_vector_to_matrix(
x, batch_shape, static_batch_shape)
mat_v = mat.eval()
self.assertAllEqual((5, 4, 6, 3), mat_v.shape)
def test_when_x_has_two_larger_larger_batch_rank_than_batch_rank_arg(self):
batch_shape = [4, 5]
x = self._rng.rand(2, 3, 4, 5, 6)
for static_batch_shape in [
tf.TensorShape(batch_shape), tf.TensorShape(None)]:
with self.test_session():
mat = operator_pd.flip_vector_to_matrix(
x, batch_shape, static_batch_shape)
mat_v = mat.eval()
self.assertAllEqual((4, 5, 6, 2*3), mat_v.shape)
class ExtractBatchShapeTest(tf.test.TestCase):
def setUp(self):
self._rng = np.random.RandomState()
def test_x_has_empty_batch_shape(self):
with self.test_session():
x = self._rng.rand(2, 3)
num_event_dims = 2
batch_shape = operator_pd.extract_batch_shape(x, num_event_dims)
self.assertAllEqual([], batch_shape.eval())
def test_x_has_non_empty_batch_shape(self):
with self.test_session():
x = self._rng.rand(2, 3, 4, 5)
num_event_dims = 2
batch_shape = operator_pd.extract_batch_shape(x, num_event_dims)
self.assertAllEqual([2, 3], batch_shape.eval())
if __name__ == "__main__":
tf.test.main()
```
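_numpy_inv_quadratic_form_on_vectors above evaluates the quadratic form x^T A^{-1} x for A = L L^T by solving L w = x and summing w**2, i.e. the squared norm of the whitened vector. A short NumPy check that this agrees with forming A and solving directly, on random data for a single un-batched case; it mirrors the helper, not the TensorFlow operators.
```python
import numpy as np

rng = np.random.RandomState(0)
k = 3
chol = np.tril(rng.rand(k, k)) + np.eye(k)   # lower triangular, positive diagonal
x = rng.randn(k)

a = chol @ chol.T                            # the positive-definite matrix
direct = x @ np.linalg.solve(a, x)           # x^T A^{-1} x computed directly
whitened = np.linalg.solve(chol, x)          # solve L w = x
via_chol = (whitened ** 2).sum()             # squared norm of the whitened vector

print(np.allclose(direct, via_chol))  # True
```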
#### File: python/layers/summaries_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class SummariesTest(tf.test.TestCase):
def test_duplicate_tag(self):
with self.test_session():
var = tf.Variable([1, 2, 3])
tf.contrib.layers.summarize_tensor(var)
with self.assertRaises(ValueError):
tf.contrib.layers.summarize_tensor(var)
def test_summarize_scalar_tensor(self):
with self.test_session():
scalar_var = tf.Variable(1)
summary_op = tf.contrib.layers.summarize_tensor(scalar_var)
self.assertTrue(summary_op.op.type == 'ScalarSummary')
def test_summarize_multidim_tensor(self):
with self.test_session():
tensor_var = tf.Variable([1, 2, 3])
summary_op = tf.contrib.layers.summarize_tensor(tensor_var)
self.assertTrue(summary_op.op.type == 'HistogramSummary')
def test_summarize_activation(self):
with self.test_session():
var = tf.Variable(1)
op = tf.identity(var, name='SummaryTest')
summary_op = tf.contrib.layers.summarize_activation(op)
self.assertTrue(summary_op.op.type == 'HistogramSummary')
names = [op.op.name for op in tf.get_collection(tf.GraphKeys.SUMMARIES)]
self.assertEquals(len(names), 1)
self.assertTrue(u'SummaryTest/activation_summary' in names)
def test_summarize_activation_relu(self):
with self.test_session():
var = tf.Variable(1)
op = tf.nn.relu(var, name='SummaryTest')
summary_op = tf.contrib.layers.summarize_activation(op)
self.assertTrue(summary_op.op.type == 'HistogramSummary')
names = [op.op.name for op in tf.get_collection(tf.GraphKeys.SUMMARIES)]
self.assertEquals(len(names), 2)
self.assertTrue(u'SummaryTest/zeros_summary' in names)
self.assertTrue(u'SummaryTest/activation_summary' in names)
def test_summarize_activation_relu6(self):
with self.test_session():
var = tf.Variable(1)
op = tf.nn.relu6(var, name='SummaryTest')
summary_op = tf.contrib.layers.summarize_activation(op)
self.assertTrue(summary_op.op.type == 'HistogramSummary')
names = [op.op.name for op in tf.get_collection(tf.GraphKeys.SUMMARIES)]
self.assertEquals(len(names), 3)
self.assertTrue(u'SummaryTest/zeros_summary' in names)
self.assertTrue(u'SummaryTest/sixes_summary' in names)
self.assertTrue(u'SummaryTest/activation_summary' in names)
def test_summarize_collection_regex(self):
with self.test_session():
var = tf.Variable(1)
tf.identity(var, name='Test1')
tf.add_to_collection('foo', tf.identity(var, name='Test2'))
tf.add_to_collection('foo', tf.identity(var, name='Foobar'))
tf.add_to_collection('foo', tf.identity(var, name='Test3'))
summaries = tf.contrib.layers.summarize_collection('foo', r'Test[123]')
names = [op.op.name for op in summaries]
self.assertEquals(len(names), 2)
self.assertTrue(u'Test2_summary' in names)
self.assertTrue(u'Test3_summary' in names)
if __name__ == '__main__':
tf.test.main()
```
#### File: learn/estimators/rnn_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
def rnn_input_fn(x):
return tf.split(1, 5, x)
data = np.array(list([[2, 1, 2, 2, 3],
[2, 2, 3, 4, 5],
[3, 3, 1, 2, 1],
[2, 4, 5, 4, 1]]),
dtype=np.float32)
# labels for classification
labels = np.array(list([1, 0, 1, 0]), dtype=np.float32)
# targets for regression
targets = np.array(list([10, 16, 10, 16]), dtype=np.float32)
test_data = np.array(list([[1, 3, 3, 2, 1],
[2, 3, 4, 5, 6]]),
dtype=np.float32)
class RNNTest(tf.test.TestCase):
"""RNN estimator tests."""
def setUp(self):
random.seed(42)
tf.set_random_seed(42)
def testRNN(self):
# Classification
classifier = tf.contrib.learn.TensorFlowRNNClassifier(rnn_size=2,
cell_type="lstm",
n_classes=2,
steps=150,
input_op_fn=rnn_input_fn)
classifier.fit(data, labels)
# pylint: disable=pointless-statement
classifier.weights_
classifier.bias_
# pylint: enable=pointless-statement
predictions = classifier.predict(data[:2])
self.assertAllClose(predictions, labels[:2])
classifier = tf.contrib.learn.TensorFlowRNNClassifier(rnn_size=2,
cell_type="rnn",
n_classes=2,
input_op_fn=rnn_input_fn,
steps=100,
num_layers=2)
classifier.fit(data, labels)
classifier = tf.contrib.learn.TensorFlowRNNClassifier(
rnn_size=2, cell_type="invalid_cell_type", n_classes=2,
input_op_fn=rnn_input_fn, num_layers=2)
with self.assertRaises(ValueError):
classifier.fit(data, labels)
# Regression
regressor = tf.contrib.learn.TensorFlowRNNRegressor(rnn_size=2,
cell_type="gru",
steps=100,
input_op_fn=rnn_input_fn)
regressor.fit(data, targets)
# pylint: disable=pointless-statement
regressor.weights_
regressor.bias_
# pylint: enable=pointless-statement
predictions = regressor.predict(test_data)
# rnn with attention
classifier = tf.contrib.learn.TensorFlowRNNClassifier(rnn_size=2,
cell_type="lstm",
n_classes=2,
input_op_fn=rnn_input_fn,
bidirectional=False,
attn_length=2,
steps=100,
attn_size=2,
attn_vec_size=2)
classifier.fit(data, labels)
predictions = classifier.predict(data[:2])
self.assertAllClose(predictions, labels[:2])
def testBidirectionalRNN(self):
# Classification
classifier = tf.contrib.learn.TensorFlowRNNClassifier(rnn_size=2,
cell_type="lstm",
n_classes=2,
input_op_fn=rnn_input_fn,
steps=100,
bidirectional=True)
classifier.fit(data, labels)
predictions = classifier.predict(data[:2])
self.assertAllClose(predictions, labels[:2])
# bidirectional rnn with attention
classifier = tf.contrib.learn.TensorFlowRNNClassifier(rnn_size=2,
cell_type="lstm",
n_classes=2,
input_op_fn=rnn_input_fn,
bidirectional=True,
attn_length=2,
attn_size=2,
steps=100,
attn_vec_size=2)
classifier.fit(data, labels)
predictions = classifier.predict(data[:2])
self.assertAllClose(predictions, labels[:2])
if __name__ == "__main__":
tf.test.main()
```
#### File: tests/dataframe/binary_transform_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms.binary_transforms import BINARY_TRANSFORMS
NUMPY_ARRAY_SIZE = 100
SCALAR = 50.0
TEST_NAME_PREFIX = "testBinaryOp_"
class BinaryTransformTestCase(tf.test.TestCase):
"""Test class for binary transforms."""
@classmethod
def add_test_case(cls, fn_name, op):
def _test(self):
rng = np.arange(-NUMPY_ARRAY_SIZE // 2,
NUMPY_ARRAY_SIZE // 2,
dtype="float32")
frame = df.TensorFlowDataFrame.from_numpy(rng,
batch_size=len(rng),
shuffle=False)
frame["sqr"] = frame["value"].square()
self.assertTrue(hasattr(frame["value"], fn_name))
frame["series_result"] = getattr(frame["value"],
fn_name)(frame["sqr"])
frame["scalar_result"] = getattr(frame["value"], fn_name)(SCALAR)
frame_built = frame.build()
expected_series_tensor = op(frame_built["value"], frame_built["sqr"])
actual_series_tensor = frame_built["series_result"]
expected_scalar_tensor = op(frame_built["value"], SCALAR)
actual_scalar_tensor = frame_built["scalar_result"]
session = tf.Session()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=session, coord=coord)
actual_series, expected_series, actual_scalar, expected_scalar = (
session.run([actual_series_tensor, expected_series_tensor,
actual_scalar_tensor, expected_scalar_tensor]))
coord.request_stop()
coord.join(threads)
np.testing.assert_almost_equal(expected_series, actual_series)
np.testing.assert_almost_equal(expected_scalar, actual_scalar)
setattr(cls, "{}{}".format(TEST_NAME_PREFIX, op.__name__), _test)
for bt in BINARY_TRANSFORMS:
BinaryTransformTestCase.add_test_case(*bt)
# Check that the number of test methods matches the number of binary transforms.
test_methods = [test for test in dir(BinaryTransformTestCase)
if test.startswith(TEST_NAME_PREFIX)]
assert len(test_methods) == len(BINARY_TRANSFORMS)
if __name__ == "__main__":
tf.test.main()
```
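binary_transform_test.py above generates one test method per entry in BINARY_TRANSFORMS by closing over (fn_name, op) and attaching the function with setattr, then asserts that the method count matches. The same pattern, sketched with plain unittest and a made-up operation list instead of the DataFrame transforms:
```python
import operator
import unittest

OPS = [("add", operator.add, 7), ("mul", operator.mul, 12)]

class ArithmeticTestCase(unittest.TestCase):
    @classmethod
    def add_test_case(cls, fn_name, op, expected):
        def _test(self):
            self.assertEqual(expected, op(3, 4))
        setattr(cls, "testBinaryOp_%s" % fn_name, _test)

for entry in OPS:
    ArithmeticTestCase.add_test_case(*entry)

# One generated method per operation, as in the DataFrame test above.
assert len([m for m in dir(ArithmeticTestCase)
            if m.startswith("testBinaryOp_")]) == len(OPS)

if __name__ == "__main__":
    unittest.main()
```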
#### File: tests/dataframe/reader_source_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.learn.python.learn.dataframe.transforms.reader_source as rs
class ReaderSourceTestCase(tf.test.TestCase):
"""Test class for ReaderSource."""
def setUp(self):
super(ReaderSourceTestCase, self).setUp()
self.work_units = [str(x) for x in range(1000)]
def testNoShuffle(self):
id_source = rs.ReaderSource(reader_cls=tf.IdentityReader,
work_units=self.work_units,
batch_size=1,
shuffle=False,
num_threads=1)
index_column, value_column = id_source()
index_tensor = index_column.build()
value_tensor = value_column.build()
self.assertEqual([1], index_tensor.get_shape().as_list())
self.assertEqual([1], value_tensor.get_shape().as_list())
with self.test_session() as sess:
tf.initialize_all_variables().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(50):
index, value = sess.run([index_tensor, value_tensor])
self.assertEqual(i, int(index[0]))
self.assertEqual(i, int(value[0]))
coord.request_stop()
coord.join(threads)
def testYesShuffle(self):
id_source = rs.ReaderSource(reader_cls=tf.IdentityReader,
work_units=self.work_units,
batch_size=1,
shuffle=True,
num_threads=10,
seed=1234)
index_column, value_column = id_source()
cache = {}
index_tensor = index_column.build(cache)
value_tensor = value_column.build(cache)
self.assertEqual([1], index_tensor.get_shape().as_list())
self.assertEqual([1], value_tensor.get_shape().as_list())
seen = set([])
with self.test_session() as sess:
tf.initialize_all_variables().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for _ in range(500):
index, value = sess.run([index_tensor, value_tensor])
self.assertEqual(index, value)
self.assertNotIn(int(value[0]), seen)
seen.add(int(value[0]))
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
tf.test.main()
```
#### File: learn/tests/summary_writer_cache_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import summary_writer_cache
class SummaryWriterCacheTest(tf.test.TestCase):
"""SummaryWriterCache tests."""
def _test_dir(self, test_name):
"""Create an empty dir to use for tests.
Args:
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(self.get_temp_dir(), test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
def test_cache(self):
with tf.Graph().as_default():
dir1 = self._test_dir('test_cache_1')
dir2 = self._test_dir('test_cache_2')
sw1 = summary_writer_cache.SummaryWriterCache.get(dir1)
sw2 = summary_writer_cache.SummaryWriterCache.get(dir2)
sw3 = summary_writer_cache.SummaryWriterCache.get(dir1)
self.assertEqual(sw1, sw3)
self.assertFalse(sw1 == sw2)
sw1.close()
sw2.close()
events1 = glob.glob(os.path.join(dir1, 'event*'))
self.assertTrue(events1)
events2 = glob.glob(os.path.join(dir2, 'event*'))
self.assertTrue(events2)
events3 = glob.glob(os.path.join('nowriter', 'event*'))
self.assertFalse(events3)
def test_clear(self):
with tf.Graph().as_default():
dir1 = self._test_dir('test_clear')
sw1 = summary_writer_cache.SummaryWriterCache.get(dir1)
summary_writer_cache.SummaryWriterCache.clear()
sw2 = summary_writer_cache.SummaryWriterCache.get(dir1)
self.assertFalse(sw1 == sw2)
if __name__ == '__main__':
tf.test.main()
```
#### File: learn/utils/export_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.session_bundle import manifest_pb2
class ExportTest(tf.test.TestCase):
def _get_default_signature(self, export_meta_filename):
"""Gets the default signature from the export.meta file."""
with tf.Session():
save = tf.train.import_meta_graph(export_meta_filename)
meta_graph_def = save.export_meta_graph()
collection_def = meta_graph_def.collection_def
signatures_any = collection_def['serving_signatures'].any_list.value
self.assertEquals(len(signatures_any), 1)
signatures = manifest_pb2.Signatures()
signatures_any[0].Unpack(signatures)
default_signature = signatures.default_signature
return default_signature
def testExportMonitor(self):
random.seed(42)
x = np.random.rand(1000)
y = 2 * x + 3
cont_features = [tf.contrib.layers.real_valued_column('', dimension=1)]
regressor = learn.LinearRegressor(feature_columns=cont_features)
export_dir = tempfile.mkdtemp() + 'export/'
export_monitor = learn.monitors.ExportMonitor(
every_n_steps=1, export_dir=export_dir, exports_to_keep=2,
signature_fn=export.generic_signature_fn)
regressor.fit(x, y, steps=10,
monitors=[export_monitor])
self.assertTrue(tf.gfile.Exists(export_dir))
# Only the written checkpoints are exported.
self.assertTrue(tf.gfile.Exists(export_dir + '00000001/export'))
self.assertTrue(tf.gfile.Exists(export_dir + '00000010/export'))
# Validate the signature
signature = self._get_default_signature(export_dir + '00000010/export.meta')
self.assertTrue(signature.HasField('generic_signature'))
def testExportMonitorRegressionSignature(self):
def _regression_signature(examples, unused_features, predictions):
signatures = {}
signatures['regression'] = (
tf.contrib.session_bundle.exporter.regression_signature(examples,
predictions))
return signatures['regression'], signatures
random.seed(42)
x = np.random.rand(1000)
y = 2 * x + 3
cont_features = [tf.contrib.layers.real_valued_column('', dimension=1)]
regressor = learn.LinearRegressor(feature_columns=cont_features)
export_dir = tempfile.mkdtemp() + 'export/'
export_monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=export_dir,
exports_to_keep=1,
signature_fn=_regression_signature)
regressor.fit(x, y, steps=10, monitors=[export_monitor])
self.assertTrue(tf.gfile.Exists(export_dir))
self.assertFalse(tf.gfile.Exists(export_dir + '00000000/export'))
self.assertTrue(tf.gfile.Exists(export_dir + '00000010/export'))
# Validate the signature
signature = self._get_default_signature(export_dir + '00000010/export.meta')
self.assertTrue(signature.HasField('regression_signature'))
if __name__ == '__main__':
tf.test.main()
```
#### File: python/ops/metric_ops_util_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import metric_ops_util
class RemoveSqueezableDimensionsTest(tf.test.TestCase):
def testRemoveSqueezableDimensions(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False, predictions_have_extra_dim=False,
labels_have_static_shape=False, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False, predictions_have_extra_dim=False,
labels_have_static_shape=False, labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False, predictions_have_extra_dim=False,
labels_have_static_shape=True, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticLabel_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False, predictions_have_extra_dim=False,
labels_have_static_shape=True, labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False, predictions_have_extra_dim=True,
labels_have_static_shape=False, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraPredictionDim_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False, predictions_have_extra_dim=True,
labels_have_static_shape=True, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True, predictions_have_extra_dim=False,
labels_have_static_shape=False, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True, predictions_have_extra_dim=False,
labels_have_static_shape=False, labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_static(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True, predictions_have_extra_dim=False,
labels_have_static_shape=True, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True, predictions_have_extra_dim=False,
labels_have_static_shape=True, labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticPrediction_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True, predictions_have_extra_dim=True,
labels_have_static_shape=False, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True, predictions_have_extra_dim=True,
labels_have_static_shape=True, labels_have_extra_dim=False)
# TODO(ptucker): Replace this with parameterized test.
def _testRemoveSqueezableDimensions(
self,
predictions_have_static_shape,
predictions_have_extra_dim,
labels_have_static_shape,
labels_have_extra_dim):
assert not (predictions_have_extra_dim and labels_have_extra_dim)
predictions_value = (0, 1, 1, 0, 0, 1, 0)
labels_value = (0, 0, 1, 1, 0, 0, 0)
input_predictions_value = (
[[p] for p in predictions_value] if predictions_have_extra_dim else
predictions_value)
input_labels_value = (
[[l] for l in labels_value] if labels_have_extra_dim else labels_value)
with tf.Graph().as_default() as g:
feed_dict = {}
if predictions_have_static_shape:
predictions = tf.constant(input_predictions_value, dtype=tf.int32)
else:
predictions = tf.placeholder(dtype=tf.int32, name='predictions')
feed_dict[predictions] = input_predictions_value
if labels_have_static_shape:
labels = tf.constant(input_labels_value, dtype=tf.int32)
else:
labels = tf.placeholder(dtype=tf.int32, name='labels')
feed_dict[labels] = input_labels_value
squeezed_predictions, squeezed_labels = (
metric_ops_util.remove_squeezable_dimensions(predictions, labels))
with self.test_session(g):
tf.initialize_local_variables().run()
self.assertAllClose(
predictions_value, squeezed_predictions.eval(feed_dict=feed_dict))
self.assertAllClose(
labels_value, squeezed_labels.eval(feed_dict=feed_dict))
```
#### File: python/ops/training_ops.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
TRAINING_OPS_FILE = '_training_ops.so'
_training_ops = None
_ops_lock = threading.Lock()
# TODO(b/31222613): Some of these ops are probably differentiable, and
# there may be latent bugs here.
ops.NotDifferentiable('HardRoutingFunction')
ops.NotDifferentiable('RoutingGradient')
ops.NotDifferentiable('KFeatureDataGradient')
ops.NotDifferentiable('KFeatureRoutingGradient')
ops.NotDifferentiable('KFeatureWeightGradient')
ops.NotDifferentiable('UnpackPath')
ops.RegisterShape('RoutingFunction')(common_shapes.call_cpp_shape_fn)
ops.RegisterShape('KFeatureRoutingFunction')(common_shapes.call_cpp_shape_fn)
ops.RegisterShape('HardRoutingFunction')(common_shapes.call_cpp_shape_fn)
ops.RegisterShape('StochasticHardRoutingFunction')(
common_shapes.call_cpp_shape_fn)
ops.RegisterShape('StochasticHardRoutingGradient')(
common_shapes.call_cpp_shape_fn)
ops.RegisterShape('UnpackPath')(common_shapes.call_cpp_shape_fn)
ops.RegisterShape('RoutingGradient')(common_shapes.call_cpp_shape_fn)
ops.RegisterShape('KFeatureDataGradient')(common_shapes.call_cpp_shape_fn)
ops.RegisterShape('KFeatureRoutingGradient')(common_shapes.call_cpp_shape_fn)
ops.RegisterShape('KFeatureWeightGradient')(common_shapes.call_cpp_shape_fn)
@ops.RegisterGradient('RoutingFunction')
def _RoutingFunctionGradient(op, grad):
"""The gradient of RoutingFunction.
Args:
op: The RoutingFunction op.
grad: Gradient with respect to the output of the RoutingFunction op.
Returns:
Gradients with respect to the input of the RoutingFunction op.
"""
routing_gradient = _training_ops.routing_gradient
input_data_tensor = op.inputs[0]
tree_weights_tensor = op.inputs[1]
tree_thresholds_tensor = op.inputs[2]
routing_function_tensor = op.outputs[0]
# The derivatives below are each defined over one or two of three dimensions:
# (batch_size, num_nodes, num_features). We explicitly expand each derivative
# to three dimensions to ensure that they're broadcasted correctly.
# dl / du is the derivative of the loss with respect to the output of the
# routing function, which is provided by tensorflow.
#
# dl / du has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
dl_du = array_ops.expand_dims(grad, 2)
# du / df is the derivative of the output of the routing function with respect
# to the decision function at each node. It is computed by
# routing_gradient_op.cc.
#
# du / df has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
du_df = array_ops.expand_dims(
routing_gradient(
input_data_tensor,
tree_weights_tensor,
tree_thresholds_tensor,
routing_function_tensor,
max_nodes=op.get_attr('max_nodes')),
2)
# df / dx is the derivative of the decision function with respect to the input
# data. f_i(x) = (-t_i * x + b_i), so df_i / dx = -t_i.
#
# df / dx has dimension (num_nodes, num_features), which we expand to
# (1, num_nodes, num_features).
df_dx = -array_ops.expand_dims(tree_weights_tensor, 0)
# df / dt is the derivative of the decision function with respect to its
# parameters. f_i(x) = (-t_i * x + b_i), so df_i / d t_i = -x.
#
# df / dt has dimension (batch_size, num_features), which we expand to
# (batch_size, 1, num_features).
df_dt = -array_ops.expand_dims(input_data_tensor, 1)
  # df / db is the derivative of the decision function with respect to its
  # bias parameter. f_i(x) = (-t_i * x + b_i), so df_i / d b_i = 1.
#
# df / db has dimension (num_nodes), which we expand to
# (1, num_nodes, 1).
df_db = array_ops.expand_dims(
array_ops.expand_dims(array_ops.ones_like(tree_thresholds_tensor), 0), 2)
# Compute the derivatives of the loss with respect to the inputs using the
# chain rule (backpropagation).
dl_dx = math_ops.reduce_mean(dl_du * du_df * df_dx, 1)
dl_dt = math_ops.reduce_mean(dl_du * du_df * df_dt, 0)
dl_db = math_ops.reduce_mean(array_ops.squeeze(dl_du * du_df * df_db, [2]), 0)
input_gradients = [dl_dx, dl_dt, dl_db]
return input_gradients
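# Hedged summary of the chain rule used in the gradient functions in this file
# (a restatement of the comments above, not new math): with u the routing
# output and f_i(x) = -t_i * x + b_i,
#   dl/dx = mean over nodes of (dl/du)(du/df)(df/dx)  -> shape (batch_size, num_features)
#   dl/dt = mean over batch of (dl/du)(du/df)(df/dt)  -> shape (num_nodes, num_features)
#   dl/db = mean over batch of (dl/du)(du/df)(df/db)  -> shape (num_nodes,)
# Note that the code averages (reduce_mean) rather than sums over the reduced axis.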
@ops.RegisterGradient('StochasticHardRoutingFunction')
def _StochasticHardRoutingFunctionGradient(op, routing_grad, unused_path_grad):
"""The gradient of RoutingFunction.
Args:
op: The RoutingFunction op.
routing_grad: Gradient with respect to the output of the RoutingFunction op.
Returns:
Gradients with respect to the input of the RoutingFunction op.
"""
gradient_op = _training_ops.stochastic_hard_routing_gradient
unpack_path_op = _training_ops.unpack_path
input_data_tensor = op.inputs[0]
tree_weights_tensor = op.inputs[1]
tree_thresholds_tensor = op.inputs[2]
path_probability_tensor = op.outputs[0]
path_tensor = op.outputs[1]
# The derivatives below are each defined over one or two of three dimensions:
# (batch_size, num_nodes, num_features). We explicitly expand each derivative
# to three dimensions to ensure that they're broadcasted correctly.
du_df_raw, df_dx_raw, df_dt_raw, df_db_raw = gradient_op(
input_data_tensor,
tree_weights_tensor,
tree_thresholds_tensor,
path_probability_tensor,
path_tensor,
tree_depth=op.get_attr('tree_depth'))
# dl / du is the derivative of the loss with respect to the output of the
# routing function, which is provided by tensorflow.
#
# dl / du has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
dl_du = array_ops.expand_dims(unpack_path_op(path_tensor, routing_grad), 2)
# du / df is the derivative of the output of the routing function with respect
# to the decision function at each node. It is computed by
# single_feature_routing_gradient_op.cc.
#
# du / df has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
du_df = array_ops.expand_dims(du_df_raw, 2)
# df / dx is the derivative of the decision function with respect to the input
# data. f(x) = (-t * x + b), so df / dx = -t for the selected features and
# zero elsewhere.
#
# df / dx has dimension (num_nodes, num_features), which we expand to
# (1, num_nodes, num_features).
df_dx = array_ops.expand_dims(df_dx_raw, 0)
# df / dt is the derivative of the decision function with respect to its
# parameters. f(x) = (-t * x + b), so df / dt = -x[feature].
#
# df / dt has dimension (batch_size, num_nodes, num_features).
df_dt = -df_dt_raw
  # df / db is the derivative of the decision function with respect to its
  # bias parameter. f(x) = (-t * x + b), so df / db = 1.
#
# df / db has dimension (num_nodes), which we expand to
# (1, num_nodes, 1).
df_db = array_ops.expand_dims(array_ops.expand_dims(df_db_raw, 0), 2)
# Compute the derivatives of the loss with respect to the inputs using the
# chain rule (backpropagation).
dl_dx = math_ops.reduce_mean(dl_du * du_df * df_dx, 1)
dl_dt = math_ops.reduce_mean(dl_du * du_df * df_dt, 0)
dl_db = math_ops.reduce_mean(array_ops.squeeze(dl_du * du_df * df_db, [2]), 0)
input_gradients = [dl_dx, dl_dt, dl_db]
return input_gradients
@ops.RegisterGradient('KFeatureRoutingFunction')
def _KFeatureRoutingFunctionGradient(op, grad):
"""The gradient of RoutingFunction.
Args:
op: The RoutingFunction op.
grad: Gradient with respect to the output of the RoutingFunction op.
Returns:
Gradients with respect to the input of the RoutingFunction op.
"""
gradient_op = _training_ops.k_feature_gradient
input_data_tensor = op.inputs[0]
tree_weights_tensor = op.inputs[1]
tree_thresholds_tensor = op.inputs[2]
routing_function_tensor = op.outputs[0]
# The derivatives below are each defined over one or two of three dimensions:
# (batch_size, num_nodes, num_features). We explicitly expand each derivative
# to three dimensions to ensure that they're broadcasted correctly.
du_df_raw, df_dx_raw, df_dt_raw = gradient_op(
input_data_tensor,
tree_weights_tensor,
tree_thresholds_tensor,
routing_function_tensor,
layer_num=op.get_attr('layer_num'),
random_seed=op.get_attr('random_seed'))
# dl / du is the derivative of the loss with respect to the output of the
# routing function, which is provided by tensorflow.
#
# dl / du has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
dl_du = array_ops.expand_dims(grad, 2)
# du / df is the derivative of the output of the routing function with respect
# to the decision function at each node. It is computed by
# single_feature_routing_gradient_op.cc.
#
# du / df has dimension (batch_size, num_nodes), which we expand to
# (batch_size, num_nodes, 1).
du_df = array_ops.expand_dims(du_df_raw, 2)
# df / dx is the derivative of the decision function with respect to the input
# data. f(x) = (-t * x + b), so df / dx = -t for the selected features and
# zero elsewhere.
#
# df / dx has dimension (num_nodes, num_features), which we expand to
# (1, num_nodes, num_features).
df_dx = array_ops.expand_dims(df_dx_raw, 0)
# df / dt is the derivative of the decision function with respect to its
# parameters. f(x) = (-t * x + b), so df / dt = -x[feature].
#
# df / dt has dimension (batch_size, num_nodes, num_features).
df_dt = -df_dt_raw
  # df / db is the derivative of the decision function with respect to its
  # bias parameter. f(x) = (-t * x + b), so df / db = 1.
#
# df / db has dimension (num_nodes), which we expand to
# (1, num_nodes, 1).
df_db = array_ops.expand_dims(
array_ops.expand_dims(array_ops.ones_like(tree_thresholds_tensor), 0), 2)
# Compute the derivatives of the loss with respect to the inputs using the
# chain rule (backpropagation).
dl_dx = math_ops.reduce_mean(dl_du * du_df * df_dx, 1)
dl_dt = math_ops.reduce_mean(dl_du * du_df * df_dt, 0)
dl_db = math_ops.reduce_mean(array_ops.squeeze(dl_du * du_df * df_db, [2]), 0)
input_gradients = [dl_dx, dl_dt, dl_db]
return input_gradients
# Workaround for the fact that importing tensorflow imports contrib (even if a
# user isn't using this or any other contrib op), but there's not yet any
# guarantee that the shared object exists. Without this lazy loading, "import
# tensorflow" would always crash, even for users that never use contrib.
def Load():
"""Load training ops library and return the loaded module."""
with _ops_lock:
global _training_ops
if not _training_ops:
ops_path = resource_loader.get_path_to_datafile(TRAINING_OPS_FILE)
logging.info('data path: %s', ops_path)
_training_ops = load_library.load_op_library(ops_path)
assert _training_ops, 'Could not load _training_ops.so'
return _training_ops
```
#### File: tensor_forest/python/topn_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.tensor_forest.python import topn
from tensorflow.contrib.tensor_forest.python.ops import topn_ops
from tensorflow.python.client import session
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class TopNOpsTest(test_util.TensorFlowTestCase):
def setUp(self):
self.ops = topn_ops.Load()
def testInsertOpIntoEmptyShortlist(self):
with self.test_session():
shortlist_ids, new_ids, new_scores = self.ops.top_n_insert(
[0, -1, -1, -1, -1, -1], # sl_ids
[-999, -999, -999, -999, -999, -999], # sl_scores
[5],
[33.0] # new id and score
)
self.assertAllEqual([1, 0], shortlist_ids.eval())
self.assertAllEqual([5, 1], new_ids.eval())
self.assertAllEqual([33.0, -999], new_scores.eval())
def testInsertOpIntoAlmostFullShortlist(self):
with self.test_session():
shortlist_ids, new_ids, new_scores = self.ops.top_n_insert(
[4, 13, -1, 27, 99, 15], # sl_ids
[60.0, 87.0, -999, 65.0, 1000.0, 256.0], # sl_scores
[5],
[93.0] # new id and score
)
self.assertAllEqual([2, 0], shortlist_ids.eval())
self.assertAllEqual([5, 5], new_ids.eval())
# Shortlist still contains all known scores > 60.0
self.assertAllEqual([93.0, 60.0], new_scores.eval())
def testInsertOpIntoFullShortlist(self):
with self.test_session():
shortlist_ids, new_ids, new_scores = self.ops.top_n_insert(
[5, 13, 44, 27, 99, 15], # sl_ids
[60.0, 87.0, 111.0, 65.0, 1000.0, 256.0], # sl_scores
[5],
[93.0] # new id and score
)
self.assertAllEqual([3, 0], shortlist_ids.eval())
self.assertAllEqual([5, 5], new_ids.eval())
# We removed a 65.0 from the list, so now we can only claim that
# it holds all scores > 65.0.
self.assertAllEqual([93.0, 65.0], new_scores.eval())
def testInsertOpHard(self):
with self.test_session():
shortlist_ids, new_ids, new_scores = self.ops.top_n_insert(
[4, 13, -1, 27, 99, 15], # sl_ids
[60.0, 87.0, -999, 65.0, 1000.0, 256.0], # sl_scores
[5, 6, 7, 8, 9],
[61.0, 66.0, 90.0, 100.0, 2000.0] # new id and score
)
# Top 5 scores are: 2000.0, 1000.0, 256.0, 100.0, 90.0
self.assertAllEqual([2, 3, 1, 0], shortlist_ids.eval())
self.assertAllEqual([9, 8, 7, 5], new_ids.eval())
# 87.0 is the highest score we overwrote or didn't insert.
self.assertAllEqual([2000.0, 100.0, 90.0, 87.0], new_scores.eval())
def testRemoveSimple(self):
with self.test_session():
shortlist_ids, new_length = self.ops.top_n_remove(
[5, 100, 200, 300, 400, 500], [200, 400, 600])
self.assertAllEqual([2, 4], shortlist_ids.eval())
self.assertAllEqual([3], new_length.eval())
def testRemoveAllMissing(self):
with self.test_session():
shortlist_ids, new_length = self.ops.top_n_remove(
[5, 100, 200, 300, 400, 500], [1200, 1400, 600])
self.assertAllEqual([], shortlist_ids.eval())
self.assertAllEqual([5], new_length.eval())
def testRemoveAll(self):
with self.test_session():
shortlist_ids, new_length = self.ops.top_n_remove(
[5, 100, 200, 300, 400, 500],
[100, 200, 300, 400, 500],)
self.assertAllEqual([1, 2, 3, 4, 5], shortlist_ids.eval())
self.assertAllEqual([0], new_length.eval())
class TopNTest(test_util.TensorFlowTestCase):
def testSimple(self):
t = topn.TopN(1000, shortlist_size=10)
t.insert([1, 2, 3, 4, 5], [1.0, 2.0, 3.0, 4.0, 5.0])
t.remove([4, 5])
ids, vals = t.get_best(2)
with session.Session() as sess:
sess.run(tf.initialize_all_variables())
ids_v, vals_v = sess.run([ids, vals])
self.assertItemsEqual([2, 3], list(ids_v))
self.assertItemsEqual([2.0, 3.0], list(vals_v))
def testSimpler(self):
t = topn.TopN(1000, shortlist_size=10)
t.insert([1], [33.0])
ids, vals = t.get_best(1)
with session.Session() as sess:
sess.run(tf.initialize_all_variables())
ids_v, vals_v = sess.run([ids, vals])
self.assertListEqual([1], list(ids_v))
self.assertListEqual([33.0], list(vals_v))
def testLotsOfInsertsAscending(self):
t = topn.TopN(1000, shortlist_size=10)
for i in range(100):
t.insert([i], [float(i)])
ids, vals = t.get_best(5)
with session.Session() as sess:
sess.run(tf.initialize_all_variables())
ids_v, vals_v = sess.run([ids, vals])
self.assertItemsEqual([95, 96, 97, 98, 99], list(ids_v))
self.assertItemsEqual([95.0, 96.0, 97.0, 98.0, 99.0], list(vals_v))
def testLotsOfInsertsDescending(self):
t = topn.TopN(1000, shortlist_size=10)
for i in range(99, 1, -1):
t.insert([i], [float(i)])
ids, vals = t.get_best(5)
with session.Session() as sess:
sess.run(tf.initialize_all_variables())
ids_v, vals_v = sess.run([ids, vals])
self.assertItemsEqual([95, 96, 97, 98, 99], list(ids_v))
self.assertItemsEqual([95.0, 96.0, 97.0, 98.0, 99.0], list(vals_v))
def testRemoveNotInShortlist(self):
t = topn.TopN(1000, shortlist_size=10)
for i in range(20):
t.insert([i], [float(i)])
t.remove([4, 5])
ids, vals = t.get_best(2)
with session.Session() as sess:
sess.run(tf.initialize_all_variables())
ids_v, vals_v = sess.run([ids, vals])
self.assertItemsEqual([18.0, 19.0], list(vals_v))
self.assertItemsEqual([18, 19], list(ids_v))
def testNeedToRefreshShortlistInGetBest(self):
t = topn.TopN(1000, shortlist_size=10)
for i in range(20):
t.insert([i], [float(i)])
# Shortlist now has 10 .. 19
t.remove([11, 12, 13, 14, 15, 16, 17, 18, 19])
ids, vals = t.get_best(2)
with session.Session() as sess:
sess.run(tf.initialize_all_variables())
ids_v, vals_v = sess.run([ids, vals])
self.assertItemsEqual([9, 10], list(ids_v))
self.assertItemsEqual([9.0, 10.0], list(vals_v))
if __name__ == '__main__':
googletest.main()
```
#### File: python/training/bucket_ops_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import tensorflow as tf
def _which_bucket(bucket_edges, v):
"""Identify which bucket v falls into.
Args:
bucket_edges: int array, bucket edges
v: int scalar, index
Returns:
int scalar, the bucket.
If v < bucket_edges[0], return 0.
If bucket_edges[0] <= v < bucket_edges[1], return 1.
...
    If bucket_edges[-2] <= v < bucket_edges[-1], return len(bucket_edges) - 1.
If v >= bucket_edges[-1], return len(bucket_edges) + 1
"""
v = np.asarray(v)
full = [0] + bucket_edges
found = np.where(np.logical_and(v >= full[:-1], v < full[1:]))[0]
if not found.size:
return len(full)
return found[0]
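# Hedged illustration of the bucket indices produced above (the edge values are
# the same ones used in the sequence-length test below; the outputs were worked
# out by hand, not taken from the original file):
#   _which_bucket([3, 4, 5, 10], 1)  == 0   # below the first edge
#   _which_bucket([3, 4, 5, 10], 3)  == 1   # 3 <= v < 4
#   _which_bucket([3, 4, 5, 10], 7)  == 3   # 5 <= v < 10
#   _which_bucket([3, 4, 5, 10], 12) == 5   # past the last edge -> len(edges) + 1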
class BucketTest(tf.test.TestCase):
def setUp(self):
tf.reset_default_graph()
self.scalar_int_feed = tf.placeholder(tf.int32, ())
self.unk_int64_feed = tf.placeholder(tf.int64, (None,))
self.vec3_str_feed = tf.placeholder(tf.string, (3,))
self._coord = tf.train.Coordinator()
# Make capacity very large so we can feed all the inputs in the
# main thread without blocking
input_queue = tf.PaddingFIFOQueue(
5000,
dtypes=[tf.int32, tf.int64, tf.string],
shapes=[(), (None,), (3,)])
self._input_enqueue_op = input_queue.enqueue(
(self.scalar_int_feed, self.unk_int64_feed, self.vec3_str_feed))
self.scalar_int, self.unk_int64, self.vec3_str = input_queue.dequeue()
self._threads = None
self._close_op = input_queue.close()
self._sess = None
def enqueue_inputs(self, sess, feed_dict):
sess.run(self._input_enqueue_op, feed_dict=feed_dict)
def start_queue_runners(self, sess):
# Store session to be able to close inputs later
if self._sess is None:
self._sess = sess
self._threads = tf.train.start_queue_runners(coord=self._coord)
def tearDown(self):
if self._sess is not None:
self._sess.run(self._close_op)
self._coord.request_stop()
self._coord.join(self._threads)
def testSingleBucket(self):
bucketed_dynamic = tf.contrib.training.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str],
which_bucket=tf.constant(0),
num_buckets=2,
batch_size=32,
num_threads=10,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[32], [32, None], [32, 3]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.test_session() as sess:
for v in range(32):
self.enqueue_inputs(
sess,
{self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]})
self.start_queue_runners(sess)
# Get a single minibatch
bucketed_values = sess.run(bucketed_dynamic)
# (which_bucket, bucket_tensors).
self.assertEqual(2, len(bucketed_values))
# Count number of bucket_tensors.
self.assertEqual(3, len(bucketed_values[1]))
# Ensure bucket 0 was used for all minibatch entries.
self.assertAllEqual(0, bucketed_values[0])
expected_scalar_int = np.arange(32)
expected_unk_int64 = np.zeros((32, 31)).astype(np.int64)
for i in range(32):
expected_unk_int64[i, :i] = i
expected_vec3_str = np.vstack(3 * [np.arange(32).astype(bytes)]).T
      # Must re-sort the output because num_threads > 1 leads to
# sometimes-inconsistent insertion order.
resort = np.argsort(bucketed_values[1][0])
self.assertAllEqual(expected_scalar_int, bucketed_values[1][0][resort])
self.assertAllEqual(expected_unk_int64, bucketed_values[1][1][resort])
self.assertAllEqual(expected_vec3_str, bucketed_values[1][2][resort])
def testEvenOddBuckets(self):
which_bucket = (self.scalar_int % 2)
bucketed_dynamic = tf.contrib.training.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str],
which_bucket=which_bucket,
num_buckets=2,
batch_size=32,
num_threads=10,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[32], [32, None], [32, 3]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.test_session() as sess:
for v in range(64):
self.enqueue_inputs(
sess,
{self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]})
self.start_queue_runners(sess)
# Get two minibatches (one containing even values, one containing odds)
bucketed_values_0 = sess.run(bucketed_dynamic)
bucketed_values_1 = sess.run(bucketed_dynamic)
# (which_bucket, bucket_tensors).
self.assertEqual(2, len(bucketed_values_0))
self.assertEqual(2, len(bucketed_values_1))
# Count number of bucket_tensors.
self.assertEqual(3, len(bucketed_values_0[1]))
self.assertEqual(3, len(bucketed_values_1[1]))
# Figure out which output has the even values (there's
# randomness due to the multithreaded nature of bucketing)
if bucketed_values_0[0] % 2 == 1:
bucketed_values_even, bucketed_values_odd = (
bucketed_values_1, bucketed_values_0)
else:
bucketed_values_even, bucketed_values_odd = (
bucketed_values_0, bucketed_values_1)
      # Ensure buckets 0 and 1 were used for the even and odd minibatches.
self.assertAllEqual(0, bucketed_values_even[0])
self.assertAllEqual(1, bucketed_values_odd[0])
      # Test the first bucket output, the evens starting at 0
expected_scalar_int = np.arange(0, 32 * 2, 2)
expected_unk_int64 = np.zeros((32, 31 * 2)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2*i] = 2*i
expected_vec3_str = np.vstack(
3 * [np.arange(0, 32 * 2, 2).astype(bytes)]).T
      # Must re-sort the output because num_threads > 1 leads to
# sometimes-inconsistent insertion order.
resort = np.argsort(bucketed_values_even[1][0])
self.assertAllEqual(expected_scalar_int,
bucketed_values_even[1][0][resort])
self.assertAllEqual(expected_unk_int64,
bucketed_values_even[1][1][resort])
self.assertAllEqual(expected_vec3_str,
bucketed_values_even[1][2][resort])
      # Test the second bucket output, the odds starting at 1
expected_scalar_int = np.arange(1, 32 * 2 + 1, 2)
expected_unk_int64 = np.zeros((32, 31 * 2 + 1)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2*i + 1] = 2*i + 1
expected_vec3_str = np.vstack(
3 * [np.arange(1, 32 * 2 + 1, 2).astype(bytes)]).T
      # Must re-sort the output because num_threads > 1 leads to
# sometimes-inconsistent insertion order.
resort = np.argsort(bucketed_values_odd[1][0])
self.assertAllEqual(expected_scalar_int,
bucketed_values_odd[1][0][resort])
self.assertAllEqual(expected_unk_int64,
bucketed_values_odd[1][1][resort])
self.assertAllEqual(expected_vec3_str,
bucketed_values_odd[1][2][resort])
def testEvenOddBucketsFilterOutAllOdd(self):
which_bucket = (self.scalar_int % 2)
keep_input = tf.equal(which_bucket, 0)
bucketed_dynamic = tf.contrib.training.bucket(
tensors=[self.scalar_int, self.unk_int64, self.vec3_str],
which_bucket=which_bucket,
num_buckets=2,
batch_size=32,
num_threads=10,
keep_input=keep_input,
dynamic_pad=True)
# Check shape inference on bucketing outputs
self.assertAllEqual(
[[32], [32, None], [32, 3]],
[out.get_shape().as_list() for out in bucketed_dynamic[1]])
with self.test_session() as sess:
for v in range(128):
self.enqueue_inputs(
sess,
{self.scalar_int_feed: v,
self.unk_int64_feed: v * [v],
self.vec3_str_feed: 3 * [str(v)]})
self.start_queue_runners(sess)
# Get two minibatches ([0, 2, ...] and [64, 66, ...])
bucketed_values_even0 = sess.run(bucketed_dynamic)
bucketed_values_even1 = sess.run(bucketed_dynamic)
# Ensure that bucket 1 was completely filtered out
self.assertAllEqual(0, bucketed_values_even0[0])
self.assertAllEqual(0, bucketed_values_even1[0])
# Merge their output for sorting and comparison
bucketed_values_all_elem0 = np.concatenate(
(bucketed_values_even0[1][0],
bucketed_values_even1[1][0]))
self.assertAllEqual(
np.arange(0, 128, 2), sorted(bucketed_values_all_elem0))
class BucketBySequenceLengthTest(tf.test.TestCase):
def _testBucketBySequenceLength(self, allow_small_batch):
tf.reset_default_graph()
# All inputs must be identical lengths across tuple index.
# The input reader will get input_length from the first tuple
# entry.
data_len = 4
target_len = 3
input_pairs = [
(length,
([np.int64(length)] * data_len,
[str(length).encode("ascii")] * target_len))
for length in (1, 3, 4, 5, 6, 10)]
lengths = tf.placeholder(tf.int32, ())
data = tf.placeholder(tf.int64, (data_len,))
targets = tf.placeholder(tf.string, (target_len,))
batch_size = 8
bucket_boundaries = [3, 4, 5, 10]
# Make capacity very large so we can feed all the inputs in the
# main thread without blocking
input_queue = tf.FIFOQueue(
5000, (tf.int32, tf.int64, tf.string),
((), (data_len,), (target_len,)))
input_enqueue_op = input_queue.enqueue((lengths, data, targets))
lengths_t, data_t, targets_t = input_queue.dequeue()
close_input_op = input_queue.close()
(out_lengths_t, data_and_targets_t) = (
tf.contrib.training.bucket_by_sequence_length(
input_length=lengths_t,
tensors=[data_t, targets_t],
batch_size=batch_size,
bucket_boundaries=bucket_boundaries,
allow_smaller_final_batch=allow_small_batch,
num_threads=10))
expected_batch_size = None if allow_small_batch else batch_size
self.assertEqual(out_lengths_t.get_shape().as_list(),
[expected_batch_size])
self.assertEqual(data_and_targets_t[0].get_shape().as_list(),
[expected_batch_size, data_len])
self.assertEqual(data_and_targets_t[1].get_shape().as_list(),
[expected_batch_size, target_len])
def _read_test(sess):
for _ in range(50):
(out_lengths, (data, targets)) = sess.run(
(out_lengths_t, data_and_targets_t))
if allow_small_batch:
self.assertEqual(data_len, data.shape[1])
self.assertEqual(target_len, targets.shape[1])
self.assertGreaterEqual(batch_size, out_lengths.shape[0])
self.assertGreaterEqual(batch_size, data.shape[0])
self.assertGreaterEqual(batch_size, targets.shape[0])
else:
self.assertEqual((batch_size, data_len), data.shape)
self.assertEqual((batch_size, target_len), targets.shape)
self.assertEqual((batch_size,), out_lengths.shape)
for (lr, dr, tr) in zip(out_lengths, data, targets):
# Make sure length matches data (here it's the same value)
self.assertEqual(dr[0], lr)
# Make sure data & targets match
self.assertEqual(dr[0], int(tr[0].decode("ascii")))
# Make sure for each row, data came from the same bucket.
self.assertEqual(_which_bucket(bucket_boundaries, dr[0]),
_which_bucket(bucket_boundaries, dr[1]))
with self.test_session() as sess:
coord = tf.train.Coordinator()
# Feed the inputs, then close the input thread.
for _ in range(50 * batch_size + 100):
which = random.randint(0, len(input_pairs) - 1)
length, pair = input_pairs[which]
sess.run(input_enqueue_op, feed_dict={
lengths: length, data: pair[0], targets: pair[1]})
sess.run(close_input_op)
# Start the queue runners
threads = tf.train.start_queue_runners(coord=coord)
# Read off the top of the bucket and ensure correctness of output
_read_test(sess)
coord.request_stop()
coord.join(threads)
def testBucketBySequenceLength(self):
self._testBucketBySequenceLength(allow_small_batch=False)
def testBucketBySequenceLengthAllow(self):
self._testBucketBySequenceLength(allow_small_batch=True)
if __name__ == "__main__":
tf.test.main()
```
#### File: python/kernel_tests/conv_ops_3d_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
class Conv3DTest(tf.test.TestCase):
def _VerifyValues(
self, tensor_in_sizes, filter_in_sizes, stride, padding, expected):
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
    # Initializes the input tensor with an array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=True) as sess:
t1 = tf.constant(x1, shape=tensor_in_sizes)
t2 = tf.constant(x2, shape=filter_in_sizes)
conv = tf.nn.conv3d(t1,
t2, [1, stride, stride, stride, 1],
padding=padding)
value = sess.run(conv)
print("expected = ", expected)
print("actual = ", value)
self.assertArrayNear(expected, value.flatten(), 1e-5)
def testConv3D1x1x1Filter(self):
expected_output = [30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0,
138.0, 171.0, 204.0, 174.0, 216.0, 258.0, 210.0, 261.0,
312.0]
# These are equivalent to the Conv2D1x1 case.
self._VerifyValues(tensor_in_sizes=[1, 2, 3, 1, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(tensor_in_sizes=[1, 2, 1, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(tensor_in_sizes=[1, 1, 2, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
# Expected values computed using scipy's correlate function.
def testConv3D2x2x2Filter(self):
expected_output = [19554., 19962., 20370., 22110., 22590., 23070., 34890.,
35730., 36570., 37446., 38358., 39270., 50226., 51498.,
52770., 52782., 54126., 55470.]
# expected_shape = [1, 3, 1, 2, 5]
self._VerifyValues(tensor_in_sizes=[1, 4, 2, 3, 3], # b, z, y, x, fin
filter_in_sizes=[2, 2, 2, 3, 3], # z, y, x, fin, fout
stride=1, padding="VALID",
expected=expected_output)
def testConv3D2x2x2FilterStride2(self):
expected_output = [19554., 19962., 20370., 50226., 51498., 52770.]
self._VerifyValues(tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="VALID",
expected=expected_output)
def testConv3DStride3(self):
expected_output = [
36564., 38022., 39480., 37824., 39354., 40884., 39084., 40686., 42288.,
46644., 48678., 50712., 47904., 50010., 52116., 49164., 51342., 53520.,
107124., 112614., 118104., 108384., 113946., 119508., 109644., 115278.,
120912., 117204., 123270., 129336., 118464., 124602., 130740., 119724.,
125934., 132144.
]
self._VerifyValues(tensor_in_sizes=[1, 6, 7, 8, 2],
filter_in_sizes=[3, 2, 1, 2, 3],
stride=3,
padding="VALID",
expected=expected_output)
def testConv3D2x2x2FilterStride2Same(self):
expected_output = [
19554., 19962., 20370., 10452., 10710., 10968., 50226., 51498., 52770.,
23844., 24534., 25224.
]
self._VerifyValues(tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="SAME",
expected=expected_output)
def testKernelSmallerThanStride(self):
expected_output = [1., 3., 7., 9., 19., 21., 25., 27.]
self._VerifyValues(tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="SAME",
expected=expected_output)
self._VerifyValues(tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="VALID",
expected=expected_output)
expected_output = [1484., 1592., 770.,
2240., 2348., 1106.,
1149., 1191., 539.,
6776., 6884., 3122.,
7532., 7640., 3458.,
3207., 3249., 1421.,
3005., 3035., 1225.,
3215., 3245., 1309.,
1013., 1022., 343.]
self._VerifyValues(tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="SAME",
expected=expected_output)
expected_output = [1484., 1592.,
2240., 2348.,
6776., 6884.,
7532., 7640.]
self._VerifyValues(tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="VALID",
expected=expected_output)
def ConstructAndTestGradient(self, batch, input_planes, input_rows,
input_cols, filter_planes, filter_rows,
filter_cols, in_depth, out_depth, stride,
padding, test_input):
input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
filter_shape = [filter_planes, filter_rows, filter_cols, in_depth,
out_depth]
if padding == "VALID":
output_planes = int(math.ceil((input_planes - filter_planes + 1.0) /
stride))
output_rows = int(math.ceil((input_rows - filter_rows + 1.0) / stride))
output_cols = int(math.ceil((input_cols - filter_cols + 1.0) / stride))
else:
output_planes = int(math.ceil(float(input_planes) / stride))
output_rows = int(math.ceil(float(input_rows) / stride))
output_cols = int(math.ceil(float(input_cols) / stride))
output_shape = [batch, output_planes, output_rows, output_cols, out_depth]
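    # Hedged numeric check of the formulas above (values assumed, not from the
    # original test): with input_rows=5, filter_rows=3, stride=1, "VALID" gives
    # output_rows = ceil((5 - 3 + 1) / 1) = 3, while "SAME" gives
    # output_rows = ceil(5 / 1) = 5.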
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
if tf.test.is_gpu_available():
data_type = tf.float32
if tf.test.is_gpu_available():
tolerance = 4e-3
else:
# As of Aug 2016, higher tolerance is needed for some CPU architectures.
# Runs on a single machine can also generate slightly different errors
# because of multithreading.
tolerance = 8e-3
else:
data_type = tf.float64
tolerance = 1e-8
with self.test_session(use_gpu=True):
input_tensor = tf.constant(input_data,
shape=input_shape,
dtype=data_type,
name="input")
filter_tensor = tf.constant(filter_data,
shape=filter_shape,
dtype=data_type,
name="filter")
conv = tf.nn.conv3d(input_tensor,
filter_tensor, [1, stride, stride, stride, 1],
padding,
name="conv")
if test_input:
err = tf.test.compute_gradient_error(input_tensor, input_shape, conv,
output_shape)
else:
err = tf.test.compute_gradient_error(filter_tensor, filter_shape, conv,
output_shape)
print("conv3d gradient error = ", err)
self.assertLess(err, tolerance)
def testInputGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(batch=2,
input_planes=3,
input_rows=5,
input_cols=4,
filter_planes=3,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(batch=4,
input_planes=4,
input_rows=6,
input_cols=5,
filter_planes=2,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=False)
def testInputGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(batch=2,
input_planes=6,
input_rows=3,
input_cols=5,
filter_planes=3,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(batch=2,
input_planes=7,
input_rows=6,
input_cols=5,
filter_planes=2,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=False)
def testInputGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(batch=2,
input_planes=3,
input_rows=7,
input_cols=6,
filter_planes=3,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(batch=2,
input_planes=4,
input_rows=4,
input_cols=7,
filter_planes=4,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=False)
def testInputGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(batch=2,
input_planes=3,
input_rows=2,
input_cols=2,
filter_planes=3,
filter_rows=2,
filter_cols=1,
in_depth=2,
out_depth=1,
stride=1,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(batch=2,
input_planes=3,
input_rows=6,
input_cols=5,
filter_planes=2,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride=1,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(batch=2,
input_planes=6,
input_rows=3,
input_cols=4,
filter_planes=3,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(batch=4,
input_planes=7,
input_rows=3,
input_cols=5,
filter_planes=2,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(batch=2,
input_planes=9,
input_rows=3,
input_cols=6,
filter_planes=3,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(batch=2,
input_planes=9,
input_rows=4,
input_cols=7,
filter_planes=4,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=False)
if __name__ == "__main__":
tf.test.main()
```
#### File: python/kernel_tests/spacetobatch_op_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class SpaceToBatchTest(tf.test.TestCase):
"""Tests input-output pairs for the SpaceToBatch and BatchToSpace ops."""
def _testPad(self, inputs, paddings, block_size, outputs):
with self.test_session(use_gpu=True):
# outputs = space_to_batch(inputs)
x_tf = tf.space_to_batch(
tf.to_float(inputs), paddings, block_size=block_size)
self.assertAllEqual(x_tf.eval(), outputs)
# inputs = batch_to_space(outputs)
x_tf = tf.batch_to_space(
tf.to_float(outputs), paddings, block_size=block_size)
self.assertAllEqual(x_tf.eval(), inputs)
def _testOne(self, inputs, block_size, outputs):
paddings = np.zeros((2, 2), dtype=np.int32)
self._testPad(inputs, paddings, block_size, outputs)
# [1, 2, 2, 1] <-> [4, 1, 1, 1]
def testSmallInput2x2(self):
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 2
x_out = [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
self._testOne(x_np, block_size, x_out)
# [1, 2, 2, 1] <-> [1, 3, 3, 1] (padding) <-> [9, 1, 1, 1]
def testSmallInput2x2Pad1x0(self):
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.array([[1, 0], [1, 0]], dtype=np.int32)
block_size = 3
x_out = [[[[0]]], [[[0]]], [[[0]]],
[[[0]]], [[[1]]], [[[2]]],
[[[0]]], [[[3]]], [[[4]]]]
self._testPad(x_np, paddings, block_size, x_out)
# Test with depth larger than 1.
# [1, 2, 2, 3] <-> [4, 1, 1, 3]
def testDepthInput2x2(self):
x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
# Test for larger input dimensions.
# [1, 4, 4, 1] <-> [4, 2, 2, 1]
def testLargerInput2x2(self):
x_np = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]],
[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]]
block_size = 2
x_out = [[[[1], [3]], [[9], [11]]],
[[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]],
[[[6], [8]], [[14], [16]]]]
self._testOne(x_np, block_size, x_out)
# Test with batch larger than 1.
# [2, 2, 4, 1] <-> [8, 1, 2, 1]
def testBatchInput2x2(self):
x_np = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]]],
[[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]]
block_size = 2
x_out = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input spatial dimensions AND batch larger than 1, to ensure
# that elements are correctly laid out spatially and properly interleaved
# along the batch dimension.
# [2, 4, 4, 1] <-> [8, 2, 2, 1]
def testLargerInputBatch2x2(self):
x_np = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]],
[[9], [10], [11], [12]],
[[13], [14], [15], [16]]],
[[[17], [18], [19], [20]],
[[21], [22], [23], [24]],
[[25], [26], [27], [28]],
[[29], [30], [31], [32]]]]
x_out = [[[[1], [3]], [[9], [11]]],
[[[17], [19]], [[25], [27]]],
[[[2], [4]], [[10], [12]]],
[[[18], [20]], [[26], [28]]],
[[[5], [7]], [[13], [15]]],
[[[21], [23]], [[29], [31]]],
[[[6], [8]], [[14], [16]]],
[[[22], [24]], [[30], [32]]]]
block_size = 2
self._testOne(x_np, block_size, x_out)
class SpaceToBatchSpaceToDepth(tf.test.TestCase):
# Verifies that: space_to_batch(x) = transpose(space_to_depth(transpose(x)))
def testSpaceToDepthTranspose(self):
x = np.arange(5 * 10 * 16 * 7, dtype=np.float32).reshape([5, 10, 16, 7])
block_size = 2
paddings = np.zeros((2, 2), dtype=np.int32)
y1 = tf.space_to_batch(x, paddings, block_size=block_size)
y2 = tf.transpose(
tf.space_to_depth(
tf.transpose(x, [3, 1, 2, 0]), block_size=block_size),
[3, 1, 2, 0])
with self.test_session(use_gpu=True):
self.assertAllEqual(y1.eval(), y2.eval())
class SpaceToBatchErrorHandlingTest(tf.test.TestCase):
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
x_np = [[[1], [2]], [[3], [4]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 2
with self.assertRaises(ValueError):
_ = tf.space_to_batch(x_np, paddings, block_size)
def testBlockSize0(self):
# The block size is 0.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 0
with self.assertRaises(ValueError):
out_tf = tf.space_to_batch(x_np, paddings, block_size)
out_tf.eval()
def testBlockSizeOne(self):
# The block size is 1. The block size needs to be > 1.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 1
with self.assertRaises(ValueError):
out_tf = tf.space_to_batch(x_np, paddings, block_size)
out_tf.eval()
def testBlockSizeLarger(self):
# The block size is too large for this input.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 10
with self.assertRaises(ValueError):
out_tf = tf.space_to_batch(x_np, paddings, block_size)
out_tf.eval()
def testBlockSizeNotDivisibleWidth(self):
# The block size divides width but not height.
x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = tf.space_to_batch(x_np, paddings, block_size)
def testBlockSizeNotDivisibleHeight(self):
# The block size divides height but not width.
x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = tf.space_to_batch(x_np, paddings, block_size)
def testBlockSizeNotDivisibleBoth(self):
    # The block size divides neither width nor height.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = tf.space_to_batch(x_np, paddings, block_size)
def testUnknownShape(self):
t = tf.space_to_batch(tf.placeholder(tf.float32), tf.placeholder(tf.int32),
block_size=4)
self.assertEqual(4, t.get_shape().ndims)
class SpaceToBatchGradientTest(tf.test.TestCase):
# Check the gradients.
def _checkGrad(self, x, paddings, block_size):
assert 4 == x.ndim
with self.test_session(use_gpu=True):
tf_x = tf.convert_to_tensor(x)
tf_y = tf.space_to_batch(tf_x, paddings, block_size)
epsilon = 1e-5
((x_jacob_t, x_jacob_n)) = tf.test.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
# Tests a gradient for space_to_batch of x which is a four dimensional
# tensor of shape [b, h * block_size, w * block_size, d].
def _compare(self, b, h, w, d, block_size, pad_beg, pad_end):
block_size_sq = block_size * block_size
x = np.random.normal(
0, 1, b * h * w * d * block_size_sq).astype(np.float32).reshape(
[b, h * block_size, w * block_size, d])
paddings = np.array([[pad_beg, pad_end], [pad_beg, pad_end]],
dtype=np.int32)
self._checkGrad(x, paddings, block_size)
  # Don't use very large numbers as dimensions here, as the result is a tensor
  # with the Cartesian product of the dimensions.
def testSmall(self):
block_size = 2
pad_beg = 0
pad_end = 0
self._compare(1, 2, 3, 5, block_size, pad_beg, pad_end)
def testSmall2(self):
block_size = 2
pad_beg = 0
pad_end = 0
self._compare(2, 4, 3, 2, block_size, pad_beg, pad_end)
def testSmallPad1x1(self):
block_size = 2
pad_beg = 1
pad_end = 1
self._compare(1, 2, 3, 5, block_size, pad_beg, pad_end)
if __name__ == "__main__":
tf.test.main()
```
#### File: python/platform/gfile.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.lib.io import file_io
class GFile(file_io.FileIO):
"""File I/O wrappers with thread locking."""
def __init__(self, name, mode='r'):
mode = mode.replace('b', '')
super(GFile, self).__init__(name=name, mode=mode)
class FastGFile(file_io.FileIO):
"""File I/O wrappers without thread locking."""
def __init__(self, name, mode='r'):
mode = mode.replace('b', '')
super(FastGFile, self).__init__(name=name, mode=mode)
# This should be kept consistent with the OSS implementation
# of the gfile interface.
# FileIO is not aliased to Open directly, so that Open uses our GFile wrapper,
# which strips the 'b' mode flag.
Open = GFile
# pylint: disable=invalid-name
Exists = file_io.file_exists
IsDirectory = file_io.is_directory
Glob = file_io.get_matching_files
MkDir = file_io.create_dir
MakeDirs = file_io.recursive_create_dir
Remove = file_io.delete_file
DeleteRecursively = file_io.delete_recursively
ListDirectory = file_io.list_directory
Walk = file_io.walk
Stat = file_io.stat
Rename = file_io.rename
Copy = file_io.copy
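# Hedged usage sketch (the path is made up; write/close and the module-level
# helpers above are assumed to behave like their file_io counterparts):
#   f = GFile('/tmp/example.txt', mode='w')
#   f.write('hello')
#   f.close()
#   if Exists('/tmp/example.txt'):
#       Remove('/tmp/example.txt')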
```
|
{
"source": "jdeines/gee_tools",
"score": 3
}
|
#### File: gee_tools/datasources/nonoptical_datasources.py
```python
import ee
from gee_tools.datasources.interface import MultiImageDatasource, GlobalImageDatasource, SingleImageDatasource, DatasourceError
class NightlightDatasource(GlobalImageDatasource):
"""Abstract class for nightlights datasources"""
def init_coll(self, name):
return ee.ImageCollection(name) \
.filterDate(self.start_date, self.end_date) \
.map(self.nl_rename) \
.sort('system:time_start')
@staticmethod
def nl_rename(scene):
"""scene.select([0], ['NIGHTLIGHTS'])"""
return scene.select([0], ['NIGHTLIGHTS'])
class DMSPUncal(NightlightDatasource):
"""
Uncalibrated DMSP nightlights
Data in property dmsp
"""
def build_img_coll(self):
self.dmsp = self.init_coll("NOAA/DMSP-OLS/NIGHTTIME_LIGHTS")
def get_img_coll(self):
return self.dmsp
class DMSPCalV4(NightlightDatasource):
"""
Calibrated DMSP nightlights
Data in property dmsp
"""
def build_img_coll(self):
self.dmsp = self.init_coll("NOAA/DMSP-OLS/CALIBRATED_LIGHTS_V4")
def get_img_coll(self):
return self.dmsp
class VIIRSMonthlyStrCorr(NightlightDatasource):
"""
Calibrated VIIRS nightlights
Data in property viirs
"""
def build_img_coll(self):
self.viirs = self.init_coll("NOAA/VIIRS/DNB/MONTHLY_V1/VCMSLCFG")
def get_img_coll(self):
return self.viirs
class DMSPCalVIIRSJoined(NightlightDatasource):
"""
Returns the VIIRS image collection 'NOAA/VIIRS/DNB/MONTHLY_V1/VCMSLCFG'
    if the requested date range either starts in or after 2014 or contains the start of 2014.
2014 is the starting year for the VIIRS image collection listed above.
Otherwise returns the calibrated DMSP image collection "NOAA/DMSP-OLS/CALIBRATED_LIGHTS_V4"
"""
def build_img_coll(self):
viirs = self.init_coll("NOAA/VIIRS/DNB/MONTHLY_V1/VCMSLCFG")
dmsp = self.init_coll("NOAA/DMSP-OLS/CALIBRATED_LIGHTS_V4")
        # Careful not to trigger a round trip to the server here
requested_daterange = ee.DateRange(self.start_date, self.end_date)
viirs_start = ee.Date('2014-1-1')
use_viirs = requested_daterange.contains(viirs_start)
start_year = ee.Number(ee.Date(self.start_date).get('year'))
viirs_start_year = ee.Number(viirs_start.get('year'))
use_viirs2 = start_year.gte(viirs_start_year)
self.nl = ee.Algorithms.If(use_viirs, viirs, dmsp)
self.nl = ee.Algorithms.If(use_viirs2, viirs, self.nl)
self.nl = ee.ImageCollection(self.nl)
def get_img_coll(self):
# TODO: not sure why this is helpful. All it's doing is returning a class method (George).
return self.nl
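# Hedged usage sketch (dates are made up; the (start_date, end_date) constructor
# and the call order come from the GlobalImageDatasource interface, which is
# defined elsewhere and assumed here):
#   nl_source = DMSPCalVIIRSJoined('2013-01-01', '2015-12-31')
#   coll = nl_source.get_img_coll()  # resolves to VIIRS because the range contains 2014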
class SRTMElevation(SingleImageDatasource):
"""
Note: Near global
"""
def build_img_coll(self):
topo = ee.Image("USGS/SRTMGL1_003")
band_names = ['ELEV', 'SLO', 'ASP']
self.topo = ee.Algorithms.Terrain(topo).select(['elevation', 'slope', 'aspect'], band_names)
self.coll = ee.ImageCollection([self.topo])
def get_img_coll(self):
return self.coll
class Palsar(GlobalImageDatasource):
"""
Image Properties
HH HH polarization backscattering coefficient, 16-bit DN.
HV HV polarization backscattering coefficient, 16-bit DN.
angle Local incidence angle (degrees).
date Observation date (days since Jan 1, 1970).
qa Processing information.
Renamed to ["HH", "HV", "ANGLE", "DATE", "QA"]
"""
def build_img_coll(self):
self.palsar = ee.ImageCollection("JAXA/ALOS/PALSAR/YEARLY/SAR") \
.filterDate(self.start_date, self.end_date) \
.map(self.rename_pulsar) \
.map(self.mask_qa) \
.sort('system:time_start')
def get_img_coll(self):
return self.palsar
@staticmethod
def rename_pulsar(scene):
"""
Image Properties
HH HH polarization backscattering coefficient, 16-bit DN.
HV HV polarization backscattering coefficient, 16-bit DN.
angle Local incidence angle (degrees).
date Observation date (days since Jan 1, 1970).
qa Processing information.
"""
band_names = ["HH", "HV", "ANGLE", "DATE", "QA"]
return scene.select(range(len(band_names)), band_names)
@staticmethod
def decode_qa(scene):
"""
Value Color Description
0 000000 No data
50 0000FF Ocean and water
100 AAAA00 Radar layover
150 005555 Radar shadowing
255 AA9988 Land
"""
qa = scene.select(["QA"])
nodata = qa.eq(0)
nodata = nodata.updateMask(nodata).rename(["pxqa_nodata"])
hasdata = qa.neq(0)
hasdata = hasdata.updateMask(hasdata).rename(["pxqa_hasdata"])
water = qa.eq(50)
water = water.updateMask(water).rename(["pxqa_water"])
radar_layover = qa.eq(100)
radar_layover = radar_layover.updateMask(radar_layover).rename(["pxqa_radar_layover"])
radar_shad = qa.eq(150)
radar_shad = radar_shad.updateMask(radar_shad).rename(["pxqa_radar_shad"])
land = qa.eq(255)
land = land.updateMask(land).rename(["pxqa_land"])
masks = ee.Image.cat([nodata, hasdata, water, radar_layover, radar_shad, land])
return masks
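    # Hedged usage sketch of the masks above (band names mirror decode_qa):
    #   masks = Palsar.decode_qa(scene)
    #   land_only = scene.updateMask(masks.select(['pxqa_land']))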
def mask_qa(self, scene):
masks = self.decode_qa(scene)
scene.updateMask(masks.select(["pxqa_hasdata"]))
return scene
```
#### File: gee_tools/gee_tools/export_tables.py
```python
import ee
def export_features(features, fname, export_to='drive'):
if export_to == 'drive':
task = ee.batch.Export.table.toDrive(features, fname, '')
else:
task = ee.batch.Export.table.toCloudStorage(features,
description=fname,
bucket='us-cdl-samples',
fileNamePrefix=fname,
fileFormat=None)
task.start()
return task
def get_point(image, point, scale, depth):
s = image.reduceRegion(
reducer=ee.Reducer.first(),
geometry=point.geometry(),
scale=scale,
bestEffort=False,
maxPixels=depth,
tileScale=16)
return ee.Feature(ee.Feature(None, s).copyProperties(image).copyProperties(point)).set({
'PTLON': ee.Number(ee.List(point.geometry().coordinates()).get(0)),
'PTLAT': ee.Number(ee.List(point.geometry().coordinates()).get(1)),
'OBSDATE': ee.Date(image.get('system:time_start')).format(),
'MSTIME': image.get('system:time_start')
})
def reducegrid_image(image, grid, scale):
depth = image.bandNames().length()
samples = grid.map(lambda point: get_point(image, point, scale, depth))
return samples
def reducegrid_imgcoll(imagecoll, grid, scale, control, doexport, fname):
samples = imagecoll.map(
lambda image: reducegrid_image(image, grid, scale)
).flatten()
samples = samples.filter(ee.Filter.neq(control, None))
if doexport:
t = export_features(samples, fname, export_to='drive')
return {'samples':samples, 'task':t}
else:
return samples
def sampleregions_auto_image(image, regions, scale, controlvar, doexport, fname):
samples = image.sampleRegions(
collection=regions,
properties=None,
scale=scale,
projection=None,
tileScale=16)
samples = samples.filter(ee.Filter.neq(controlvar, None))
if doexport:
task = export_features(samples, fname, export_to='drive')
return {'samples': samples, 'task': task}
else:
return samples
def sampleregion_image(image, region, scale, npx):
samples = image.sample(
region=region.geometry(),
scale=scale,
projection=None,
factor=None,
numPixels=npx,
seed=12345,
dropNulls=True,
tileScale=16)
samples = samples.map(lambda p: ee.Feature(p.copyProperties(region).copyProperties(image)))
return samples
def sampleregions_image(image, regions, scale, npx, doexport, fname):
samples = regions.map(lambda region: sampleregion_image(image, region, scale, npx)).flatten()
if doexport:
task = export_features(samples, fname, export_to='drive')
return {'samples': samples, 'task': task}
else:
return samples
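# Hedged usage sketch (the asset ID and scale are made up; the image must carry
# a 'system:time_start' property for get_point to build OBSDATE):
#   grid = ee.FeatureCollection('users/someone/sample_points')   # hypothetical asset
#   img = ee.Image(ee.ImageCollection('LANDSAT/LC08/C01/T1_SR').first())
#   samples = reducegrid_image(img, grid, scale=30)
#   task = export_features(samples, 'landsat_samples', export_to='drive')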
```
|
{
"source": "jdeinum/servus",
"score": 3
}
|
#### File: servus/src/system.py
```python
from abc import abstractmethod
import numpy as np
import requests
import sqlite3
from urllib.request import urlopen
from io import StringIO, BytesIO
import csv
import pandas as pd
import math
from website import *
from datetime import date
import os
import zipfile
##############################################################################
# CONSTANTS / GLOBALS
ROW_ORIENTED = 0
DATE_LAST_PARSED = 1
CRAWL_FREQUENCY = 2
UPDATE_TYPE = 3
INDUSTRY_CLASS = 4
ZIPPED = 5
FILE_TYPE = 6
SHEET_NUMBER = 7
TABLE_NAME = 8
TABLE_DESCRIPTION = 9
LABELS = 10
CONTENT = 11
URL = 12
RELATIVE = 13
CUT = 14
max_row_id = None # used for caching
##############################################################################
# CLASSES
'''
represents any way to get a location in a file
we use 2 locations (start and end) for each field (title, desc, content, etc)
to mark the start and end of that field in the data
'''
class Location():
data = None
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
'''
describe how to get the index within the data file
'''
@abstractmethod
def get_index(self):
pass
'''
represents a location within a file identified by a row number
'''
class RowNumber(Location):
number = None
sheet = 0
def __init__(self, number, data):
super().__init__(data)
self.number = number
def get_index(self):
return self.number
'''
represents a location within a file identified by a tag and offset
i.e. finding a row by matching a string in the first column, after which you
can seek relative to that location
ex) "NAME"+-1 would find the row whose first cell contains only NAME and
go up one row. The + is needed as a separator, so the user must enter
+0 if they want the exact row
'''
class TagAndOffset(Location):
tag = None
offset = None
sheet = 0
def __init__(self, tag, offset, data):
super().__init__(data)
self.tag = tag
self.offset = offset
def get_index(self):
for sheet in self.get_data():
for index, row in enumerate(sheet):
if len(row) > 0 and row[0]== self.tag:
return index + self.offset + 1
return -1
##############################################################################
# FUNCTIONS
'''
Returns a list of URLs to parse information from
Unless a wildcard is used, there will only be one url to scan per entry
If a wildcard is used, it finds each valid URL to scrape from
'''
def get_urls_list(base_url, row):
base_url = row[URL]
url_list = []
# there is wildcard in the address, we must scrape the website for files
if "/*" in base_url:
# ensure the user specified a web address type
try:
relative = row[RELATIVE]
if relative != "R" and relative != "A":
raise Exception
except:
print("RELATIVE must be 'A' or 'R',please read the description "
"of this field")
print(base_url)
exit(1)
# ensure the user provided a file type to scrape for
try:
search = row[FILE_TYPE]
if search != "csv" and search != "xlsx":
raise Exception
except:
print("only xlsx and csv files are supported for the FILE-TYPE")
exit(1)
        # strip the "/*" wildcard from the base url
base_url = base_url.replace("/*", "")
# create our website parser
website = Website("", base_url, relative)
crawler = Crawler(website, search, 'Return')
# tell the crawler we want to scrape all sites in the base url
# appended with '' (empty)
crawler.get('')
url_list = crawler.get_urls()
# adjust the urls if the website uses relative addresses
if relative == "R":
url_list = [base_url + x for x in url_list]
# does not use wildcards, therefore there is a single url in this field
else:
url_list.append(base_url)
return url_list
'''
data will be a 3D list, a list of sheets, where each sheet contains a
list of rows
csv files don't have sheets, but have been made compatible with this function
currently it could be a 2D list, but I left it this way in case servus wants to
be able to grab fields from multiple sheets, i.e. title from sheet 1, description
from sheet 2, etc ...
'''
def get_data(start, end, data, number=0):
# index is -1 in the case where some location in a file cannot be found
# returning None will signal the program to insert Null
if start < 0 or end < 0:
return None
return data[number][start:end]
'''
find the required data that was specified in the mapping file
'''
def find_data(data, string, sheet):
    # ranges are separated by ":"
split = string.split(":")
    # each entry should have at least one location on the left, i.e. the start
start = get_location_object(split[0], data, sheet)
# if start is a string, the user entered a string in this field
# we must return it as a 2D list to ensure it remains compatible
# with the rest of the program.
if type(start) == str:
first = []
second = []
second.append(start)
first.append(second)
return first
# not a string
# and no range was specified, which means the user provided a single row
# just return the row of data in the file
if len(split) == 1:
return get_data(start.get_index() - 1, start.get_index(), data, sheet)
# a range was specified, so we grab the end location
end = get_location_object(split[1], data, sheet)
# return the data between the start and end locations
try:
return get_data(start.get_index() - 1, end.get_index(), data, sheet)
except Exception:
print("A row number must be specified using a number or 'END'")
exit(1)
'''
finds and returns the type of location object by parsing 'string'
the program will take a look at the string passed and return either:
a) a row number object
b) a tag and offset object
'''
def get_location_object(string, data, sheet):
location = None
    # if there is a + in the string, it is a tag-and-offset location
if "+" in string:
split= string.split("+")
tag = split[0]
offset = int(split[1])
location = TagAndOffset(tag, offset, data)
# "END" means to the end of the sheet
elif string == "END":
location = RowNumber(len(data[sheet]), data)
else:
# is it a number or a string?
# if it throws an exception it is a string, just return it
# otherwise make a location object with the row number and return it
try:
number = int(string)
except:
return string
else:
location = RowNumber(number, data)
if location:
return location
else:
print("Error parsing for location object!")
exit(1)
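# Example (illustrative of the forms accepted above):
#   get_location_object("12", data, 0)        -> RowNumber(12, data)
#   get_location_object("END", data, 0)       -> RowNumber(len(data[0]), data)
#   get_location_object("TOTAL+-2", data, 0)  -> TagAndOffset("TOTAL", -2, data)
#   get_location_object("Some text", data, 0) -> "Some text" (a literal string)
# find_data() combines two of these around ":" to express a range,
# e.g. "TOTAL+1:END" or "3:10".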
'''
Processes a single line of our mapping file
'''
def process_entry(row_passed, curs, entry_type):
# we need the URL to get the data from the web
try:
base_url = row_passed[URL]
except:
print("No URL was specified, or the mapping file was not formatted"
" properly")
exit(1)
# get the urls to iterate over
url_list = get_urls_list(base_url, row_passed)
for url in url_list:
# data is a list of rows (which are also lists)
data = None
# unzip the data if needed, and change the filename
zipped = row_passed[ZIPPED].split(":")
if zipped[0] == "yes":
filename = zipped[1]
byte_data = unzip_and_extract(url, filename)
if "csv" in filename:
data = parse_csv(url, curs, byte_data)
elif "xlsx" in filename:
data = parse_xlsx(url, curs, byte_data)
# file is a csv
elif "csv" in url:
data = parse_csv(url, curs)
# file is an xlsx
elif "xlsx" in url:
data = parse_xlsx(url, curs)
else:
print("Unsupported data type")
print("url: {}".format(url))
exit(1)
# get the orientation of the file
# column oriented, need to transpose each of the sheets
row_type = row_passed[ROW_ORIENTED]
if row_type == "C":
for index, lst in enumerate(data):
data[index] = columm_to_row(lst)
# get the sheet number from the mapping file
# we subtract 1 because excel and similar applications number sheets
# from 1, but python begins at index 0
try:
sheet_number = int(row_passed[SHEET_NUMBER]) - 1
except:
sheet_number = 0
# get all of the data we want our DB entry to have
title = construct_string(find_data(data, row_passed[TABLE_NAME],
sheet_number))
description = construct_string(find_data(data,
row_passed[TABLE_DESCRIPTION], sheet_number))
frequency = row_passed[CRAWL_FREQUENCY]
keywords = row_passed[INDUSTRY_CLASS].split(":")
labels = label_concat(find_data(data, row_passed[LABELS], sheet_number))
content = find_data(data, row_passed[CONTENT], sheet_number)
crawl_frequency = row_passed[CRAWL_FREQUENCY]
# enter data into our DB
enter_data(title, description, frequency, keywords, labels, content,
curs, url,crawl_frequency,",".join(row_passed),
row_passed[UPDATE_TYPE])
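# Note on the mapping file (illustrative, based on the parsing above in
# process_entry): the ZIPPED field is either something other than "yes" for a
# plain file, or "yes:<name of the file inside the archive>" for a zip, e.g.
#   yes:quarterly_report.csv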
'''
constructs a string from a 2D list
we use this for our title and description so each of these fields can
span multiple rows
'''
def construct_string(data):
string = ""
for first in data:
for second in first:
string += second + " "
return string
'''
write the table to file to allow a user to look at the table without
needing to find it again
res_bytes is the byte representation of the file
we require the url to know what to name the table
'''
def write_to_file(res_bytes, url, curs):
# we use the rowid in the tables table as the name for our file
name = get_rowid(url, curs)
# get the file extension
file_type = None
if "xlsx" in url:
file_type = "xlsx"
elif "csv" in url:
file_type = "csv"
if not file_type:
raise Exception("Unsupported file type!")
# write to file
string = "tables/" + str(name) + "." + file_type
f = open(string, "wb")
f.write(res_bytes)
f.close()
'''
parses an xlsx file and returns a list of lists of rows (also lists)
'''
def parse_xlsx(url, curs, bytes_data=None):
# bytes data gets passed if the file was zipped
    # in that case we do not need to request it again
if not bytes_data:
# request and get the data
res = requests.get(url)
res_byte = BytesIO(res.content).read()
# we passed the bytes in because the file was zipped
# use these instead
else:
res_byte = bytes_data
# write the data to file
write_to_file(res_byte, url, curs)
# read the excel file
values = pd.read_excel(res_byte, usecols=None, header=None,
sheet_name=None)
clean_combined = []
for key in values.keys():
clean = []
val = values[key].values[:]
# replace all of the NaN in this file with ''
# it is important that we replace them and not remove them
# if we remove them, we will likely get errors when entering data into
# the database since it may remove an empty field (order is important)
for x in val:
new_row = []
for y in x:
if type(y) == float and math.isnan(y) is True:
y = ''
new_row.append(y)
clean.append(new_row)
clean_combined.append(clean)
    # as with CSVs, we return a list containing a list of rows
return clean_combined
'''
Parses a csv file and returns a list of lists of rows (also lists)
'''
def parse_csv(url, curs, bytes_data=None):
# bytes data is passed if the file was zipped
if not bytes_data:
# read in the entire csv file
data = urlopen(url).read()
res_bytes = BytesIO(data).read()
else:
res_bytes = bytes_data
data = res_bytes
# write the bytes to file
write_to_file(res_bytes, url, curs,)
# decode the bytes so csv can interpret them
data_file = StringIO(data.decode('ascii', 'ignore'))
csvReader = csv.reader(data_file)
rows = []
# we return a list containing a list of rows, for future support
# of choosing specific sheets for each field in xlsx files
for row in csvReader:
rows.append(row)
second = []
second.append(rows)
return second
'''
update data in our tables metadata table
this function is used for both 'ADD' and 'REPLACE' update types
'''
def update_tables(title, description, update_frequency,
curs, url, crawl_freq, row_passed):
row_passed_split = row_passed.split(",")
row_passed_split[DATE_LAST_PARSED] = date.today().strftime("%d/%m/%Y")
row_passed = ",".join(row_passed_split)
# first enter the metadata details into our metadata tables
curs.execute("INSERT INTO tables VALUES (NULL,\"{}\", \"{}\", \"{}\", \"{}\", \"{}\",\"{}\") ON CONFLICT DO UPDATE SET \
title=excluded.title,description=excluded.description,update_frequency=excluded.update_frequency, \
crawl_frequency=excluded.crawl_frequency,row=excluded.row".
format(title,
description,
update_frequency,
crawl_freq,
url,
row_passed))
# get the table id of the table if it already exists
curs.execute("SELECT rowid FROM tables WHERE URL = \"{}\";".
format(url))
curs.execute("SELECT rowid from tables WHERE URL = \"{}\";"
.format(url))
table_id = curs.fetchone()[0]
return table_id
'''
updates the data in the cells table for tables that use the REPLACE update type
we use REPLACE when rows are appended and the old data remains in the file. In
a sense we erase all of the existing data in the database, and then add all of
the new data. The reason we replace all of the data instead of just inserting
new rows is in case changes were made to the existing data. If the existing data
will not change (i.e. rows are only appended), use 'ADD' instead; it will be much
faster
'''
def update_cells_replace(table_id, content, labels, curs):
# first delete all of the existing cells
curs.execute("DELETE FROM cells WHERE table_id = {}".format(table_id))
# enter the cell data into the cells table
# start by iterating over each row of the data
row_id = 0
for row in content:
# skip rows if they are missing values
# we are not skipping any rows which contain data, since these are
# guaranteed to have the same number of elements as the labels
if len(row) < len(labels):
continue
# iterate over each value in the row
for col_id,cell in enumerate(row):
# don't enter any empty elements
if cell == '':
continue
curs.execute("INSERT INTO cells VALUES ({}, {}, {}, ?) ON CONFLICT DO UPDATE SET \
row_id=excluded.row_id, col_id=excluded.col_id,cell=excluded.cell;"
.format(table_id, row_id, col_id), (cell,))
row_id += 1
'''
Updates the cells for tables which append new data to the existing data
We do not want to delete all of the previous data, and instead only want to add
the new rows to the existing data
'''
def update_cells_add(table_id, content, curs):
# get the rowid
curs.execute("SELECT max(row_id) FROM cells WHERE table_id = {}".format(
table_id))
row_id = curs.fetchone()[0]
# rowid will be None if the table is empty
    # we don't want any existing row to match in that case
if not row_id:
max_index = -1
row_id = 0
else:
# get all of the rows associated with this table
curs.execute("SELECT * FROM cells WHERE table_id = {} and row_id = {}".format(
table_id,row_id))
# reconstruct the last row for matching purposes
last_row = curs.fetchall()
con_row = [x[3] for x in last_row]
        # max_index marks the last row already present in the database;
        # since we add 1 below, if we don't get a match we add the whole
        # table
max_index = -1
# find the index of the last matched row in the data
for i in range(len(content) - 1 , 0, -1):
if content[i] == con_row:
max_index = i
# add any new rows to the data
for row in content[max_index + 1:]:
for index, val in enumerate(row):
curs.execute("INSERT INTO cells VALUES ( {}, {}, {}, ?)".format(
table_id,row_id + 1,index),(val,))
row_id += 1
'''
updates the columns table
'''
def update_columns(table_id, labels, curs):
# first delete any entries from the column table for the current table
curs.execute("DELETE FROM columns WHERE table_id = {}".
format(table_id))
    # find the last non-empty label, and delete everything after it
max_index = 0
for index, x in enumerate(labels):
if x != '':
max_index = index
labels = labels[:max_index + 1]
# handle empty labels, cannot have duplicate columns
unknown = 0
for index, x in enumerate(labels):
if x == '':
labels[index] = "unknown_column" + str(unknown)
unknown += 1
    # get rid of newlines in our labels
labels = [x.replace("\n", "") for x in labels]
# enter the column data into the columns table
column_id = 0
for label in labels:
curs.execute("INSERT INTO columns VALUES ({},{},?) ON CONFLICT DO UPDATE SET \
column_id=excluded.column_id,header=excluded.header".
format(table_id, column_id),(label,))
column_id += 1
return labels
'''
update the keywords table
'''
def update_keywords(table_id, keywords, curs):
for keyword in keywords:
curs.execute("INSERT INTO keywords VALUES ({}, \"{}\") ON CONFLICT DO NOTHING;".
format(table_id, keyword))
'''
enters the acquired data into our SQLite DB
'''
def enter_data(title, description, update_frequency, keywords, labels,
content, curs, url, crawl_freq,row_passed, update_type):
# insert / update our data
table_id = update_tables(title, description,update_frequency,
curs, url, crawl_freq, row_passed)
labels = update_columns(table_id, labels, curs)
update_keywords(table_id, keywords, curs)
# ensure we use the correct update type
if update_type == "REPLACE":
update_cells_replace(table_id, content, labels, curs)
elif update_type == "ADD":
update_cells_add(table_id, content, curs)
'''
updates the creation date for an entry when create.py is called
'''
def update_parse(filename):
# read in the current file
fin = open(filename)
data = fin.readlines()
new_data = []
for line in data:
# comments can just be added
if line[0] == "#":
new_data.append(line)
continue
split = line.split(",")
# lines that don't contain entries can just be added to the new data
if len(split) < 11:
new_data.append(line)
continue
        # replace the date with today's date for any entry
else:
split[1] = date.today().strftime("%d/%m/%Y")
            # we prepend a # to comment out the entry, because we don't want
            # to use that entry each time we call create.py
line = '#' + ",".join(split)
new_data.append(line)
fin.close()
# reopen the file in write mode
# and write the new data
fout = open(filename, "w")
for line in new_data:
fout.write(line)
fout.close()
'''
gets the rowid of the row containing the specified URL
this is used when writing the file to storage, since we use the rowid as
the file name when saving it
NOTE: This function will fail if the number of entries exceeds 2^32 - 1 , since
sqlite will randomly try different numbers until it finds one it can use.
'''
def get_rowid(url, curs):
global max_row_id
# if this query succeeds, then the url already has an entry
# we use that for the filename
curs.execute("SELECT rowid FROM tables WHERE url = \"{}\"".
format(url))
number = curs.fetchone()
# this url is already in the database, get the rowid
if number and len(number) > 0:
return int(number[0])
# this url is not in our database, get the next table number and use it
else:
# avoid queries if we can
        # max_row_id is the locally cached value of the most recently assigned rowid
if max_row_id:
max_row_id += 1
return max_row_id
curs.execute("SELECT max(rowid) FROM tables")
max_id = curs.fetchone()[0]
# max_id will be None if it is a new table
# sqlite defaults rowid to 1 in this case
if not max_id:
max_row_id = 1
return 1
# use the next row number as the id
# and save the rowid to use for later
else:
max_id = int(max_id)
max_row_id = max_id + 1
return max_id + 1
'''
updates all data in our database with a specific crawl frequency
this function is called from update.py , we could probably move this function
to that file and just import system.py
'''
def update(frequency):
# open a new connection to the database
conn = sqlite3.connect("MASTER.sqlite")
curs = conn.cursor()
# fails if the user tries to update the data before the tables are even
# created, in that case, they need to run create.py before running update
try:
curs.execute("SELECT row FROM tables WHERE crawl_frequency = '{}';".format(frequency))
except:
print("Please run: python3 create.py before trying to update!")
exit(1)
result = curs.fetchall()
# unpack the tuples
result = [x[0] for x in result]
# remove duplicates, this is more of a failsafe
result = list(set(result))
# for each entry in the database that matches the update frequency,
# update it
for row in result:
values = row.split(",")
process_entry(values, curs, "update")
conn.commit()
curs.close()
conn.close()
'''
deletes all entries associated with a URL
WARNING: This will delete a file with the same name as the table with any
file extension
'''
def delete_entry(url, curs):
# get the row numbers of the url we are removing
# so we can delete the files from the tables/ directory
curs.execute("SELECT rowid FROM tables WHERE url LIKE \"{}%\"".format(url))
tables = curs.fetchall()
# delete the files off their local system
    # note: os.remove() cannot expand the ".*" wildcard, so glob for the file
    for table in tables:
        table = table[0]
        for path in glob.glob("tables/" + str(table) + ".*"):
            os.remove(path)
# wildcard in delete statement, delete any entries matching the wildcard
if "/*" in url:
url = url.replace("/*", "").strip()
curs.execute("DELETE FROM tables WHERE URL LIKE \"{}%\"".format(url))
return
curs.execute("DELETE FROM tables WHERE URL = \"{}\"".format(url))
return
'''
given n rows with m columns, it will return a single row with m columns,
where each column is a '-' separated join of that column's values
i.e.
1 2 3 BLANK 5 6 7
a b c d e f g
will return:
1-a 2-b 3-c 3-d 5-e 6-f 7-g
'''
def label_concat(rows):
cleaned_rows = []
# empty row, panic!
if len(rows) == 0:
print("We got an empty concat!")
exit(1)
max_row_length = 1
# fill in any blanks, start by using the same row
    # if column 2 is empty we use the value in column 1, if column 4 is empty we use column 3, ...
# if the first column is empty ... then we stick our head in the sand
for row in rows:
# replace the blanks
for index, val in enumerate(row):
# convert any dates or numbers to strings to allow concatenation
if type(val) != str:
row[index] = str(val)
# we also want to find the last column with a non empty value,
# since we will truncate each row to this length to avoid having
            # the database cluttered with empty columns
if index > max_row_length and val != '':
max_row_length = index + 1
            # value is empty, replace it with the previous value
if val == '' and index > 0:
row[index] = row[index - 1]
max_row_length += 1
# truncate the data
for row in rows:
row = row[0:max_row_length]
cleaned_rows.append(row)
# now we join all the columns together in one list (1 list per column)
    # shouldn't have too large of a performance penalty due to caching
out_list = [[] for i in range(max_row_length)]
for row in cleaned_rows:
for index,val in enumerate(row):
out_list[index].append(val)
# join all of the values in each list with a dash ( - )
for i in range(len(out_list)):
out_list[i] = "-".join(out_list[i])
return out_list
'''
converts column oriented data into row data
as long as the user specifies all of the locations using column numbers instead
of row numbers, simply taking the transpose should work.
Not tested; didn't find any purely column oriented data
'''
def columm_to_row(columns):
rows = np.transpose(columns)
return rows
'''
validates some of the input from the mapping file
'''
def validate_input(row):
# row vs column
if row[0] != "R" and row[0] != "C":
print("ROW-ORIENTED MUST BE 'R' OR 'C'")
exit(1)
# update frequency
update_freqs = ["daily", "weekly", "monthly", "quarterly", "yearly",
"never"]
if row[2] not in update_freqs:
print("update type must be:")
for x in update_freqs:
print("\"{}\" ".format(x))
exit(1)
# update type
if row[3] != "ADD" and row[3] != "REPLACE":
print("update type must be 'ADD' or 'REPLACE'")
exit(1)
return
'''
unzips and extracts a file into the given directory
'''
def unzip_and_extract(url, filename):
# request and get the data
res = requests.get(url)
res_byte = BytesIO(res.content).read()
# convert into a zipfile
filebytes = BytesIO(res_byte)
myzipfile = zipfile.ZipFile(filebytes)
# extract the file
try:
extracted_file = myzipfile.open(filename)
# throws a key error if the file does not exist
except KeyError:
print("The given filename does not exist within the zip archive!")
exit(1)
# read the data as bytes and return it
data = extracted_file.read()
return data
```
|
{
"source": "jdejaegh/ics-fusion",
"score": 3
}
|
#### File: app/tools/caching.py
```python
import json
import os
import sched
import threading
import time
from hashlib import sha256
import traceback
import arrow
import requests
from ics import Calendar
from tatsu.exceptions import FailedParse
from tools.tools import horodate, process
def cache(entry: dict, scheduler: sched.scheduler = None) -> None:
"""Cache an .ics feed in the app/cache directory.
Different entries with the same URL will be cached in the same file.
    The cached calendar contains a new line in each event description with the time of caching, prefixed by the
    'Cached at' mention
:param entry: representation of the entry to cache. This is the Python representation of the corresponding entry
in the config file
:type entry: dict
    :param scheduler: scheduler used to relaunch the caching task in the future. If no scheduler is specified,
the task will not be relaunched
:type scheduler: sched.scheduler
"""
try:
if not os.path.isdir('app/cache'):
os.mkdir('app/cache')
url = entry['url']
path = "app/cache/" + sha256(url.encode()).hexdigest() + ".ics"
r = requests.get(entry["url"], allow_redirects=True)
if "encoding" in entry:
cal = Calendar(imports=r.content.decode(encoding=entry["encoding"]))
else:
cal = Calendar(imports=r.content.decode())
cal = horodate(cal, 'Cached at')
open(path, 'w').writelines(cal)
print(arrow.now().format("YYYY-MM-DD HH:mm:ss"), "Cached", entry['name'])
except FailedParse:
print("Could not parse", entry['name'])
# Save stack trace when an unknown error occurs
except Exception as e:
with open("error " + arrow.now().format("YYYY-MM-DD HH:mm:ss")+".txt", 'w') as file:
file.write(arrow.now().format("YYYY-MM-DD HH:mm:ss") + "\nCould not cache : " + str(entry))
file.write(str(e))
file.write(str(traceback.format_exc()))
finally:
if scheduler is not None:
delay = entry['cache'] if entry['cache'] > 0 else 10
delay *= 60
scheduler.enter(delay=delay, priority=1, action=cache, argument=(entry, scheduler))
def precompute(config: str, scheduler: sched.scheduler = None) -> None:
"""Precompute a configuration file result to serve it faster when it is requested. This function
should be used with a scheduler to be repeated over time.
:param config: name of the configuration file to precompute the result for
:type config: str
    :param scheduler: scheduler used to relaunch the precomputing task in the future. If no scheduler is specified,
the task will not be relaunched
:type scheduler: sched.scheduler
"""
try:
cal = process(os.path.basename(config), False)
path = "app/cache/" + os.path.basename(config).rstrip('.json') + ".ics"
open(path, 'w').writelines(cal)
print(arrow.now().format("YYYY-MM-DD HH:mm:ss"), "Precomputed", os.path.basename(config).rstrip('.json'))
except Exception as e:
with open("error " + arrow.now().format("YYYY-MM-DD HH:mm:ss")+".txt", 'w') as file:
file.write(arrow.now().format("YYYY-MM-DD HH:mm:ss") + "\nCould not precompute : " + str(config))
file.write(str(e))
file.write(str(traceback.format_exc()))
finally:
if scheduler is not None:
delay = get_min_cache(config)
delay *= 60
scheduler.enter(delay=delay, priority=1, action=precompute, argument=(config, scheduler))
def get_min_cache(path: str) -> float:
"""Get the minimum caching time of all the entries in a config file.
:param path: path of the config file to use
:type path: str
:return: float number representing the smallest caching time.
"""
result = float('inf')
with open(path, 'r') as config_file:
file = json.loads(config_file.read())
for entry in file:
if 'cache' in entry and entry['cache'] < result:
result = entry['cache']
return result
def start_scheduler(scheduler: sched.scheduler) -> None:
"""Start the caching of every config file found in the app/config directory
:param scheduler: scheduler object to use to schedule the caching
:type scheduler: sched.scheduler
"""
path = "app/config"
files = [os.path.join(path, f) for f in os.listdir(path)
if os.path.isfile(os.path.join(path, f)) and f.endswith('.json')]
for file in files:
with open(file, 'r') as config_file:
config = json.loads(config_file.read())
for entry in config:
if 'cache' in entry:
scheduler.enter(delay=0, priority=1, action=cache, argument=(entry, scheduler))
if get_min_cache(file) < float('inf'):
scheduler.enter(delay=get_min_cache(file)*60, priority=1, action=precompute, argument=(file, scheduler))
scheduler.run()
class CacheThread(threading.Thread):
"""Child class of the threading.Thread class to run the caching process every 10 minutes
"""
def __init__(self):
threading.Thread.__init__(self)
def run(self):
print("Starting cache process")
start_scheduler(sched.scheduler(time.time, time.sleep))
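# Example (a minimal sketch): start background caching from an application
# entry point; CacheThread.run() builds its own scheduler, so starting the
# thread is enough.
#
#   cache_thread = CacheThread()
#   cache_thread.daemon = True  # optional: don't block interpreter shutdown
#   cache_thread.start()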
```
#### File: app/tools/tools.py
```python
import json
import re
import arrow
import os
from hashlib import sha256
from typing import List
import requests
from ics import Calendar
from pathvalidate import sanitize_filename
def filtering(cal: Calendar, filters: dict, field_name: str) -> Calendar:
"""Filter the event of a calendar according to the filters and the field_name
:param cal: the calendar to apply filters to
:type cal: Calendar
:param filters: the filters to apply to the calendar
:type filters: dict
    :param field_name: the name of the field in the filters to consider
:type field_name: str
:return: the modified cal argument after filtering out the events
:rtype: Calendar
:raises SyntaxError: if both exclude and includeOnly are specified in the filters
"""
if field_name in filters:
field = filters[field_name]
if ("exclude" in field) and ("includeOnly" in field):
raise SyntaxError("Cannot specify both exclude and includeOnly")
if ("exclude" not in field) and ("includeOnly" not in field):
return cal
new = Calendar()
ignore_case = True if ("ignoreCase" in field and field["ignoreCase"]) else False
if "exclude" in field:
p = re.compile(field["exclude"], re.IGNORECASE | re.DOTALL) \
if ignore_case else re.compile(field["exclude"], re.DOTALL)
for event in cal.events:
if event.name is None or (field_name == "name" and p.match(event.name) is None):
new.events.add(event)
elif event.description is None or (field_name == "description" and p.match(event.description) is None):
new.events.add(event)
if "includeOnly" in field:
p = re.compile(field["includeOnly"], re.IGNORECASE | re.DOTALL) \
if ignore_case else re.compile(field["includeOnly"], re.DOTALL)
for event in cal.events:
if field_name == "name" and event.name is not None and p.match(event.name) is not None:
new.events.add(event)
elif field_name == "description" and event.description is not None \
and p.match(event.description) is not None:
new.events.add(event)
cal = new
return cal
else:
return cal
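# Example (illustrative filters structure, matching the keys handled above):
#   filters = {
#       "name": {"exclude": "^Cancelled", "ignoreCase": True},
#       "description": {"includeOnly": "room [0-9]+"}
#   }
#   cal = filtering(cal, filters, "name")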
def apply_filters(cal: Calendar, filters: dict) -> Calendar:
"""Apply all the filters to a calendar and returns the resulting calendar
:param cal: the calendar to apply filters to
:type cal: Calendar
:param filters: the filters to apply
:type filters: dict
:return: the modified cal parameter to satisfy the filters
:rtype: Calendar
:raises SyntaxError: if both exclude and includeOnly are specified for the same field in the filters
"""
cal = filtering(cal, filters, "name")
cal = filtering(cal, filters, "description")
return cal
def modify_time(cal: Calendar, modify: dict) -> Calendar:
"""Modify the time of all the events in a calendar as specified in the modify structure
:param cal: the calendar where it is needed to modify the time of the events
:type cal: Calendar
:param modify: the structure defining how to modify the time
:type modify: dict
:return: the modified cal parameter
:rtype: Calendar
"""
if ("time" in modify) and ("shift" in modify["time"]):
shift = modify["time"]["shift"]
year = 0 if not ("year" in shift) else shift["year"]
month = 0 if not ("month" in shift) else shift["month"]
day = 0 if not ("day" in shift) else shift["day"]
hour = 0 if not ("hour" in shift) else shift["hour"]
minute = 0 if not ("minute" in shift) else shift["minute"]
for event in cal.events:
event.end = event.end.shift(years=year, months=month, days=day, hours=hour, minutes=minute)
event.begin = event.begin.shift(years=year, months=month, days=day, hours=hour, minutes=minute)
return cal
def modify_text(cal: Calendar, modify: dict, field_name: str) -> Calendar:
"""Modify one text field (name, location, description) of all the events in the cal parameter
according to the modify structure and the field_name
:param cal: the calendar where it is needed to modify the text field
:type cal: Calendar
:param modify: the structure defining how to modify the time
:type modify: dict
:param field_name: the name of the field to modify
:type field_name: str
:return: the modified cal parameter
:rtype: Calendar
"""
if field_name in modify:
change = modify[field_name]
if "addPrefix" in change:
for event in cal.events:
if field_name == "name":
event.name = change["addPrefix"] + event.name \
if event.name is not None else change["addPrefix"]
elif field_name == "description":
event.description = change["addPrefix"] + event.description \
if event.description is not None else change["addPrefix"]
elif field_name == "location":
event.location = change["addPrefix"] + event.location \
if event.location is not None else change["addPrefix"]
if "addSuffix" in change:
for event in cal.events:
if field_name == "name":
event.name = event.name + change["addSuffix"] \
if event.name is not None else change["addSuffix"]
elif field_name == "description":
event.description = event.description + change["addSuffix"] \
if event.description is not None else change["addSuffix"]
elif field_name == "location":
event.location = event.location + change["addSuffix"] \
if event.location is not None else change["addSuffix"]
return cal
def apply_modify(cal: Calendar, modify: dict) -> Calendar:
"""Apply all the needed modifications to a calendar and returns the resulting calendar
:param cal: the calendar to apply modifications to
:type cal: Calendar
:param modify: the structure containing the modifications to apply
:type modify: dict
:return: the modified cal parameter
:rtype: Calendar
"""
cal = modify_time(cal, modify)
cal = modify_text(cal, modify, "name")
cal = modify_text(cal, modify, "description")
cal = modify_text(cal, modify, "location")
return cal
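# Example (illustrative modify structure, matching the keys handled above):
#   modify = {
#       "time": {"shift": {"day": 1, "hour": 2}},
#       "name": {"addPrefix": "[Mirror] "},
#       "location": {"addSuffix": " (room TBC)"}
#   }
#   cal = apply_modify(cal, modify)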
def merge(cals: List[Calendar]) -> Calendar:
"""Merge a list of calendars into a single calendar
Only takes the event into account, not the tasks or the alarms
:param cals: the list of calendars to merge
:type cals: List[Calendar]
:return: the calendar containing the union of the events contained in the cals list
:rtype: Calendar
:raises ValueError: if an element of the list is not a Calendar
"""
result = Calendar()
for cal in cals:
if not isinstance(cal, Calendar):
raise ValueError("All elements should be Calendar")
result.events = result.events.union(cal.events)
return result
def process(path: str, from_cache: bool = True) -> Calendar:
"""Open a config file from the specified path, download the calendars,
apply the filters, modify and merge the calendars as specified in the config file
    :param from_cache: if True and a precomputed .ics file exists in the cache, serve it instead of recomputing
:param path: name of the file to open. The file should be in the config/ folder
:type path: str
:return: the resulting calendar
:rtype: Calendar
"""
print("app/cache/" + sanitize_filename(path).rstrip(".json") + ".ics")
if from_cache and os.path.isfile("app/cache/" + sanitize_filename(path).rstrip(".json") + ".ics"):
with open("app/cache/" + sanitize_filename(path).rstrip(".json") + ".ics") as file:
data = file.read()
print("Serving precomputed file")
return data #Calendar(imports=data)
else:
o = "app/config/" + sanitize_filename(path)
print("Try to open " + o)
file = open(o, "r")
config = json.loads(file.read())
file.close()
data = []
for entry in config:
cal = load_cal(entry)
if "filters" in entry:
cal = apply_filters(cal, entry["filters"])
if "modify" in entry:
cal = apply_modify(cal, entry["modify"])
data.append(cal)
return merge(data)
def get_from_cache(entry: dict) -> Calendar:
"""Retrieve the entry from cache. If the entry is not found, an exception is raised
:param entry: representation of the entry to cache. This is the Python representation of the corresponding entry
in the config file
:type entry: dict
:return: the corresponding calendar in cache
:rtype: Calendar
    :raises FileNotFoundError: if the entry has not been cached before
"""
url = entry['url']
path = "app/cache/" + sha256(url.encode()).hexdigest() + ".ics"
if not os.path.isfile(path):
print("Not cached")
raise FileNotFoundError("The calendar is not cached")
with open(path, 'r') as file:
data = file.read()
return Calendar(imports=data)
def load_cal(entry: dict) -> Calendar:
"""Load the calendar from the cache or from remote according to the entry. If the calendar is supposed to be in
cached but could not be found in cache, an error is thrown
:param entry: representation of the entry to cache. This is the Python representation of the corresponding entry
in the config file
:type entry: dict
:return: the calendar corresponding to the entry
:rtype: Calendar
    :raises FileNotFoundError: if the entry was supposed to be cached but has not been cached before
"""
if "cache" in entry and entry["cache"]:
print("Getting", entry["name"], "from cache")
try:
return get_from_cache(entry)
except FileNotFoundError:
return Calendar()
else:
print("Getting", entry["name"], "from remote")
r = requests.get(entry["url"], allow_redirects=True)
if "encoding" in entry:
cal = Calendar(imports=r.content.decode(encoding=entry["encoding"]))
else:
cal = Calendar(imports=r.content.decode())
cal = horodate(cal, 'Downloaded at')
return cal
def horodate(cal: Calendar, prefix='') -> Calendar:
"""Add a new line at the end of the description of every event in the calendar with the current time prefixed by
the prefix parameter and a space
The date is added with the following format: YYYY-MM-DD HH:mm:ss
:param cal: calendar to process
:type cal: Calendar
:param prefix: the prefix to add in front of the date
:type prefix: str
:return: the modified calendar
:rtype: Calendar
"""
now = arrow.now().format("YYYY-MM-DD HH:mm:ss")
for event in cal.events:
event.description = event.description + '\n' + prefix + ' ' + now \
if event.description is not None else prefix + ' ' + now
return cal
```
|
{
"source": "jdeka22/imdb_keras-master",
"score": 3
}
|
#### File: jdeka22/imdb_keras-master/server.py
```python
from flask import Flask, request, jsonify
import traceback
import predict
app = Flask(__name__)
@app.route('/predict', methods=['POST'])
def run():
try:
data = request.get_json(force=True)
input_params = data['input']
result = predict.predict(input_params)
return jsonify({'prediction': result})
except Exception as e:
print(traceback.format_exc())
return jsonify({'error': str(e)})
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080)
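# Example request (a sketch; the exact 'input' format depends on what
# predict.predict() expects):
#   curl -X POST http://127.0.0.1:8080/predict \
#        -H "Content-Type: application/json" \
#        -d '{"input": "the movie was great"}'
# The response is JSON, either {"prediction": ...} or {"error": ...}.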
```
|
{
"source": "jdekarske/astrobee",
"score": 3
}
|
#### File: scripts/build/genCommandDictionary.py
```python
import os
import sys
import logging
# hack to ensure xgds_planner2 submodule is at head of PYTHONPATH
ffroot = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, os.path.join(ffroot, 'astrobee', 'commands', 'xgds_planner2'))
from xgds_planner2 import commandDictionary
def main():
import optparse
parser = optparse.OptionParser('usage: %prog <inSchemaPath> <outHtmlPath>\n\n' + __doc__.strip())
opts, args = parser.parse_args()
if len(args) == 2:
inSchemaPath, outHtmlPath = args
else:
parser.error('expected exactly 2 args')
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
commandDictionary.writeCommandDictionary(inSchemaPath, outHtmlPath,
includeCommandSpecNameField=False,
includeCommandSpecNotesField=False)
if __name__ == '__main__':
main()
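# Example invocation (paths are placeholders, matching the usage string above):
#   python genCommandDictionary.py <inSchemaPath> <outHtmlPath>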
```
|
{
"source": "jdekarske/axopy",
"score": 2
}
|
#### File: axopy/gui/main.py
```python
import sys
from PyQt5 import QtCore, QtWidgets
from axopy import util
from axopy.messaging import Transmitter
import collections
from contextlib import contextmanager
# This mapping from key names in the Qt namespace to axopy key names just
# allows users to write code without any Qt stuff in it
key_map = {
QtCore.Qt.Key_A: util.key_a,
QtCore.Qt.Key_B: util.key_b,
QtCore.Qt.Key_C: util.key_c,
QtCore.Qt.Key_D: util.key_d,
QtCore.Qt.Key_E: util.key_e,
QtCore.Qt.Key_F: util.key_f,
QtCore.Qt.Key_G: util.key_g,
QtCore.Qt.Key_H: util.key_h,
QtCore.Qt.Key_I: util.key_i,
QtCore.Qt.Key_J: util.key_j,
QtCore.Qt.Key_K: util.key_k,
QtCore.Qt.Key_L: util.key_l,
QtCore.Qt.Key_M: util.key_m,
QtCore.Qt.Key_N: util.key_n,
QtCore.Qt.Key_O: util.key_o,
QtCore.Qt.Key_P: util.key_p,
QtCore.Qt.Key_Q: util.key_q,
QtCore.Qt.Key_R: util.key_r,
QtCore.Qt.Key_S: util.key_s,
QtCore.Qt.Key_T: util.key_t,
QtCore.Qt.Key_U: util.key_u,
QtCore.Qt.Key_V: util.key_v,
QtCore.Qt.Key_W: util.key_w,
QtCore.Qt.Key_X: util.key_x,
QtCore.Qt.Key_Y: util.key_y,
QtCore.Qt.Key_Z: util.key_z,
QtCore.Qt.Key_1: util.key_1,
QtCore.Qt.Key_2: util.key_2,
QtCore.Qt.Key_3: util.key_3,
QtCore.Qt.Key_4: util.key_4,
QtCore.Qt.Key_5: util.key_5,
QtCore.Qt.Key_6: util.key_6,
QtCore.Qt.Key_7: util.key_7,
QtCore.Qt.Key_8: util.key_8,
QtCore.Qt.Key_9: util.key_9,
QtCore.Qt.Key_0: util.key_0,
QtCore.Qt.Key_Space: util.key_space,
QtCore.Qt.Key_Return: util.key_return,
QtCore.Qt.Key_Escape: util.key_escape,
}
qt_key_map = {v: k for k, v in key_map.items()}
def get_qtapp():
"""Get a QApplication instance running.
Returns the current ``QApplication`` instance if it exists and creates it
otherwise.
Examples
--------
This function is primarily for internal usage, but it is exposed to make it
convenient to test graphical code without all of the experiment/task
machinery.
.. code-block:: python
from axopy.gui.main import get_qtapp, Container
# get the application instance first, before creating widgets etc.
app = get_qtapp()
con = Container()
# call show() to show the widget, then run the application
con.show()
app.exec_()
"""
global qtapp
inst = QtWidgets.QApplication.instance()
if inst is None:
qtapp = QtWidgets.QApplication(sys.argv)
else:
qtapp = inst
return qtapp
@contextmanager
def gui_check():
"""Check graphical interface code interactively.
This function makes it convenient to test graphical code without all of the
experiment/task machinery. You can create a :class:`Container`, add things
    to the container, and then use this context manager to run the GUI and
    try it out.
.. note::
Be sure to call :mod:`Container.show()` at the end to display the
container.
Examples
--------
Minimal example
.. code-block:: python
from axopy.gui.main import Container, gui_check
with gui_check():
con = Container()
con.show()
"""
app = get_qtapp()
yield app
app.exec_()
class _MainWindow(QtWidgets.QMainWindow):
"""The window containing all graphical content of the application.
It is a very simple GUI implemented as a `QMainWindow` with a
`QStackedLayout` holding a list of :class:`Container` objects. The
containers, which in turn house all of the interesting graphical content.
"""
key_pressed = Transmitter(str)
def __init__(self):
app = get_qtapp()
super(_MainWindow, self).__init__()
app.installEventFilter(self)
self._central_widget = QtWidgets.QWidget(self)
self._layout = QtWidgets.QStackedLayout(self._central_widget)
self.setCentralWidget(self._central_widget)
status_bar = QtWidgets.QStatusBar(self)
self.setStatusBar(status_bar)
self._statusbar_label = QtWidgets.QLabel("status")
status_bar.addPermanentWidget(self._statusbar_label)
self.show()
def run(self):
"""Start the application."""
get_qtapp().exec_()
def new_container(self):
"""Add a new container to the stack and give it back.
Returns
-------
container : Container
The newly added container.
"""
c = Container()
self._layout.addWidget(c)
self._layout.setCurrentWidget(c)
return c
def set_container(self, container):
"""Make the given container visible.
If the container is already somewhere in the stack, it is just made
visible, otherwise it is added to the stack.
"""
if self._layout.indexOf(container) == -1:
self._layout.addWidget(container)
self._layout.setCurrentWidget(container)
def set_status(self, message):
"""Set the status bar message.
Parameters
----------
message : str
Message to display in the status bar.
"""
self._statusbar_label.setText(message)
def quit(self):
"""Quit the application."""
get_qtapp().quit()
def keyPressEvent(self, event):
"""Qt callback for key presses.
This overrides the `QMainWindow` method. It does not need to be called
directly and it doesn't need to be overriden. Connect to the
``key_pressed`` transmitter to handle key press events.
"""
try:
key = key_map[event.key()]
except KeyError:
return super().keyPressEvent(event)
self.key_pressed.emit(key)
class Container(QtWidgets.QWidget):
"""Graphics container for tasks."""
def set_widget(self, widget):
"""Set the widget containing all graphical elements.
Parameters
----------
widget : QWidget
Any QWidget is OK to add.
See Also
--------
axopy.gui.canvas: Canvas widget and canvas items that can be added to
the container.
axopy.gui.graph: Plotting widgets that can be added to the container.
"""
self.layout = QtWidgets.QGridLayout()
self.setLayout(self.layout)
self.layout.addWidget(widget, 0, 0)
def set_layout(self, layout):
"""Set the layout of the container.
Parameters
----------
layout : QLayout
Any QLayout is OK to add.
"""
self.setLayout(layout)
def show(self):
"""Show the container in the active application.
This is not normally needed, unless you're testing out a GUI and using
:func:`gui_check`.
"""
super(Container, self).show()
class _SessionConfig(QtWidgets.QDialog):
"""Widget for configuring a session.
Shows a form layout with the specified options. Options are passed as a
dictionary with option labels as keys and option types as values. The value
can also be a sequence of strings, which are shown in a combo box. Use
``run()`` to run the dialog and return the results in a dictionary.
"""
def __init__(self, options):
app = get_qtapp()
super(_SessionConfig, self).__init__()
self.options = options
self.results = {}
self.widgets = {}
main_layout = QtWidgets.QVBoxLayout()
self.setLayout(main_layout)
form_layout = QtWidgets.QFormLayout()
form_layout.setFormAlignment(QtCore.Qt.AlignVCenter)
main_layout.addLayout(form_layout)
for label, typ in options.items():
if typ in {str, int, float}:
w = QtWidgets.QLineEdit()
self.widgets[label] = w
form_layout.addRow(label, w)
elif isinstance(typ, collections.Sequence):
w = QtWidgets.QComboBox()
for choice in typ:
w.addItem(str(choice))
self.widgets[label] = w
form_layout.addRow(label, w)
else:
raise TypeError("option {}({}) not a supported type".format(
label, typ))
button = QtWidgets.QPushButton("Ok")
main_layout.addWidget(button)
button.clicked.connect(self._on_button_click)
self.show()
def run(self):
self.exec_()
return self.results
def _on_button_click(self):
for label, widget in self.widgets.items():
t = self.options[label]
if t is str:
self.results[label] = str(widget.text())
elif t is int:
self.results[label] = int(widget.text())
elif t is float:
self.results[label] = float(widget.text())
else:
self.results[label] = str(widget.currentText())
if 'subject' in self.options and self.results['subject'] == '':
QtWidgets.QMessageBox.warning(
self,
"Warning",
"Subject ID must not be empty.",
QtWidgets.QMessageBox.Ok)
return
self.done(0)
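# Example (minimal sketch of the option-dict format described in the class
# docstring; run() blocks until the Ok button is clicked):
#   options = {'subject': str, 'trials': int, 'group': ('A', 'B')}
#   results = _SessionConfig(options).run()
#   # e.g. results == {'subject': 'p0', 'trials': 10, 'group': 'A'}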
```
#### File: axopy/pipeline/core.py
```python
class Block(object):
"""Base class for all blocks.
Notes
-----
Blocks should take their parameters in ``__init__`` and provide at least
the ``process`` method for taking in data and returning some result.
"""
def __init__(self, name=None, hooks=None):
self._name = name
if name is None:
self._name = self.__class__.__name__
self._hooks = hooks
if hooks is None:
self._hooks = []
def __call__(self, *args, **kwargs):
return self.process(*args, **kwargs)
def process(self, data):
"""Process input data and produce a result.
Subclasses must implement this method, otherwise it shouldn't really be
a ``Block``.
"""
raise NotImplementedError
def clear(self):
"""Clear the state of the block.
Some blocks don't keep stateful attributes, so ``clear`` does nothing
by default.
"""
pass
@property
def name(self):
return self._name
@property
def hooks(self):
return self._hooks
def __repr__(self):
return "%s.%s()" % (
self.__class__.__module__,
self.__class__.__name__
)
class Pipeline(Block):
"""Feedforward arrangement of blocks for processing data.
A :class:`Pipeline` contains a set of :class:`Block` objects which operate
on data to produce a final output.
    To create a pipeline, only two rules are needed: blocks in a list are
    processed in series, and blocks in a tuple are processed in parallel.
Blocks that are arranged to take multiple inputs should expect to take the
corresponding number of inputs in the order they are given. It is up to the
user constructing the pipeline to make sure that the arrangement of blocks
makes sense.
Parameters
----------
blocks : container
        The blocks in the pipeline, with lists processed in series and tuples
processed in parallel.
Attributes
----------
named_blocks : dict
Dictionary of blocks in the pipeline. Keys are the names given to the
blocks in the pipeline and values are the block objects.
"""
def __init__(self, blocks, name=None):
super(Pipeline, self).__init__(name=name)
self.blocks = blocks
self.named_blocks = {}
# traverse the block structure to fill named_blocks
self._call_block('name', self.blocks)
def process(self, data):
"""
Calls the ``process`` method of each block in the pipeline, passing the
outputs around as specified in the block structure.
Parameters
----------
data : object
The input to the first block(s) in the pipeline. The type/format
doesn't matter, as long as the blocks you define accept it.
Returns
-------
out : object
The data output by the ``process`` method of the last block(s) in
the pipeline.
"""
out = self._call_block('process', self.blocks, data)
return out
def clear(self):
"""
Calls the ``clear`` method on each block in the pipeline. The effect
depends on the blocks themselves.
"""
self._call_block('clear', self.blocks)
def _call_block(self, fname, block, data=None):
if isinstance(block, list):
out = self._call_list(fname, block, data)
elif isinstance(block, tuple):
out = self._call_tuple(fname, block, data)
else:
if fname == 'name':
self.named_blocks[block.name] = block
return
f = getattr(block, fname)
if data is not None:
out = f(data)
else:
out = f()
if hasattr(block, 'hooks') and fname == 'process':
for hook in block.hooks:
hook(out)
return out
def _call_list(self, fname, block, data=None):
out = data
for b in block:
out = self._call_block(fname, b, out)
return out
def _call_tuple(self, fname, block, data=None):
out = []
for b in block:
out.append(self._call_block(fname, b, data))
if fname == 'process':
return out
else:
return None
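# Example (a minimal sketch): lists run in series, tuples run in parallel.
#
#   class Add(Block):
#       def __init__(self, k):
#           super(Add, self).__init__()
#           self.k = k
#       def process(self, data):
#           return data + self.k
#
#   p = Pipeline([Add(1), (Add(10), Add(100))])
#   p.process(1)  # -> [12, 102]: Add(1) feeds both parallel branches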
```
#### File: axopy/examples/devices.py
```python
import sys
import argparse
import numpy as np
from axopy.task import Oscilloscope
from axopy.experiment import Experiment
from axopy.daq import NoiseGenerator, Keyboard, Mouse
from axopy.pipeline import Pipeline, Callable, Windower, Filter
def rainbow():
dev = NoiseGenerator(rate=2000, num_channels=16, read_size=200)
run(dev)
def keyboard():
dev = Keyboard()
# need a windower to show something interesting in the oscilloscope
pipeline = Pipeline([Windower(10)])
run(dev, pipeline)
def keystick():
dev = Keyboard(rate=20, keys=list('wasd'))
pipeline = Pipeline([
# window to average over
Windower(10),
# mean along rows
Callable(lambda x: np.mean(x, axis=1, keepdims=True)),
# window to show in the oscilloscope
Windower(60)
])
run(dev, pipeline)
def emgsim():
# sampling rate of the simulated EMG data
fs = 2000
# update rate of the generated data
update_rate = 20
# gain to use in noise generation
gain = 0.25
# number of seconds of data the oscilloscope shows
osc_view_time = 5
samp_per_input = int(fs / update_rate)
pipeline = Pipeline([
# get keyboard inputs of past second
Windower(update_rate),
# take mean over last second and apply a gain
Callable(lambda x: np.mean(x, axis=1, keepdims=True)),
# generate noise with amplitude of previous output
Callable(lambda x, k: gain * x * np.random.randn(x.shape[0], k),
func_args=(samp_per_input,)),
# window for pretty display in oscilloscope
Windower(osc_view_time * update_rate * samp_per_input),
])
dev = Keyboard(rate=update_rate, keys=list('wasd'))
run(dev, pipeline)
def mouse():
dev = Mouse(rate=20)
pipeline = Pipeline([
# just for scaling the input since it's in pixels
Callable(lambda x: x/100),
# window to show in the oscilloscope
Windower(40)
])
run(dev, pipeline)
def run(dev, pipeline=None):
# run an experiment with just an oscilloscope task
Experiment(daq=dev, subject='test').run(Oscilloscope(pipeline))
if __name__ == '__main__':
functions = {
'rainbow': rainbow,
'keyboard': keyboard,
'keystick': keystick,
'emgsim': emgsim,
'mouse': mouse,
}
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
'function',
help='Function in the example script to run.')
args = parser.parse_args()
if args.function not in functions:
print("{} isn't a function in the example.".format(args.function))
sys.exit(-1)
else:
functions[args.function]()
```
#### File: axopy/examples/experiment_creation.py
```python
import sys
import argparse
from axopy.experiment import Experiment
from axopy.task import Oscilloscope
from axopy.daq import NoiseGenerator
daq = NoiseGenerator(rate=2000, num_channels=6, read_size=200)
def run():
"""Main function of the example. Runs each demo and then exits."""
customized()
def simple():
# subject is not given, so it is configured in run
exp = Experiment(daq=daq).run(Oscilloscope())
def customized():
exp = Experiment(daq=daq)
# optional config step, subject field is implied
config = exp.configure(group=('A', 'B'))
# here you can retrieve the selected group via `config['group']`
# run list of tasks
exp.run(Oscilloscope())
def debug():
# subject is given, so no configure step is needed
exp = Experiment(daq=daq, data='/tmp/data', subject='test').run(
Oscilloscope())
if __name__ == '__main__':
functions = {
'simple': simple,
'customized': customized,
}
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
'function',
help='Function in the example script to run.')
args = parser.parse_args()
if args.function not in functions:
print("{} isn't a function in the example.".format(args.function))
sys.exit(-1)
else:
functions[args.function]()
```
#### File: axopy/tests/test_design.py
```python
import numpy as np
from axopy import design
def test_design():
d = design.Design()
b = d.add_block()
# add trial with attributes as args
t = b.add_trial(attrs={'attr': 1.0})
# set trial attributes later
t.attrs['var'] = True
assert set(t.attrs) == set({'trial': 0, 'block': 0, 'attr': 1.0,
'var': True})
# add an empty array
t.add_array('1d')
for i in range(3):
t.arrays['1d'].stack(np.zeros(5))
assert t.arrays['1d'].data.shape == (15,)
t.add_array('2d')
for i in range(3):
t.arrays['2d'].stack(np.zeros((2, 5)))
assert t.arrays['2d'].data.shape == (2, 15)
t.add_array('static', data=np.random.randn(100))
def test_block_shuffle():
d = design.Design()
b = d.add_block()
for i in range(10):
b.add_trial()
for i in range(10):
b.shuffle()
assert b[0].attrs['trial'] == 0
```
#### File: axopy/tests/test_storage.py
```python
import pytest
import os
import numpy
import pandas
from axopy.design import Design
from axopy.storage import (Storage, TaskWriter, TrialWriter, read_hdf5,
write_hdf5, storage_to_zip, makedirs)
@pytest.fixture(scope='function')
def tmpdirpath(tmpdir):
"""Convenience fixture to get the path to a temporary directory."""
return str(tmpdir.dirpath())
def test_storage(tmpdirpath):
"""Integration test for regular storage usage with task design."""
# usually done by task manager
storage = Storage(root=tmpdirpath)
storage.subject_id = 'p0'
writer = storage.create_task('task1')
d = Design()
for i in range(3):
b = d.add_block()
t = b.add_trial(attrs={'trial': 0, 'label': 'a'})
t.add_array('data', data=numpy.zeros(5))
writer.write(t)
t = b.add_trial(attrs={'trial': 1, 'label': 'b'})
t.add_array('data')
t.arrays['data'].stack(numpy.zeros(3))
writer.write(t)
writer.pickle([1, 2, 3], 'somelist')
# task reading
reader = storage.require_task('task1')
assert len(reader.trials) == 6
arrays = reader.iterarray('data')
assert next(arrays).shape == (5,)
assert next(arrays).shape == (3,)
assert next(arrays).shape == (5,)
assert reader.pickle('somelist') == [1, 2, 3]
def test_allow_overwrite(tmpdirpath):
root = os.path.join(tmpdirpath, 'overwrite')
storage = Storage(root=root)
storage.subject_id = 'p0'
writer = storage.create_task('task1')
# trying to create the task again raises error
with pytest.raises(ValueError):
storage.create_task('task1')
for i in range(2):
storage = Storage(root=root, allow_overwrite=True)
storage.subject_id = 'p0'
# with allow_overwrite, no more error
writer = storage.create_task('task1')
d = Design()
trial = d.add_block().add_trial(attrs={'var': 1})
trial.add_array('array', data=numpy.random.randn(10))
writer.write(trial)
def test_storage_directories(tmpdir_factory):
"""Test that Storage can find and create the right directories."""
# create a file structure:
# data/
# p0/
# task1/
# task2/
# p1/
# task1/
# p2/
root = str(tmpdir_factory.mktemp('data'))
folders = {'p0': ['task1', 'task2'], 'p1': ['task1'], 'p2': []}
for subj_id, tasknames in folders.items():
os.makedirs(os.path.join(root, subj_id))
for name in tasknames:
os.makedirs(os.path.join(root, subj_id, name))
storage = Storage(root)
assert list(storage.subject_ids) == sorted(folders.keys())
assert list(storage.task_ids) == []
# make sure everything matches the structure built by the fixture
for subj_id, tasknames in folders.items():
storage.subject_id = subj_id
assert list(storage.task_ids) == tasknames
# try a non-existing subject
storage.subject_id = 'other_subject'
assert list(storage.task_ids) == []
# create a new task
storage.create_task('task1')
assert os.path.exists(os.path.join(root, storage.subject_id, 'task1'))
assert list(storage.task_ids) == ['task1']
# ensure you can't overwrite existing task
with pytest.raises(ValueError):
storage.create_task('task1')
# require an existing task
storage.require_task('task1')
# fail if you require a non-existing task
with pytest.raises(ValueError):
storage.require_task('task2')
def test_hdf5_read_write(tmpdirpath):
fp = os.path.join(tmpdirpath, 'file.hdf5')
x_expected = numpy.array([[0.1, 2.1, 4.1], [2.1, 4.2, 2.1]])
write_hdf5(fp, x_expected)
x = read_hdf5(fp)
numpy.testing.assert_array_equal(x_expected, x)
write_hdf5(fp, x_expected, dataset='somedata')
x = read_hdf5(fp, dataset='somedata')
numpy.testing.assert_array_equal(x_expected, x)
def test_storage_to_zip(tmpdirpath):
# make a dataset root under a subfolder
p = os.path.join(tmpdirpath, 'datasets', 'dataset01')
os.makedirs(p)
with open(os.path.join(p, 'file.txt'), 'w') as f:
f.write("hello")
outfile = os.path.join(tmpdirpath, 'datasets', 'dataset01.zip')
zipfile = storage_to_zip(p)
assert zipfile == outfile
assert os.path.isfile(outfile)
outfile = os.path.join(tmpdirpath, 'dataset01_relocated.zip')
zipfile = storage_to_zip(p, outfile=outfile)
assert zipfile == outfile
assert os.path.isfile(outfile)
def test_makedirs(tmpdir):
path = os.path.join(str(tmpdir.dirpath()), 'subdir', 'subsubdir')
# regular usage
makedirs(path)
assert os.path.exists(path)
# fail if path already exists
with pytest.raises(OSError):
makedirs(path)
# succeed if path exists but that's ok
makedirs(path, exist_ok=True)
```
#### File: axopy/tests/test_task.py
```python
import pytest
from axopy import util
from axopy.task import Task
from axopy.task.base import _TaskIter
from axopy.messaging import Transmitter
@pytest.fixture
def simple_design():
design = [
[{'block': 0, 'trial': 0}, {'block': 0, 'trial': 1}],
[{'block': 1, 'trial': 0}, {'block': 1, 'trial': 1}]
]
return design
def test_task_iter(simple_design):
d = simple_design
it = _TaskIter(d)
b = it.next_block()
assert b == d[0]
t = it.next_trial()
assert t == b[0]
t = it.next_trial()
assert t == b[1]
t = it.next_trial()
assert t is None
b = it.next_block()
assert b == d[1]
b = it.next_block()
assert b is None
def test_base_task(simple_design):
task = Task()
for b in range(2):
block = task.iter.design.add_block()
for t in range(2):
block.add_trial()
# task prepare hooks run by Experiment
task.prepare_graphics(None)
task.prepare_daq(None)
task.prepare_storage(None)
task.run()
assert task.block.index == 0
# task is waiting for key press to advance
assert not hasattr(task, 'trial')
task.key_press(util.key_return)
assert task.trial.attrs == simple_design[0][0]
# automatically advance to next block
task.advance_block_key = None
task.next_trial()
task.next_trial()
assert task.block.index == 1
assert task.trial.attrs == simple_design[1][0]
class recv(object):
def finish(self):
self.finish_received = True
r = recv()
task.finished.connect(r.finish)
task.next_block()
assert r.finish_received
count = 0
def test_task_transmitters():
"""Check to make sure transmitter/receiver interface works for tasks."""
class CustomTask(Task):
tx = Transmitter()
def rx(self):
global count
count += 1
# create two of the same task, make sure rx is called the appropriate
# number of times
t1 = CustomTask()
t2 = CustomTask()
# using "raw" connect, both t1.tx and t2.tx will connect since those are
# the same underlying transmitter object
t1.tx.connect(t1.rx)
t1.tx.emit()
assert count == 1
# firing the second task's transmitter doesn't call the receiver more than
# once
t2.tx.connect(t2.rx)
t2.tx.emit()
assert count == 2
t1.tx.disconnect(t1.rx)
t2.tx.disconnect(t2.rx)
# check task connect/disconnect
t1.connect(t1.tx, t1.rx)
t1.tx.emit()
assert count == 3
# disconnect specific pair
t1.disconnect(t1.tx, t1.rx)
# try to disconnect again, fails silently
t1.disconnect(t1.tx, t1.rx)
t2.connect(t2.tx, t2.rx)
t2.tx.emit()
assert count == 4
t2.disconnect_all()
```
|
{
"source": "jdekarske/football_odds",
"score": 3
}
|
#### File: jdekarske/football_odds/odds.py
```python
import requests
import pandas as pd
import numpy as np
import datetime
import os
from jinja2 import Environment, FileSystemLoader
# jinja setup
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
env = Environment(loader=FileSystemLoader(THIS_DIR), trim_blocks=True)
template = env.get_template("template.html")
# either use an api key from github or local
API_KEY = os.getenv("ODDS_API_KEY_REPO")
if API_KEY is None:
print("API key not found in env, using SECRET.txt")
with open("SECRET.txt", "r") as file:
API_KEY = file.read()
SPORT = "americanfootball_nfl"
REGIONS = "us"
MARKETS = "spreads"
ODDS_FORMAT = "decimal"
DATE_FORMAT = "iso"
start_date = datetime.datetime.now(datetime.timezone.utc)
def main():
odds_response = requests.get(
f"https://api.the-odds-api.com/v4/sports/{SPORT}/odds",
params={
"api_key": API_KEY,
"regions": REGIONS,
"markets": MARKETS,
"oddsFormat": ODDS_FORMAT,
"dateFormat": DATE_FORMAT,
},
)
if odds_response.status_code != 200:
raise Exception(
f"Failed to get odds: status_code {odds_response.status_code}, response body {odds_response.text}"
)
odds_json = odds_response.json()
if not odds_json:
os.makedirs("public", exist_ok=True)
with open("public/index.html", "w") as fo:
fo.write(
template.render(
last_update=start_date.strftime("%b %-d %Y %H:%M:%S"),
odds_table="<p>Sorry, no football :(</p>",
)
)
print("no games available, exiting")
return
print("Number of events:", len(odds_json))
# Check the usage quota
print("Remaining requests", odds_response.headers["x-requests-remaining"])
print("Used requests", odds_response.headers["x-requests-used"])
df = pd.json_normalize(odds_json, record_path="bookmakers", meta="commence_time")
start_time = pd.to_datetime(df["commence_time"].str.strip())
df = pd.json_normalize(df["markets"].to_list())
df = pd.json_normalize(df.values.reshape(-1).tolist())["outcomes"]
df = pd.json_normalize(df.tolist())
df = pd.concat([start_time, df], axis=1)
team1 = pd.DataFrame.from_records(df[0].to_list())
team1 = pd.concat([df["commence_time"], team1], axis=1)
team2 = pd.DataFrame.from_records(df[1].to_list())
team2 = pd.concat([df["commence_time"], team2], axis=1)
all_teams = pd.concat([team1, team2])
# find the games played this week (current time until tuesday)
TUESDAY = 1
idx = (7 - start_date.weekday() + TUESDAY) % 7
tue = start_date + datetime.timedelta(idx)
end_date = tue
by_week = all_teams[start_date < all_teams["commence_time"]]
by_week = by_week[by_week["commence_time"] < end_date]
by_week = by_week.groupby(["commence_time", "name"]).mean()
pd.set_option("display.max_rows", None)
by_week.sort_values("point", inplace=True)
by_week = by_week.reset_index()
scores = pd.DataFrame(np.arange(1, len(by_week) / 2 + 1), columns=["score"])
scores = pd.concat([scores[::-1], scores]).reset_index().drop(columns="index")
assign_scores = pd.concat([by_week, scores], axis=1)
assign_scores.drop(["price"], axis=1, inplace=True)
assign_scores.rename(
columns={
"commence_time": "Commence Time (UTC)",
"name": "<NAME>",
"point": "Avg Spread",
"score": "Score",
},
inplace=True,
)
central_time = (
assign_scores["Commence Time (UTC)"]
.dt.tz_convert("US/Central")
.rename("Commence Time (CT)")
)
assign_scores = pd.concat([central_time, assign_scores], axis=1)
# don't display the timezone
assign_scores["Commence Time (CT)"] = assign_scores[
"Commence Time (CT)"
].dt.tz_localize(None)
assign_scores["Commence Time (UTC)"] = assign_scores[
"Commence Time (UTC)"
].dt.tz_localize(None)
os.makedirs("public", exist_ok=True)
with open("public/index.html", "w") as fo:
fo.write(
template.render(
last_update=start_date.strftime("%b %-d %Y %H:%M:%S"),
odds_table=assign_scores.to_html(
justify="left",
classes=["table table-striped table-dark table-hover table-sm"],
),
)
)
if __name__ == "__main__":
main()
```
|
{
"source": "jdekarske/isaac_user_interface",
"score": 3
}
|
#### File: backend/tiler/cropper.py
```python
def make_tile(input_file, output_file, maxX, maxY, maxZ, minX, minY, minZ):
# bounding_box = [ 11.8,-7.1,4.01, 9.8, -9.8,3.9 ]
# maxX = bounding_box[0]
# maxY = bounding_box[1]
# maxZ = bounding_box[2]
# minX = bounding_box[3]
# minY = bounding_box[4]
# minZ = bounding_box[5]
v_keepers = dict() # keeps track of which vertices are within the bounding box
kept_vertices = 0
discarded_vertices = 0
kept_faces = 0
discarded_faces = 0
discarded_lines = 0
kept_lines = 0
obj_file = open(input_file, 'r')
new_obj_file = open(output_file, 'w')
# the number of the next "v" vertex lines to process.
original_v_number = 1 # the number of the next "v" vertex lines to process.
# the new ordinal position of this vertex if out of bounds vertices were discarded.
new_v_number = 1
for line in obj_file:
line_elements = line.split()
# Python doesn't have a SWITCH statement, but we only have three cases, so we'll just use cascading if stmts
# if it isn't an "f" type line (face definition)
if line_elements[0] != "f":
# and it isn't an "v" type line either (vertex definition)
if line_elements[0] != "v":
# ************************ PROCESS ALL NON V AND NON F LINE TYPES ******************
# then we just copy it unchanged from the input OBJ to the output OBJ
new_obj_file.write(line)
kept_lines = kept_lines + 1
else: # then line_elements[0] == "v":
# ************************ PROCESS VERTICES ****************************************
# a "v" line looks like this:
# v x y z ...
x = float(line_elements[1])
y = float(line_elements[2])
z = float(line_elements[3])
if minX < x < maxX and minY < y < maxY and minZ < z < maxZ:
# if vertex is within the bounding box, we include it in the new OBJ file
new_obj_file.write(line)
v_keepers[str(original_v_number)] = str(new_v_number)
new_v_number = new_v_number + 1
kept_vertices = kept_vertices + 1
kept_lines = kept_lines + 1
else: # if vertex is NOT in the bounding box
new_obj_file.write(line)
discarded_vertices = discarded_vertices + 1
discarded_lines = discarded_lines + 1
original_v_number = original_v_number + 1
else: # line_elements[0] == "f":
# ************************ PROCESS FACES ****************************************
# a "f" line looks like this:
# f v1/vt1/vn1 v2/vt2/vn2 v3/vt3/vn3 ...
# We need to delete any face lines where ANY of the 3 vertices v1, v2 or v3 are NOT in v_keepers.
v = ["", "", ""]
# Note that v1, v2 and v3 are the first "/" separated elements within each line element.
for i in range(0, 3):
v[i] = line_elements[i+1].split('/')[0]
# now we can check if EACH of these 3 vertices are in v_keepers.
# for each f line, we need to determine if all 3 vertices are in the v_keepers list
if v[0] in v_keepers and v[1] in v_keepers and v[2] in v_keepers:
new_obj_file.write(line)
kept_lines = kept_lines + 1
kept_faces = kept_faces + 1
else: # at least one of the vertices in this face has been deleted, so we need to delete the face too.
discarded_lines = discarded_lines + 1
discarded_faces = discarded_faces + 1
new_obj_file.write("# CROPPED "+line)
# end of line processing loop
obj_file.close()
new_obj_file.close()
```
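A minimal usage sketch for `make_tile`; the file names are hypothetical and the bounding box mirrors the commented-out example at the top of the function (max corner passed before min corner):
```python
# Hedged usage sketch: crop an OBJ to a bounding box. Paths and box values
# are assumptions for illustration only.
from cropper import make_tile  # assumes this module is importable as 'cropper'

make_tile(
    "scene.obj",          # hypothetical input OBJ
    "scene_tile.obj",     # hypothetical output OBJ
    11.8, -7.1, 4.01,     # maxX, maxY, maxZ
    9.8, -9.8, 3.9,       # minX, minY, minZ
)
```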
|
{
"source": "jdekoning/kaggle_toxicity",
"score": 3
}
|
#### File: kaggle_toxicity/activations/ReLUs.py
```python
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
from keras.layers import Activation
class ReLUs(Activation):
def __init__(self, activation, **kwargs):
super(ReLUs, self).__init__(activation, **kwargs)
self.__name__ = 'relus'
@staticmethod
def config(e_param):
ReLUs.e_param = e_param
get_custom_objects().update({'relus': ReLUs(ReLUs.relus)})
@staticmethod
def relus(Z):
e = ReLUs.e_param
pi = K.variable((3.14))
m = e * (K.sigmoid(K.sin(Z)) - K.sigmoid(K.cos(Z)) * K.exp(K.sqrt(pi)))
A = K.maximum(m, Z)
return A
```
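A hedged sketch of how this activation is meant to be wired into a Keras model: calling `config` registers the `'relus'` name through `get_custom_objects`, after which it can be referenced by string. The import path, layer sizes and `e_param` value are illustrative assumptions.
```python
# Hedged sketch; import path, layer shapes and e_param are assumptions.
from keras.models import Sequential
from keras.layers import Dense, Activation
from activations.ReLUs import ReLUs  # assumed import path for this repo layout

ReLUs.config(e_param=0.5)  # registers the 'relus' activation by name

model = Sequential([
    Dense(64, input_shape=(100,)),
    Activation('relus'),   # resolved through the Keras custom-objects registry
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy')
```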
|
{
"source": "JDekun/HCSC",
"score": 2
}
|
#### File: JDekun/HCSC/eval_semisup.py
```python
import argparse
import os
import time
import logging
from logging import getLogger
import urllib
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from utils.options import parse_semisup_args
from utils.utils import (
initialize_exp,
restart_from_checkpoint,
fix_random_seeds,
AverageMeter,
init_distributed_mode,
accuracy,
)
logger = getLogger()
def main():
global args, best_acc
args = parse_semisup_args()
init_distributed_mode(args)
fix_random_seeds(args.seed)
if args.rank==0:
if not os.path.exists(args.exp_dir):
os.makedirs(args.exp_dir)
logger, training_stats = initialize_exp(
args, "epoch", "loss", "prec1", "prec5", "loss_val", "prec1_val", "prec5_val"
)
# build data
train_data_path = os.path.join(args.data_path, "train")
train_dataset = datasets.ImageFolder(train_data_path)
# take either 1% or 10% of images
subset_file = "{}percent.txt".format(args.labels_perc)
with open(subset_file, "r") as f:
list_imgs = f.readlines()
list_imgs = [x.split("\n")[0] for x in list_imgs]
train_dataset.samples = [(
os.path.join(train_data_path, li.split('_')[0], li),
train_dataset.class_to_idx[li.split('_')[0]]
) for li in list_imgs]
val_dataset = datasets.ImageFolder(os.path.join(args.data_path, "val"))
tr_normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.228, 0.224, 0.225]
)
train_dataset.transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
tr_normalize,
])
val_dataset.transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
tr_normalize,
])
sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(
train_dataset,
sampler=sampler,
batch_size=args.batch_size,
num_workers=args.workers,
pin_memory=True,
)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size,
num_workers=args.workers,
pin_memory=True,
)
logger.info("Building data done with {} images loaded.".format(len(train_dataset)))
# build model
model = models.__dict__[args.arch](num_classes=1000)
# convert batch norm layers
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
# load weights
if os.path.isfile(args.pretrained):
state_dict = torch.load(args.pretrained, map_location="cuda:" + str(args.gpu))
if "state_dict" in state_dict:
state_dict = state_dict["state_dict"]
# remove prefixe "module."
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
state_dict = {k.replace("encoder_q.", ""): v for k, v in state_dict.items()}
for k, v in model.state_dict().items():
if k not in list(state_dict):
logger.info('key "{}" could not be found in provided state dict'.format(k))
elif state_dict[k].shape != v.shape:
logger.info('key "{}" is of different shape in model and provided state dict'.format(k))
state_dict[k] = v
msg = model.load_state_dict(state_dict, strict=False)
logger.info("Load pretrained model with msg: {}".format(msg))
else:
logger.info("No pretrained weights found => training from random weights")
# model to gpu
model = model.cuda()
model = nn.parallel.DistributedDataParallel(
model,
device_ids=[args.gpu],
find_unused_parameters=True,
)
# set optimizer
trunk_parameters = []
head_parameters = []
for name, param in model.named_parameters():
if 'fc' in name:
head_parameters.append(param)
else:
trunk_parameters.append(param)
optimizer = torch.optim.SGD(
[{'params': trunk_parameters},
{'params': head_parameters, 'lr': args.lr_last_layer}],
lr=args.lr,
momentum=0.9,
weight_decay=0,
)
# set scheduler
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, args.decay_epochs, gamma=args.gamma
)
# Optionally resume from a checkpoint
to_restore = {"epoch": 0, "best_acc": (0., 0.)}
restart_from_checkpoint(
os.path.join(args.exp_dir, "checkpoint.pth.tar"),
run_variables=to_restore,
state_dict=model,
optimizer=optimizer,
scheduler=scheduler,
)
start_epoch = to_restore["epoch"]
best_acc = to_restore["best_acc"]
cudnn.benchmark = True
for epoch in range(start_epoch, args.epochs):
# train the network for one epoch
logger.info("============ Starting epoch %i ... ============" % epoch)
# set samplers
train_loader.sampler.set_epoch(epoch)
scores = train(model, optimizer, train_loader, epoch)
scores_val = validate_network(val_loader, model)
training_stats.update(scores + scores_val)
scheduler.step()
# save checkpoint
if args.rank == 0:
save_dict = {
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"best_acc": best_acc,
}
torch.save(save_dict, os.path.join(args.exp_dir, "checkpoint.pth.tar"))
logger.info("Fine-tuning with {}% of labels completed.\n"
"Test accuracies: top-1 {acc1:.1f}, top-5 {acc5:.1f}".format(
args.labels_perc, acc1=best_acc[0], acc5=best_acc[1]))
def train(model, optimizer, loader, epoch):
"""
Train the models on the dataset.
"""
# running statistics
batch_time = AverageMeter("time", ":.2f")
data_time = AverageMeter("data time", ":.2f")
# training statistics
top1 = AverageMeter("top1", ":.3f")
top5 = AverageMeter("top5", ":.3f")
losses = AverageMeter("loss", ":.3e")
end = time.perf_counter()
model.train()
criterion = nn.CrossEntropyLoss().cuda()
for iter_epoch, (inp, target) in enumerate(loader):
# measure data loading time
data_time.update(time.perf_counter() - end)
# move to gpu
inp = inp.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# forward
output = model(inp)
# compute cross entropy loss
loss = criterion(output, target)
# compute the gradients
optimizer.zero_grad()
loss.backward()
# step
optimizer.step()
# update stats
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), inp.size(0))
top1.update(acc1[0], inp.size(0))
top5.update(acc5[0], inp.size(0))
batch_time.update(time.perf_counter() - end)
end = time.perf_counter()
# verbose
if args.rank == 0 and iter_epoch % 50 == 0:
logger.info(
"Epoch[{0}] - Iter: [{1}/{2}]\t"
"Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
"Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Prec {top1.val:.3f} ({top1.avg:.3f})\t"
"LR trunk {lr}\t"
"LR head {lr_W}".format(
epoch,
iter_epoch,
len(loader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
top1=top1,
lr=optimizer.param_groups[0]["lr"],
lr_W=optimizer.param_groups[1]["lr"],
)
)
return epoch, losses.avg, top1.avg.item(), top5.avg.item()
def validate_network(val_loader, model):
batch_time = AverageMeter("time", ":.2f")
losses = AverageMeter("loss", ":.3e")
top1 = AverageMeter("top1", ":.2f")
top5 = AverageMeter("top5", ":.2f")
global best_acc
# switch to evaluate mode
model.eval()
criterion = nn.CrossEntropyLoss().cuda()
with torch.no_grad():
end = time.perf_counter()
for i, (inp, target) in enumerate(val_loader):
# move to gpu
inp = inp.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(inp)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), inp.size(0))
top1.update(acc1[0], inp.size(0))
top5.update(acc5[0], inp.size(0))
# measure elapsed time
batch_time.update(time.perf_counter() - end)
end = time.perf_counter()
if top1.avg.item() > best_acc[0]:
best_acc = (top1.avg.item(), top5.avg.item())
if args.rank == 0:
logger.info(
"Test:\t"
"Time {batch_time.avg:.3f}\t"
"Loss {loss.avg:.4f}\t"
"Acc@1 {top1.avg:.3f}\t"
"Best Acc@1 so far {acc:.1f}".format(
batch_time=batch_time, loss=losses, top1=top1, acc=best_acc[0]))
return losses.avg, top1.avg.item(), top5.avg.item()
if __name__ == "__main__":
main()
```
#### File: HCSC/utils/options.py
```python
import argparse
def parse_args_main():
parser = argparse.ArgumentParser(description='PyTorch ImageNet Pre-training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--dataset', type=str, default='imagenet', choices=['imagenet', 'cifar10', 'cifar100', 'coco'],
help='which dataset should be used to pretrain the model')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
help='model architecture')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.03, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--lr_final', default=0., type=float)
parser.add_argument('--schedule', default=[120, 160], nargs='*', type=int,
help='learning rate schedule (when to drop lr by 10x)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum of SGD solver')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:10034', type=str,
help='url used to set up distributed training')
parser.add_argument("--local_rank", default=0, type=int,
help="this argument is not used and should be ignored")
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--dim', default=128, type=int,
help='feature dimension')
parser.add_argument('--queue_length', default=16384, type=int,
help='queue size; number of negative pairs')
parser.add_argument('--m', default=0.999, type=float,
help='moco momentum of updating key encoder (default: 0.999)')
parser.add_argument('--T', default=0.2, type=float,
help='temperature')
parser.add_argument('--mlp', type=int, default=1,
help='use mlp head')
parser.add_argument('--aug-plus', type=int, default=1,
help='use moco-v2/SimCLR data augmentation')
parser.add_argument('--cos', type=int, default=1,
help='use cosine lr schedule')
parser.add_argument('--num-cluster', default='3000,2000,1000', type=str,
help='number of clusters')
parser.add_argument('--warmup-epoch', default=20, type=int,
help='number of warm-up epochs to only train with InfoNCE loss')
parser.add_argument('--multi_crop', action='store_true',
default=False,
help='Whether to enable multi-crop transformation')
parser.add_argument("--nmb_crops", type=int, default=[1,1,1,1,1], nargs="+",
help="list of number of crops")
parser.add_argument("--size_crops", type=int, default=[224, 192, 160, 128, 96], nargs="+",
help="crops resolutions")
parser.add_argument("--min_scale_crops", type=float, default=[0.2, 0.172, 0.143, 0.114, 0.086], nargs="+",
help="argument in RandomResizedCrop ")
parser.add_argument("--max_scale_crops", type=float, default=[1.0, 0.86, 0.715, 0.571, 0.429], nargs="+",
help="argument in RandomResizedCrop")
## Selection configs
parser.add_argument("--selection_on_local", action="store_true", default=False,
help="whether enable mining on local views")
parser.add_argument("--instance_selection", type=int, default=1,
help="Whether enable instance selection")
parser.add_argument("--proto_selection", type=int, default=1,
help="Whether enable prototype selection")
parser.add_argument('--exp-dir', default='experiment_pcl', type=str,
help='experiment directory')
args = parser.parse_args()
return args
def parse_args_lincls_imagenet():
parser = argparse.ArgumentParser(description='ImageNet Linear Classification')
parser.add_argument('--data', type=str, metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=5., type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--schedule', default=[60,80], nargs='*', type=int,
help='learning rate schedule (when to drop lr by a ratio)')
parser.add_argument('--cos', type=int, default=0,
help='use cosine lr schedule')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=0., type=float,
metavar='W', help='weight decay (default: 0.)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=200, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:10028', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', type=int, default=1,
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--pretrained', default='', type=str,
help='path to moco pretrained checkpoint')
parser.add_argument('--choose', type=str, default=None, help="choose gpu for training")
parser.add_argument("--dataset", type=str, default="ImageNet", help="which dataset is used to finetune")
parser.add_argument("--final_lr", type=float, default=0.0, help="ending learning rate for training")
parser.add_argument('--save_path', default="", type=str, help="model and record save path")
parser.add_argument('--log_path', type=str, default="train_log", help="log path for saving models")
args = parser.parse_args()
return args
def parse_args_lincls_places():
parser = argparse.ArgumentParser(description='Places205 Linear Classification')
parser.add_argument('--data', type=str, metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=3., type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--schedule', default=[60, 80], nargs='*', type=int,
help='learning rate schedule (when to drop lr by a ratio)') # default is for places205
parser.add_argument('--cos', type=int, default=1,
help='use cosine lr schedule')
parser.add_argument("--sgdr", type=int, default=2)
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=0., type=float,
metavar='W', help='weight decay (default: 0.)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=200, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:10028', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', type=int, default=1,
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--pretrained', default='', type=str,
help='path to moco pretrained checkpoint')
parser.add_argument('--choose', type=str, default=None, help="choose gpu for training")
parser.add_argument("--dataset", type=str, default="ImageNet", help="which dataset is used to finetune")
parser.add_argument("--final_lr", type=float, default=0.01, help="ending learning rate for training")
parser.add_argument('--save_path', default="", type=str, help="model and record save path")
parser.add_argument('--log_path', type=str, default="train_log", help="log path for saving models")
parser.add_argument("--train_strong", type=int, default=1, help="training use stronger augmentation or not")
parser.add_argument("--sgdr_t0", type=int, default=10, help="sgdr t0")
parser.add_argument("--sgdr_t_mult", type=int, default=1, help="sgdr t mult")
parser.add_argument("--dropout", type=float, default=0.0, help="dropout layer settings")
parser.add_argument("--randcrop", type=int, default=1, help="use random crop or not")
args = parser.parse_args()
return args
def parse_semisup_args():
parser = argparse.ArgumentParser(description="ImageNet Semi-supervised Learning Evaluation")
parser.add_argument("--labels_perc", type=str, default="10", choices=["1", "10"],
help="fine-tune on either 1% or 10% of labels")
parser.add_argument("--exp_dir", type=str, default=".",
help="experiment dump path for checkpoints and log")
parser.add_argument("--seed", type=int, default=31, help="seed")
parser.add_argument("data_path", type=str, default="",
help="path to imagenet")
parser.add_argument("--workers", default=10, type=int,
help="number of data loading workers")
parser.add_argument("--arch", default="resnet50", type=str, help="convnet architecture")
parser.add_argument("--pretrained", default="", type=str, help="path to pretrained weights")
parser.add_argument("--epochs", default=70, type=int,
help="number of total epochs to run")
parser.add_argument("--batch_size", default=32, type=int,
help="batch size per gpu, i.e. how many unique instances per gpu")
parser.add_argument("--lr", default=0.005, type=float, help="initial learning rate - trunk")
parser.add_argument("--lr_last_layer", default=0.02, type=float, help="initial learning rate - head")
parser.add_argument("--decay_epochs", type=int, nargs="+", default=[30, 60],
help="Epochs at which to decay learning rate.")
parser.add_argument("--gamma", type=float, default=0.1, help="lr decay factor")
parser.add_argument("--dist_url", default="env://", type=str,
help="url used to set up distributed training")
parser.add_argument("--world_size", default=-1, type=int, help="""
number of processes: it is set automatically and
should not be passed as argument""")
parser.add_argument("--rank", default=0, type=int, help="""rank of this process:
it is set automatically and should not be passed as argument""")
parser.add_argument("--local_rank", default=0, type=int,
help="this argument is not used and should be ignored")
args = parser.parse_args()
return args
def parse_args_knn():
parser = argparse.ArgumentParser('Evaluation with weighted k-NN on ImageNet')
parser.add_argument('--batch_size_per_gpu', default=128, type=int, help='Per-GPU batch-size')
parser.add_argument('--nb_knn', default=[10, 20, 100, 200], nargs='+', type=int,
help='Number of NN to use. 20 is usually working the best.')
parser.add_argument('--temperature', default=0.07, type=float,
help='Temperature used in the voting coefficient')
parser.add_argument('--pretrained', default='', type=str, help="Path to pretrained weights to evaluate.")
parser.add_argument('--use_cuda', default=1, type=int,
help="Store features in GPU.")
parser.add_argument('--arch', default='resnet50', type=str, help='Architecture')
parser.add_argument("--checkpoint_key", default="state_dict", type=str,
help='Key to use in the checkpoint')
parser.add_argument('--dump_features', default=None,
help='Path where to save computed features, empty for no saving')
parser.add_argument('--load_features', default=None, help="""If the features have
already been computed, where to find them.""")
parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
distributed training; """)
parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
parser.add_argument("--debug", default=False, action='store_true')
parser.add_argument('--data', type=str)
args = parser.parse_args()
return args
```
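Since all of these helpers read `sys.argv`, a quick way to exercise one of them outside a training launch is to patch the argument vector first; the data path below is an assumption, not a path from the repo.
```python
# Hedged sketch: inspect the values produced by parse_semisup_args.
import sys
from utils.options import parse_semisup_args

sys.argv = ['eval_semisup.py', '/data/imagenet', '--labels_perc', '10']  # hypothetical path
args = parse_semisup_args()
print(args.labels_perc, args.lr, args.lr_last_layer, args.decay_epochs)
```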
|
{
"source": "jdelaney44/fsndp4-tic-tac-toe-01",
"score": 2
}
|
#### File: fsndp4-tic-tac-toe-01/crons/game_crons.py
```python
import webapp2
from api import game_api
class UpdateAverageMovesRemaining(webapp2.RequestHandler):
def post(self):
"""Update game listing announcement in memcache."""
game_api.TicTacToeAPI._cache_average_attempts()
self.response.set_status(204)
```
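The handler only does something once it is mapped to a URL that the cron or task-queue trigger can hit; a hedged sketch of that wiring, with a hypothetical route, might look like:
```python
# Hedged sketch: route registration for the cron handler. The URL path is an
# assumption; the real mapping lives in the project's app/cron configuration.
import webapp2
from crons.game_crons import UpdateAverageMovesRemaining

app = webapp2.WSGIApplication([
    ('/crons/update_average_attempts', UpdateAverageMovesRemaining),
], debug=False)
```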
|
{
"source": "jdelanoy/sdgan",
"score": 3
}
|
#### File: sdgan/began/data_loader.py
```python
from collections import defaultdict
import os
from PIL import Image
from glob import glob
import tensorflow as tf
import numpy as np
import random
import scipy.misc
from tqdm import tqdm
# TODO: should be able to use tf queues for this somehow and not have to load the entire dataset into memory. The main problem is dynamically retrieving pairs of images from the same image class IDs (CIDs) using tensorflow.
class HackyCIGANLoader(object):
def __init__(self, batch_size, ninstances, data_format, cid_to_ims, h=64, w=64, c=3):
self.data_format = data_format
assert data_format in ['NCHW', 'NHWC']
original_len = len(cid_to_ims)
cid_to_ims = filter(lambda x: len(x[1]) > ninstances, cid_to_ims.items())
cid_to_ims = {k:v for k,v in cid_to_ims}
print '{}/{} CIDs have too few instances'.format(original_len - len(cid_to_ims), original_len)
self.batch_size = batch_size
self.ninstances = ninstances
self.cids = cid_to_ims.keys()
self.cid_to_ims = cid_to_ims
self.h = h
self.w = w
self.c = c
if self.data_format == 'NHWC':
self.x = tf.placeholder(tf.float32, [batch_size, h, w, ninstances * c], 'x')
else:
self.x = tf.placeholder(tf.float32, [batch_size, ninstances * c, h, w], 'x')
def placeholder(self):
return self.x
def one_batch(self):
_x = np.empty((self.batch_size, self.h, self.w, self.ninstances * self.c), dtype=np.float32)
for b in xrange(self.batch_size):
cid = random.choice(self.cids)
cid_ims = self.cid_to_ims[cid]
random_instances = random.sample(cid_ims, self.ninstances)
stacked_channels = np.concatenate(random_instances, axis=2)
_x[b] = stacked_channels
if self.data_format == 'NCHW':
_x = np.transpose(_x, [0, 3, 1, 2])
return _x
def iter_forever(self, batch_size, ninstances):
while True:
# batch_size and ninstances are fixed at construction time, so one_batch() takes no arguments
yield self.one_batch()
def get_loader(root, batch_size, ninstances, scale_size, data_format, split=None, is_grayscale=False, seed=None):
print root
dataset_name = os.path.basename(root)
if dataset_name in ['CelebA'] and split:
root = os.path.join(root, 'splits', split)
for ext in ["jpg", "png"]:
paths = glob("{}/*.{}".format(root, ext))
if ext == "jpg":
tf_decode = tf.image.decode_jpeg
elif ext == "png":
tf_decode = tf.image.decode_png
if len(paths) != 0:
break
with Image.open(paths[0]) as img:
w, h = img.size
shape = [h, w, 3]
cid_to_fps = defaultdict(list)
for fp in paths:
fid = os.path.splitext(os.path.basename(fp))[0]
cid, _, ratio = fid.rsplit('_', 2)
cid_to_fps[cid].append(fp)
cid_to_ims = defaultdict(list)
for cid, fps in tqdm(cid_to_fps.items()):
for fp in fps:
im = scipy.misc.imread(fp)
im = im.astype(np.float32)
cid_to_ims[cid].append(im)
return HackyCIGANLoader(batch_size, ninstances, data_format, cid_to_ims)
```
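A hedged usage sketch for `get_loader`; the dataset root and sizes are assumptions. `one_batch()` returns a numpy array shaped `[batch, H, W, ninstances * C]` for `'NHWC'` (with the channel axis moved forward for `'NCHW'`), which is fed into `placeholder()` at training time.
```python
# Hedged sketch; the dataset root is an assumption.
loader = get_loader(
    root='./data/shoes4k',
    batch_size=16,
    ninstances=2,
    scale_size=64,
    data_format='NHWC',
)
x_batch = loader.one_batch()          # numpy array, e.g. shape (16, 64, 64, 6)
x_placeholder = loader.placeholder()  # feed x_batch here via feed_dict
```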
#### File: jdelanoy/sdgan/sdgan.py
```python
from collections import defaultdict
import pickle
import math
import os
import time
import numpy as np
#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from dcgan import DCGANGenerator64x64, SDDCGANDiscriminator64x64
from util import str2bool, decode_png_observation, encode_png_observation
"""
Samples a k-tuple from a group with or without replacement
Enqueues n at a time for efficiency
"""
def group_choose_k(
named_id_to_fps,
k,
n=None,
with_replacement=False,
capacity=4096,
min_after_dequeue=2048,
nthreads=4):
assert k > 0
# Join (variable-length) groups into CSV strings for enqueueing
avg_group_size = int(np.ceil(np.mean([len(group_fps) for group_fps in named_id_to_fps.values()])))
named_id_to_fps = [','.join(group_fps) for group_fps in named_id_to_fps.values()]
# If n is None, compute a reasonable value (avg group len choose k)
if n is None:
f = math.factorial
n = f(avg_group_size) / f(k) / f(avg_group_size - k)
# Dequeue one and split it into group
group_fps = tf.train.string_input_producer(named_id_to_fps).dequeue()
group_fps = tf.string_split([group_fps], ',').values
group_size = tf.shape(group_fps)[0]
tf.summary.histogram('group_size', group_size)
# Select some at random
# TODO: Should be some way to sample without replacement here rather than manually filtering
tuple_ids = tf.random_uniform([n, k], minval=0, maxval=group_size, dtype=tf.int32)
# Count num tuples enqueued
ntotal = tf.Variable(0)
tf.summary.scalar('tuples_ntotal', ntotal)
add_total = tf.assign_add(ntotal, n)
# Filter duplicates if sampling tuples without replacement
if not with_replacement and k > 1:
# Find unique tuples
tuple_unique = tf.ones([n], tf.bool)
for i in xrange(k):
for j in xrange(k):
if i == j:
continue
pair_unique = tf.not_equal(tuple_ids[:, i], tuple_ids[:, j])
tuple_unique = tf.logical_and(tuple_unique, pair_unique)
# Filter tuples with duplicates
valid_tuples = tf.where(tuple_unique)[:, 0]
# Count num valid tuples enqueued
nvalid = tf.Variable(0)
tf.summary.scalar('tuples_nvalid', nvalid)
tf.summary.scalar('tuples_valid_ratio',
tf.cast(nvalid, tf.float32) / tf.cast(ntotal, tf.float32))
add_valid = tf.assign_add(nvalid, tf.shape(valid_tuples)[0])
# Gather valid ids
with tf.control_dependencies([add_valid]):
tuple_ids = tf.gather(tuple_ids, valid_tuples)
# Gather valid tuples
with tf.control_dependencies([add_total]):
tuples = tf.gather(group_fps, tuple_ids)
# Make batches
tuple_q = tf.RandomShuffleQueue(capacity, min_after_dequeue, tuples.dtype, [k])
tuple_enq = tuple_q.enqueue_many(tuples)
tf.train.add_queue_runner(tf.train.QueueRunner(tuple_q, [tuple_enq] * nthreads))
tf.summary.scalar('tuples_queue_size', tuple_q.size())
return tuple_q.dequeue()
"""
Trains an SD-GAN
"""
def train(
train_dir,
named_id_to_fps,
batch_size,
k,
height,
width,
nch,
queue_capacity=8192,
queue_min=4096,
queue_nthreads=2,
d_i=50,
d_o=50,
G_dim=64,
D_dim=64,
loss='dcgan',
opt='dcgan',
D_siamese=True,
D_iters=1,
save_secs=300,
summary_secs=120):
# Get batch of observations
def make_batch(observations):
queue = tf.RandomShuffleQueue(
capacity=queue_capacity,
min_after_dequeue=queue_min,
shapes=[[k, height, width, nch]],
dtypes=[tf.float32])
example = tf.stack(observations, axis=0)
enqueue_op = queue.enqueue(example)
qr = tf.train.QueueRunner(queue, [enqueue_op] * queue_nthreads)
tf.train.add_queue_runner(qr)
tf.summary.scalar('queue_size', queue.size())
return queue.dequeue_many(batch_size)
# Load observation tuples
with tf.name_scope('loader'):
# Generate matched pairs of WAV fps
with tf.device('/cpu:0'):
tup = group_choose_k(named_id_to_fps, k, with_replacement=False)
observations = []
for i in xrange(k):
observation = decode_png_observation(tup[i])
observation.set_shape([height, width, nch])
observations.append(observation)
x = make_batch(observations)
# Make image summaries
for i in xrange(k):
tf.summary.image('x_{}'.format(i), encode_png_observation(x[:, i]))
# Make identity vector and repeat k times
zi = tf.random_uniform([batch_size, d_i], -1.0, 1.0, dtype=tf.float32)
zi = tf.tile(zi, [1, k])
zi = tf.reshape(zi, [batch_size, k, d_i])
# Draw iid observation vectors (no repetition)
zo = tf.random_uniform([batch_size, k, d_o], -1.0, 1.0, dtype=tf.float32)
# Concat [zi; zo]
z = tf.concat([zi, zo], axis=2)
# Make generator
with tf.variable_scope('G'):
z = tf.reshape(z, [batch_size * k, d_i + d_o])
G_z = DCGANGenerator64x64(z, nch, dim=G_dim, train=True)
G_z = tf.reshape(G_z, [batch_size, k, height, width, nch])
G_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='G')
# Print G summary
print ('-' * 80)
print ('Generator vars')
nparams = 0
for v in G_vars:
v_shape = v.get_shape().as_list()
v_n = reduce(lambda x, y: x * y, v_shape)
nparams += v_n
print ('{} ({}): {}'.format(v.get_shape().as_list(), v_n, v.name))
print ('Total params: {} ({:.2f} MB)'.format(nparams, (float(nparams) * 4) / (1024 * 1024)))
# Make image summaries
for i in xrange(k):
tf.summary.image('G_z_{}'.format(i), encode_png_observation(G_z[:, i]))
# Make real discriminator
with tf.name_scope('D_x'), tf.variable_scope('D'):
D_x = SDDCGANDiscriminator64x64(x, dim=D_dim, siamese=D_siamese)
D_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='D')
# Print D summary
print ('-' * 80)
print ('Discriminator vars')
nparams = 0
for v in D_vars:
v_shape = v.get_shape().as_list()
v_n = reduce(lambda x, y: x * y, v_shape)
nparams += v_n
print ('{} ({}): {}'.format(v.get_shape().as_list(), v_n, v.name))
print ('Total params: {} ({:.2f} MB)'.format(nparams, (float(nparams) * 4) / (1024 * 1024)))
print ('-' * 80)
# Make fake discriminator
with tf.name_scope('D_G_z'), tf.variable_scope('D', reuse=True):
D_G_z = SDDCGANDiscriminator64x64(G_z, dim=D_dim, siamese=D_siamese)
# Create loss
D_clip_weights = None
if loss == 'dcgan':
fake = tf.zeros([batch_size], dtype=tf.float32)
real = tf.ones([batch_size], dtype=tf.float32)
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=D_G_z,
labels=real
))
D_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=D_G_z,
labels=fake
))
D_loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=D_x,
labels=real
))
D_loss /= 2.
elif loss == 'lsgan':
G_loss = tf.reduce_mean((D_G_z - 1.) ** 2)
D_loss = tf.reduce_mean((D_x - 1.) ** 2)
D_loss += tf.reduce_mean(D_G_z ** 2)
D_loss /= 2.
elif loss == 'wgan':
G_loss = -tf.reduce_mean(D_G_z)
D_loss = tf.reduce_mean(D_G_z) - tf.reduce_mean(D_x)
with tf.name_scope('D_clip_weights'):
clip_ops = []
for var in D_vars:
clip_bounds = [-.01, .01]
clip_ops.append(
tf.assign(
var,
tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])
)
)
D_clip_weights = tf.group(*clip_ops)
elif loss == 'wgan-gp':
G_loss = -tf.reduce_mean(D_G_z)
D_loss = tf.reduce_mean(D_G_z) - tf.reduce_mean(D_x)
alpha = tf.random_uniform(shape=[batch_size, 1, 1, 1, 1], minval=0., maxval=1.)
differences = G_z - x
interpolates = x + (alpha * differences)
with tf.name_scope('D_interp'), tf.variable_scope('D', reuse=True):
D_interp = SDDCGANDiscriminator64x64(interpolates, dim=D_dim, siamese=D_siamese)
LAMBDA = 10
gradients = tf.gradients(D_interp, [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1, 2]))
gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2.)
D_loss += LAMBDA * gradient_penalty
else:
raise NotImplementedError()
tf.summary.scalar('G_loss', G_loss)
tf.summary.scalar('D_loss', D_loss)
# Create optimizer
if opt == 'dcgan':
G_opt = tf.train.AdamOptimizer(
learning_rate=2e-4,
beta1=0.5)
D_opt = tf.train.AdamOptimizer(
learning_rate=2e-4,
beta1=0.5)
elif opt == 'lsgan':
G_opt = tf.train.RMSPropOptimizer(
learning_rate=1e-4)
D_opt = tf.train.RMSPropOptimizer(
learning_rate=1e-4)
elif opt == 'wgan':
G_opt = tf.train.RMSPropOptimizer(
learning_rate=5e-5)
D_opt = tf.train.RMSPropOptimizer(
learning_rate=5e-5)
elif opt == 'wgan-gp':
G_opt = tf.train.AdamOptimizer(
learning_rate=1e-4,
beta1=0.5,
beta2=0.9)
D_opt = tf.train.AdamOptimizer(
learning_rate=1e-4,
beta1=0.5,
beta2=0.9)
else:
raise NotImplementedError()
G_train_op = G_opt.minimize(G_loss, var_list=G_vars,
global_step=tf.train.get_or_create_global_step())
D_train_op = D_opt.minimize(D_loss, var_list=D_vars)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Run training
with tf.train.MonitoredTrainingSession(
checkpoint_dir=train_dir,
save_checkpoint_secs=save_secs,
save_summaries_secs=summary_secs,
config=config) as sess:
while True:
# Train discriminator
for i in xrange(D_iters):
sess.run(D_train_op)
if D_clip_weights is not None:
sess.run(D_clip_weights)
# Train generator
sess.run(G_train_op)
"""
Visualizes a fixed set of random latent vectors during training
"""
def preview(
train_dir,
nids,
nobs):
from scipy.misc import imsave
preview_dir = os.path.join(train_dir, 'preview')
if not os.path.isdir(preview_dir):
os.makedirs(preview_dir)
# Load graph
infer_metagraph_fp = os.path.join(train_dir, 'infer', 'infer.meta')
graph = tf.get_default_graph()
saver = tf.train.import_meta_graph(infer_metagraph_fp)
# Generate or restore z_i and z_o
zizo_fp = os.path.join(preview_dir, 'zizo.pkl')
if os.path.exists(zizo_fp):
# Restore z_i and z_o
with open(zizo_fp, 'rb') as f:
_samp_fetches = pickle.load(f)
else:
# Sample z_i and z_o
samp_feeds = {}
samp_feeds[graph.get_tensor_by_name('samp_zi_n:0')] = nids
samp_feeds[graph.get_tensor_by_name('samp_zo_n:0')] = nobs
samp_fetches = {}
samp_fetches['zis'] = graph.get_tensor_by_name('samp_zi:0')
samp_fetches['zos'] = graph.get_tensor_by_name('samp_zo:0')
with tf.Session() as sess:
_samp_fetches = sess.run(samp_fetches, samp_feeds)
# Save z_i and z_o
with open(zizo_fp, 'wb') as f:
pickle.dump(_samp_fetches, f)
# Set up graph for generating preview images
feeds = {}
feeds[graph.get_tensor_by_name('zi:0')] = _samp_fetches['zis']
feeds[graph.get_tensor_by_name('zo:0')] = _samp_fetches['zos']
fetches = {}
fetches['step'] = tf.train.get_or_create_global_step()
grid_prev = graph.get_tensor_by_name('G_z_grid_prev:0')
fetches['G_z_grid'] = grid_prev
# Summarize
fetches['G_z_grid_summary'] = tf.summary.image('preview/grid', tf.expand_dims(grid_prev, axis=0), max_outputs=1)
summary_writer = tf.summary.FileWriter(preview_dir)
# Loop, waiting for checkpoints
ckpt_fp = None
while True:
latest_ckpt_fp = tf.train.latest_checkpoint(train_dir)
if latest_ckpt_fp != ckpt_fp:
print ('Preview: {}'.format(latest_ckpt_fp))
with tf.Session() as sess:
saver.restore(sess, latest_ckpt_fp)
_fetches = sess.run(fetches, feeds)
preview_fp = os.path.join(preview_dir, '{}.png'.format(_fetches['step']))
imsave(preview_fp, _fetches['G_z_grid'])
summary_writer.add_summary(_fetches['G_z_grid_summary'], _fetches['step'])
print ('Done')
ckpt_fp = latest_ckpt_fp
time.sleep(1)
"""
Generates two-stage inference metagraph to train_dir/infer/infer.meta:
1) Sample zi/zo
2) Execute G([zi;zo])
Named ops (use tf.default_graph().get_tensor_by_name(name)):
1) Sample zi/zo
* (Placeholder) samp_zi_n/0: Number of IDs to sample
* (Placeholder) samp_zo_n/0: Number of observations to sample
* (Output) samp_zo/0: Sampled zo latent codes
* (Output) samp_zi/0: Sampled zi latent codes
* If named_id_to_fps is not None:
* (Random) samp_id/0: IDs to sample for inspection (override if desired)
* (Constant) meta_all_named_ids/0: Names for all IDs from filepaths
* (Constant) meta_all_group_fps/0: Comma-separated list of filepaths for all ID
* (Output) samp_named_ids/0: Names for IDs
* (Output) samp_group_fps/0: Comma-separated list of filepaths for IDs
* If id_name_tsv_fp is not None:
* (Constant) meta_all_names/0: Alternative names
* (Output) samp_names/0: Alternative names for all IDs
2) Execute G([zi;zo])
* (Placeholder) zi/0: Identity latent codes
* (Placeholder) zo/0: Observation latent codes
* (Output) G_z/0: Output of G([zi;zo]); zi/zo batch size must be same
* (Output) G_z_grid/0: Grid output of G([zi;zo]); batch size can differ
* (Output) G_z_uint8/0: uint8 encoding of G_z/0
* (Output) G_z_grid_uint8/0: uint8 encoding of G_z_grid/0
* (Output) G_z_grid_prev: Image preview version of grid (5 axes to 3)
"""
def infer(
train_dir,
height,
width,
nch,
d_i,
d_o,
G_dim,
named_id_to_fps=None,
id_name_tsv_fp=None):
infer_dir = os.path.join(train_dir, 'infer')
if not os.path.isdir(infer_dir):
os.makedirs(infer_dir)
# Placeholders for sampling stage
samp_zi_n = tf.placeholder(tf.int32, [], name='samp_zi_n')
samp_zo_n = tf.placeholder(tf.int32, [], name='samp_zo_n')
# Sample IDs or fps for comparison
if named_id_to_fps is not None:
# Find number of identities and sample
nids = len(named_id_to_fps)
tf.constant(nids, dtype=tf.int32, name='nids')
samp_id = tf.random_uniform([samp_zi_n], 0, nids, dtype=tf.int32, name='samp_id')
# Find named ids and group fps
named_ids = []
fps = []
for i, (named_id, group_fps) in enumerate(sorted(named_id_to_fps.items(), key=lambda k: k[0])):
named_ids.append(named_id)
fps.append(','.join(group_fps))
named_ids = tf.constant(named_ids, dtype=tf.string, name='meta_all_named_ids')
fps = tf.constant(fps, dtype=tf.string, name='meta_all_fps')
# Alternative names (such as real names with spaces; not convenient for file paths)
if id_name_tsv_fp is not None:
with open(id_name_tsv_fp, 'r') as f:
names = [l.split('\t')[1].strip() for l in f.readlines()[1:]]
named_ids = tf.constant(names, dtype=tf.string, name='meta_all_names')
samp_named_id = tf.gather(named_ids, samp_id, name='samp_named_ids')
samp_fp_group = tf.gather(fps, samp_id, name='samp_group_fps')
if id_name_tsv_fp is not None:
samp_name = tf.gather(names, samp_id, name='samp_names')
# Sample zi/zo
samp_zi = tf.random_uniform([samp_zi_n, d_i], -1.0, 1.0, dtype=tf.float32, name='samp_zi')
samp_zo = tf.random_uniform([samp_zo_n, d_o], -1.0, 1.0, dtype=tf.float32, name='samp_zo')
# Input zo
zi = tf.placeholder(tf.float32, [None, d_i], name='zi')
zo = tf.placeholder(tf.float32, [None, d_o], name='zo')
# Latent representation
z = tf.concat([zi, zo], axis=1, name='z')
# Make zi/zo grid
zi_n = tf.shape(zi)[0]
zo_n = tf.shape(zo)[0]
zi_grid = tf.expand_dims(zi, axis=1)
zi_grid = tf.tile(zi_grid, [1, zo_n, 1])
zo_grid = tf.expand_dims(zo, axis=0)
zo_grid = tf.tile(zo_grid, [zi_n, 1, 1])
z_grid = tf.concat([zi_grid, zo_grid], axis=2, name='z_grid')
# Execute generator
with tf.variable_scope('G'):
G_z = DCGANGenerator64x64(z, nch, dim=G_dim)
G_z = tf.identity(G_z, name='G_z')
# Execute generator on grid
z_grid = tf.reshape(z_grid, [zi_n * zo_n, d_i + d_o])
with tf.variable_scope('G', reuse=True):
G_z_grid = DCGANGenerator64x64(z_grid, nch, dim=G_dim)
G_z_grid = tf.reshape(G_z_grid, [zi_n, zo_n, height, width, nch], name='G_z_grid')
# Encode to uint8
G_z_uint8 = encode_png_observation(G_z, name='G_z_uint8')
G_z_grid_uint8 = encode_png_observation(G_z_grid, name='G_z_grid_uint8')
# Flatten grid of images to one large image (row shares zi, column shares zo)
grid_zo_n = tf.shape(G_z_grid_uint8)[1]
G_z_grid_prev = tf.transpose(G_z_grid_uint8, [1, 0, 2, 3, 4])
G_z_grid_prev = tf.reshape(G_z_grid_prev, [grid_zo_n, zi_n * height, width, nch])
G_z_grid_prev = tf.transpose(G_z_grid_prev, [1, 0, 2, 3])
G_z_grid_prev = tf.reshape(G_z_grid_prev, [zi_n * height, grid_zo_n * width, nch], name='G_z_grid_prev')
# Create saver
G_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='G')
global_step = tf.train.get_or_create_global_step()
saver = tf.train.Saver(G_vars + [global_step])
# Export graph
tf.train.write_graph(tf.get_default_graph(), infer_dir, 'infer.pbtxt')
# Export MetaGraph
infer_metagraph_fp = os.path.join(infer_dir, 'infer.meta')
tf.train.export_meta_graph(
filename=infer_metagraph_fp,
clear_devices=True,
saver_def=saver.as_saver_def())
# Reset graph (in case training afterwards)
tf.reset_default_graph()
if __name__ == '__main__':
import argparse
import glob
import sys
import numpy as np
from util import str2bool
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, choices=['train', 'preview', 'infer'], default='train')
parser.add_argument('--train_dir', type=str,
help='Training directory', default='./train_shoes/')
parser.add_argument('--data_dir', type=str,
help='Data directory')
parser.add_argument('--data_set', type=str, choices=['msceleb12k', 'shoes4k','material'],
help='Which dataset')
parser.add_argument('--data_id_name_tsv_fp', type=str,
help='(Optional) alternate names for ids')
parser.add_argument('--data_nids', type=int,
help='If positive, limits number of identites')
parser.add_argument('--model_d_i', type=int,
help='Dimensionality of identity codes')
parser.add_argument('--model_d_o', type=int,
help='Dimensionality of observation codes')
parser.add_argument('--model_dim', type=int,
help='Dimensionality multiplier for model of G and D')
parser.add_argument('--train_batch_size', type=int,
help='Batch size')
parser.add_argument('--train_k', type=int,
help='k-wise SD-GAN training')
parser.add_argument('--train_queue_capacity', type=int,
help='Random example queue capacity (number of image tuples)')
parser.add_argument('--train_queue_min', type=int,
help='Random example queue minimum')
parser.add_argument('--train_disc_siamese', type=str2bool,
help='If false, stack channels rather than Siamese encoding')
parser.add_argument('--train_disc_nupdates', type=int,
help='Number of discriminator updates per generator update')
parser.add_argument('--train_loss', type=str, choices=['dcgan', 'lsgan', 'wgan', 'wgan-gp'],
help='Which GAN loss to use')
parser.add_argument('--train_save_secs', type=int,
help='How often to save model')
parser.add_argument('--train_summary_secs', type=int,
help='How often to report summaries')
parser.add_argument('--preview_nids', type=int,
help='Number of distinct identity vectors to preview')
parser.add_argument('--preview_nobs', type=int,
help='Number of distinct observation vectors to preview')
parser.set_defaults(
data_dir="./data/shoes4k",
data_set="shoes4k",
data_id_name_tsv_fp=None,
data_nids=-1,
model_d_i=50,
model_d_o=50,
model_dim=64,
train_batch_size=16,
train_k=2,
train_queue_capacity=8192,
train_queue_min=4096,
train_disc_siamese=True,
train_disc_nupdates=1,
train_loss='dcgan',
train_save_secs=300,
train_summary_secs=120,
preview_nids=6,
preview_nobs=8)
args = parser.parse_args()
# Make train dir
if not os.path.isdir(args.train_dir):
os.makedirs(args.train_dir)
# Assign appropriate split for mode
if args.mode == 'train':
split = 'train'
elif args.mode == 'preview':
split = None
elif args.mode == 'infer':
split = 'train'
else:
raise NotImplementedError()
# Dataset options
if args.data_set == 'msceleb12k':
data_extension = 'png'
fname_to_named_id = lambda fn: fn.rsplit('_', 2)[0]
height = 64
width = 64
nch = 3
elif args.data_set == 'shoes4k':
data_extension = 'png'
fname_to_named_id = lambda fn: fn.rsplit('_', 2)[0]
height = 64
width = 64
nch = 3
elif args.data_set == 'material':
data_extension = 'png'
fname_to_named_id = lambda fn: fn.rsplit('@', 2)[0]
height = 64
width = 64
nch = 3
else:
raise NotImplementedError()
# Find group fps and make splits
if split is not None:
print ('Finding files...')
named_id_to_fps = defaultdict(list) #store id -> filepath
glob_fp = os.path.join(args.data_dir, split, '*.{}'.format(data_extension))
data_fps = glob.glob(glob_fp)
for data_fp in sorted(data_fps):
if args.data_nids > 0 and len(named_id_to_fps) >= args.data_nids:
break
data_fname = os.path.splitext(os.path.split(data_fp)[1])[0]
named_id = fname_to_named_id(data_fname)
named_id_to_fps[named_id].append(data_fp)
if len(named_id_to_fps) == 0:
print ('No observations found for {}'.format(glob_fp))
sys.exit(1)
else:
print ('Found {} identities with average {} observations'.format(
len(named_id_to_fps.keys()),
np.mean([len(o) for o in named_id_to_fps.values()])))
if args.mode == 'train':
# Save inference graph first
infer(
args.train_dir,
height,
width,
nch,
args.model_d_i,
args.model_d_o,
args.model_dim,
named_id_to_fps=named_id_to_fps,
id_name_tsv_fp=args.data_id_name_tsv_fp)
# Train
train(
args.train_dir,
named_id_to_fps,
args.train_batch_size,
args.train_k,
height,
width,
nch,
queue_capacity=args.train_queue_capacity,
queue_min=args.train_queue_min,
queue_nthreads=4,
d_i=args.model_d_i,
d_o=args.model_d_o,
G_dim=args.model_dim,
D_dim=args.model_dim,
loss=args.train_loss,
opt=args.train_loss,
D_siamese=args.train_disc_siamese,
D_iters=args.train_disc_nupdates,
save_secs=args.train_save_secs,
summary_secs=args.train_summary_secs)
elif args.mode == 'preview':
preview(
args.train_dir,
args.preview_nids,
args.preview_nobs)
elif args.mode == 'infer':
infer(
args.train_dir,
height,
width,
nch,
args.model_d_i,
args.model_d_o,
args.model_dim,
named_id_to_fps=named_id_to_fps,
id_name_tsv_fp=args.data_id_name_tsv_fp)
else:
raise NotImplementedError()
```
|
{
"source": "jdelarosa0412/authority",
"score": 3
}
|
#### File: jdelarosa0412/authority/err.py
```python
class AuthorityError(Exception):
def __init__(self, msg, status = 0, deets = None):
self.msg = msg
self.detail = deets
self.status = status
def _status(self):
return {"status": self.status}
def _message(self):
return {"message": self.msg}
def _details(self):
if self.detail:
return {"details": str(self.detail)}
return {}
def __str__(self):
result = {}
for something in [self._status, self._message, self._details]:
result.update(something())
return str(result)
class IncorrectRegistration(AuthorityError):
'''
Registration did not have all the necessary items
'''
class TokenMalformed(AuthorityError):
'''
someone tried to modify the token contents or it is incomplete
'''
class NameTaken(AuthorityError):
'''
the name has already been taken; they need to reach out to update'''
class NameNotFound(AuthorityError):
'''This happens whenever retrieval of a name is attempted but the name does not exist'''
```
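A hedged sketch of how these errors serialize: `__str__` folds the status, message and optional details into a single dict-style string, so handlers can log or return it directly. The status code and detail text below are illustrative assumptions.
```python
# Hedged sketch; status code and detail text are assumptions.
try:
    raise NameTaken("name already registered", status=409, deets="name: demo")
except AuthorityError as exc:
    print(exc)  # {'status': 409, 'message': 'name already registered', 'details': 'name: demo'}
```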
#### File: jdelarosa0412/authority/kms.py
```python
import boto3
import os
import base64
class KMS(object):
def __init__(self, *args, **kw):
self.kw = kw
self.args = args
self.client = boto3.client('kms')
def encrypt(self, text):
return base64.b64encode(self.client.encrypt(
KeyId=os.environ['kms_id'],
Plaintext=text,
EncryptionContext={
'Authorizer': 'Register'
}
)['CiphertextBlob']).decode()
def decrypt(self):
return self.client.decrypt(
CiphertextBlob=base64.b64decode(self.kw['headers']['token'].encode()),
EncryptionContext={
'Authorizer': 'Register'
}
)['Plaintext'].decode()
```
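A hedged round-trip sketch for the wrapper above. It assumes valid AWS credentials, a `kms_id` environment variable pointing at a real KMS key, and an API-Gateway-style event carrying the token in `headers`; none of these values come from the original project.
```python
import os
from kms import KMS  # assumes kms.py is on the import path

os.environ.setdefault('kms_id', 'alias/example-key')  # hypothetical key alias

token = KMS().encrypt('user-123')        # base64-encoded ciphertext string
event = {'headers': {'token': token}}    # shape expected by decrypt()
assert KMS(**event).decrypt() == 'user-123'  # requires real AWS access to run
```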
|
{
"source": "jdelasoie/django-authority",
"score": 2
}
|
#### File: django-authority/authority/managers.py
```python
from django.db import models
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
class PermissionManager(models.Manager):
def get_content_type(self, obj):
return ContentType.objects.get_for_model(obj)
def get_for_model(self, obj):
return self.filter(content_type=self.get_content_type(obj))
def for_object(self, obj, approved=True):
return (
self.get_for_model(obj)
.select_related("user", "creator", "group", "content_type")
.filter(object_id=obj.id, approved=approved)
)
def for_user(self, user, obj, check_groups=True):
perms = self.get_for_model(obj)
if not check_groups:
return perms.select_related("user", "creator").filter(user=user)
# Hacking user to user__pk to workaround deepcopy bug:
# http://bugs.python.org/issue2460
# Which is triggered by django's deepcopy which backports that fix in
# Django 1.2
return (
perms.select_related("user", "creator")
.prefetch_related("user__groups")
.filter(Q(user__pk=user.pk) | Q(group__in=user.groups.all()))
)
def user_permissions(self, user, perm, obj, approved=True, check_groups=True):
return self.for_user(user, obj, check_groups,).filter(
codename=perm, approved=approved,
)
def group_permissions(self, group, perm, obj, approved=True):
"""
Get permission objects of type ``perm`` granted to ``group`` for ``obj``'s model
"""
return (
self.get_for_model(obj)
.select_related("user", "group", "creator")
.filter(group=group, codename=perm, approved=approved)
)
def delete_objects_permissions(self, obj):
"""
Delete permissions related to an object instance
"""
perms = self.for_object(obj)
perms.delete()
def delete_user_permissions(self, user, perm, obj, check_groups=False):
"""
Remove granular permission perm from user on an object instance
"""
user_perms = self.user_permissions(user, perm, obj, check_groups=False)
if not user_perms.filter(object_id=obj.id):
return
perms = self.user_permissions(user, perm, obj).filter(object_id=obj.id)
perms.delete()
```
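A hedged usage sketch, assuming the manager above is attached to django-authority's `Permission` model (`authority.models.Permission`); the codename and model used here are illustrative.
```python
from authority.models import Permission  # assumed wiring: objects = PermissionManager()

def can_review(user, article):
    # Approved "review_article" grants the user holds on this object,
    # including grants inherited through the user's groups.
    return Permission.objects.user_permissions(
        user, "review_article", article, approved=True, check_groups=True
    ).exists()

def revoke_review(user, article):
    # Remove the user's granular permission again (group grants are untouched).
    Permission.objects.delete_user_permissions(user, "review_article", article)
```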
|
{
"source": "jdelgit/txml",
"score": 3
}
|
#### File: jdelgit/txml/test_txml.py
```python
from txml import XmlParser
from xml.etree.ElementTree import fromstring
import unittest
class TestXmlParser(unittest.TestCase):
def setUp(self):
self.parser = XmlParser(source='sample.xml')
self.str_source = "<file path=\"export/level4/NL/30114.xml\" \
Product_ID=\"30114\" Updated=\"20150301102709\" Quality=\"AWESOME\" \
Supplier_id=\"5\" Prod_ID=\"FLY-734CU\" Catid=\"587\" On_Market=\"1\" \
Model_Name=\"Mibatsu Monstrosity\" Product_View=\"32767\" \
HighPic=\"http://images.awesome.biz/img/high/30114-Mibatsu.jpg\" \
HighPicSize=\"20782\" HighPicWidth=\"320\" HighPicHeight=\"300\" \
Date_Added=\"20050715000000\">\
<M_Prod_ID>ACLS5<b>test</b>.CEE</M_Prod_ID>\
<EAN_UPCS>\
<EAN_UPC Value=\"4901780776467\" />\
<EAN_UPC Value=\"5053460903188\" />\
</EAN_UPCS>\
<Country_Markets>\
<Country_Market Value=\"PL\" />\
<Country_Market Value=\"ES\" />\
<Country_Market Value=\"NL\" />\
<Country_Market Value=\"FR\" />\
<Country_Market Value=\"ZA\" />\
</Country_Markets>\
<TryCData>\
<![CDATA[cdata text & > hoi]]>\
</TryCData>\
</file>"
def tearDown(self):
del self.parser
def test_get_encoding(self):
self.encoded_parser = XmlParser(source='jan96down.xml')
control_encoding = 'iso-8859-1'
test_encoding = self.encoded_parser.encoding
self.assertEqual(test_encoding, control_encoding)
control_encoding = 'UTF-8'
test_encoding = self.parser.encoding
self.assertEqual(test_encoding, control_encoding)
def test_source_check(self):
non_existant_xml = 'some_random_file.xml'
test_parser = XmlParser(source=non_existant_xml)
self.assertEqual(test_parser.proces_file, False)
self.assertEqual(test_parser.use_io, False)
existing_xml = 'sample.xml'
test_parser = XmlParser(source=existing_xml)
self.assertEqual(test_parser.proces_file, True)
self.assertEqual(test_parser.use_io, False)
bad_format_str = "Just some random string of words"
test_parser = XmlParser(source=bad_format_str)
self.assertEqual(test_parser.proces_file, False)
self.assertEqual(test_parser.use_io, False)
proper_format_str = self.str_source
test_parser = XmlParser(source=proper_format_str)
self.assertEqual(test_parser.proces_file, True)
self.assertEqual(test_parser.use_io, True)
def test_node_to_dict(self):
test_node = fromstring(self.str_source)
my_parser = XmlParser()
if hasattr(test_node, 'getroot'):
test_node = test_node.getroot()
test_dict = my_parser._node_to_dict(test_node)
control_dict = {'path': 'export/level4/NL/30114.xml',
'Product_ID': '30114', 'Updated': '20150301102709',
'Quality': 'AWESOME', 'Supplier_id': '5',
'Prod_ID': 'FLY-734CU', 'Catid': '587',
'On_Market': '1',
'Model_Name': 'Mibatsu Monstrosity',
'Product_View': '32767',
'HighPic': 'http://images.awesome.biz/img/high/30114-Mibatsu.jpg',
'HighPicSize': '20782', 'HighPicWidth': '320',
'HighPicHeight': '300', 'Date_Added': '20050715000000',
'text': ' ',
'tag': "file"}
self.assertDictEqual(test_dict, control_dict)
def test_get_namespaces(self):
encoded_parser = XmlParser(source='jan96down.xml')
# encoded_parser._get_namespaces()
test_dict = encoded_parser.namespaces
t_key = list(test_dict.keys())[0]
ts_list = test_dict[t_key]
ts_list.sort()
test_dict = {t_key: ts_list}
control_list = ['Application', 'ParaCurve', 'Metric', 'Start',
'Cant', 'Feature', 'Curve', 'CoordGeom',
'Alignments', 'Property', 'LandXML', 'CantStation',
'Profile', 'End', 'Center', 'Project', 'PVI', 'Units',
'Spiral', 'ProfAlign', 'Alignment', 'PI', 'Line']
control_list.sort()
control_dict = {'{http://www.landxml.org/schema/LandXML-1.1}': control_list}
self.assertDictEqual(test_dict, control_dict)
def test_search_nodes(self):
products = self.parser.search_nodes(tag='controller')
products_list = list(products)
test_num_matches = len(products_list)
control_num_matches = 3
self.assertEqual(test_num_matches, control_num_matches)
test_product = products_list[0]['elem']
control_product = {'type': 'usb', 'index': '0',
'text': '\n ',
'tag': 'controller'}
self.assertDictEqual(test_product, control_product)
test_list = products_list[0]['children']
control_list = [{'children': [],
'elem': {'name': 'usb0',
'text': None, 'tag': 'alias'}},
{'children': [],
'elem': {'type': 'pci', 'domain': '0x0000',
'bus': '0x00', 'slot': '0x01',
'function': '0x2',
'text': None, 'tag': 'address'}}]
self.assertEqual(test_list, control_list)
def test_search_node_attr(self):
product = self.parser.search_node_attr(
tag='controller', type='usb')
prod_list = list(product)
test_matches = len(prod_list)
control_matches = 1
self.assertEqual(test_matches, control_matches)
test_product_elem = prod_list[0]['elem']
control_product_elem = {'type': 'usb', 'index': '0',
'text': '\n ',
'tag': 'controller'}
self.assertEqual(test_product_elem, control_product_elem)
test_product_children = prod_list[0]['children']
control_product_children = [{'children': [],
'elem': {'name': 'usb0',
'text': None,
'tag': 'alias'}},
{'children': [],
'elem': {'type': 'pci',
'domain': '0x0000',
'bus': '0x00',
'slot': '0x01',
'function': '0x2',
'text': None,
'tag': 'address'}}]
self.assertEqual(test_product_children, control_product_children)
def test_get_all_tags(self):
test_list = self.parser.get_all_tags()
control_list = ['sound', 'memballoon', 'pae', 'currentMemory', 'disk',
'mac', 'boot', 'emulator', 'driver', 'graphics',
'imagelabel', 'virtualport', 'video', 'on_crash',
'resource', 'serial', 'name', 'cpu', 'feature',
'alias', 'os', 'address', 'memory', 'channel',
'controller', 'console', 'parameters', 'uuid',
'devices', 'listen', 'domain', 'interface',
'type', 'input', 'label', 'on_poweroff',
'features', 'acpi', 'seclabel', 'vcpu', 'clock',
'on_reboot', 'apic', 'source', 'protocol',
'target', 'model', 'partition']
control_list.sort()
test_list.sort()
self.assertListEqual(test_list, control_list)
if __name__ == '__main__':
unittest.main()
```
#### File: jdelgit/txml/txml.py
```python
from xml.etree.ElementTree import iterparse, ParseError
from io import StringIO
from os.path import isfile
from re import findall
class XmlParser:
def __init__(self, source=""):
self.source = source
self.proces_file = False
self.use_io = False
self.encoding = 'UTF-8'
self.namespaces = {}
self.namespace_present = False
self._source_check()
# see also _get_encoding, _get_namespaces
def _source_check(self):
"""
[Function checks whether the source input is an existing xml file
or an xml style formatted string]
"""
_extension = self.source[-3:]
if _extension == "xml" or _extension == "xsd":
if isfile(self.source):
self.proces_file = True
self._get_encoding()
self._get_namespaces()
else:
print("File not found {}".format(self.source))
else:
context_test = iterparse(StringIO("""{}""".format(self.source)))
try:
context_test.__next__()
del context_test
self.proces_file = True
self.use_io = True
self._get_encoding()
self._get_namespaces()
except ParseError:
del context_test
print("Input is not in supported Xml format")
def _get_encoding(self):
if self.proces_file and not self.use_io:
with open(self.source, 'r') as f:
l = f.readline()
if 'encoding' in l:
match = findall(r'(encoding=.*\?)', l)
encoding = match[0].split('=')[1].replace(
'?', '').replace('\"', '')
self.encoding = encoding
# see also get_all_tags
def _get_namespaces(self):
"""[Creates a dictionary of the namespaces with their associated tags ]
Returns:
[dict] -- [Dictionary with namespaces as keys
and the corresponding tags in a list as value ]
"""
tags = self.get_all_tags()
namespaces = {}
for tag in tags:
namespace = findall('({.{1,}})', tag)
if len(namespace) > 0:
namespace = namespace[0]
formatted_tag = tag.replace(namespace, '')
try:
namespaces[namespace].append(formatted_tag)
except KeyError:
namespaces[namespace] = [formatted_tag]
if namespaces:
self.namespace_present = True
self.namespaces = namespaces
# return namespaces
def get_all_tags(self):
"""[All the unique tags available in the Xml
No hierachy is mainted for the xml structure]
Returns:
[list] -- [A list of all the unique tags available in the Xml]
"""
if self.source and self.proces_file:
if self.use_io:
context = iterparse(StringIO("""{}""".format(self.source)),
events=("start",))
else:
data = open(self.source, 'r', encoding=self.encoding)
context = iterparse(data, events=("start",))
else:
print("No source XML-file provided")
return
tag_set = []
for event, elem in context:
tag_set.append(elem.tag)
elem.clear()
if self.source and self.proces_file and not self.use_io:
data.close() # close filestream
del context
tag_set = list(set(tag_set))
return tag_set
# see also search_nodes
def search_namespace_node(self, namespace="", tag=""):
ntag = "{}{}".format(namespace, tag)
for node in self.search_nodes(tag=ntag):
yield node
# see also search_node_attr
def search_namespace_attr(self, namespace="", tag="", **kwargs):
ntag = "{}{}".format(namespace, tag)
for node in self.search_node_attr(tag=ntag, kwargs=kwargs):
yield node
# see also search_nodes
def search_node_attr(self, tag="", get_children=True, **kwargs):
"""[This function filters results from the <search_node> function
based on given attributes,values]
Keyword Arguments:
tag {str} -- [tag of Xml node element] (default: {""})
get_children {bool} -- [Choice for whether subnodes
should be returned] (default: {True})
Returns / yields:
[dict] -- [Dictionary containing all matching nodes]
"""
if 'kwargs' in kwargs:
kwargs = kwargs['kwargs']
for node in self.search_nodes(tag=tag, get_children=get_children):
if len(kwargs) > 0:
for key in kwargs:
arg = kwargs[key]
try:
node_val = node['element']['attr'][key]
except KeyError:
# print("Key '{}' not found in element {}".format(key,
# tag))
# exit function if non-existing key is requested
node_val = ''
if node_val == arg:
give_node = True
else:
# attribute not matching
# move on to next node
give_node = False
break
else:
give_node = True
if give_node:
yield node
# see also _node_to_dict and _stack_state_controller
def search_nodes(self, tag="", get_children=True):
"""[If a tag is specified the function returns an generator
with all Xml elements which have a matching tag.
If tag is not specified, the root node is returned
When get_children is set, the function returns the subnodes
nested in a list of dictionaries]
Keyword Arguments:
tag {str} -- [tag of Xml node element] (default: {""})
get_children {bool} -- [Choice for whether subnodes
should be returned] (default: {True})
"""
if self.source and self.proces_file:
if self.use_io:
context = iterparse(StringIO("""{}""".format(self.source)),
events=('start', 'end'))
else:
data = open(self.source, 'r', encoding=self.encoding)
context = iterparse(data, events=('start', 'end'))
else:
print("Unable to process input")
return
if get_children:
children = []
p_stack = []
tag_stack = []
p_tag = ""
c_tag = ""
npd = False
append_children = False
for event, elem in context:
if not tag:
# if no tag is given then get data for entire document
tag = elem.tag
if get_children:
if elem.tag != tag and append_children:
event, elem, p_tag, c_tag, p_stack, \
tag_stack, children, npd = \
self._stack_state_controller(event=event,
elem=elem,
p_tag=p_tag,
c_tag=c_tag,
p_stack=p_stack,
tag_stack=tag_stack,
children=children,
npd=npd)
if elem.tag == tag and event == 'start':
append_children = True
if elem.tag == tag and event == 'end':
node_dict = self._node_to_dict(elem)
output_dict = {'element': node_dict, 'children': []}
elem.clear()
if get_children:
output_dict['children'] = children
children = []
append_children = False
yield output_dict
del context
if self.source and self.proces_file and not self.use_io:
data.close() # close filestream
del data
# see also node_to_dict
def _stack_state_controller(self, event, elem, p_tag="", c_tag="",
p_stack=[], tag_stack=[], children=[],
npd=False):
"""[Keeps track of a dictionary stack and a tag stack, and updates them as required.
This is done based on the start/end triggers from the elements in the Xml format]
Arguments:
event {[str]} -- [start/end points of element]
elem {[et.etree.ElementTree.Element]} -- [description]
Keyword Arguments:
p_tag {str} -- [Current parent tag (top of dict stack). (not used actively) ] (default: {""})
c_tag {str} -- [Current child tag (top of tag stack)] (default: {""})
p_stack {list} -- [Stack for holding the parent dictionaries ] (default: {[]})
tag_stack {list} -- [Stack for holding all the tags] (default: {[]})
children {list} -- [List for holding all subnodes found] (default: {[]})
npd {bool} -- [When set new dictionary is appended to stack] (default: {False})
Returns:
All arguments passed to it are returned after being updated
"""
# ndp controls the creation of new dicts in the p_stack
if (elem.tag != c_tag) and (event == "start"):
tag_stack.append(elem.tag)
if npd:
# add new dictionary when children are confirmed to exist
_p_dict = {'children': [], 'element': ""}
p_stack.append(_p_dict)
p_tag = c_tag
c_tag = elem.tag
npd = True
elif (elem.tag == c_tag) and (event == "end"):
if len(tag_stack) == 1:
# last child on stack
if len(p_stack) > 0:
# child has children
_child = p_stack.pop()
_child['element'] = self._node_to_dict(elem)
else:
_child = {'children': [],
'element': self._node_to_dict(elem)}
children.append(_child)
c_tag = ""
tag_stack.pop()
elif len(tag_stack) == len(p_stack):
_child = p_stack.pop()
_parent = p_stack.pop()
_child['element'] = self._node_to_dict(elem)
_parent['children'].append(_child)
p_stack.append(_parent)
tag_stack.pop()
c_tag = tag_stack[-1]
if len(tag_stack) > 1:
p_tag = tag_stack[-2]
else:
p_tag = ""
else:
_parent = p_stack.pop()
_child = self._node_to_dict(elem)
_parent['children'].append(_child)
p_stack.append(_parent)
tag_stack.pop()
c_tag = tag_stack[-1]
if len(tag_stack) > 1:
p_tag = tag_stack[-2]
else:
p_tag = ""
npd = False
elem.clear()
return [event, elem, p_tag, c_tag, p_stack,
tag_stack, children, npd]
def _node_to_dict(self, node=""):
"""[Convert node element attributes to dictionary]
Keyword Arguments:
node {et.etree.ElementTree.Element} -- [] (default: {""})
Returns:
[dict] -- [Dictionary containing all the attribute,value pairs
contained in the node]
"""
data = {}
data['attr'] = {n[0]: n[1] for n in node.items()}
data['text'] = node.text
data['tag'] = node.tag
return data
class XsdtoDict:
def __init__(self, source=''):
self.source = source
def convert_to_dict(self):
parser = XmlParser(source=self.source)
xsd_tags = self.get_export_type_data(parser)
data = {}
for tag in xsd_tags:
data[tag['name']] = self.parse_xml_entry(tag, parser)
return data
def get_export_type_data(self, validation_parser):
all_nodes = validation_parser.search_nodes()
output_types = []
for nodes in all_nodes:
if nodes:
output_types = [{'name': entry['element']['attr']['name'],
'tag': entry['element']['tag']}
for entry in nodes['children']]
return output_types
def parse_xml_entry(self, tag_data, xml_iterator):
parent_tag = tag_data['tag']
parent_name = tag_data['name']
sub_elements = xml_iterator.search_node_attr(tag=parent_tag,
name=parent_name)
if 'complexType' in parent_tag:
output = self.parse_complextypes(sub_elements)
elif 'simpleType' in parent_tag:
output = self.parse_simpletypes(sub_elements)
else:
output = list(sub_elements)
return output
def parse_complextypes(self, complex_iterator):
output = {}
for element_data in complex_iterator:
output['attr'] = element_data['element']['attr']
output['sequence'] = []
if element_data['children']:
for sub_element in element_data['children']:
if 'sequence' in sub_element['element']['tag']:
sequence_data = self.parse_sequence(sub_element['children'])
output['sequence'].append(sequence_data)
else:
pass
return output
def parse_sequence(self, sequence_elements):
sequence_output = []
for element in sequence_elements:
element_data = self.parse_element(element)
sequence_output.append(element_data)
return sequence_output
def parse_element(self, element):
output = {}
if 'children' in element:
output['tag'] = element['element']['tag']
output['attr'] = element['element']['attr']
element_children = element['children']
output['children_data'] = []
for child in element_children:
if 'simpleType' in child['element']['tag']:
child_data = self.parse_simpletypes(child)
output['children_data'].append(child_data)
else:
output['tag'] = element['tag']
output['attr'] = element['attr']
return output
def parse_simpletypes(self, simple_element):
output = {}
try:
element_children = simple_element['children']
for child_element in element_children:
if 'restriction' in child_element['element']['tag']:
output['restrictions'] = {'attr': child_element['element']['attr']}
restriction_data = self.parse_restrictions(child_element['children'])
output['restrictions']['restrictions'] = restriction_data
except TypeError:
element_data = list(simple_element)
element_data = element_data[0]
element_children = element_data['children']
element_children = element_children[0]['children']
output['restrictions'] = []
for data in element_children:
if 'element' in data:
output['restrictions'].append(data['element']['attr'])
else:
if 'minLength' in data['tag']:
output['restrictions'].append({'minlength':data['attr']})
if 'maxLength' in data['tag']:
output['restrictions'].append({'maxlength':data['attr']})
return output
def parse_restrictions(self, restriction_iterator):
output = []
for restriction in restriction_iterator:
restriction_data = {}
restriction_data['enumarations'] = []
restriction_data['length_data'] = []
if 'element' in restriction:
if 'enumeration' in restriction['element']['tag']:
enumaration_data = self.parse_enumarations(restriction['children'])
restriction_data['enumarations'].append(enumaration_data)
restriction_data['attr'] = restriction['element']['attr']
elif 'Length' in restriction['element']['tag']:
restriction_data['attr'] = restriction['element']['attr']
restriction_data['length_data'].append(restriction['element']['attr'])
else:
restriction_data['attr'] = restriction['attr']
output.append(restriction_data)
return output
def parse_enumarations(self, enumeration_iterator):
output = {'annotations': ""}
for enumaration in enumeration_iterator:
if 'annotation' in enumaration['element']['tag']:
annotations = enumaration['children']
annot = {'documentation': []}
for annotation in annotations:
annot['documentation'].append({'attr': annotation['attr'],
'text': annotation['text']})
output['annotations'] = annot
return output
```
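A short usage sketch for `XmlParser`, run against a small in-memory document (the XML content is illustrative). String input is detected via the `iterparse` probe in `_source_check`, so no file is needed.
```python
from txml import XmlParser  # assumes txml.py is on the import path

xml = '<config><server name="alpha" port="80"/><server name="beta" port="81"/></config>'
parser = XmlParser(source=xml)

print(parser.get_all_tags())            # ['config', 'server'] (order not guaranteed)

# every <server> element, attributes exposed under node['element']['attr']
for node in parser.search_nodes(tag='server'):
    print(node['element']['attr'])      # {'name': 'alpha', 'port': '80'}, then beta

# only the <server> whose name attribute equals "beta"
beta = next(parser.search_node_attr(tag='server', name='beta'))
print(beta['element']['attr']['port'])  # '81'
```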
|
{
"source": "jdelic/authserver",
"score": 2
}
|
#### File: maildaemons/forwarder/server.py
```python
import argparse
import asyncore
import json
import logging
import signal
import sys
import os
from types import FrameType
from typing import Tuple, Sequence, Any, Union, Optional, List, Dict
from concurrent.futures import ThreadPoolExecutor as Pool
import daemon
from django.db.utils import OperationalError
import authserver
from maildaemons.utils import SMTPWrapper, PatchedSMTPChannel, SaneSMTPServer
_log = logging.getLogger(__name__)
pool = Pool()
class ForwarderServer(SaneSMTPServer):
def __init__(self, remote_relay_ip: str, remote_relay_port: int, local_delivery_ip: str,
local_delivery_port: int, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.smtp = SMTPWrapper(
external_ip=remote_relay_ip, external_port=remote_relay_port,
error_relay_ip=local_delivery_ip, error_relay_port=local_delivery_port
)
# ** must be thread-safe, don't modify shared state,
# _log should be thread-safe as stated by the docs. Django ORM should be as well.
def _process_message(self, peer: Tuple[str, int], mailfrom: str, rcpttos: Sequence[str], data: bytes, *,
channel: PatchedSMTPChannel,
**kwargs: Any) -> Optional[str]:
# we can't import the Domain model before Django has been initialized
from mailauth.models import EmailAlias, Domain
data = self.add_received_header(peer, data, channel)
remaining_rcpttos = list(rcpttos) # ensure that new_rcpttos is a mutable list
combined_rcptto = {} # type: Dict[str, List[str]] # { new_mailfrom: [recipients] }
def add_rcptto(mfrom: str, rcpt: Union[str, List]) -> None:
if mfrom in combined_rcptto:
if isinstance(rcpt, list):
combined_rcptto[mfrom] += rcpt
else:
combined_rcptto[mfrom].append(rcpt)
else:
if isinstance(rcpt, list):
combined_rcptto[mfrom] = rcpt
else:
combined_rcptto[mfrom] = [rcpt]
# we're going to modify remaining_rcpttos so we start from its end
for ix in range(len(remaining_rcpttos) - 1, -1, -1):
rcptto = rcpttos[ix].lower()
rcptuser, rcptdomain = rcptto.split("@", 1)
# implement domain catch-all redirect
domain = None # type: Optional[Domain]
try:
domain = Domain.objects.get(name=rcptdomain)
except Domain.DoesNotExist:
pass
except OperationalError:
_log.exception("Database unavailable.")
return "421 Processing problem. Please try again later."
if domain:
if domain.redirect_to:
_log.debug("ix: %s - rcptto: %s - remaining rcpttos: %s", ix, rcptto, remaining_rcpttos)
del remaining_rcpttos[ix]
new_rcptto = "%s@%s" % (rcptuser, domain.redirect_to)
_log.info("%sForwarding email from <%s> to <%s> to domain @%s",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, rcptto, domain.redirect_to)
add_rcptto(mailfrom, new_rcptto)
continue
# follow the same path as the stored procedure authserver_resolve_alias(...)
if "-" in rcptuser:
# convert the first - to a +
user_mailprefix = "%s+%s" % tuple(rcptuser.split("-", 1)) # type: ignore
else:
user_mailprefix = rcptuser
if "+" in user_mailprefix:
# if we had a dashext, or a plusext, we're left with just the prefix after this
user_mailprefix = user_mailprefix.split("+", 1)[0]
try:
alias = EmailAlias.objects.get(mailprefix__iexact=user_mailprefix,
domain__name__iexact=rcptdomain) # type: EmailAlias
except EmailAlias.DoesNotExist:
# OpenSMTPD shouldn't even call us for invalid addresses if we're configured correctly
_log.error("%sUnknown mail address: %s (from: %s, prefix: %s)",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
rcptto, mailfrom, user_mailprefix)
continue
except OperationalError:
_log.exception("Database unavailable.")
return "421 Processing problem. Please try again later."
if alias.forward_to is not None:
# it's a mailing list, forward the email to all connected addresses
del remaining_rcpttos[ix] # remove this recipient from the list
_newmf = mailfrom
if alias.forward_to.new_mailfrom != "":
_newmf = alias.forward_to.new_mailfrom
_log.info("%sForwarding email from <%s> with new sender <%s> to <%s>",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, _newmf, alias.forward_to.addresses)
add_rcptto(_newmf, alias.forward_to.addresses)
# if there are any remaining non-list/non-forward recipients, we inject them back to OpenSMTPD here
if len(remaining_rcpttos) > 0:
_log.info("%sDelivering email from <%s> to remaining recipients <%s>",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, remaining_rcpttos)
add_rcptto(mailfrom, remaining_rcpttos)
if len(combined_rcptto.keys()) == 1:
_log.debug("Only one mail envelope sender, forwarding is atomic")
results = {k: "unsent" for k in combined_rcptto.keys()} # type: Dict[str, str]
for new_mailfrom in combined_rcptto.keys():
_log.debug("Injecting email from <%s> to <%s>", new_mailfrom, combined_rcptto[new_mailfrom])
ret = self.smtp.sendmail(new_mailfrom, combined_rcptto[new_mailfrom], data)
if ret is not None:
results[new_mailfrom] = "failure"
if len(combined_rcptto.keys()) > 1:
_log.error("Non-atomic mail sending failed from <%s> in dict(%s)", combined_rcptto.keys(),
json.dumps(results))
return ret
results[new_mailfrom] = "success"
# TODO: log results
_log.debug("Done processing.")
return None
def process_message(self, *args: Any, **kwargs: Any) -> Optional[str]:
future = pool.submit(ForwarderServer._process_message, self, *args, **kwargs)
return future.result()
def run(_args: argparse.Namespace) -> None:
server = ForwarderServer(_args.remote_relay_ip, _args.remote_relay_port,
_args.local_delivery_ip, _args.local_delivery_port,
(_args.input_ip, _args.input_port), None, decode_data=False,
daemon_name="mailforwarder")
asyncore.loop()
def _sigint_handler(sig: int, frame: FrameType) -> None:
print("CTRL+C exiting")
pool.shutdown(wait=False)
sys.exit(1)
def _main() -> None:
signal.signal(signal.SIGINT, _sigint_handler)
parser = argparse.ArgumentParser(
description="This is a SMTP daemon that is used through OpenSMTPD configuration "
"to check whether incoming emails are addressed to a forwarding email alias "
"and if they are, inject emails to all list delivery addresses / expand the alias."
)
grp_daemon = parser.add_argument_group("Daemon options")
grp_daemon.add_argument("-p", "--pidfile", dest="pidfile", default="./mailforwarder-server.pid",
help="Path to a pidfile")
grp_daemon.add_argument("-u", "--user", dest="user", default=None, help="Drop privileges and switch to this user")
grp_daemon.add_argument("-g", "--group", dest="group", default=None,
help="Drop privileges and switch to this group")
grp_daemon.add_argument("-d", "--daemonize", dest="daemonize", default=False, action="store_true",
help="If set, fork into background")
grp_daemon.add_argument("-v", "--verbose", dest="verbose", default=False, action="store_true",
help="Output extra logging (not implemented right now)")
grp_daemon.add_argument("-C", "--chdir", dest="chdir", default=".",
help="Change working directory to the provided value")
grp_network = parser.add_argument_group("Network options")
grp_network.add_argument("--input-ip", dest="input_ip", default="127.0.0.1", help="The network address to bind to")
grp_network.add_argument("--input-port", dest="input_port", metavar="PORT", type=int, default=10046,
help="The port to bind to")
grp_network.add_argument("--local-delivery-ip", dest="local_delivery_ip", default="127.0.0.1",
help="The OpenSMTPD instance IP for local email to be delivered.")
grp_network.add_argument("--local-delivery-port", dest="local_delivery_port", metavar="PORT", type=int,
default=10045, help="The port where OpenSMTPD listens for local email to be delivered")
grp_network.add_argument("--remote-relay-ip", dest="remote_relay_ip", default="127.0.0.1",
help="The OpenSMTPD instance IP that accepts mail for relay to external domains.")
grp_network.add_argument("--remote-relay-port", dest="remote_relay_port", default=10045,
help="The port where OpenSMTPD listens for mail to relay.")
grp_django = parser.add_argument_group("Django options")
grp_django.add_argument("--settings", dest="django_settings", default="authserver.settings",
help="The Django settings module to use for authserver database access (default: "
"authserver.settings)")
_args = parser.parse_args()
os.environ.setdefault("DJANGO_SETTINGS_MODULE", _args.django_settings)
# noinspection PyUnresolvedReferences
from django.conf import settings # initialize Django
import django
django.setup()
_log.info("mailforwarder v%s: Forwarding Alias Service starting" % authserver.version)
_log.info("Django ORM initialized")
pidfile = open(_args.pidfile, "w")
ctx = daemon.DaemonContext(
working_directory=_args.chdir,
pidfile=pidfile,
uid=_args.user,
gid=_args.group,
detach_process=_args.daemonize,
files_preserve=[1, 2, 3, pidfile],
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
)
with ctx:
run(_args)
def main() -> None:
try:
_main()
except Exception as e:
_log.critical("Unhandled exception", exc_info=True)
sys.exit(1)
if __name__ == "__main__":
main()
```
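The address normalisation in `_process_message` (the first `-` treated as `+`, then any plus-extension stripped) is the part most worth illustrating; here is a standalone sketch of just that rule, with made-up addresses.
```python
def mail_prefix(local_part: str) -> str:
    """Mirror of the alias-prefix normalisation used in _process_message above."""
    if "-" in local_part:
        # convert only the first "-" to a "+"
        local_part = "%s+%s" % tuple(local_part.split("-", 1))
    if "+" in local_part:
        # strip any plus-extension, keeping just the prefix
        local_part = local_part.split("+", 1)[0]
    return local_part

assert mail_prefix("jonas") == "jonas"
assert mail_prefix("jonas+newsletter") == "jonas"
assert mail_prefix("jonas-newsletter") == "jonas"
assert mail_prefix("jonas-foo-bar") == "jonas"
```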
|
{
"source": "jdelic/dynamicsecrets",
"score": 2
}
|
#### File: dynamicsecrets/pillar/dynamicsecrets.py
```python
import logging
_log = logging.getLogger(__name__)
_log.info("dynamic secrets module loaded")
try:
import typing
except ImportError:
pass
else:
if typing.TYPE_CHECKING:
from typing import Union, Dict, List, Any
def ext_pillar(minion_id, pillar, **pillarconfig):
# type: (str, str, Dict[str, Any]) -> Dict[str, Dict[str, Union[str, Dict[str, str]]]]
db = __salt__['dynamicsecrets.get_store']() # type: DynamicSecretsPillar
if minion_id == __opts__['id']:
if minion_id.endswith("_master"):
minion_id = minion_id[0:-7]
else:
if 'dynamicsecrets.master_host_value' in __opts__:
minion_id = __opts__['dynamicsecrets.master_host_value']
else:
from salt.exceptions import SaltConfigurationError
raise SaltConfigurationError("If you configure your master 'id', you must set "
"'dynamicsecrets.master_host_value' so dynamicsecrets can map secrets "
"generated on the master to the correct minion's host name.")
# make sure all required secrets exist and filter them
# according to the current minion's roles or host id
this_node_secrets = {}
if "config" not in pillarconfig:
pillarconfig["config"] = {}
if "grainmapping" not in pillarconfig:
pillarconfig["grainmapping"] = {}
if "pillarmapping" not in pillarconfig:
pillarconfig["pillarmapping"] = {}
if "hostmapping" not in pillarconfig:
pillarconfig["hostmapping"] = {}
for grain in pillarconfig["grainmapping"]:
for grainvalue in pillarconfig["grainmapping"][grain]:
nodevalues = __grains__.get(grain, [])
_log.debug("dynamicsecrets matching %s=%s in %s", grain, grainvalue, nodevalues)
# "*" matches every grainvalue as long as there is at least one value
if nodevalues and grainvalue == "*" or grainvalue in nodevalues:
for secret_name in pillarconfig["grainmapping"][grain][grainvalue]:
_log.debug("adding secret %s to dynamicsecrets for grain match %s=%s", secret_name, grain,
grainvalue)
secret_config = pillarconfig["config"].get(secret_name, {})
host = "*"
if secret_name in pillarconfig["config"]:
if "unique-per-host" in pillarconfig["config"][secret_name] and \
pillarconfig["config"][secret_name]["unique-per-host"]:
host = minion_id
if secret_name is None:
_log.error("dynamicsecrets created None secret_name for data %s in %s", grain, gm)
continue
if secret_name not in this_node_secrets:
this_node_secrets[secret_name] = db.get_or_create(secret_config, secret_name, host)
for pillar in pillarconfig["pillarmapping"]:
for pillarvalue in pillarconfig["pillarmapping"][pillar]:
nodevalues = __pillars__.get(pillar, [])
# "*" matches every grainvalue as long as there is at least one value
if nodevalues and pillarvalue == "*" or pillarvalue in nodevalues:
for secret_name in pillarconfig["pillarmapping"][pillar][pillarvalue]:
secret_config = pillarconfig["config"].get(secret_name, {})
host = "*"
if secret_name in pillarconfig["config"]:
if "unique-per-host" in pillarconfig["config"][secret_name] and \
pillarconfig["config"][secret_name]["unique-per-host"]:
host = minion_id
if secret_name is None:
_log.error("dynamicsecrets created None secret_name for data %s in %s", pillar, pillarvalue)
continue
if secret_name not in this_node_secrets:
this_node_secrets[secret_name] = db.get_or_create(secret_config, secret_name, host)
minion_match_keys = __salt__['dynamicsecrets.match_minion_id'](minion_id, pillarconfig["hostmapping"])
for minion_match_key in minion_match_keys:
for secret_name in pillarconfig["hostmapping"][minion_match_key]:
secret_config = pillarconfig["config"].get(secret_name, {})
host = "*"
if secret_name in pillarconfig["config"]:
if "unique-per-host" in pillarconfig["config"][secret_name] and \
pillarconfig["config"][secret_name]["unique-per-host"]:
host = minion_id
if secret_name is None:
_log.error("dynamicsecrets created None secret_name for data %s/%s in %s", minion_match_key, minion_id,
pillarconfig["hostmapping"][minion_match_key])
continue
if secret_name not in this_node_secrets:
this_node_secrets[secret_name] = db.get_or_create(secret_config, secret_name, host)
return {
"dynamicsecrets": this_node_secrets
}
```
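A hedged sketch of the `pillarconfig` shape this ext_pillar expects, rendered as Python; the secret names, grains and host patterns are illustrative, not taken from a real deployment.
```python
pillarconfig = {
    "config": {
        "ssh-hostkey": {"unique-per-host": True},  # one secret per minion id
        "consul-token": {},                        # shared secret, stored under host "*"
    },
    "grainmapping": {
        "roles": {
            "loadbalancer": ["consul-token"],      # minions whose 'roles' grain contains this value
            "*": ["ssh-hostkey"],                  # any minion with a non-empty 'roles' grain
        },
    },
    "pillarmapping": {},
    "hostmapping": {
        "db.example.com": ["ssh-hostkey"],         # matched via dynamicsecrets.match_minion_id
    },
}
```
Each matched secret name is resolved through `db.get_or_create(secret_config, secret_name, host)` and returned to the minion under the `dynamicsecrets` pillar key.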
|
{
"source": "jdelic/salt",
"score": 2
}
|
#### File: tests/support/helpers.py
```python
import base64
import errno
import fnmatch
import functools
import inspect
import logging
import os
import random
import shutil
import socket
import string
import subprocess
import sys
import tempfile
import textwrap
import threading
import time
import types
from contextlib import contextmanager
import pytest
import salt.ext.tornado.ioloop
import salt.ext.tornado.web
import salt.utils.files
import salt.utils.platform
import salt.utils.pycrypto
import salt.utils.stringutils
import salt.utils.versions
from salt.ext import six
from salt.ext.six.moves import builtins
from saltfactories.exceptions import FactoryFailure as ProcessFailed
from saltfactories.utils.ports import get_unused_localhost_port
from saltfactories.utils.processes import ProcessResult
from tests.support.mock import patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.sminion import create_sminion
from tests.support.unit import SkipTest, _id, skip, skipIf
log = logging.getLogger(__name__)
HAS_SYMLINKS = None
PRE_PYTEST_SKIP_OR_NOT = "PRE_PYTEST_DONT_SKIP" not in os.environ
PRE_PYTEST_SKIP_REASON = (
"PRE PYTEST - This test was skipped before running under pytest"
)
PRE_PYTEST_SKIP = pytest.mark.skipif(
PRE_PYTEST_SKIP_OR_NOT, reason=PRE_PYTEST_SKIP_REASON
)
SKIP_IF_NOT_RUNNING_PYTEST = skipIf(
RUNTIME_VARS.PYTEST_SESSION is False, "These tests now require running under PyTest"
)
def no_symlinks():
"""
Check if git is installed and has symlinks enabled in the configuration.
"""
global HAS_SYMLINKS
if HAS_SYMLINKS is not None:
return not HAS_SYMLINKS
output = ""
try:
output = subprocess.Popen(
["git", "config", "--get", "core.symlinks"],
cwd=RUNTIME_VARS.TMP,
stdout=subprocess.PIPE,
).communicate()[0]
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
except subprocess.CalledProcessError:
# git returned non-zero status
pass
HAS_SYMLINKS = False
if output.strip() == "true":
HAS_SYMLINKS = True
return not HAS_SYMLINKS
def destructiveTest(caller):
"""
Mark a test case as a destructive test for example adding or removing users
from your system.
.. code-block:: python
class MyTestCase(TestCase):
@destructiveTest
def test_create_user(self):
pass
"""
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(caller, "__destructive_test__", True)
if os.environ.get("DESTRUCTIVE_TESTS", "False").lower() == "false":
reason = "Destructive tests are disabled"
if not isinstance(caller, type):
@functools.wraps(caller)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
caller = skip_wrapper
caller.__unittest_skip__ = True
caller.__unittest_skip_why__ = reason
return caller
def expensiveTest(caller):
"""
Mark a test case as an expensive test, for example, a test which can cost
money(Salt's cloud provider tests).
.. code-block:: python
class MyTestCase(TestCase):
@expensiveTest
def test_create_user(self):
pass
"""
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(caller, "__expensive_test__", True)
if os.environ.get("EXPENSIVE_TESTS", "False").lower() == "false":
reason = "Expensive tests are disabled"
if not isinstance(caller, type):
@functools.wraps(caller)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
caller = skip_wrapper
caller.__unittest_skip__ = True
caller.__unittest_skip_why__ = reason
return caller
def slowTest(caller):
"""
Mark a test case as a slow test.
.. code-block:: python
class MyTestCase(TestCase):
@slowTest
def test_that_takes_much_time(self):
pass
"""
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(caller, "__slow_test__", True)
return caller
if os.environ.get("SLOW_TESTS", "False").lower() == "false":
reason = "Slow tests are disabled"
if not isinstance(caller, type):
@functools.wraps(caller)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
caller = skip_wrapper
caller.__unittest_skip__ = True
caller.__unittest_skip_why__ = reason
return caller
def flaky(caller=None, condition=True, attempts=4):
"""
Mark a test as flaky. The test will attempt to run up to ``attempts`` times (four by default),
looking for a successful run. After an immediate second try,
it will use an exponential backoff starting with one second.
.. code-block:: python
class MyTestCase(TestCase):
@flaky
def test_sometimes_works(self):
pass
"""
if caller is None:
return functools.partial(flaky, condition=condition, attempts=attempts)
if isinstance(condition, bool) and condition is False:
# Don't even decorate
return caller
elif callable(condition):
if condition() is False:
# Don't even decorate
return caller
if inspect.isclass(caller):
attrs = [n for n in dir(caller) if n.startswith("test_")]
for attrname in attrs:
try:
function = getattr(caller, attrname)
if not inspect.isfunction(function) and not inspect.ismethod(function):
continue
setattr(
caller,
attrname,
flaky(caller=function, condition=condition, attempts=attempts),
)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
continue
return caller
@functools.wraps(caller)
def wrap(cls):
for attempt in range(0, attempts):
try:
if attempt > 0:
# Run through setUp again
# We only run it after the first iteration(>0) because the regular
# test runner will have already ran setUp the first time
setup = getattr(cls, "setUp", None)
if callable(setup):
setup()
return caller(cls)
except SkipTest as exc:
cls.skipTest(exc.args[0])
except Exception as exc: # pylint: disable=broad-except
exc_info = sys.exc_info()
if isinstance(exc, SkipTest):
six.reraise(*exc_info)
if not isinstance(exc, AssertionError) and log.isEnabledFor(
logging.DEBUG
):
log.exception(exc, exc_info=exc_info)
if attempt >= attempts - 1:
# We won't try to run tearDown once the attempts are exhausted
# because the regular test runner will do that for us
six.reraise(*exc_info)
# Run through tearDown again
teardown = getattr(cls, "tearDown", None)
if callable(teardown):
teardown()
backoff_time = attempt ** 2
log.info("Found Exception. Waiting %s seconds to retry.", backoff_time)
time.sleep(backoff_time)
return cls
return wrap
def requires_sshd_server(caller):
"""
Mark a test as requiring the tests SSH daemon running.
.. code-block:: python
class MyTestCase(TestCase):
@requires_sshd_server
def test_create_user(self):
pass
"""
raise RuntimeError(
"Please replace @requires_sshd_server with @pytest.mark.requires_sshd_server"
)
class RedirectStdStreams:
"""
Temporarily redirect system output to file like objects.
Default is to redirect to `os.devnull`, which just mutes output, `stdout`
and `stderr`.
"""
def __init__(self, stdout=None, stderr=None):
# Late import
import salt.utils.files
if stdout is None:
# pylint: disable=resource-leakage
stdout = salt.utils.files.fopen(os.devnull, "w")
# pylint: enable=resource-leakage
if stderr is None:
# pylint: disable=resource-leakage
stderr = salt.utils.files.fopen(os.devnull, "w")
# pylint: enable=resource-leakage
self.__stdout = stdout
self.__stderr = stderr
self.__redirected = False
self.patcher = patch.multiple(sys, stderr=self.__stderr, stdout=self.__stdout)
def __enter__(self):
self.redirect()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.unredirect()
def redirect(self):
self.old_stdout = sys.stdout
self.old_stdout.flush()
self.old_stderr = sys.stderr
self.old_stderr.flush()
self.patcher.start()
self.__redirected = True
def unredirect(self):
if not self.__redirected:
return
try:
self.__stdout.flush()
self.__stdout.close()
except ValueError:
# already closed?
pass
try:
self.__stderr.flush()
self.__stderr.close()
except ValueError:
# already closed?
pass
self.patcher.stop()
def flush(self):
if self.__redirected:
try:
self.__stdout.flush()
except Exception: # pylint: disable=broad-except
pass
try:
self.__stderr.flush()
except Exception: # pylint: disable=broad-except
pass
class TstSuiteLoggingHandler:
"""
Simple logging handler which can be used to test if certain logging
messages get emitted or not:
.. code-block:: python
with TstSuiteLoggingHandler() as handler:
# (...) Do what ever you wish here
handler.messages # here are the emitted log messages
"""
def __init__(self, level=0, format="%(levelname)s:%(message)s"):
self.level = level
self.format = format
self.activated = False
self.prev_logging_level = None
def activate(self):
class Handler(logging.Handler):
def __init__(self, level):
logging.Handler.__init__(self, level)
self.messages = []
def emit(self, record):
self.messages.append(self.format(record))
self.handler = Handler(self.level)
formatter = logging.Formatter(self.format)
self.handler.setFormatter(formatter)
logging.root.addHandler(self.handler)
self.activated = True
# Make sure we're running with the lowest logging level with our
# tests logging handler
current_logging_level = logging.root.getEffectiveLevel()
if current_logging_level > logging.DEBUG:
self.prev_logging_level = current_logging_level
logging.root.setLevel(0)
def deactivate(self):
if not self.activated:
return
logging.root.removeHandler(self.handler)
# Restore previous logging level if changed
if self.prev_logging_level is not None:
logging.root.setLevel(self.prev_logging_level)
@property
def messages(self):
if not self.activated:
return []
return self.handler.messages
def clear(self):
self.handler.messages = []
def __enter__(self):
self.activate()
return self
def __exit__(self, type, value, traceback):
self.deactivate()
self.activated = False
# Mimic some handler attributes and methods
@property
def lock(self):
if self.activated:
return self.handler.lock
def createLock(self):
if self.activated:
return self.handler.createLock()
def acquire(self):
if self.activated:
return self.handler.acquire()
def release(self):
if self.activated:
return self.handler.release()
class ForceImportErrorOn:
"""
This class is meant to be used in mock'ed test cases which require an
``ImportError`` to be raised.
>>> import os.path
>>> with ForceImportErrorOn('os.path'):
... import os.path
...
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "salttesting/helpers.py", line 263, in __import__
'Forced ImportError raised for {0!r}'.format(name)
ImportError: Forced ImportError raised for 'os.path'
>>>
>>> with ForceImportErrorOn(('os', 'path')):
... import os.path
... sys.modules.pop('os', None)
... from os import path
...
<module 'os' from '/usr/lib/python2.7/os.pyc'>
Traceback (most recent call last):
File "<stdin>", line 4, in <module>
File "salttesting/helpers.py", line 288, in __fake_import__
name, ', '.join(fromlist)
ImportError: Forced ImportError raised for 'from os import path'
>>>
>>> with ForceImportErrorOn(('os', 'path'), 'os.path'):
... import os.path
... sys.modules.pop('os', None)
... from os import path
...
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "salttesting/helpers.py", line 281, in __fake_import__
'Forced ImportError raised for {0!r}'.format(name)
ImportError: Forced ImportError raised for 'os.path'
>>>
"""
def __init__(self, *module_names):
self.__module_names = {}
for entry in module_names:
if isinstance(entry, (list, tuple)):
modname = entry[0]
self.__module_names[modname] = set(entry[1:])
else:
self.__module_names[entry] = None
self.__original_import = builtins.__import__
self.patcher = patch.object(builtins, "__import__", self.__fake_import__)
def patch_import_function(self):
self.patcher.start()
def restore_import_funtion(self):
self.patcher.stop()
def __fake_import__(
self, name, globals_=None, locals_=None, fromlist=None, level=None
):
if six.PY2:
if globals_ is None:
globals_ = {}
if locals_ is None:
locals_ = {}
if level is None:
level = 0
if fromlist is None:
fromlist = []
if name in self.__module_names:
importerror_fromlist = self.__module_names.get(name)
if importerror_fromlist is None:
raise ImportError("Forced ImportError raised for {!r}".format(name))
if importerror_fromlist.intersection(set(fromlist)):
raise ImportError(
"Forced ImportError raised for {!r}".format(
"from {} import {}".format(name, ", ".join(fromlist))
)
)
return self.__original_import(name, globals_, locals_, fromlist, level)
def __enter__(self):
self.patch_import_function()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.restore_import_funtion()
class MockWraps:
"""
Helper class to be used with the mock library.
To be used in the ``wraps`` keyword of ``Mock`` or ``MagicMock`` where you
want to trigger a side effect for X times, and afterwards, call the
original and un-mocked method.
As an example:
>>> def original():
... print 'original'
...
>>> def side_effect():
... print 'side effect'
...
>>> mw = MockWraps(original, 2, side_effect)
>>> mw()
side effect
>>> mw()
side effect
>>> mw()
original
>>>
"""
def __init__(self, original, expected_failures, side_effect):
self.__original = original
self.__expected_failures = expected_failures
self.__side_effect = side_effect
self.__call_counter = 0
def __call__(self, *args, **kwargs):
try:
if self.__call_counter < self.__expected_failures:
if isinstance(self.__side_effect, types.FunctionType):
return self.__side_effect()
raise self.__side_effect
return self.__original(*args, **kwargs)
finally:
self.__call_counter += 1
def requires_network(only_local_network=False):
"""
Simple decorator which is supposed to skip a test case in case there's no
network connection to the internet.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(cls, *args, **kwargs):
has_local_network = False
# First lets try if we have a local network. Inspired in
# verify_socket
try:
pubsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
retsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
pubsock.bind(("", 18000))
pubsock.close()
retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
retsock.bind(("", 18001))
retsock.close()
has_local_network = True
except OSError:
# I wonder if we just have IPV6 support?
try:
pubsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
retsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
pubsock.bind(("", 18000))
pubsock.close()
retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
retsock.bind(("", 18001))
retsock.close()
has_local_network = True
except OSError:
# Let's continue
pass
if only_local_network is True:
if has_local_network is False:
# Since we're only supposed to check local network, and no
# local network was detected, skip the test
cls.skipTest("No local network was detected")
return func(cls)
if os.environ.get("NO_INTERNET"):
cls.skipTest("Environment variable NO_INTERNET is set.")
# We are using the google.com DNS records as numerical IPs to avoid
# DNS lookups which could greatly slow down this check
for addr in (
"172.16.17.32",
"172.16.58.3",
"192.168.127.12",
"172.16.17.32",
"172.16.17.32",
"172.16.58.3",
"172.16.58.3",
"172.16.58.3",
"192.168.127.12",
"192.168.3.11",
"172.16.17.32",
):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.settimeout(0.25)
sock.connect((addr, 80))
# We connected? Stop the loop
break
except OSError:
# Let's check the next IP
continue
else:
cls.skipTest("No internet network connection was detected")
finally:
sock.close()
return func(cls, *args, **kwargs)
return wrapper
return decorator
def with_system_user(
username, on_existing="delete", delete=True, password=<PASSWORD>, groups=None
):
"""
Create and optionally destroy a system user to be used within a test
case. The system user is created using the ``user`` salt module.
The decorated testcase function must accept 'username' as an argument.
:param username: The desired username for the system user.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the user was created.
* delete: delete and re-create the existing user
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system user {!r}".format(username))
kwargs = {"timeout": 60, "groups": groups}
if salt.utils.platform.is_windows():
kwargs.update({"password": password})
create_user = cls.run_function("user.add", [username], **kwargs)
if not create_user:
log.debug("Failed to create system user")
# The user was not created
if on_existing == "skip":
cls.skipTest("Failed to create system user {!r}".format(username))
if on_existing == "delete":
log.debug("Deleting the system user {!r}".format(username))
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
if not delete_user:
cls.skipTest(
"A user named {!r} already existed on the "
"system and re-creating it was not possible".format(
username
)
)
log.debug("Second time creating system user {!r}".format(username))
create_user = cls.run_function("user.add", [username], **kwargs)
if not create_user:
cls.skipTest(
"A user named {!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
username
)
)
if not salt.utils.platform.is_windows() and password is not None:
if salt.utils.platform.is_darwin():
hashed_password = password
else:
hashed_password = salt.utils.pycrypto.gen_hash(password=password)
hashed_password = "'{}'".<PASSWORD>(<PASSWORD>)
add_pwd = cls.run_function(
"shadow.set_password", [username, hashed_password]
)
failure = None
try:
try:
return func(cls, username)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {!r} raised an exception: {}".format(func, exc),
exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_user = cls.run_function(
"user.delete", [username, True, True], timeout=60
)
if not delete_user:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system user {!r} "
"afterwards did.".format(username)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system user {!r}".format(username)
)
if failure is not None:
# If an exception was thrown, raise it
raise failure[1].with_traceback(failure[2])
return wrap
return decorator
def with_system_group(group, on_existing="delete", delete=True):
"""
Create and optionally destroy a system group to be used within a test
case. The system group is created using the ``group`` salt module.
The decorated testcase function must accept 'group' as an argument.
:param group: The desired group name for the system user.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the group was created
* delete: delete and re-create the existing user
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system group {!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_group:
log.debug("Failed to create system group")
# The group was not created
if on_existing == "skip":
cls.skipTest("Failed to create system group {!r}".format(group))
if on_existing == "delete":
log.debug("Deleting the system group {!r}".format(group))
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
cls.skipTest(
"A group named {!r} already existed on the "
"system and re-creating it was not possible".format(group)
)
log.debug("Second time creating system group {!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_group:
cls.skipTest(
"A group named {!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
group
)
)
failure = None
try:
try:
return func(cls, group)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {!r} raised an exception: {}".format(func, exc),
exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system group {!r} "
"afterwards did.".format(group)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system group {!r}".format(group)
)
if failure is not None:
# If an exception was thrown, raise it
raise failure[1].with_traceback(failure[2])
return wrap
return decorator
def with_system_user_and_group(username, group, on_existing="delete", delete=True):
"""
Create and optionally destroy a system user and group to be used within a
test case. The system user is created using the ``user`` salt module, and
the system group is created with the ``group`` salt module.
The decorated testcase function must accept both the 'username' and 'group'
arguments.
:param username: The desired username for the system user.
:param group: The desired name for the system group.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the user was created.
* delete: delete and re-create the existing user
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system user {!r}".format(username))
create_user = cls.run_function("user.add", [username])
log.debug("Creating system group {!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_user:
log.debug("Failed to create system user")
# The user was not created
if on_existing == "skip":
cls.skipTest("Failed to create system user {!r}".format(username))
if on_existing == "delete":
log.debug("Deleting the system user {!r}".format(username))
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
if not delete_user:
cls.skipTest(
"A user named {!r} already existed on the "
"system and re-creating it was not possible".format(
username
)
)
log.debug("Second time creating system user {!r}".format(username))
create_user = cls.run_function("user.add", [username])
if not create_user:
cls.skipTest(
"A user named {!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
username
)
)
if not create_group:
log.debug("Failed to create system group")
# The group was not created
if on_existing == "skip":
cls.skipTest("Failed to create system group {!r}".format(group))
if on_existing == "delete":
log.debug("Deleting the system group {!r}".format(group))
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
cls.skipTest(
"A group named {!r} already existed on the "
"system and re-creating it was not possible".format(group)
)
log.debug("Second time creating system group {!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_group:
cls.skipTest(
"A group named {!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
group
)
)
failure = None
try:
try:
return func(cls, username, group)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {!r} raised an exception: {}".format(func, exc),
exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
delete_group = cls.run_function("group.delete", [group])
if not delete_user:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system user {!r} "
"afterwards did.".format(username)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system user {!r}".format(username)
)
if not delete_group:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system group {!r} "
"afterwards did.".format(group)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system group {!r}".format(group)
)
if failure is not None:
# If an exception was thrown, raise it
raise failure[1].with_traceback(failure[2])
return wrap
return decorator
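# Usage sketch for ``with_system_user_and_group`` (all names below are
# illustrative; it assumes the decorated method lives on an integration test
# class that provides ``run_function``, as the decorator requires):
#
#     @with_system_user_and_group(
#         "dummy-user", "dummy-group", on_existing="delete", delete=True
#     )
#     def test_ownership(self, username, group):
#         self.run_function("file.chown", ["/tmp/testfile", username, group])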
class WithTempfile:
def __init__(self, **kwargs):
self.create = kwargs.pop("create", True)
if "dir" not in kwargs:
kwargs["dir"] = RUNTIME_VARS.TMP
if "prefix" not in kwargs:
kwargs["prefix"] = "__salt.test."
self.kwargs = kwargs
def __call__(self, func):
self.func = func
return functools.wraps(func)(
# pylint: disable=unnecessary-lambda
lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs)
# pylint: enable=unnecessary-lambda
)
def wrap(self, testcase, *args, **kwargs):
name = salt.utils.files.mkstemp(**self.kwargs)
if not self.create:
os.remove(name)
try:
return self.func(testcase, name, *args, **kwargs)
finally:
try:
os.remove(name)
except OSError:
pass
with_tempfile = WithTempfile
class WithTempdir:
def __init__(self, **kwargs):
self.create = kwargs.pop("create", True)
if "dir" not in kwargs:
kwargs["dir"] = RUNTIME_VARS.TMP
self.kwargs = kwargs
def __call__(self, func):
self.func = func
return functools.wraps(func)(
# pylint: disable=unnecessary-lambda
lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs)
# pylint: enable=unnecessary-lambda
)
def wrap(self, testcase, *args, **kwargs):
tempdir = tempfile.mkdtemp(**self.kwargs)
if not self.create:
os.rmdir(tempdir)
try:
return self.func(testcase, tempdir, *args, **kwargs)
finally:
shutil.rmtree(tempdir, ignore_errors=True)
with_tempdir = WithTempdir
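# Usage sketch for ``with_tempfile``/``with_tempdir`` (illustrative): both wrap
# a TestCase method and pass the generated path as the first argument after
# ``self``; the path is cleaned up again once the test returns.
#
#     @with_tempdir(create=False)
#     def test_creates_target_dir(self, tempdir):
#         ...  # the directory was removed again, the test may re-create it
#
#     @with_tempfile(prefix="__salt.test.")
#     def test_writes_file(self, name):
#         ...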
def requires_system_grains(func):
"""
Function decorator which loads and passes the system's grains to the test
case.
"""
@functools.wraps(func)
def decorator(*args, **kwargs):
if not hasattr(requires_system_grains, "__grains__"):
# Late import
from tests.support.sminion import build_minion_opts
opts = build_minion_opts(minion_id="runtests-internal-sminion")
requires_system_grains.__grains__ = salt.loader.grains(opts)
kwargs["grains"] = requires_system_grains.__grains__
return func(*args, **kwargs)
return decorator
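# Usage sketch for ``requires_system_grains`` (names are illustrative): the
# decorated callable receives the cached system grains via the ``grains``
# keyword argument.
#
#     @requires_system_grains
#     def test_grains_present(self, grains):
#         assert "os" in grains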
@requires_system_grains
def runs_on(grains=None, **kwargs):
"""
Skip the test if grains don't match the values passed into **kwargs
if a kwarg value is a list then skip if the grains don't match any item in the list
"""
reason = kwargs.pop("reason", None)
for kw, value in kwargs.items():
if isinstance(value, list):
            if not any(str(grains.get(kw)).lower() == str(v).lower() for v in value):
if reason is None:
reason = "This test does not run on {}={}".format(
kw, grains.get(kw)
)
return skip(reason)
else:
if str(grains.get(kw)).lower() != str(value).lower():
if reason is None:
reason = "This test runs on {}={}, not {}".format(
kw, value, grains.get(kw)
)
return skip(reason)
return _id
@requires_system_grains
def not_runs_on(grains=None, **kwargs):
"""
Reverse of `runs_on`.
Skip the test if any grains match the values passed into **kwargs
if a kwarg value is a list then skip if the grains match any item in the list
"""
reason = kwargs.pop("reason", None)
for kw, value in kwargs.items():
if isinstance(value, list):
if any(str(grains.get(kw)).lower() == str(v).lower() for v in value):
if reason is None:
reason = "This test does not run on {}={}".format(
kw, grains.get(kw)
)
return skip(reason)
else:
if str(grains.get(kw)).lower() == str(value).lower():
if reason is None:
reason = "This test does not run on {}={}, got {}".format(
kw, value, grains.get(kw)
)
return skip(reason)
return _id
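# Usage sketch for ``runs_on``/``not_runs_on`` (grain names and values are
# illustrative): both return a decorator, so they can gate a whole TestCase or
# a single test method.
#
#     @runs_on(kernel="Linux", os_family=["Debian", "RedHat"])
#     @not_runs_on(os="Ubuntu", reason="Not supported on Ubuntu")
#     class LinuxOnlyTest(TestCase):
#         ...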
def _check_required_sminion_attributes(sminion_attr, *required_items):
"""
:param sminion_attr: The name of the sminion attribute to check, such as 'functions' or 'states'
:param required_items: The items that must be part of the designated sminion attribute for the decorated test
    :return: The items that are not available
"""
# Late import
from tests.support.sminion import create_sminion
required_salt_items = set(required_items)
sminion = create_sminion(minion_id="runtests-internal-sminion")
available_items = list(getattr(sminion, sminion_attr))
not_available_items = set()
name = "__not_available_{items}s__".format(items=sminion_attr)
if not hasattr(sminion, name):
setattr(sminion, name, set())
cached_not_available_items = getattr(sminion, name)
for not_available_item in cached_not_available_items:
if not_available_item in required_salt_items:
not_available_items.add(not_available_item)
required_salt_items.remove(not_available_item)
for required_item_name in required_salt_items:
search_name = required_item_name
if "." not in search_name:
search_name += ".*"
if not fnmatch.filter(available_items, search_name):
not_available_items.add(required_item_name)
cached_not_available_items.add(required_item_name)
return not_available_items
def requires_salt_states(*names):
"""
Makes sure the passed salt state is available. Skips the test if not
.. versionadded:: 3000
"""
not_available = _check_required_sminion_attributes("states", *names)
if not_available:
return skip("Unavailable salt states: {}".format(*not_available))
return _id
def requires_salt_modules(*names):
"""
Makes sure the passed salt module is available. Skips the test if not
.. versionadded:: 0.5.2
"""
not_available = _check_required_sminion_attributes("functions", *names)
if not_available:
return skip("Unavailable salt modules: {}".format(*not_available))
return _id
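# Usage sketch for ``requires_salt_states``/``requires_salt_modules`` (the
# module and state names are illustrative): each call returns a decorator that
# skips the test when the requested items are unavailable on the internal
# minion.
#
#     @requires_salt_modules("cmd.run", "test.echo")
#     @requires_salt_states("pkg")
#     def test_pkg_workflow(self):
#         ...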
def skip_if_binaries_missing(*binaries, **kwargs):
import salt.utils.path
if len(binaries) == 1:
if isinstance(binaries[0], (list, tuple, set, frozenset)):
binaries = binaries[0]
check_all = kwargs.pop("check_all", False)
message = kwargs.pop("message", None)
if kwargs:
raise RuntimeError(
"The only supported keyword argument is 'check_all' and "
"'message'. Invalid keyword arguments: {}".format(", ".join(kwargs.keys()))
)
if check_all:
for binary in binaries:
if salt.utils.path.which(binary) is None:
return skip(
"{}The {!r} binary was not found".format(
message and "{}. ".format(message) or "", binary
)
)
elif salt.utils.path.which_bin(binaries) is None:
return skip(
"{}None of the following binaries was found: {}".format(
message and "{}. ".format(message) or "", ", ".join(binaries)
)
)
return _id
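# Usage sketch for ``skip_if_binaries_missing`` (binary names are
# illustrative): by default the test runs if at least one listed binary is
# found; with ``check_all=True`` every listed binary must be present.
#
#     @skip_if_binaries_missing("tar", "gzip", check_all=True)
#     def test_archive_roundtrip(self):
#         ...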
def skip_if_not_root(func):
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(func, "__skip_if_not_root__", True)
if not sys.platform.startswith("win"):
if os.getuid() != 0:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = (
"You must be logged in as root to run this test"
)
else:
current_user = salt.utils.win_functions.get_current_user()
if current_user != "SYSTEM":
if not salt.utils.win_functions.is_admin(current_user):
func.__unittest_skip__ = True
func.__unittest_skip_why__ = (
"You must be logged in as an Administrator to run this test"
)
return func
def repeat(caller=None, condition=True, times=5):
"""
    Repeat a test a given number of times, stopping at the first failure.
.. code-block:: python
class MyTestCase(TestCase):
@repeat
def test_sometimes_works(self):
pass
"""
if caller is None:
return functools.partial(repeat, condition=condition, times=times)
if isinstance(condition, bool) and condition is False:
# Don't even decorate
return caller
elif callable(condition):
if condition() is False:
# Don't even decorate
return caller
if inspect.isclass(caller):
attrs = [n for n in dir(caller) if n.startswith("test_")]
for attrname in attrs:
try:
function = getattr(caller, attrname)
if not inspect.isfunction(function) and not inspect.ismethod(function):
continue
setattr(
caller,
attrname,
repeat(caller=function, condition=condition, times=times),
)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
continue
return caller
@functools.wraps(caller)
def wrap(cls):
result = None
for attempt in range(1, times + 1):
log.info("%s test run %d of %s times", cls, attempt, times)
caller(cls)
return cls
return wrap
def http_basic_auth(login_cb=lambda username, password: False):
"""
A crude decorator to force a handler to request HTTP Basic Authentication
Example usage:
.. code-block:: python
@http_basic_auth(lambda u, p: u == 'foo' and p == 'bar')
class AuthenticatedHandler(salt.ext.tornado.web.RequestHandler):
pass
"""
def wrapper(handler_class):
def wrap_execute(handler_execute):
def check_auth(handler, kwargs):
auth = handler.request.headers.get("Authorization")
if auth is None or not auth.startswith("Basic "):
# No username/password entered yet, we need to return a 401
# and set the WWW-Authenticate header to request login.
handler.set_status(401)
handler.set_header("WWW-Authenticate", "Basic realm=Restricted")
else:
# Strip the 'Basic ' from the beginning of the auth header
# leaving the base64-encoded secret
username, password = base64.b64decode(auth[6:]).split(":", 1)
if login_cb(username, password):
# Authentication successful
return
else:
# Authentication failed
handler.set_status(403)
handler._transforms = []
handler.finish()
def _execute(self, transforms, *args, **kwargs):
check_auth(self, kwargs)
return handler_execute(self, transforms, *args, **kwargs)
return _execute
handler_class._execute = wrap_execute(handler_class._execute)
return handler_class
return wrapper
def generate_random_name(prefix, size=6):
"""
Generates a random name by combining the provided prefix with a randomly generated
ascii string.
.. versionadded:: 2018.3.0
prefix
The string to prefix onto the randomly generated ascii string.
size
The number of characters to generate. Default: 6.
"""
salt.utils.versions.warn_until_date(
"20220101",
"Please replace your call 'generate_random_name({0})' with 'random_string({0}, lowercase=False)' as "
"'generate_random_name' will be removed after {{date}}".format(prefix),
)
return random_string(prefix, size=size, lowercase=False)
def random_string(prefix, size=6, uppercase=True, lowercase=True, digits=True):
"""
Generates a random string.
    .. versionadded:: 3001
Args:
prefix(str): The prefix for the random string
size(int): The size of the random string
uppercase(bool): If true, include uppercased ascii chars in choice sample
lowercase(bool): If true, include lowercased ascii chars in choice sample
digits(bool): If true, include digits in choice sample
Returns:
str: The random string
"""
if not any([uppercase, lowercase, digits]):
raise RuntimeError(
"At least one of 'uppercase', 'lowercase' or 'digits' needs to be true"
)
choices = []
if uppercase:
choices.extend(string.ascii_uppercase)
if lowercase:
choices.extend(string.ascii_lowercase)
if digits:
choices.extend(string.digits)
return prefix + "".join(random.choice(choices) for _ in range(size))
class Webserver:
"""
Starts a tornado webserver on 127.0.0.1 on a random available port
USAGE:
.. code-block:: python
from tests.support.helpers import Webserver
webserver = Webserver('/path/to/web/root')
webserver.start()
webserver.stop()
"""
def __init__(self, root=None, port=None, wait=5, handler=None, ssl_opts=None):
"""
root
Root directory of webserver. If not passed, it will default to the
location of the base environment of the integration suite's file
roots (tests/integration/files/file/base/)
port
Port on which to listen. If not passed, a random one will be chosen
at the time the start() function is invoked.
wait : 5
Number of seconds to wait for the socket to be open before raising
an exception
handler
Can be used to use a subclass of tornado.web.StaticFileHandler,
such as when enforcing authentication with the http_basic_auth
decorator.
"""
if port is not None and not isinstance(port, int):
raise ValueError("port must be an integer")
if root is None:
root = RUNTIME_VARS.BASE_FILES
try:
self.root = os.path.realpath(root)
except AttributeError:
raise ValueError("root must be a string")
self.port = port
self.wait = wait
self.handler = (
handler if handler is not None else salt.ext.tornado.web.StaticFileHandler
)
self.web_root = None
self.ssl_opts = ssl_opts
def target(self):
"""
Threading target which stands up the tornado application
"""
self.ioloop = salt.ext.tornado.ioloop.IOLoop()
self.ioloop.make_current()
if self.handler == salt.ext.tornado.web.StaticFileHandler:
self.application = salt.ext.tornado.web.Application(
[(r"/(.*)", self.handler, {"path": self.root})]
)
else:
self.application = salt.ext.tornado.web.Application(
[(r"/(.*)", self.handler)]
)
self.application.listen(self.port, ssl_options=self.ssl_opts)
self.ioloop.start()
@property
def listening(self):
if self.port is None:
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return sock.connect_ex(("127.0.0.1", self.port)) == 0
def url(self, path):
"""
Convenience function which, given a file path, will return a URL that
points to that path. If the path is relative, it will just be appended
to self.web_root.
"""
if self.web_root is None:
raise RuntimeError("Webserver instance has not been started")
err_msg = (
"invalid path, must be either a relative path or a path "
"within {}".format(self.root)
)
try:
relpath = (
path if not os.path.isabs(path) else os.path.relpath(path, self.root)
)
if relpath.startswith(".." + os.sep):
raise ValueError(err_msg)
return "/".join((self.web_root, relpath))
except AttributeError:
raise ValueError(err_msg)
def start(self):
"""
Starts the webserver
"""
if self.port is None:
self.port = get_unused_localhost_port()
self.web_root = "http{}://127.0.0.1:{}".format(
"s" if self.ssl_opts else "", self.port
)
self.server_thread = threading.Thread(target=self.target)
self.server_thread.daemon = True
self.server_thread.start()
for idx in range(self.wait + 1):
if self.listening:
break
if idx != self.wait:
time.sleep(1)
else:
raise Exception(
"Failed to start tornado webserver on 127.0.0.1:{} within "
"{} seconds".format(self.port, self.wait)
)
def stop(self):
"""
Stops the webserver
"""
self.ioloop.add_callback(self.ioloop.stop)
self.server_thread.join()
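# Usage sketch extending the class docstring above (the served path is
# illustrative; this helper is not called anywhere in the suite):
def _example_webserver_url():
    webserver = Webserver()
    webserver.start()
    try:
        # Build a URL for a file relative to the web root.
        return webserver.url("some/relative/file.txt")
    finally:
        webserver.stop()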
class SaveRequestsPostHandler(salt.ext.tornado.web.RequestHandler):
"""
Save all requests sent to the server.
"""
received_requests = []
def post(self, *args): # pylint: disable=arguments-differ
"""
Handle the post
"""
self.received_requests.append(self.request)
def data_received(self): # pylint: disable=arguments-differ
"""
Streaming not used for testing
"""
raise NotImplementedError()
class MirrorPostHandler(salt.ext.tornado.web.RequestHandler):
"""
Mirror a POST body back to the client
"""
def post(self, *args): # pylint: disable=arguments-differ
"""
Handle the post
"""
body = self.request.body
log.debug("Incoming body: %s Incoming args: %s", body, args)
self.write(body)
def data_received(self): # pylint: disable=arguments-differ
"""
Streaming not used for testing
"""
raise NotImplementedError()
def dedent(text, linesep=os.linesep):
"""
A wrapper around textwrap.dedent that also sets line endings.
"""
linesep = salt.utils.stringutils.to_unicode(linesep)
unicode_text = textwrap.dedent(salt.utils.stringutils.to_unicode(text))
clean_text = linesep.join(unicode_text.splitlines())
if unicode_text.endswith("\n"):
clean_text += linesep
if not isinstance(text, str):
return salt.utils.stringutils.to_bytes(clean_text)
return clean_text
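# Usage sketch for ``dedent`` (not called by the suite): strip the common
# leading whitespace and normalize the line endings to "\n".
def _example_dedent():
    return dedent(
        """
        first line
        second line
        """,
        linesep="\n",
    )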
class PatchedEnviron:
def __init__(self, **kwargs):
self.cleanup_keys = kwargs.pop("__cleanup__", ())
self.kwargs = kwargs
self.original_environ = None
def __enter__(self):
self.original_environ = os.environ.copy()
for key in self.cleanup_keys:
os.environ.pop(key, None)
# Make sure there are no unicode characters in the self.kwargs if we're
# on Python 2. These are being added to `os.environ` and causing
# problems
if sys.version_info < (3,):
kwargs = self.kwargs.copy()
clean_kwargs = {}
for k in self.kwargs:
key = k
if isinstance(key, str):
key = key.encode("utf-8")
if isinstance(self.kwargs[k], str):
kwargs[k] = kwargs[k].encode("utf-8")
clean_kwargs[key] = kwargs[k]
self.kwargs = clean_kwargs
os.environ.update(**self.kwargs)
return self
def __exit__(self, *args):
os.environ.clear()
os.environ.update(self.original_environ)
patched_environ = PatchedEnviron
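# Usage sketch for ``patched_environ`` (variable names are illustrative; not
# called by the suite): drop LANG for the duration of the block, set LC_ALL,
# and restore the original environment afterwards.
def _example_patched_environ():
    with patched_environ(__cleanup__=("LANG",), LC_ALL="C"):
        return os.environ.get("LC_ALL")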
class VirtualEnv:
def __init__(self, venv_dir=None):
self.venv_dir = venv_dir or tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
if salt.utils.platform.is_windows():
self.venv_python = os.path.join(self.venv_dir, "Scripts", "python.exe")
else:
self.venv_python = os.path.join(self.venv_dir, "bin", "python")
self.venv_bin_dir = os.path.dirname(self.venv_python)
def __enter__(self):
try:
self._create_virtualenv()
except subprocess.CalledProcessError:
raise AssertionError("Failed to create virtualenv")
return self
def __exit__(self, *args):
salt.utils.files.rm_rf(self.venv_dir)
def install(self, *args, **kwargs):
return self.run(self.venv_python, "-m", "pip", "install", *args, **kwargs)
def run(self, *args, **kwargs):
check = kwargs.pop("check", True)
kwargs.setdefault("cwd", self.venv_dir)
kwargs.setdefault("stdout", subprocess.PIPE)
kwargs.setdefault("stderr", subprocess.PIPE)
kwargs.setdefault("universal_newlines", True)
proc = subprocess.run(args, check=False, **kwargs)
ret = ProcessResult(
exitcode=proc.returncode,
stdout=proc.stdout,
stderr=proc.stderr,
cmdline=proc.args,
)
log.debug(ret)
if check is True:
try:
proc.check_returncode()
except subprocess.CalledProcessError:
raise ProcessFailed(
"Command failed return code check",
cmdline=proc.args,
stdout=proc.stdout,
stderr=proc.stderr,
exitcode=proc.returncode,
)
return ret
def _get_real_python(self):
"""
        Virtualenv creation is proxied through this function for two reasons: under
        Windows we can't seem to properly create a virtualenv off of another
        virtualenv (we can on Linux), and we really don't want to test virtualenv
        creation off of another virtualenv anyway; we want a virtualenv created from
        the original Python.
        Also, on Windows, we must point to the virtualenv binary outside the existing
        virtualenv because it will fail otherwise.
"""
try:
if salt.utils.platform.is_windows():
return os.path.join(sys.real_prefix, os.path.basename(sys.executable))
else:
python_binary_names = [
"python{}.{}".format(*sys.version_info),
"python{}".format(*sys.version_info),
"python",
]
for binary_name in python_binary_names:
python = os.path.join(sys.real_prefix, "bin", binary_name)
if os.path.exists(python):
break
else:
raise AssertionError(
"Couldn't find a python binary name under '{}' matching: {}".format(
os.path.join(sys.real_prefix, "bin"), python_binary_names
)
)
return python
except AttributeError:
return sys.executable
def _create_virtualenv(self):
sminion = create_sminion()
sminion.functions.virtualenv.create(
self.venv_dir, python=self._get_real_python()
)
# https://github.com/pypa/setuptools/issues?q=is%3Aissue+setuptools+50+
self.install("-U", "setuptools<50.0.0")
@contextmanager
def change_cwd(path):
"""
Context manager helper to change CWD for a with code block and restore
it at the end
"""
old_cwd = os.getcwd()
try:
os.chdir(path)
# Do stuff
yield
finally:
# Restore Old CWD
os.chdir(old_cwd)
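# Usage sketch for ``change_cwd`` (not called by the suite): run a block inside
# the test TMP directory and restore the previous working directory afterwards.
def _example_change_cwd():
    with change_cwd(RUNTIME_VARS.TMP):
        return os.getcwd()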
@functools.lru_cache(maxsize=1)
def get_virtualenv_binary_path():
    # Under Windows we can't seem to properly create a virtualenv off of another
    # virtualenv; we can on Linux, but we will still point to the virtualenv binary
    # outside the virtualenv running the test suite, if that's the case.
try:
real_prefix = sys.real_prefix
# The above attribute exists, this is a virtualenv
if salt.utils.platform.is_windows():
virtualenv_binary = os.path.join(real_prefix, "Scripts", "virtualenv.exe")
else:
# We need to remove the virtualenv from PATH or we'll get the virtualenv binary
# from within the virtualenv, we don't want that
path = os.environ.get("PATH")
if path is not None:
path_items = path.split(os.pathsep)
for item in path_items[:]:
if item.startswith(sys.base_prefix):
path_items.remove(item)
os.environ["PATH"] = os.pathsep.join(path_items)
virtualenv_binary = salt.utils.path.which("virtualenv")
if path is not None:
# Restore previous environ PATH
os.environ["PATH"] = path
if not virtualenv_binary.startswith(real_prefix):
virtualenv_binary = None
if virtualenv_binary and not os.path.exists(virtualenv_binary):
# It doesn't exist?!
virtualenv_binary = None
except AttributeError:
# We're not running inside a virtualenv
virtualenv_binary = None
return virtualenv_binary
```
|
{
"source": "jdelker/pipestash",
"score": 3
}
|
#### File: pipestash/output/redisoutput.py
```python
import urllib.parse
import pipestash.output
import redis
import json
import inspect
import re
import time
import random
class RedisOutput(pipestash.output.Output):
def __init__(self, config):
redis_url = urllib.parse.urlparse(config.redis_url)
self.redis = redis.StrictRedis(host=redis_url.hostname, port = redis_url.port, db=re.sub(r'^/','',redis_url.path))
        self.redis_key = config.redis_key
        # Keep the retry back-off interval from the config; it is used in do().
        self.timeout = config.timeout
def do(self, item):
while True:
try:
self.redis.rpush(self.redis_key, item)
break
except (redis.ConnectionError, redis.ResponseError):
                time.sleep(self.timeout * random.random())
pass
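# Usage sketch (assumption: ``config`` exposes ``redis_url``, ``redis_key`` and
# ``timeout`` attributes, as the code above expects; the values and the
# SimpleNamespace stand-in are illustrative only):
#
#     from types import SimpleNamespace
#     config = SimpleNamespace(
#         redis_url="redis://localhost:6379/0", redis_key="logstash", timeout=5
#     )
#     output = RedisOutput(config)
#     output.do('{"message": "hello"}')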
```
|
{
"source": "jdelorimier/s3-sample",
"score": 3
}
|
#### File: src/s3_sample/cli.py
```python
import click
from . import __version__
from s3_sample.csv import sample_csv_head
from s3_sample.utils import generate_outpath
@click.command()
@click.version_option(version=__version__)
@click.option("--infile", help="infile for data")
@click.option("--outfile", default=None, help="oufile path: e.g. path/to/output/sample.csv")
@click.option("--nlines", default=10, help = "Number of lines to take from head")
## main is sample head for now. Will expand as other options are desired
def main(infile, outfile, nlines):
"""s3-sample: a tool designed to sample large files directly from AWS S3"""
out_path = generate_outpath(in_path=infile,out_path=outfile)
sample_csv_head(in_path=infile, out_path=out_path,n=nlines)
click.echo(f"Sampled {nlines} lines\nData sampled from {infile}\nData written to {out_path}")
```
|
{
"source": "jdelrue/digital_me",
"score": 2
}
|
#### File: clients/gedis/GedisClient.py
```python
import os
import sys
from jumpscale import j
import imp
from redis.connection import ConnectionError
TEMPLATE = """
host = "127.0.0.1"
port = "9900"
adminsecret_ = ""
ssl = false
sslkey = ""
"""
JSConfigBase = j.tools.configmanager.base_class_config
class Models():
def __init__(self):
pass
class CmdsBase():
def __init__(self):
pass
class GedisClient(JSConfigBase):
def __init__(self, instance, data=None, parent=None, interactive=False, reset=False,configureonly=False):
if data is None:
data = {}
JSConfigBase.__init__(self, instance=instance, data=data, parent=parent,template=TEMPLATE , interactive=interactive)
j.clients.gedis.latest = self
self.code_generated_dir = j.sal.fs.joinPaths(j.dirs.VARDIR, "codegen", "gedis", instance, "client")
j.sal.fs.createDir(self.code_generated_dir)
j.sal.fs.touch(j.sal.fs.joinPaths(self.code_generated_dir, '__init__.py'))
if configureonly:
return
if self.code_generated_dir not in sys.path:
sys.path.append(self.code_generated_dir)
self._redis = None
self.models = Models()
self.cmds = CmdsBase()
self.cmds_meta = {}
self._connected = True
test = self.redis.execute_command("system.ping")
if test != b'PONG':
raise RuntimeError('Can not ping server')
# this will make sure we have all the local schemas
schemas_meta = self.redis.execute_command("system.core_schemas_get")
schemas_meta = j.data.serializer.msgpack.loads(schemas_meta)
for key,txt in schemas_meta.items():
if key not in j.data.schema.schemas:
j.data.schema.schema_get(txt,url=key)
schema_urls = self.redis.execute_command("system.schema_urls")
self.schema_urls = j.data.serializer.msgpack.loads(schema_urls)
try:
# LOW LEVEL AT THIS TIME BUT TO SHOW SOMETHING
cmds_meta =self.redis.execute_command("system.api_meta")
cmds_meta = j.data.serializer.msgpack.loads(cmds_meta)
self.namespace = cmds_meta["namespace"]
for namespace_full, capnpbin in cmds_meta["cmds"].items():
shortname = namespace_full.split(".")[-1]
if not shortname.startswith("model"):
self.cmds_meta[namespace_full] = j.servers.gedis.cmds_get(
namespace_full,
capnpbin
).cmds
except ConnectionError:
self.logger.error('Connection error')
self._connected = False
return
self.generate(reset=reset)
def generate(self, reset=False):
for schema_url in self.schema_urls:
fname = "model_%s" % schema_url.replace(".","_")
dest = os.path.join(self.code_generated_dir, "%s.py"%fname)
if reset or not j.sal.fs.exists(dest):
schema = j.data.schema.schema_get(url=schema_url)
args = sorted([p for p in schema.properties if p.index], key=lambda p:p.name)
find_args = ''.join(["{0}={1},".format(p.name, p.default_as_python_code) for p in args]).strip(',')
kwargs = ''.join(["{0}".format(p.name) for p in args]).strip(',')
code = j.clients.gedis.code_model_template.render(obj= schema, find_args=find_args, kwargs=kwargs)
j.sal.fs.writeFile(dest,code)
m=imp.load_source(name=fname, pathname=dest)
self.logger.debug("schema:%s"%fname)
self.models.__dict__[schema_url.replace(".","_")] = m.model(client=self)
for nsfull, cmds_ in self.cmds_meta.items():
cmds = CmdsBase()
cmds.cmds = cmds_
cmds.name = nsfull.replace(".","_")
# for name,cmd in cmds.items():
location = nsfull.replace(".","_")
cmds_name_lower = nsfull.split(".")[-1].strip().lower()
cmds.cmds_name_lower = cmds_name_lower
fname="cmds_%s"%location
dest = os.path.join(self.code_generated_dir, "%s.py"%fname)
if reset or not j.sal.fs.exists(dest):
code = j.clients.gedis.code_client_template.render(obj= cmds)
j.sal.fs.writeFile(dest,code)
m=imp.load_source(name=fname, pathname=dest)
self.logger.debug("cmds:%s"%fname)
self.cmds.__dict__[cmds_name_lower] =m.CMDS(client=self,cmds=cmds.cmds)
@property
def redis(self):
"""
        This gets you a redis instance. When executing commands you have to send the name of the function without
        the _cmd postfix, as is, and do not capitalize it:
        if it is testtest_cmd, then you should call it as testtest.
:return: redis instance
"""
if self._redis is None:
d = self.config.data
addr = d["host"]
port = d["port"]
secret = d["adminsecret_"]
ssl_certfile = d['sslkey']
if d['ssl']:
if not self.config.data['sslkey']:
ssl_certfile = j.sal.fs.joinPaths(os.path.dirname(self.code_generated_dir), 'ca.crt')
self.logger.info("redisclient: %s:%s (ssl:True cert:%s)"%(addr, port, ssl_certfile))
else:
self.logger.info("redisclient: %s:%s " % (addr, port))
self._redis = j.clients.redis.get(
ipaddr=addr,
port=port,
password=<PASSWORD>,
ssl=d["ssl"],
ssl_ca_certs=ssl_certfile
)
return self._redis
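# Usage sketch (assumption: the client is obtained through the standard
# config-manager ``get`` factory; the instance name is illustrative). Only
# ``system.ping`` is known to exist from the constructor above.
#
#     client = j.clients.gedis.get("main")
#     assert client.redis.execute_command("system.ping") == b"PONG"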
```
#### File: data/bcdb/BCDBIndexModel.py
```python
from jumpscale import j
import os
JSBASE = j.application.jsbase_get_class()
class IndexField():
def __init__(self,property):
self.name = property.name
self.jumpscaletype = property.jumpscaletype
if self.jumpscaletype.NAME == "string":
self.type = "TextField"
elif self.jumpscaletype.NAME in ["integer",'date']:
self.type = "IntegerField"
elif self.jumpscaletype.NAME in ["boolean"]:
self.type = "BooleanField"
elif self.jumpscaletype.NAME in ["numeric"]:
self.type = "FloatField"
else:
j.shell()
raise RuntimeError("did not find required type for peewee:%s"%self)
def __str__(self):
out = "indexfield:%s:%s:%s"%(self.name,self.type,self.jumpscaletype)
return out
__repr__ = __str__
class BCDBIndexModel(JSBASE):
def __init__(self,schema):
"""
"""
JSBASE.__init__(self)
self.schema=schema
if not isinstance(schema, j.data.schema.SCHEMA_CLASS):
raise RuntimeError("schema needs to be of type: j.data.schema.SCHEMA_CLASS")
self.fields = []
for p in schema.index_properties:
self.fields.append(IndexField(p))
def __str__(self):
out = "indexmodel:\s"
for item in self.fields:
out += " - "+str(item) + "\n"
return out
__repr__ = __str__
```
#### File: data/bcdb/BCDBModel.py
```python
from jumpscale import j
import msgpack
import struct
JSBASE = j.application.jsbase_get_class()
class BCDBModel(JSBASE):
def __init__(self, bcdb=None, schema=None, url=None, index_enable=True):
"""
for query example see http://docs.peewee-orm.com/en/latest/peewee/query_examples.html
e.g.
```
query = self.index.name.select().where(index.cost > 0)
for item in self.select(query):
print(item.name)
```
"""
JSBASE.__init__(self)
if bcdb is None:
# bcdb = j.data.bcdb.latest
raise RuntimeError("bcdb should be set")
self.bcdb = bcdb
if url is not None:
self.schema = j.data.schema.schema_get(url=url)
else:
if schema is None:
schema = SCHEMA # needs to be in code file
self.schema = j.data.schema.schema_add(schema)
key = j.data.text.strip_to_ascii_dense(self.schema.url)
self.key = key.replace(".", "_")
if bcdb.dbclient.dbtype == "RDB":
self.db = bcdb.dbclient
else:
self.db = bcdb.dbclient.namespace_new(name=self.key,
maxsize=0, die=False)
self.index_enable = index_enable
def index_delete(self):
pass
def index_load(self):
pass
# self.logger.info("build index done")
def destroy(self):
        if self.bcdb.dbclient.dbtype == "RDB":
j.shell()
else:
raise RuntimeError("not implemented yet, need to go to db "
"and remove namespace")
def set(self, data, obj_id=None):
"""
if string -> will consider to be json
if binary -> will consider data for capnp
if obj -> will check of JSOBJ
if ddict will put inside JSOBJ
@RETURN JSOBJ
"""
if j.data.types.string.check(data):
data = j.data.serializer.json.loads(data)
obj = self.schema.get(data)
elif j.data.types.bytes.check(data):
obj = self.schema.get(capnpbin=data)
elif getattr(data, "_JSOBJ", None):
obj = data
if obj_id is None and obj.id is not None:
obj_id = obj.id
elif j.data.types.dict.check(data):
obj = self.schema.get(data)
else:
raise RuntimeError("Cannot find data type, str,bin,obj or "
"ddict is only supported")
bdata = obj._data
# prepare
obj = self.set_pre(obj)
# later:
acl = b""
crc = b""
signature = b""
l = [acl, crc, signature, bdata]
data = msgpack.packb(l)
if self.db.dbtype == "ETCD":
if obj_id is None:
# means a new one
obj_id = self.db.incr("kbcdb:%s:lastid" % self.key)-1
key = ("bcdb/%s" % (obj_id))
self.db.set(key, data)
elif self.db.dbtype == "RDB":
if obj_id is None:
# means a new one
obj_id = self.db.incr("bcdb:%s:lastid" % self.key)-1
self.db.hset("bcdb:%s" % self.key, obj_id, data)
else:
if obj_id is None:
# means a new one
obj_id = self.db.set(data)
else:
self.db.set(data, key=obj_id)
obj.id = obj_id
self.index_set(obj)
return obj
def new(self):
return self.schema.get()
def set_pre(self, obj):
return obj
def index_set(self, obj):
pass
def get(self, id, capnp=False):
"""
@PARAM id is an int or a key
@PARAM capnp if true will return data as capnp binary object,
no hook will be done !
@RETURN obj (.index is in obj)
"""
        if id is None:
raise RuntimeError("id cannot be None")
if self.db.dbtype == "RDB":
data = self.db.hget("bcdb:%s" % self.key, id)
elif self.db.dbtype == 'ETCD':
key = ("<KEY>)
data = self.db.get(key)
else:
data = self.db.get(id)
if not data:
return None
#print ("data", id, type(data), repr(data))
return self._get(id, data, capnp=capnp)
def _get(self, id, data, capnp=False):
res = msgpack.unpackb(data)
if len(res) == 4:
acr, crc, signature, bdata = res
else:
raise RuntimeError("not supported format in table yet")
if capnp:
# obj = self.schema.get(capnpbin=bdata)
# return obj.data
return bdata
else:
obj = self.schema.get(capnpbin=bdata)
obj.id = id
return obj
def iterate(self, method, key_start=None, direction="forward",
nrrecords=100000, _keyonly=False,
result=None):
"""walk over the data and apply method as follows
call for each item:
'''
for each:
result = method(id,obj,result)
'''
result is the result of the previous call to the method
Arguments:
method {python method} -- will be called for each item found
in the file
Keyword Arguments:
key_start is the start key, if not given will be start of
database when direction = forward, else end
"""
def method_zdb(id, data, result0):
method_ = result0["method"]
obj = self._get(id, data)
result0["result"] = method_(id=id, obj=obj, result=result0["result"])
return result0
if self.db.dbtype == "ZDB":
result0 = {}
result0["result"] = result
result0["method"] = method
result0 = self.db.iterate(method=method_zdb, key_start=key_start,
direction=direction, nrrecords=nrrecords,
_keyonly=_keyonly, result=result0)
return result0["result"]
else:
# WE IGNORE Nrrecords
if not direction == "forward":
raise RuntimeError("not implemented, only forward iteration "
"supported")
if self.db.dbtype == "ETCD":
#print ("getting keys")
keys = list(map(int, self.db.keys("bcdb")))
#print ("keys", keys)
else:
keys = [int(item.decode()) for item in
self.db.hkeys("bcdb:%s" % self.key)]
keys.sort()
if len(keys) == 0:
return result
            if key_start is None:
key_start = keys[0]
for key in keys:
if key >= key_start:
obj = self.get(id=key)
                    result = method(key, obj, result)
return result
def get_all(self):
def do(id, obj, result):
result.append(obj)
return result
return self.iterate(do, result=[])
def __str__(self):
out = "model:%s\n" % self.key
out += j.data.text.prefix(" ", self.schema.text)
return out
__repr__ = __str__
```
#### File: data/bcdb/BCDB.py
```python
from importlib import import_module
from Jumpscale import j
import sys
from peewee import *
import os
JSBASE = j.application.jsbase_get_class()
from .BCDBIndexModel import BCDBIndexModel
class BCDB(JSBASE):
def __init__(self,dbclient,reset=False):
JSBASE.__init__(self)
self.dbclient = dbclient
self.models = {}
self.logger_enable()
self.index_create(reset=reset)
if reset:
if self.dbclient.dbtype == "RDB":
for item in self.dbclient.keys("bcdb:*"):
self.dbclient.delete(item)
elif self.dbclient.dbtype == "ETCD":
print("CANNOT DELETE NAMESPACE HERE?")
else:
print("IMPLEMENT")
j.shell()
j.data.bcdb.latest = self
def index_create(self,reset=False):
j.sal.fs.createDir(j.sal.fs.joinPaths(j.dirs.VARDIR, "bcdb"))
if self.dbclient.dbtype == "RDB":
if "path" in self.dbclient.connection_pool.connection_kwargs:
instance=self.dbclient.connection_pool.connection_kwargs["path"]
else:
print("need to find addr:port as identifier")
conn_args = self.dbclient.connection_pool.connection_kwargs
instance = "%s:%s" % (conn_args['host'], conn_args['port'])
instance = j.data.text.strip_to_ascii_dense(instance)
else:
instance = self.dbclient.instance
dest = j.sal.fs.joinPaths(j.dirs.VARDIR, "bcdb",instance+".db")
self.logger.info("bcdb:indexdb:%s"%dest)
if reset:
j.sal.fs.remove(dest)
self.sqlitedb = SqliteDatabase(dest)
def model_create(self, schema,dest=None, include_schema=True):
"""
        :param include_schema: if True, schema is added to generated code
:param schema: j.data.schema ...
:param dest: optional path where the model should be generated, if not specified will be in codegeneration dir
:return: model
"""
if j.data.types.string.check(schema):
schema = j.data.schema.schema_add(schema)
else:
if not isinstance(schema, j.data.schema.SCHEMA_CLASS):
raise RuntimeError("schema needs to be of type: j.data.schema.SCHEMA_CLASS")
imodel = BCDBIndexModel(schema=schema)
imodel.enable = True
imodel.include_schema = include_schema
tpath = "%s/templates/Model.py"%j.data.bcdb._path
key = j.data.text.strip_to_ascii_dense(schema.url).replace(".","_")
schema.key = key
if dest is None:
dest = "%s/model_%s.py"%(j.data.bcdb.code_generation_dir,key)
self.logger.debug("render model:%s"%dest)
j.tools.jinja2.file_render(tpath, write=True, dest=dest, schema=schema, index=imodel)
return self.model_add(dest)
def model_add(self,model_or_path):
"""
add model to BCDB
"""
if isinstance(model_or_path, j.data.bcdb.MODEL_CLASS):
self.models[model_or_path.schema.url] = model_or_path
elif j.sal.fs.exists(model_or_path):
model_or_path = self._model_add_from_path(model_or_path)
else:
raise RuntimeError("model needs to be of type: j.data.bcdb.MODEL_CLASS or path to model.")
return model_or_path
def models_add(self,path,overwrite=False):
"""
will walk over directory and each class needs to be a model
when overwrite used it will overwrite the generated models (careful)
:param path:
:return: None
"""
tocheck = j.sal.fs.listFilesInDir(path, recursive=True, filter="*.toml", followSymlinks=True)
for schemapath in tocheck:
dest = "%s/bcdb_model_%s.py"%(j.sal.fs.getDirName(schemapath),j.sal.fs.getBaseName(schemapath, True))
if overwrite or not j.sal.fs.exists(dest):
self.model_create(schemapath,dest=dest)
tocheck = j.sal.fs.listFilesInDir(path, recursive=True, filter="*.py", followSymlinks=True)
for classpath in tocheck:
self.model_add(classpath)
def _model_add_from_path(self,classpath):
dpath = j.sal.fs.getDirName(classpath)
if dpath not in sys.path:
sys.path.append(dpath)
j.sal.fs.touch("%s/__init__.py" % dpath)
# self.logger.info("model all:%s" % classpath)
modulename = j.sal.fs.getBaseName(classpath)[:-3]
if modulename.startswith("_"):
return
try:
self.logger.info("import module:%s" % modulename)
model_module = import_module(modulename)
self.logger.debug("ok")
except Exception as e:
raise RuntimeError("could not import module:%s" % modulename, e)
model = model_module.Model(bcdb=self)
self.models[model.schema.url] = model
return model
def model_get(self, url):
if url in self.models:
return self.models[url]
raise RuntimeError("could not find model for url:%s"%url)
def destroy(self):
"""
delete all objects in the zdb
:return:
"""
self.dbclient.destroy()
```
#### File: tests/models/bcdb_model_house.py
```python
from jumpscale import j
JSBASE = j.application.jsbase_get_class()
SCHEMA = """
@url = jumpscale.bcdb.test.house2
@name = test_house
name = "" (S)
active = "" (B)
cost = (N)
room = (LO) !jumpscale.bcdb.test.room2
@url = jumpscale.bcdb.test.room2
@name = test_room
name = "" (S)
active = "" (B)
colors = []
"""
MODEL_CLASS = j.data.bcdb.MODEL_CLASS
class Model(MODEL_CLASS):
def __init__(self,bcdb=None):
MODEL_CLASS.__init__(self, bcdb=bcdb, schema=SCHEMA)
def _init(self):
j.shell()
```
#### File: servers/digitalme/Package.py
```python
from jumpscale import j
JSBASE = j.application.jsbase_get_class()
import sys
from importlib import import_module
model = """
@url = jumpscale.digitalme.package
enabled = false (B)
start = 0 (D)
path = "" (S)
docsites = (LO) !jumpscale.digitalme.package.docsite
blueprints = (LO) !jumpscale.digitalme.package.blueprints
actors = (LO) !jumpscale.digitalme.package.actors
chatflows = (LO) !jumpscale.digitalme.package.chatflow
recipes = (LO) !jumpscale.digitalme.package.recipes
docmacros = (LO) !jumpscale.digitalme.package.docmacros
zrbotrepos = (LO) !jumpscale.digitalme.package.zrbotrepos
models = (LO) !jumpscale.digitalme.package.models
@url = jumpscale.digitalme.package.docsite
name = "" (S)
url = "" (S)
path = "" (S)
publish = "" (S)
enabled = false (B)
@url = jumpscale.digitalme.package.blueprints
name = "" (S)
url = "" (S)
path = "" (S)
publish = (B)
enabled = false (B)
links = (LO) !jumpscale.digitalme.package.bp.link
@url = jumpscale.digitalme.package.bp.link
name = "" (S)
url = "" (S)
dest = "" (S)
enabled = false (B)
@url = jumpscale.digitalme.package.actors
name = "" (S)
url = "" (S)
path = "" (S)
enabled = false (B)
@url = jumpscale.digitalme.package.chatflow
name = "" (S)
url = "" (S)
path = "" (S)
enabled = false (B)
@url = jumpscale.digitalme.package.recipes
name = "" (S)
url = "" (S)
path = "" (S)
enabled = false (B)
@url = jumpscale.digitalme.package.docmacros
name = "" (S)
url = "" (S)
path = "" (S)
enabled = false (B)
@url = jumpscale.digitalme.package.zrbotrepo
name = "" (S)
url = "" (S)
path = "" (S)
enabled = false (B)
@url = jumpscale.digitalme.package.models
name = "" (S)
url = "" (S)
path = "" (S)
enabled = false (B)
"""
class Package(JSBASE):
def __init__(self,path):
JSBASE.__init__(self)
self.path = j.sal.fs.getDirName(path)
db_client = j.clients.redis_config.get().redis
self.bcdb = j.data.bcdb.get(db_client)
schema_model = j.data.bcdb.MODEL_CLASS(bcdb=self.bcdb, schema=model)
self.bcdb.model_add(schema_model)
self._model = self.bcdb.model_get(url="jumpscale.digitalme.package")
self.data = self._model.new()
data = j.data.serializer.toml.load(path)
#be flexible
#std value is False
if "enable" in data:
self.data.enabled =data["enable"]
elif "enabled" in data:
self.data.enabled =data["enabled"]
elif "active" in data:
self.data.enabled =data["active"]
self.data.name = j.sal.fs.getBaseName(self.path)
dir_items = j.sal.fs.listDirsInDir(self.path,False,True)
if "actors" in dir_items:
name = "%s_internal"%(self.name)
if name not in self.actors:
obj = self.data.actors.new({"name":name, "enabled":True,
"path":"%s/actors"%(self.path)})
if "blueprints" in dir_items:
name = "%s_internal"%(self.name)
if name not in self.blueprints:
obj = self.data.blueprints.new({"name":name, "enabled":True,
"path":"%s/blueprints"%(self.path)})
if "models" in dir_items:
name = "%s_internal"%(self.name)
if name not in self.models:
obj = self.data.models.new({"name":name, "enabled":True,
"path":"%s/models"%(self.path)})
if "chatflows" in dir_items:
name = "%s_internal"%(self.name)
if name not in self.chatflows:
obj = self.data.chatflows.new({"name":name, "enabled":True,
"path":"%s/chatflows"%(self.path)})
if "recipes" in dir_items:
name = "%s_internal"%(self.name)
if name not in self.recipes:
obj = self.data.recipes.new({"name":name, "enabled":True,
"path":"%s/recipes"%(self.path)})
if "doc_macros" in dir_items:
name = "%s_internal"%(self.name)
if name not in self.doc_macros:
obj = self.data.doc_macros.new({"name":name, "enabled":True,
"path":"%s/doc_macros"%(self.path)})
if "docs" in dir_items:
docs_dir = j.sal.fs.joinPaths(self.path, "docs")
dir_items = j.sal.fs.listDirsInDir(docs_dir,
recursive=False, dirNameOnly=True)
for dir_name in dir_items:
self.data.docsites.new({"name": dir_name, "enabled": True,
"path": j.sal.fs.joinPaths(docs_dir, dir_name)})
#TODO: *1 finish & test
if "docsite" in data:
for item in data["docsite"]:
if item["name"] not in self.docsites:
obj=self.data.docsites.new(item)
obj.path = j.clients.git.getContentPathFromURLorPath(obj.url)
if "blueprint" in data:
for item in data["blueprint"]:
if item["name"] not in self.blueprints:
obj = self.data.blueprints.new(item)
obj.path = j.clients.git.getContentPathFromURLorPath(obj.url)
if "chatflows" in data:
for item in data["chatflows"]:
if item["name"] not in self.chatflows:
obj = self.data.chatflows.new(item)
obj.path = j.clients.git.getContentPathFromURLorPath(obj.url)
if "actors" in data:
for item in data["actors"]:
if item["name"] not in self.actors:
obj = self.data.actors.new(item)
obj.path = j.clients.git.getContentPathFromURLorPath(obj.url)
if "models" in data:
for item in data["models"]:
if item["name"] not in self.models:
obj = self.data.models.new(item)
obj.path = j.clients.git.getContentPathFromURLorPath(obj.url)
if "recipes" in data:
for item in data["recipes"]:
if item["name"] not in self.recipes:
obj = self.data.recipes.new(item)
obj.path = j.clients.git.getContentPathFromURLorPath(obj.url)
if "doc_macros" in data:
for item in data["doc_macros"]:
if item["name"] not in self.doc_macros:
obj = self.data.doc_macros.new(item)
obj.path = j.clients.git.getContentPathFromURLorPath(obj.url)
#TODO:need to check and make sure we have all see ...threefoldtech/digital_me/packages/readme.md
self.load()
@property
def name(self):
return self.data.name
@property
def docsites(self):
return [item.name for item in self.data.docsites]
@property
def blueprints(self):
return [item.name for item in self.data.blueprints]
@property
def chatflows(self):
return [item.name for item in self.data.chatflows]
@property
def doc_macros(self):
return [item.name for item in self.data.doc_macros]
@property
def zrobot_repos(self):
return [item.name for item in self.data.zrobot_repos]
@property
def actors(self):
return [item.name for item in self.data.actors]
@property
def models(self):
return [item.name for item in self.data.models]
def load(self):
"""
load package into memory
"""
rack = j.servers.digitalme.rack
gedis = j.servers.gedis.latest
#need to load the blueprints, docsites, actors, ...
self.chatflows_load()
self.blueprints_load()
self.docsites_load()
def chatflows_load(self):
for item in self.data.chatflows:
j.servers.gedis.latest.chatbot.chatflows_load(item.path)
return
def blueprints_load(self):
for blueprint in self.data.blueprints:
if blueprint.enabled:
j.servers.web.latest.loader.paths.append(blueprint.path)
def docsites_load(self):
for doc_site in self.data.docsites:
j.tools.docsites.load(doc_site.path, doc_site.name)
```
#### File: servers/digitalme/RQ.py
```python
from jumpscale import j
JSBASE = j.application.jsbase_get_class()
import sys
import gipc
# def parent(nr=1000):
# processes = []
# for i in range(nr):
# cend, pend = gipc.pipe(duplex=True) #cend = client end, pend=parent end
# processes.append((cend, pend ))
# gipc.start_process(subprocess, (cend,i,))
# while True:
# time.sleep(1)
# print("############")
# for proc in processes:
# cend,pend = proc
# print(pend.get())
# def subprocess(cend,nr):
# """
# """
# from jumpscale import j
# cend.put("log: init %s"%nr)
# try:
# while True:
# time.sleep(1)
# cend.put("alive %s"%nr)
# except Exception as e:
# cend.put(e)
# def subprocess2(nr):
# """
# """
# from jumpscale import j
# import time
# time.sleep(1)
# if nr/10==int(nr/10):
# raise RuntimeError("error")
# return("test:%s"%nr)
def workers(nr=4):
# from redis import Redis
# from rq import Queue
# q = Queue(connection=Redis())
# for i in range(100):
# job = q.enqueue(subprocess2, i)
# # job.perform()
# # print (job.result)
# # time.sleep(1)
res=[]
for i in range(nr):
res.append(gipc.start_process(worker))
return res
from rq import Connection, Worker
from jumpscale import j
def worker():
"""
"""
with Connection():
qs = ['default']
w = Worker(qs)
w.work()
```
#### File: servers/dns/DNSServerFactory.py
```python
from pprint import pprint
from jumpscale import j
from .DNSServer import DNSServer
import os
JSBASE = j.application.jsbase_get_class()
from gevent import socket
#http://mirror1.malwaredomains.com/files/justdomains domains we should not query, lets download & put in redis core
#https://blog.cryptoaustralia.org.au/2017/12/05/build-your-private-dns-server/
class DNSServerFactory(JSBASE):
def __init__(self):
self.__jslocation__ = "j.servers.dns"
JSBASE.__init__(self)
self._extensions = {}
def get(self,port=53):
return DNSServer(port=port)
def start(self,port=53,background=False):
"""
js_shell 'j.servers.dns.start()'
"""
if background:
if j.core.platformtype.myplatform.isMac and port<1025:
pprint("PLEASE GO TO TMUX SESSION, GIVE IN PASSWD FOR SUDO, do tmux a")
cmd = "sudo js_shell 'j.servers.dns.start(background=False,port=%s)'"%port
else:
cmd = "js_shell 'j.servers.dns.start(background=False,port=%s)'"%port
j.tools.tmux.execute(cmd, session='main', window='dnsserver',pane='main', session_reset=False, window_reset=True)
self.logger.info("waiting for uidserver to start on port %s"%port)
res = j.sal.nettools.waitConnectionTest("localhost",port)
else:
s = self.get(port=port)
s.start()
@property
def dns_extensions(self):
"""
all known extensions on http://data.iana.org/TLD/tlds-alpha-by-domain.txt
"""
if self._extensions=={}:
path = os.path.join(os.path.dirname(__file__), "knownextensions.txt")
for line in j.sal.fs.readFile(path).split("\n"):
if line.strip()=="" or line[0]=="#":
continue
self._extensions[line]=True
return self._extensions
def ping(self,addr='localhost',port=53):
"""
js_shell 'print(j.servers.dns.ping(port=53))'
"""
address = (addr, port)
message = b'PING'
sock = socket.socket(type=socket.SOCK_DGRAM)
sock.connect(address)
pprint ('Sending %s bytes to %s:%s' % ((len(message), ) + address))
sock.send(message)
try:
data, address = sock.recvfrom(8192)
except Exception as e:
if "refused" in str(e):
return False
raise RuntimeError("unexpected result")
return True
def test(self,start=False,port=5354):
"""
js_shell 'j.servers.dns.test()'
"""
if start or not self.ping(port=port):
self.start(background=True,port=port)
def ping():
from gevent import socket
address = ('localhost', port)
message = b'PING'
sock = socket.socket(type=socket.SOCK_DGRAM)
sock.connect(address)
pprint ('Sending %s bytes to %s:%s' % ((len(message), ) + address))
sock.send(message)
data, address = sock.recvfrom(8192)
pprint ('%s:%s: got %r' % (address + (data, )))
assert data == b"PONG"
ping()
ns = j.tools.dnstools.get(["localhost"],port=port)
pprint(ns.namerecords_get("google.com"))
pprint(ns.namerecords_get("info.despiegk"))
```
#### File: servers/gedis/GedisCmds.py
```python
from jumpscale import j
import inspect
# import imp
import sys
import os
from .GedisCmd import GedisCmd
JSBASE = j.application.jsbase_get_class()
class GedisCmds(JSBASE):
"""
    all commands captured in a capnp object, which can be stored in redis or any other key-value store
"""
def __init__(self,server=None, name="",path=None,capnpbin=None):
JSBASE.__init__(self)
if path is None:
raise RuntimeError("path cannot be None")
self.path=path
self.server = server
SCHEMA = """
@url = jumpscale.gedis.cmd
@name = GedisCmds
name = ""
comment = ""
code = ""
schema_in = ""
schema_out = ""
args = ""
@url = jumpscale.gedis.api
@name = GedisServerSchema
namespace = ""
cmds = (LO) !jumpscale.gedis.cmd
"""
j.data.schema.schema_add(SCHEMA)
self.schema = j.data.schema.schema_get(url="jumpscale.gedis.api")
self._cmds = {}
if capnpbin:
self.data = self.schema.get(capnpbin=capnpbin)
else:
dname = j.sal.fs.getDirName(path)
if dname not in sys.path:
sys.path.append(dname)
classname = self._class_find_name()
exec("from %s import %s" % (classname, classname))
class_ = eval(classname)
self.server.classes[name] = class_()
# j.shell()
self.data = self.schema.new()
self.data.name = name
self.data.namespace = name
for name,item in inspect.getmembers(class_):
if name.startswith("_"):
continue
if name.startswith("logger"):
continue
if name in ["cache"]:
continue
if inspect.isfunction(item):
cmd = self.data.cmds.new()
cmd.name = name
code = inspect.getsource(item)
cmd.code,cmd.comment,cmd.schema_in, cmd.schema_out, cmd.args= self._method_source_process(code)
@property
def name(self):
return self.data.name
@property
def cmds(self):
if self._cmds == {}:
print('\n\nPopulating commands for namespace(%s)\n' % self.data.namespace)
for cmd in self.data.cmds:
print("\tpopulata: %s"%(cmd.name))
self._cmds[cmd.name] = GedisCmd(self,cmd)
print('\n')
return self._cmds
def _class_find_name(self):
txt = j.sal.fs.fileGetContents(self.path)
for line in txt.split("\n"):
if line.strip().startswith("class"):
pre = line.split("(")[0]
classname = pre.split(" ")[1].strip()
return classname
raise RuntimeError("did not find class name in %s"%self.path)
def _method_source_process(self,txt):
"""
return code,comment,schema_in, schema_out
"""
txt=j.data.text.strip(txt)
code = ""
comment = ""
schema_in = ""
schema_out = ""
args = ""
state="START"
for line in txt.split("\n"):
lstrip = line.strip().lower()
if state=="START" and lstrip.startswith("def"):
state = "DEF"
if "self" in lstrip:
if "," in lstrip:
arg0,arg1=lstrip.split(",",1)
args,_ = arg1.split(")",1)
else:
args = ""
else:
arg0,arg1=lstrip.split("(",1)
args,_ = arg1.split(")",1)
continue
if lstrip.startswith("\"\"\""):
if state=="DEF":
state="COMMENT"
continue
if state=="COMMENT":
state="CODE"
continue
raise RuntimeError()
if lstrip.startswith("```") or lstrip.startswith("'''"):
if state.startswith("SCHEMA"): #are already in schema go back to comment
state="COMMENT"
continue
if state=="COMMENT": #are in comment, now found the schema
if lstrip.endswith("out"):
state="SCHEMAO"
else:
state="SCHEMAI"
continue
raise RuntimeError()
if state=="SCHEMAI":
schema_in+="%s\n"%line
continue
if state=="SCHEMAO":
schema_out+="%s\n"%line
continue
if state=="COMMENT":
comment+="%s\n"%line
continue
if state=="CODE" or state=="DEF":
code+="%s\n"%line
continue
raise RuntimeError()
return j.data.text.strip(code),j.data.text.strip(comment),j.data.text.strip(schema_in),\
j.data.text.strip(schema_out),j.data.text.strip(args)
def cmd_exists(self,name):
return name in self.children
def __repr__(self):
path2 = self.path.split("github")[-1].strip("/")
return 'CMDS:%s' % (path2)
__str__ = __repr__
```
#### File: blueprints/admin/routes.py
```python
from flask import render_template, redirect, request, url_for
from blueprints.adminsite2 import *
from jumpscale import j
login_manager = j.servers.web.latest.loader.login_manager
@blueprint.route('/')
def route_default():
# return redirect(url_for('index_.html'))
return redirect('/%s/index_.html'%name)
# @login_required
@blueprint.route('/<template>.html')
def route_template(template):
return render_template('%s_%s.html'%(name,template))
```
#### File: blueprints/chat/routes.py
```python
from flask import render_template, redirect, make_response, request
from blueprints.chat import *
from jumpscale import j
import json
login_manager = j.servers.web.latest.loader.login_manager
chat_server = j.servers.gedis.latest
users = [
{"id": 1, "name": "ahmed", "email": "<EMAIL>"},
]
todos = [
{"id": 1, "title": "fix it", "done": False},
]
@blueprint.route('/')
def route_default():
return redirect('/%s/chat_index.html' % name)
# @login_required
@blueprint.route('/session/<topic>')
def route_chattopic(topic):
# needs to return the session id
session_id = j.servers.gedis.latest.chatbot.session_new(topic)
return render_template("chat_index.html", session_id=session_id)
# @login_required
@blueprint.route('/admin/<topic>')
def route_admin(topic):
# needs to return the session id
models2datatable = {
'user': {
"view": "datatable",
"id": "crudPanel",
"icon": "user",
"columns": [
{"id": "id"},
{"id": "name"},
{"id": "email"},
],
"data": users,
},
'todo': {
"view": "datatable",
"id": "crudPanel",
"icon": "dashboard",
"columns": [
{"id": "id"},
{"id": "title"},
{"id": "done"},
{"id": "notes"},
],
"data": todos,
}
}
return render_template("chat_admin.html", data=models2datatable)
@blueprint.route('/add', methods=['GET', 'POST'])
def add():
if request.method == 'POST':
type = request.form['type']
if type == "user":
user = {
'id': request.form['id'],
'name': request.form['name'],
'email': request.form['email']
}
users.append(user)
elif type == "todo":
todo = {
'id': request.form['id'],
'title': request.form['title'],
'done': request.form['done'],
'notes': request.form['notes']
}
            todos.append(todo)
    # A Flask view must return a response; acknowledge the request with an empty body.
    return "", 204
# @login_required
@blueprint.route('/<template>')
def route_template(template):
return render_template(template)
@blueprint.route('/jsclient.js')
def load_js_client():
scheme = "ws"
if request.scheme == "https":
scheme = "wss"
js_code = j.servers.gedis.latest.code_js_client
js_client = js_code.replace("%%host%%", "{scheme}://{host}/chat/ws/gedis".format(scheme=scheme, host=request.host))
res = make_response(js_client)
res.headers['content-type'] = "application/javascript"
return res
@ws_blueprint.route('/ws/gedis')
def chat_interact(socket):
while not socket.closed:
message = socket.receive()
if not message:
continue
req = message.split(" ")
cmd, err = chat_server.get_command(req[0])
if err:
socket.send(err)
continue
res, err = chat_server.process_command(cmd, req)
if err:
socket.send(err)
continue
socket.send(json.dumps(res))
```
#### File: blueprints/base/routes.py
```python
from flask import render_template, redirect, request, url_for
from jumpscale import j
from blueprints.base import blueprint
# from blueprints import login_manager
login_manager = j.servers.web.latest.loader.login_manager
# from blueprints import db
# from blueprints.base.models import User
# from flask_login import (
# current_user,
# login_required,
# login_user,
# logout_user
# )
# from .forms import LoginForm, CreateAccountForm
# @blueprint.route('/')
# def route_default():
# return redirect(url_for('base_blueprint.login'))
# @login_required
@blueprint.route('/githook', methods=['POST'])
def route_github():
d=request.json
name=d["repository"]['full_name']
#will be something like 'threefoldfoundation/info_foundation'
account,repo=name.split("/",1)
print("GITHUB:%s:%s"%(account,repo))
return ("OK")
@blueprint.route('/<template>')
def route_template(template):
if template=="favicon.ico":
return url_for('static',filename="images/favicon.png")
return render_template(template + '.html')
# @blueprint.route('/fixed_<template>')
# @login_required
# def route_fixed_template(template):
# return render_template('fixed/fixed_{}.html'.format(template))
# @blueprint.route('/page_<error>')
# def route_errors(error):
# return render_template('errors/page_{}.html'.format(error))
# ## Login & Registration
# @blueprint.route('/login', methods=['GET', 'POST'])
# def login():
# login_form = LoginForm(request.form)
# create_account_form = CreateAccountForm(request.form)
# if 'login' in request.form:
# username = str(request.form['username'])
# password = str(request.form['password'])
# user = User.query.filter_by(username=username).first()
# if user and password == user.password:
# login_user(user)
# return redirect(url_for('base_blueprint.route_default'))
# return render_template('errors/page_403.html')
# elif 'create_account' in request.form:
# login_form = LoginForm(request.form)
# user = User(**request.form)
# db.session.add(user)
# db.session.commit()
# return redirect(url_for('base_blueprint.login'))
# if not current_user.is_authenticated:
# return render_template(
# 'login/login.html',
# login_form=login_form,
# create_account_form=create_account_form
# )
# return redirect(url_for('home_blueprint.index'))
# @blueprint.route('/logout')
# @login_required
# def logout():
# logout_user()
# return redirect(url_for('base_blueprint.login'))
# @blueprint.route('/shutdown')
# def shutdown():
# func = request.environ.get('werkzeug.server.shutdown')
# if func is None:
# raise RuntimeError('Not running with the Werkzeug Server')
# func()
# return 'Server shutting down...'
# ## Errors
# @login_manager.unauthorized_handler
# def unauthorized_handler():
# return render_template('errors/page_403.html'), 403
@blueprint.errorhandler(403)
def access_forbidden(error):
return render_template('errors/page_403.html'), 403
@blueprint.errorhandler(404)
def not_found_error(error):
return render_template('errors/page_404.html'), 404
```
#### File: blueprints/gedis/routes.py
```python
from flask import render_template, redirect, request, url_for
from blueprints.gedis import *
from jumpscale import j
import gevent
login_manager = j.servers.web.latest.loader.login_manager
from gevent import time
#alias for scheduling rq job
schedule = j.servers.gedis.latest.job_schedule
users = [
{"id": 1, "name": "ahmed", "email": "<EMAIL>"},
]
todos = [
{"id": 1, "title": "fix it", "done": False},
]
@blueprint.route('/')
def route_default():
return redirect('/%s/gedis_index.html'%name)
def test2(timetowait=20,descr="should not be visible"):
print("TEST2:%s"%descr)
import time
time.sleep(timetowait)
return descr
@blueprint.route('/test')
def gedis_test():
greenlets=[]
for i in range(10):
job = schedule(test2,timetowait=i,descr="HELLO",timeout=60)
greenlets.append(job.greenlet)
print("START")
gevent.joinall(greenlets)
print("STOP")
return "ALLDONE"
# res=schedule(test2,wait=True,timetowait=3,descr="HELLO")
@blueprint.route('/admin')
def route_admin():
# needs to return the session id
models2datatable = {
'user': {
"view": "datatable",
"id": "crudPanel",
"icon": "user",
"columns": [
{"id": "id"},
{"id": "name"},
{"id": "email"},
],
"data": users,
},
'todo': {
"view": "datatable",
"id": "crudPanel",
"icon": "dashboard",
"columns": [
{"id": "id"},
{"id": "title"},
{"id": "done"},
{"id": "notes"},
],
"data": todos,
}
}
return render_template("gedis_admin.html", data=models2datatable)
# @login_required
@blueprint.route('/<template>')
def route_template(template):
return render_template(template)
```
#### File: blueprints/wiki/routes.py
```python
from blueprints.wiki import blueprint
from flask import render_template, send_file
from flask import abort, redirect, url_for
import io
# from flask_login import login_required
from werkzeug.routing import BaseConverter
from jumpscale import j
@blueprint.route('/index')
@blueprint.route('/')
def index():
return redirect("wiki/foundation")
@blueprint.route('')
def index_sub(sub):
return render_template('index_docsify.html')
@blueprint.route('/<path:subpath>')
def wiki_route(subpath):
subpath=subpath.strip("/")
parts = subpath.split("/")
if len(parts)==1: #"readme" in parts[0].lower() or "index" in parts[0].lower()
#means we are in root of a wiki, need to load the html
return render_template('index_docsify.html')
if len(parts)<2:
return render_template('error_notfound.html',url=subpath)
wikicat = parts[0].lower().strip()
parts = parts[1:]
url = "/".join(parts)
try:
#at this point we know the docsite
ds = j.tools.docsites.docsite_get(wikicat,die=False)
if ds is None:
return "Cannot find docsite with name:%s"%wikicat
if len(parts)>0 and parts[0].startswith("verify"):
return ds.verify()
if len(parts)>0 and parts[0].startswith("errors"):
return ds.errors
#if binary file, return
name = parts[-1]
if not name.endswith(".md"):
file_path = ds.file_get(name)
with open(file_path, 'rb') as bites:
return send_file(
io.BytesIO(bites.read()),
attachment_filename=name
)
except Exception as e:
raise e
if "sidebar.md" in url:
res = ds.sidebar_get(url)
if res is None:
raise RuntimeError("sidebar did not return result")
return res
else:
doc = ds.doc_get(parts,die=False)
if doc:
return doc.markdown
return render_template('error_notfound.html',url=url)
```
#### File: blueprints/hub/merge.py
```python
import os
from jumpscale import j
from .flist import HubFlist, HubPublicFlist
class HubMerger:
def __init__(self, config, username, flistname):
self.config = config
self.destination = HubPublicFlist(config, username, flistname)
# ensure user exists
self.destination.user_create()
def merge(self, sources):
items = {}
merger = j.tools.flist.get_merger()
# extracting flists to distinct directories
for source in sources:
flistpath = os.path.join(self.config['public-directory'], source)
if not os.path.exists(flistpath):
return "%s source doesn't exists" % source
flist = HubFlist(self.config)
flist.loads(flistpath)
merger.add_source(flist.flist)
items[source] = flist
# merging sources
self.destination.raw.initialize("/")
merger.add_destination(self.destination.raw.flist)
merger.merge()
self.destination.raw.commit()
self.destination.raw.pack(self.destination.target)
return True
```
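`HubMerger.merge()` loads each source flist from the public directory, feeds it to the flist merger, then initializes, merges, commits and packs the destination. A usage sketch, assuming the hub package is importable; the user name, flist names and the one-key config stub are placeholders (the real config built in `routes.py` below carries more keys):
```python
# Hypothetical driver for HubMerger; names and config values are placeholders.
from blueprints.hub.hub.merge import HubMerger

config = {'public-directory': '/var/hub/public/users/'}  # stub, see routes.py below
merger = HubMerger(config, "demo-user", "combined.flist")
result = merger.merge(["demo-user/base.flist", "demo-user/app.flist"])
if result is not True:
    print("merge failed:", result)  # merge() returns an error string on failure
```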
#### File: blueprints/hub/routes.py
```python
import os
import shutil
import json
from stat import *
from blueprints.hub import *
from flask import request, render_template, abort, make_response, send_from_directory
from werkzeug.utils import secure_filename
# from werkzeug.contrib.fixers import ProxyFix
from .config import config
from blueprints.hub.hub.flist import HubPublicFlist
# from hub.itsyouonline import ItsYouChecker
from blueprints.hub.hub.docker import HubDocker
from blueprints.hub.hub.merge import HubMerger
#
# runtime configuration
# these locations should work out-of-the-box if you use the default settings
#
base_path = j.dirs.VARDIR+"/hub"
j.sal.fs.createDir(base_path)
config['public-directory'] = os.path.join(base_path, "public/users/")
config['flist-work-directory'] = os.path.join(base_path, "workdir/temp")
config['docker-work-directory'] = os.path.join(base_path, "workdir/temp")
config['upload-directory'] = os.path.join(base_path, "workdir/distfiles")
config['allowed-extensions'] = {'.tar.gz'}
print("[+] upload directory: %s" % config['upload-directory'])
print("[+] flist creation : %s" % config['flist-work-directory'])
print("[+] public directory: %s" % config['public-directory'])
#
# initialize flask application
#
# app = Flask(__name__)
# app.wsgi_app = ItsYouChecker(app.wsgi_app)
# app.wsgi_app = ProxyFix(app.wsgi_app)
# app.url_map.strict_slashes = False
######################################
#
# TEMPLATES MANIPULATION
#
######################################
def allowed_file(filename, validate=False):
if validate:
return filename.endswith(".flist")
for ext in config['allowed-extensions']:
if filename.endswith(ext):
return True
return False
def global_template(filename, args):
args['debug'] = config['DEBUG']
return render_template(filename, **args)
def file_from_flist(filename):
clean_file_name = filename
for ext in config['allowed-extensions']:
if clean_file_name.endswith(ext):
clean_file_name = clean_file_name[:-len(ext)]
return clean_file_name
def upload_success(flist_name, files_count, username=None):
if username is None:
username = request.environ['username']
settings = {
'username': username,
'accounts': request.environ['accounts'],
'flistname': flist_name,
'filescount': files_count,
'flisturl': "%s/%s/%s" % (config['PUBLIC_WEBADD'], username, flist_name),
'ardbhost': 'ardb://%s:%d' % (config['PUBLIC_ARDB_HOST'], config['PUBLIC_ARDB_PORT']),
}
return global_template("success.html", settings)
def internal_redirect(target, error=None):
settings = {
'username': request.environ['username'],
'accounts': request.environ['accounts'],
}
if error:
settings['error'] = error
return global_template(target, settings)
def flist_merge_post():
sources = request.form.getlist('flists[]')
target = request.form['name']
return flist_merge_data(sources, target)
def flist_merge_data(sources, target):
data = {'error': None, 'sources': sources, 'target': target}
if not isinstance(sources, list):
data['error'] = 'malformed json request'
return data
if len(data['sources']) == 0:
data['error'] = "no source found"
return data
if not data['target']:
data['error'] = "missing build (target) name"
return data
if "/" in data['target']:
data['error'] = "build name not allowed"
return data
if not data['target'].endswith('.flist'):
data['target'] += '.flist'
return data
######################################
#
# ROUTING ACTIONS
#
######################################
@blueprint.route('/upload', methods=['GET', 'POST'])
def upload_file():
if not request.environ['username']:
return "Access denied."
username = request.environ['username']
if request.method == 'POST':
response = api_flist_upload(request, username)
if response['status'] == 'success':
return upload_success(response['flist'], response['count'])
if response['status'] == 'error':
return internal_redirect("upload.html", response['message'])
return internal_redirect("upload.html")
@blueprint.route('/upload-flist', methods=['GET', 'POST'])
def upload_file_flist():
if not request.environ['username']:
return "Access denied."
username = request.environ['username']
if request.method == 'POST':
response = api_flist_upload(request, username, validate=True)
if response['status'] == 'success':
return upload_success(response['flist'], response['count'])
if response['status'] == 'error':
return internal_redirect("upload-flist.html", response['message'])
return internal_redirect("upload-flist.html")
@blueprint.route('/merge', methods=['GET', 'POST'])
def flist_merge():
if not request.environ['username']:
return "Access denied."
username = request.environ['username']
if request.method == 'POST':
data = flist_merge_post()
print(data)
if data['error']:
return internal_redirect("merge.html", data['error'])
merger = HubMerger(config, username, data['target'])
status = merger.merge(data['sources'])
if status is not True:  # merge() returns an error string on failure, True on success
variables = {'error': status}
return global_template("merge.html", variables)
return upload_success(data['target'], 0)
# Merge page
return internal_redirect("merge.html")
@blueprint.route('/docker-convert', methods=['GET', 'POST'])
def docker_handler():
if not request.environ['username']:
return "Access denied."
username = request.environ['username']
if request.method == 'POST':
if not request.form.get("docker-input"):
return internal_redirect("docker.html", "missing docker image name")
docker = HubDocker(config)
response = docker.convert(request.form.get("docker-input"), username)
if response['status'] == 'success':
return upload_success(response['flist'], response['count'])
if response['status'] == 'error':
return internal_redirect("docker.html", response['message'])
# Docker page
return internal_redirect("docker.html")
######################################
#
# ROUTING NAVIGATION
#
######################################
@blueprint.route('/')
def show_users():
return global_template("users.html", {})
@blueprint.route('/<username>')
def show_user(username):
flist = HubPublicFlist(config, username, "unknown")
if not flist.user_exists:
abort(404)
return global_template("user.html", {'targetuser': username})
@blueprint.route('/<username>/<flist>.md')
def show_flist_md(username, flist):
flist = HubPublicFlist(config, username, flist)
if not flist.file_exists:
abort(404)
variables = {
'targetuser': username,
'flistname': flist.filename,
'flisturl': "%s/%s/%s" % (config['PUBLIC_WEBADD'], username, flist.filename),
'ardbhost': 'ardb://%s:%d' % (config['PUBLIC_ARDB_HOST'], config['PUBLIC_ARDB_PORT']),
'checksum': flist.checksum
}
return global_template("preview.html", variables)
@blueprint.route('/<username>/<flist>.txt')
def show_flist_txt(username, flist):
flist = HubPublicFlist(config, username, flist)
if not flist.file_exists:
abort(404)
text = "File: %s\n" % flist.filename
text += "Uploader: %s\n" % username
text += "Source: %s/%s/%s\n" % (config['PUBLIC_WEBADD'], username, flist.filename)
text += "Storage: ardb://%s:%d\n" % (config['PUBLIC_ARDB_HOST'], config['PUBLIC_ARDB_PORT'])
text += "Checksum: %s\n" % flist.checksum
response = make_response(text)
response.headers["Content-Type"] = "text/plain"
return response
@blueprint.route('/<username>/<flist>.json')
def show_flist_json(username, flist):
flist = HubPublicFlist(config, username, flist)
if not flist.file_exists:
abort(404)
data = {
'flist': flist.filename,
'uploader': username,
'source': "%s/%s/%s" % (config['PUBLIC_WEBADD'], username, flist),
'storage': "ardb://%s:%d" % (config['PUBLIC_ARDB_HOST'], config['PUBLIC_ARDB_PORT']),
'checksum': flist.checksum
}
response = make_response(json.dumps(data) + "\n")
response.headers["Content-Type"] = "application/json"
return response
@blueprint.route('/<username>/<flist>.flist')
def download_flist(username, flist):
flist = HubPublicFlist(config, username, flist)
return send_from_directory(directory=flist.user_path, filename=flist.filename)
@blueprint.route('/<username>/<flist>.flist.md5')
def checksum_flist(username, flist):
flist = HubPublicFlist(config, username, flist)
flist_hash = flist.checksum
if not flist_hash:
abort(404)
response = make_response(flist_hash + "\n")
response.headers["Content-Type"] = "text/plain"
return response
######################################
#
# ROUTING API
#
######################################
@blueprint.route('/api/flist')
def api_list():
repositories = api_repositories()
output = []
for user in repositories:
target = os.path.join(config['public-directory'], user['name'])
# ignore files (eg: .keep file)
if not os.path.isdir(target):
continue
flists = sorted(os.listdir(target))
for flist in flists:
output.append("%s/%s" % (user['name'], flist))
response = make_response(json.dumps(output) + "\n")
response.headers["Content-Type"] = "application/json"
return response
@blueprint.route('/api/repositories')
def api_list_repositories():
repositories = api_repositories()
response = make_response(json.dumps(repositories) + "\n")
response.headers["Content-Type"] = "application/json"
return response
@blueprint.route('/api/flist/<username>')
def api_user_contents(username):
flist = HubPublicFlist(config, username, "unknown")
if not flist.user_exists:
abort(404)
files = sorted(os.listdir(flist.user_path))
contents = []
for file in files:
filepath = os.path.join(config['public-directory'], username, file)
stat = os.lstat(filepath)
if S_ISLNK(stat.st_mode):
target = os.readlink(filepath)
contents.append({
'name': file,
'size': "--",
'updated': int(stat.st_mtime),
'type': 'symlink',
'target': target,
})
else:
contents.append({
'name': file,
'size': "%.2f KB" % (stat.st_size / 1024),
'updated': int(stat.st_mtime),
'type': 'regular',
})
response = make_response(json.dumps(contents) + "\n")
response.headers["Content-Type"] = "application/json"
return response
@blueprint.route('/api/flist/<username>/<flist>', methods=['GET', 'INFO'])
def api_inspect(username, flist):
flist = HubPublicFlist(config, username, flist)
if not flist.user_exists:
return api_response("user not found", 404)
if not flist.file_exists:
return api_response("source not found", 404)
contents = ""
if request.method == 'GET':
contents = api_contents(flist)
if request.method == 'INFO':
contents = api_flist_info(flist)
response = make_response(json.dumps(contents) + "\n")
response.headers["Content-Type"] = "application/json"
return response
@blueprint.route('/api/flist/me', methods=['GET'])
def api_my_myself():
if not request.environ['username']:
return api_response("Access denied", 401)
return api_response(extra={"username": request.environ['username']})
@blueprint.route('/api/flist/me/<flist>', methods=['GET', 'DELETE'])
def api_my_inspect(flist):
if not request.environ['username']:
return api_response("Access denied", 401)
username = request.environ['username']
if request.method == 'DELETE':
return api_delete(username, flist)
return api_inspect(username, flist)
@blueprint.route('/api/flist/me/<source>/link/<linkname>', methods=['GET'])
def api_my_flist(source, linkname):
if not request.environ['username']:
return api_response("Access denied", 401)
username = request.environ['username']
return api_symlink(username, source, linkname)
@blueprint.route('/api/flist/me/<source>/rename/<destination>')
def api_my_rename(source, destination):
if not request.environ['username']:
return api_response("Access denied", 401)
username = request.environ['username']
flist = HubPublicFlist(config, username, source)
destflist = HubPublicFlist(config, username, destination)
if not flist.user_exists:
return api_response("user not found", 404)
if not flist.file_exists:
return api_response("source not found", 404)
os.rename(flist.target, destflist.target)
return api_response()
@blueprint.route('/api/flist/me/promote/<sourcerepo>/<sourcefile>/<localname>', methods=['GET'])
def api_my_promote(sourcerepo, sourcefile, localname):
if not request.environ['username']:
return api_response("Access denied", 401)
username = request.environ['username']
return api_promote(username, sourcerepo, sourcefile, localname)
@blueprint.route('/api/flist/me/upload', methods=['POST'])
def api_my_upload():
if not request.environ['username']:
return api_response("Access denied", 401)
username = request.environ['username']
response = api_flist_upload(request, username)
if response['status'] == 'success':
if config['DEBUG']:
return api_response(extra={'name': response['flist'], 'files': response['count'], 'timing': {}})
else:
return api_response(extra={'name': response['flist'], 'files': response['count']})
if response['status'] == 'error':
return api_response(response['message'], 500)
@blueprint.route('/api/flist/me/upload-flist', methods=['POST'])
def api_my_upload_flist():
if not request.environ['username']:
return api_response("Access denied", 401)
username = request.environ['username']
response = api_flist_upload(request, username, validate=True)
if response['status'] == 'success':
if config['DEBUG']:
return api_response(extra={'name': response['flist'], 'files': response['count'], 'timing': {}})
else:
return api_response(extra={'name': response['flist'], 'files': response['count']})
if response['status'] == 'error':
return api_response(response['message'], 500)
@blueprint.route('/api/flist/me/merge/<target>', methods=['POST'])
def api_my_merge(target):
if not request.environ['username']:
return api_response("Access denied", 401)
username = request.environ['username']
sources = request.get_json(silent=True, force=True)
data = flist_merge_data(sources, target)
if data['error']:
return api_response(data['error'], 500)
merger = HubMerger(config, username, data['target'])
status = merger.merge(data['sources'])
if status is not True:  # merge() returns an error string on failure, True on success
return api_response(status, 500)
return api_response()
@blueprint.route('/api/flist/me/docker', methods=['POST'])
def api_my_docker():
if not request.environ['username']:
return api_response("Access denied", 401)
username = request.environ['username']
if not request.form.get("image"):
return api_response("missing docker image name", 400)
docker = HubDocker(config)
response = docker.convert(request.form.get("image"), username)
if response['status'] == 'success':
return api_response(extra={'name': response['flist']})
if response['status'] == 'error':
return api_response(response['message'], 500)
return api_response("unexpected docker convert error", 500)
######################################
#
# API IMPLEMENTATION
#
######################################
def api_delete(username, source):
flist = HubPublicFlist(config, username, source)
if not flist.user_exists:
return api_response("user not found", 404)
if not flist.file_exists:
return api_response("source not found", 404)
os.unlink(flist.target)
return api_response()
def api_symlink(username, source, linkname):
flist = HubPublicFlist(config, username, source)
linkflist = HubPublicFlist(config, username, linkname)
if not flist.user_exists:
return api_response("user not found", 404)
if not flist.file_exists:
return api_response("source not found", 404)
# remove previous symlink if existing
if os.path.islink(linkflist.target):
os.unlink(linkflist.target)
# if it was not a link but a regular file, we don't overwrite the
# existing flist; we only allow updating links
if os.path.isfile(linkflist.target):
return api_response("link destination is already a file", 401)
cwd = os.getcwd()
os.chdir(flist.user_path)
os.symlink(flist.filename, linkflist.filename)
os.chdir(cwd)
return api_response()
def api_promote(username, sourcerepo, sourcefile, targetname):
flist = HubPublicFlist(config, sourcerepo, sourcefile)
destination = HubPublicFlist(config, username, targetname)
if not flist.user_exists:
return api_response("user not found", 404)
if not flist.file_exists:
return api_response("source not found", 404)
# remove previous file if existing
if os.path.exists(destination.target):
os.unlink(destination.target)
print("[+] promote: %s -> %s" % (flist.target, destination.target))
shutil.copy(flist.target, destination.target)
return api_response()
def api_flist_upload(req, username, validate=False):
# check if the post request has the file part
if 'file' not in req.files:
return {'status': 'error', 'message': 'no file found'}
file = req.files['file']
# if the user does not select a file, the browser
# submits an empty part without a filename
if file.filename == '':
return {'status': 'error', 'message': 'no file selected'}
if not allowed_file(file.filename, validate):
return {'status': 'error', 'message': 'this file is not allowed'}
#
# processing the file
#
filename = secure_filename(file.filename)
print("[+] saving file")
source = os.path.join(config['upload-directory'], filename)
file.save(source)
cleanfilename = file_from_flist(filename)
flist = HubPublicFlist(config, username, cleanfilename)
# validate if the flist exists
if not validate:
# extracting archive to workspace
workspace = flist.raw.workspace()
# create the flist
flist.raw.unpack(source, workspace.name)
flist.raw.initialize(workspace.name)
flist.raw.insert(workspace.name)
flist.raw.upload()
else:
# loads content
flist.raw.loads(source)
if not flist.raw.validate():
return {'status': 'error', 'message': 'unauthorized upload, contents is not fully present on backend'}
flist.raw.commit()
flist.user_create()
flist.raw.pack(flist.target)
# removing uploaded source file
os.unlink(source)
return {'status': 'success', 'flist': flist.filename, 'home': username, 'count': 0, 'timing': {}}
def api_repositories():
root = sorted(os.listdir(config['public-directory']))
output = []
for user in root:
target = os.path.join(config['public-directory'], user)
# ignore files (eg: .keep file)
if not os.path.isdir(target):
continue
official = (user in config['PUBLIC_OFFICIALS'])
output.append({'name': user, 'official': official})
return output
def api_contents(flist):
flist.raw.loads(flist.target)
contents = flist.raw.listing()
return contents
def api_flist_info(flist):
stat = os.lstat(flist.target)
file = os.path.basename(flist.target)
contents = {
'name': file,
'size': stat.st_size,
'updated': int(stat.st_mtime),
'type': 'regular',
'md5': flist.checksum,
}
if S_ISLNK(stat.st_mode):
contents['type'] = 'symlink'
contents['target'] = flist.target
contents['size'] = 0
return contents
def api_response(error=None, code=200, extra=None):
reply = {"status": "success"}
if error:
reply = {"status": "error", "message": error}
if extra:
reply['payload'] = extra
response = make_response(json.dumps(reply) + "\n", code)
response.headers["Content-Type"] = "application/json"
return response
```
#### File: blueprints/gridcapacity/__init__.py
```python
import os
import sys
import datetime
from flask import Blueprint, jsonify, url_for
from jumpscale import j
from .flask_itsyouonline import configure, callback
from . import settings
name = j.sal.fs.getDirName(__file__,True)
print("NAME::: ", name)
blueprint = Blueprint(
'%s_blueprint'%name,
__name__,
url_prefix="/%s"%name,
template_folder='templates',
static_folder='static'
)
configure(blueprint, settings.IYO_CLIENTID, settings.IYO_SECRET, settings.IYO_CALLBACK, '/gridcapacity/callback', None, True, True, 'organization')
@blueprint.app_template_filter()
def uptime(seconds):
if not seconds:
return "not available"
delta = datetime.timedelta(seconds=seconds)
# manually compute hh:mm:ss
hrs = int(delta.seconds / 3600)
min = int((delta.seconds - (hrs * 3600)) / 60)
sec = delta.seconds % 60
if delta.days > 0:
return '%d days, %02d:%02d:%02d' % (delta.days, hrs, min, sec)
return '%02d:%02d:%02d' % (hrs, min, sec)
@blueprint.app_template_filter()
def deltatime_color(time):
"""
return a color base on the delta time between now and time
:param time: time we when to compare
:type time: datetime.datetime
:return: color
:rtype: str
"""
if not time:
return 'danger'
delta = (datetime.datetime.now() - time).total_seconds()
if delta <= 600: # 10 minutes or less
return 'success'
if 600 < delta and delta < 900: # between 10 and 15 minutes
return 'warning'
if delta > 900: # more than 15 minutes
return 'danger'
@blueprint.app_template_filter()
def node_status(time):
"""
return a color base on the delta time between now and time
:param time: time we when to compare
:type time: datetime.datetime
:return: color
:rtype: str
"""
if not time:
return 'down'
delta = (datetime.datetime.now() - time).total_seconds()
if delta <= 600: # 10 minutes or less
return 'up'
if 600 < delta and delta < 900: # between 10 and 15 minutes
return 'likely down'
if delta > 900: # more than 15 minutes
return 'down'
@blueprint.errorhandler(500)
def internal_error(err):
_, _, exc_traceback = sys.exc_info()
eco = j.core.errorhandler.parsePythonExceptionObject(err, tb=exc_traceback)
return jsonify(code=500, message=eco.errormessage, stack_trace=eco.traceback), 500
```
#### File: server/handlers/ListFarmersHandler.py
```python
from flask import request, jsonify
from gridcapacity.models import FarmerRegistration
def ListFarmersHandler():
farmers = FarmerRegistration.list()
output = []
for farmer in farmers:
f = farmer.ddict_hr
f['iyo_organization'] = f.pop('id')
output.append(f)
return jsonify(output)
```
|
{
"source": "jdeltoft/THEbutton",
"score": 2
}
|
#### File: jdeltoft/THEbutton/squeezeboxButton.py
```python
from machine import Pin
import urequests
import time
import json
import gc
import network
import re
import urandom
from neopixel import NeoPixel
## Constants
PLAY_PAUSE = const(1)
STOP = const(2)
VOL_UP = const(3)
VOL_DN = const(4)
NEXT_SONG = const(5)
PREV_SONG = const(6)
MIN_PRESS_THRESH_MS = const(580)
LONG_PRESS_THRESH_MS = const(800)
## Globals
url = "http://192.168.1.100:9000/jsonrpc.js" ## URL for SqueezeServer TODO: fill in your ip and port
btnPressed = False
## JSON button type data to send TODO: fill in your player's MAC address
play_t = { "method": "slim.request", "params": [ "aa:bb:cc:dd:ee:ff", [ "button", "play" ] ], "id": 0 }
playpause_t = { "method": "slim.request", "params": [ "aa:bb:cc:dd:ee:ff", [ "button", "pause.single" ] ], "id": 0 }
status_t = { "method": "slim.request", "params": [ "aa:bb:cc:dd:ee:ff", [ "status" ] ], "id": 0 }
stop_t = { "method": "slim.request", "params": [ "aa:bb:cc:dd:ee:ff", [ "button", "stop" ] ], "id": 0 }
volup_t = { "method": "slim.request", "params": [ "aa:bb:cc:dd:ee:ff", [ "mixer", "volume", "+5"] ], "id": 0 }
voldn_t = { "method": "slim.request", "params": [ "aa:bb:cc:dd:ee:ff", [ "mixer", "volume", "-5"] ], "id": 0 }
nextsong_t = { "method": "slim.request", "params": [ "aa:bb:cc:dd:ee:ff", [ "button", "fwd.single"] ], "id": 0 }
prevsong_t = { "method": "slim.request", "params": [ "aa:bb:cc:dd:ee:ff", [ "button", "rew.single"] ], "id": 0 }
## TODO: pick your pin for a Neopixel (OPTIONAL)
np_pin = Pin(13, Pin.OUT) # set GPIO0 to output to drive NeoPixels
np = NeoPixel(np_pin, 8) # create NeoPixel driver on GPIO0 for 8 pixels
## UTIL Functions
def btn_isr(pin):
global btnPressed
btnPressed = True
def do_connect():
import network
sta_if = network.WLAN(network.STA_IF)
if not sta_if.isconnected():
#print('connecting to network...')
sta_if.active(True)
sta_if.connect('YourSSID', 'YourPassPhrase') ## TODO: fill in your SSID and passphrase
while not sta_if.isconnected():
pass
## set led green when connected
np[0] = (0, 222, 0)
np.write()
#print('network config:', sta_if.ifconfig())
def handle_btns(btnType):
print ("handlebtn:"+str(btnType)) ## DEBUG
r = None
## random color on each press to know something is happening
np[0] = (urandom.getrandbits(8), urandom.getrandbits(8),urandom.getrandbits(8))
np.write()
if btnType == PLAY_PAUSE:
status = urequests.post(url, data=json.dumps(status_t))
mode = status.json()['result']['mode']
status.close()
if (mode == 'pause'):
r = urequests.post(url, data=json.dumps(playpause_t))
elif (mode == 'play'):
r = urequests.post(url, data=json.dumps(playpause_t))
elif (mode == 'stop'):
r = urequests.post(url, data=json.dumps(play_t))
else:
r = urequests.post(url, data=json.dumps(play_t))
elif btnType == STOP:
r = urequests.post(url, data=json.dumps(stop_t))
elif btnType == VOL_UP:
r = urequests.post(url, data=json.dumps(volup_t))
elif btnType == VOL_DN:
r = urequests.post(url, data=json.dumps(voldn_t))
elif btnType == NEXT_SONG:
r = urequests.post(url, data=json.dumps(nextsong_t))
elif btnType == PREV_SONG:
r = urequests.post(url, data=json.dumps(prevsong_t))
else:
pass
if (r):
r.close()
#gc.collect() ## don't need this now that we close the request above
## Setup Button TODO: make sure to use the pin of your choice
## (NODEMCU mapping can be found in the internet forums)
btn = Pin(14, Pin.IN, Pin.PULL_UP)
btn.irq(trigger=Pin.IRQ_FALLING, handler=btn_isr)
## clear the led at boot up
np[0] = (0, 0, 0)
np.write()
gc.enable()
do_connect()
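## Press decoding implemented below: a short single press toggles play/pause,
## a double press skips to the next song, a triple press stops playback.
## Holding the button past LONG_PRESS_THRESH_MS after a single press repeatedly
## lowers the volume; holding after a double press repeatedly raises it.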
def mainLoop():
global btnPressed
pressCount = 0
longFlag = False
while True:
if (pressCount > 0):
tmpPress = time.ticks_ms()
delta = tmpPress - lastPress
if (delta > MIN_PRESS_THRESH_MS):
## button sequence must be over, check for long last press
while (btn.value() == 0) and (longFlag == False):
## TODO: DOES THIS CODE EVEN HIT???
#print ("d:"+str(delta))
time.sleep_ms(1)
tmpPress = time.ticks_ms()
delta = tmpPress - lastPress
#print ("final d:"+str(delta))
if (delta > LONG_PRESS_THRESH_MS):
longFlag = True
if (longFlag):
if (pressCount == 1):
print("singlePressLong")
elif (pressCount == 2):
print("doublePressLong")
elif (pressCount >= 3):
print("triplePressLong")
else:
if (pressCount == 1):
handle_btns(PLAY_PAUSE)
elif (pressCount == 2):
handle_btns(NEXT_SONG)
elif (pressCount >= 3):
handle_btns(STOP)
print("triplePress")
pressCount = 0
longFlag = False
if (btnPressed):
pressCount += 1
lastPress = time.ticks_ms()
keepGoing = True
while (keepGoing):
if (btn.value() == 0):
tmpPress = time.ticks_ms()
delta = tmpPress - lastPress
if (delta > LONG_PRESS_THRESH_MS):
time.sleep_ms(600)
## Only do this for single and double which are volume control
if (pressCount == 1):
print(".",end="") ## single long hold
handle_btns(VOL_DN)
elif (pressCount == 2):
print("=",end="") ## double long hold
handle_btns(VOL_UP)
elif (btn.value() == 1):
## require three back to back HIGH for cheap debounce
time.sleep_ms(10);
if (btn.value() == 1):
time.sleep_ms(10);
if (btn.value() == 1):
keepGoing = False
time.sleep_ms(10);
btnPressed = False
##tmpPress = time.ticks_ms()
##delta = tmpPress - lastPress
#time.sleep_ms(1);
## Call main loop now
mainLoop()
```
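The firmware above drives the Logitech Media Server JSON-RPC endpoint with `urequests`; the same call can be tried from a desktop Python shell. A sketch using CPython `requests` instead of MicroPython `urequests`, with the server URL and player MAC address as placeholders:
```python
# Desktop-side test of the same slim.request call made by the button firmware.
# URL and player MAC address are placeholders -- substitute your own values.
import json
import requests

url = "http://192.168.1.100:9000/jsonrpc.js"
volup = {"method": "slim.request",
         "params": ["aa:bb:cc:dd:ee:ff", ["mixer", "volume", "+5"]],
         "id": 0}
resp = requests.post(url, data=json.dumps(volup))
print(resp.status_code, resp.json())
```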
|
{
"source": "jdeluyck/core",
"score": 2
}
|
#### File: scripts/unbound/download_blacklists.py
```python
import os
import sys
import re
import syslog
import tempfile
import time
import fcntl
from configparser import ConfigParser
import requests
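# Overall flow: read /var/unbound/etc/blacklists.ini, build one case-insensitive
# exclude regex from the [exclude] section, stream every list in [blacklists],
# keep entries that look like domains and are not excluded, then write them as
# unbound local-data records (pointing at 0.0.0.0) to /var/unbound/etc/dnsbl.conf.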
def uri_reader(uri):
req_opts = {
'url': uri,
'timeout': 120,
'stream': True
}
try:
req = requests.get(**req_opts)
except Exception as e:
syslog.syslog(syslog.LOG_ERR,'blacklist download : unable to download file from %s (error : %s)' % (uri, e))
return
if req.status_code >= 200 and req.status_code <= 299:
req.raw.decode_content = True
prev_chop = ''
while True:
chop = req.raw.read(1024).decode()
if not chop:
if prev_chop:
yield prev_chop
break
else:
parts = (prev_chop + chop).split('\n')
if parts[-1] != "\n":
prev_chop = parts.pop()
else:
prev_chop = ''
for part in parts:
yield part
else:
syslog.syslog(syslog.LOG_ERR,
'blacklist download : unable to download file from %s (status_code: %d)' % (uri, req.status_code)
)
if __name__ == '__main__':
# check for a running download process, this may take a while so it's better to check...
try:
lck = open('/tmp/unbound-download_blacklists.tmp', 'w+')
fcntl.flock(lck, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
# already running, exit status 99
sys.exit(99)
domain_pattern = re.compile(
r'^(([\da-zA-Z_])([_\w-]{,62})\.){,127}(([\da-zA-Z])[_\w-]{,61})'
r'?([\da-zA-Z]\.((xn\-\-[a-zA-Z\d]+)|([a-zA-Z\d]{2,})))$'
)
startup_time = time.time()
syslog.openlog('unbound', logoption=syslog.LOG_DAEMON, facility=syslog.LOG_LOCAL4)
blacklist_items = set()
if os.path.exists('/var/unbound/etc/blacklists.ini'):
cnf = ConfigParser()
cnf.read('/var/unbound/etc/blacklists.ini')
# exclude (white) lists, compile to regex to be used to filter blacklist entries
if cnf.has_section('exclude'):
exclude_list = set()
for exclude_item in cnf['exclude']:
try:
re.compile(cnf['exclude'][exclude_item], re.IGNORECASE)
exclude_list.add(cnf['exclude'][exclude_item])
except re.error:
syslog.syslog(syslog.LOG_ERR,
'blacklist download : skip invalid whitelist exclude pattern "%s" (%s)' % (
exclude_item, cnf['exclude'][exclude_item]
)
)
if not exclude_list:
exclude_list.add('$^')
wp = '|'.join(exclude_list)
whitelist_pattern = re.compile(wp, re.IGNORECASE)
syslog.syslog(syslog.LOG_NOTICE, 'blacklist download : exclude domains matching %s' % wp)
# fetch all blacklists
if cnf.has_section('blacklists'):
for blacklist in cnf['blacklists']:
file_stats = {'uri': cnf['blacklists'][blacklist], 'skip' : 0, 'blacklist': 0, 'lines' :0}
for line in uri_reader(cnf['blacklists'][blacklist]):
file_stats['lines'] += 1
# cut line into parts before comment marker (if any)
tmp = line.split('#')[0].split()
entry = None
while tmp:
entry = tmp.pop(-1)
if entry not in ['127.0.0.1', '0.0.0.0']:
break
if entry:
domain = entry.lower()
if whitelist_pattern.match(entry):
file_stats['skip'] += 1
else:
if domain_pattern.match(domain):
file_stats['blacklist'] += 1
blacklist_items.add(entry)
else:
file_stats['skip'] += 1
syslog.syslog(
syslog.LOG_NOTICE,
'blacklist download %(uri)s (lines: %(lines)d exclude: %(skip)d black: %(blacklist)d)' % file_stats
)
# write out results
with open("/var/unbound/etc/dnsbl.conf", 'w') as unbound_outf:
if blacklist_items:
unbound_outf.write('server:\n')
for entry in blacklist_items:
unbound_outf.write("local-data: \"%s A 0.0.0.0\"\n" % entry)
syslog.syslog(syslog.LOG_NOTICE, "blacklist download done in %0.2f seconds (%d records)" % (
time.time() - startup_time, len(blacklist_items)
))
```
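The exclude handling above OR-joins the individual whitelist regexes into a single case-insensitive pattern, falling back to `'$^'` (which matches nothing) when no exclude entries are configured. A standalone illustration of that technique; the example patterns and domain names are made up:
```python
# Standalone sketch of the exclude-pattern technique used above; the pattern
# and domain names here are illustrative only.
import re

exclude_list = {r'.*\.example\.com$'}
if not exclude_list:
    exclude_list.add('$^')  # matches nothing, so no entries are excluded
whitelist_pattern = re.compile('|'.join(exclude_list), re.IGNORECASE)

for domain in ('ads.example.com', 'tracker.evil.tld'):
    verdict = 'excluded' if whitelist_pattern.match(domain) else 'blacklisted'
    print(domain, '->', verdict)
```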
|
{
"source": "jdeluyck/default-vpc-removal-lambda",
"score": 2
}
|
#### File: default-vpc-removal-lambda/default_vpc_removal_lambda/app.py
```python
import json
import boto3
import logging
import os
# import requests
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# If true we will do a dry-run
LOCAL_INVOKE = os.getenv("AWS_SAM_LOCAL")
def lambda_handler(event, context):
dry_run = False
if LOCAL_INVOKE == "true":
logger.info("==== Local invocation detected! ====")
session = boto3.session.Session()
account_vpcs = {}
# Do this for every region
for region in get_regions(session):
account_vpcs[region] = find_default_vpc(session, region)
logger.info(f"Default VPCS in every region {json.dumps(account_vpcs)}")
do_operations(session, account_vpcs)
return {
"message": "Function executed successfully!",
"event": event
}
def get_regions(session):
"""
Get all regions from the account. If new regions are added, they are picked up
dynamically through this function.
:param session: The general session from Boto3
:return: List of regions
"""
client = session.client("ec2")
result = []
response = client.describe_regions()
for region_obj in response['Regions']:
result.append(region_obj['RegionName'])
return result
def find_default_vpc(session, region_name):
"""
Looks for every default VPC in a region
:param session: Boto3 Session
:param region_name: The region name to look in
:return: list of vpc id's
"""
result = []
client = session.client("ec2", region_name)
response = client.describe_vpcs(
Filters=[{
'Name': 'isDefault',
'Values': ['true']
}]
)
for vpc in response['Vpcs']:
if vpc['IsDefault']:
result.append(vpc['VpcId'])
return result
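# Default-VPC teardown must follow dependency order: NAT gateways and network
# interfaces first, then security groups, peering connections, network ACLs,
# subnets and route tables, and only then the internet gateway and the VPC itself.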
def do_operations(session, vpc_dict):
for region, vpc_ids in vpc_dict.items():
client = session.client('ec2', region)
ec2 = session.resource('ec2', region)
for vpc_id in vpc_ids:
logger.info(f"Remove VPC {vpc_id} in {region}")
vpc = ec2.Vpc(vpc_id)
delete_nat_gws(client, vpc_id)
delete_network_interfaces(vpc.network_interfaces.all())
delete_security_groups(vpc.security_groups.all())
delete_vpc_peering_connections(vpc.accepted_vpc_peering_connections.all())
delete_vpc_peering_connections(vpc.requested_vpc_peering_connections.all())
delete_network_acls(vpc.network_acls.all())
delete_subnets(vpc.subnets.all())
delete_route_tables(vpc.route_tables.all())
detach_and_delete_internet_gateway(vpc.internet_gateways.all())
logger.info(f"Delete VPC: {vpc.id}")
if LOCAL_INVOKE != "true":
vpc.delete()
def delete_nat_gws(client,vpc_id):
response = client.describe_nat_gateways(
Filters=[
{
'Name': 'vpc-id',
'Values': [
vpc_id,
]
},
],
)
for nat_gateway in response['NatGateways']:
logger.info(f"Deleting NAT GW: {nat_gateway['NatGatewayId']}")
if LOCAL_INVOKE != "true":
client.delete_nat_gateway(NatGatewayId=nat_gateway['NatGatewayId'])
def delete_vpc_peering_connections(peering_connections):
for peering in peering_connections:
logger.info(f"Deleting VPC Peering Connections: {peering.id}")
if LOCAL_INVOKE != "true":
peering.delete()
def delete_network_interfaces(network_interfaces):
for network_interface in network_interfaces:
logger.info(f"Deleting Network Interfaces: {network_interface.network_interface_id}")
if LOCAL_INVOKE != "true":
network_interface.delete()
def delete_network_acls(network_acls):
for acl in network_acls:
logger.info(f"Deleting Network ACL: {acl.id}")
if LOCAL_INVOKE != "true" and acl.is_default is not True:
acl.delete()
def delete_route_tables(route_tables):
for rt in route_tables:
is_main = False
logger.info(f"Deleting Route Table: {rt.id}")
for association in rt.associations:
if association.main is True:
is_main = True
if LOCAL_INVOKE != "true" and is_main is not True:
rt.delete()
def delete_security_groups(security_groups):
for sg in security_groups:
logger.info(f"Delete Security Group: {sg.id}")
if LOCAL_INVOKE != "true" and sg.group_name != "default":
sg.delete()
def delete_subnets(subnets):
for subnet in subnets:
logger.info(f"Delete Subnet: {subnet.id}")
if LOCAL_INVOKE != "true":
subnet.delete()
def detach_and_delete_internet_gateway(gws):
for gw in gws:
logger.info(f"Detach and Delete: {gw.id}")
logger.info(f"{gw.attachments}")
for attachment in gw.attachments:
logger.info(f"Detatch from Vpc: {attachment['VpcId']}")
if LOCAL_INVOKE != "true":
gw.detach_from_vpc(VpcId=attachment['VpcId'])
if LOCAL_INVOKE != "true":
gw.delete()
```
|
{
"source": "jdeluyck/ical_to_gcal_sync",
"score": 3
}
|
#### File: jdeluyck/ical_to_gcal_sync/ical_to_gcal_sync.py
```python
from __future__ import print_function
import logging
import time
import sys
import string
import re
import googleapiclient
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from googleapiclient.errors import HttpError
import requests
import ics
import arrow
import httplib2
from config import *
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename=LOGFILE, mode='a')
handler.setFormatter(logging.Formatter('%(asctime)s|[%(levelname)s] %(message)s'))
logger.addHandler(handler)
def get_current_events():
"""Retrieves data from iCal iCal feed and returns an ics.Calendar object
containing the parsed data.
Returns the parsed Calendar object or None if an error occurs.
"""
resp = requests.get(ICAL_FEED)
if resp.status_code != 200:
logger.error('> Error retrieving iCal feed!')
return None
try:
cal = ics.Calendar(resp.text)
except:
logger.error('> Error parsing iCal data')
return None
return cal
# modified from Google Calendar API quickstart example
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
store = Storage(CREDENTIAL_PATH)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
credentials = tools.run_flow(flow, store, None)
return credentials
def get_gcal_events(service, from_time):
"""Retrieves the current set of Google Calendar events from the selected
user calendar. Only includes upcoming events (those taking place from start
of the current day).
Returns a dict containing the event(s) existing in the calendar.
"""
eventsResult = service.events().list(calendarId=CALENDAR_ID, timeMin=from_time, maxResults=CALENDAR_MAX_EVENTS, singleEvents=True, orderBy='startTime', showDeleted=True).execute()
events = eventsResult.get('items', [])
logger.info('> Found %d upcoming events in Google Calendar' % len(events))
return events
def delete_all_events(service):
for gc in get_gcal_events(service):
try:
service.events().delete(calendarId=CALENDAR_ID, eventId=gc['id']).execute()
time.sleep(API_SLEEP_TIME)
except googleapiclient.errors.HttpError:
pass # event already marked as deleted
def get_gcal_datetime(arrow_datetime, gcal_timezone):
arrow_datetime = arrow_datetime.to(gcal_timezone)
return {u'dateTime': arrow_datetime.format('YYYY-MM-DDTHH:mm:ssZZ'), 'timeZone': gcal_timezone}
def get_gcal_date(arrow_datetime):
return {u'date': arrow_datetime.format('YYYY-MM-DD')}
def create_id(uid, begintime, endtime):
""" Converts ical UUID, begin and endtime to a valid Gcal ID
Characters allowed in the ID are those used in base32hex encoding, i.e. lowercase letters a-v and digits 0-9, see section 3.1.2 in RFC2938
The length of the ID must be between 5 and 1024 characters
https://developers.google.com/resources/api-libraries/documentation/calendar/v3/python/latest/calendar_v3.events.html
Returns:
ID
"""
allowed_chars = string.ascii_lowercase[:22] + string.digits
return re.sub('[^%s]' % allowed_chars, '', uid.lower()) + str(arrow.get(begintime).timestamp) + str(arrow.get(endtime).timestamp)
def is_multiday_event(ical_event):
delta = arrow.get(ical_event.end) - arrow.get(ical_event.begin)
if delta.days >= 1:
return True
else:
return False
if __name__ == '__main__':
# setting up Google Calendar API for use
logger.debug('> Loading credentials')
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http)
# retrieve events from Google Calendar, starting from beginning of current day
today = arrow.now().replace(hour=0, minute=0, second=0, microsecond=0)
logger.info('> Retrieving events from Google Calendar')
gcal_events = get_gcal_events(service, today.isoformat())
# retrieve events from the iCal feed
logger.info('> Retrieving events from iCal feed')
ical_cal = get_current_events()
# convert iCal event list into a dict indexed by (converted) iCal UID
ical_events = {}
for ev in ical_cal.events:
# filter out events in the past, don't care about syncing them
if arrow.get(ev.begin) > today:
ical_events[create_id(ev.uid, ev.begin, ev.end)] = ev
# retrieve the Google Calendar object itself
gcal_cal = service.calendars().get(calendarId=CALENDAR_ID).execute()
logger.info('> Processing Google Calendar events...')
gcal_event_ids = [ev['id'] for ev in gcal_events]
# first check the set of Google Calendar events against the list of iCal
# events. Any events in Google Calendar that are no longer in iCal feed
# get deleted. Any events still present but with changed start/end times
# get updated.
for gcal_event in gcal_events:
name = gcal_event['summary']
eid = gcal_event['id']
if eid not in ical_events:
# if a gcal event has been deleted from iCal, also delete it from gcal.
# Apparently calling delete() only marks an event as "deleted" but doesn't
# remove it from the calendar, so it will continue to stick around.
# If you keep seeing messages about events being deleted here, you can
# try going to the Google Calendar site, opening the options menu for
# your calendar, selecting "View bin" and then clicking "Empty bin
# now" to completely delete these events.
try:
logger.info('> Deleting event "%s" from Google Calendar...' % name)
service.events().delete(calendarId=CALENDAR_ID, eventId=eid).execute()
time.sleep(API_SLEEP_TIME)
except googleapiclient.errors.HttpError:
pass # event already marked as deleted
else:
# Gcal items which are still in the iCal file
ical_event = ical_events[eid]
gcal_begin = arrow.get(gcal_event['start'].get('dateTime', gcal_event['start'].get('date')))
gcal_end = arrow.get(gcal_event['end'].get('dateTime', gcal_event['end'].get('date')))
# Location changes?
if 'location' in gcal_event:
gcal_location = True
else:
gcal_location = False
if ical_event.location:
ical_location = True
else:
ical_location = False
# if the iCal event has a different start/end time from the gcal event,
# update the latter with the datetimes from the iCal event. Same if
# event name has changed, or the location has changed.
if gcal_begin != ical_event.begin \
or gcal_end != ical_event.end \
or gcal_event['summary'] != ical_event.name \
or gcal_location != ical_location \
or gcal_location and gcal_event['location'] != ical_event.location:
logger.info('> Updating event "%s" due to changes ...' % (name))
#delta = arrow.get(ical_event.end) - arrow.get(ical_event.begin)
# all-day events handled slightly differently
if is_multiday_event(ical_event):
gcal_event['start'] = get_gcal_date(ical_event.begin)
gcal_event['end'] = get_gcal_date(ical_event.end)
else:
gcal_event['start'] = get_gcal_datetime(ical_event.begin, gcal_cal['timeZone'])
if ical_event.has_end:
gcal_event['end'] = get_gcal_datetime(ical_event.end, gcal_cal['timeZone'])
gcal_event['summary'] = ical_event.name
gcal_event['description'] = ical_event.description
gcal_event['location'] = ical_event.location
gcal_event['recurrence'] = ical_event.rrule
service.events().update(calendarId=CALENDAR_ID, eventId=eid, body=gcal_event).execute()
time.sleep(API_SLEEP_TIME)
# now add any iCal events not already in the Google Calendar
logger.info('> Processing iCal events...')
for ical_event in ical_events.values():
if create_id(ical_event.uid, ical_event.begin, ical_event.end) not in gcal_event_ids:
gcal_event = {}
gcal_event['summary'] = ical_event.name
gcal_event['id'] = create_id(ical_event.uid, ical_event.begin, ical_event.end)
gcal_event['description'] = '%s (Imported from mycal.py)' % ical_event.description
gcal_event['location'] = ical_event.location
gcal_event['recurrence'] = ical_event.rrule
# check if no time specified in iCal, treat as all day event if so
#delta = arrow.get(ical_event.end) - arrow.get(ical_event.begin)
if is_multiday_event(ical_event):
gcal_event['start'] = get_gcal_date(ical_event.begin)
logger.info('iCal all-day event %s to be added at %s' % (ical_event.name, ical_event.begin))
if ical_event.has_end:
gcal_event['end'] = get_gcal_date(ical_event.end)
else:
gcal_event['start'] = get_gcal_datetime(ical_event.begin, gcal_cal['timeZone'])
logger.info('iCal event %s to be added at %s' % (ical_event.name, ical_event.begin))
if ical_event.has_end:
gcal_event['end'] = get_gcal_datetime(ical_event.end, gcal_cal['timeZone'])
try:
time.sleep(API_SLEEP_TIME)
service.events().insert(calendarId=CALENDAR_ID, body=gcal_event).execute()
except HttpError as err:
time.sleep(API_SLEEP_TIME)
if err.resp.status == 409: # Resource Already Exists
logger.info('iCal event %s updated' % ical_event.name)
service.events().update(calendarId=CALENDAR_ID, eventId=gcal_event['id'], body=gcal_event).execute()
else:
logger.error('HTTP Error %s' % err.resp.status)
raise
except:
logger.error ("Unexpected error:", sys.exc_info()[0])
raise
```
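`create_id()` above builds Google Calendar event ids from the iCal UID plus begin/end timestamps, keeping only the base32hex characters Google accepts. A standalone sketch of the same sanitising rule, using plain integer timestamps in place of `arrow` objects; the UID is made up:
```python
# Standalone sketch of the id-sanitising rule from create_id(); the UID and
# timestamps are illustrative, and plain ints stand in for arrow timestamps.
import re
import string

allowed_chars = string.ascii_lowercase[:22] + string.digits  # base32hex: a-v, 0-9
uid = "ABC-123@example.com"
begin_ts, end_ts = 1714000000, 1714003600
event_id = re.sub('[^%s]' % allowed_chars, '', uid.lower()) + str(begin_ts) + str(end_ts)
print(event_id)  # -> abc123eamplecom17140000001714003600
```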
|
{
"source": "jdeluyck/zfs_uploader",
"score": 2
}
|
#### File: zfs_uploader/tests/test_backup_db.py
```python
import unittest
import warnings
from zfs_uploader.config import Config
from zfs_uploader.backup_db import BackupDB
class BackupDBTests(unittest.TestCase):
def setUp(self):
warnings.filterwarnings("ignore", category=ResourceWarning,
message="unclosed.*<ssl.SSLSocket.*>")
config = Config('config.cfg')
self.job = next(iter(config.jobs.values()))
self.bucket = self.job.bucket
self.filesystem = self.job.filesystem
def tearDown(self):
for item in self.bucket.objects.all():
item.delete()
def test_create_backup_db(self):
""" Test if backup.db file is properly uploaded/downloaded. """
# Given
backup_db = BackupDB(self.bucket, self.filesystem)
backup_time = '20210425_201838'
backup_type = 'full'
s3_key = f'{self.filesystem}/{backup_time}.{backup_type}'
# When
backup_db.create_backup(backup_time, backup_type, s3_key)
# Then
backup_db_new = BackupDB(self.bucket, self.filesystem)
self.assertEqual(
backup_db.get_backup(backup_time),
backup_db_new.get_backup(backup_time)
)
def test_delete_backup(self):
""" Test delete backup from backup_db. """
# Given
backup_db = BackupDB(self.bucket, self.filesystem)
backup_time = '20210425_201838'
backup_type = 'full'
s3_key = f'{self.filesystem}/{backup_time}.{backup_type}'
backup_db.create_backup(backup_time, backup_type, s3_key)
backup_db.get_backup(backup_time)
# When
backup_db.delete_backup(backup_time)
# Then
self.assertRaises(KeyError, backup_db.get_backup, backup_time)
def test_existing_backup(self):
""" Test create existing backup. """
# Given
backup_db = BackupDB(self.bucket, self.filesystem)
backup_time = '20210425_201838'
backup_type = 'full'
s3_key = f'{self.filesystem}/{backup_time}.{backup_type}'
# When
backup_db.create_backup(backup_time, backup_type, s3_key)
# Then
self.assertRaises(ValueError, backup_db.create_backup, backup_time,
backup_type, s3_key)
def test_bad_backup_time(self):
""" Test create backup with bad backup_time. """
# Given
backup_db = BackupDB(self.bucket, self.filesystem)
backup_time = '20210425-201838'
backup_type = 'full'
s3_key = f'{self.filesystem}/{backup_time}.{backup_type}'
# Then
self.assertRaises(ValueError, backup_db.create_backup, backup_time,
backup_type, s3_key)
def test_bad_backup_type(self):
""" Test create backup with bad backup type. """
# Given
backup_db = BackupDB(self.bucket, self.filesystem)
backup_time = '20210425_201838'
backup_type = 'badtype'
s3_key = f'{self.filesystem}/{backup_time}.{backup_type}'
# Then
self.assertRaises(ValueError, backup_db.create_backup, backup_time,
backup_type, s3_key)
def test_bad_dependency(self):
""" Test creating a backup with a bad dependency. """
# Given
backup_db = BackupDB(self.bucket, self.filesystem)
backup_time = '20210425_201838'
backup_type = 'full'
s3_key = f'{self.filesystem}/{backup_time}.{backup_type}'
dependency = '20200425-201838'
# Then
self.assertRaises(ValueError, backup_db.create_backup, backup_time,
backup_type, s3_key, dependency)
```
#### File: zfs_uploader/tests/test_zfs.py
```python
import os
import unittest
from zfs_uploader.config import Config
from zfs_uploader.zfs import (create_filesystem, create_snapshot,
destroy_filesystem, destroy_snapshot,
open_snapshot_stream,
open_snapshot_stream_inc, list_snapshots)
class ZFSTests(unittest.TestCase):
def setUp(self):
# Given
config = Config('config.cfg')
job = next(iter(config.jobs.values()))
self.filesystem = job.filesystem
self.snapshot_name = 'snap_1'
self.test_file = f'/{self.filesystem}/test_file'
self.test_data = str(list(range(100_000)))
out = create_filesystem(self.filesystem)
self.assertEqual(0, out.returncode, msg=out.stderr)
with open(self.test_file, 'w') as f:
f.write(self.test_data)
def tearDown(self):
out = destroy_filesystem(self.filesystem)
if out.returncode:
self.assertIn('dataset does not exist', out.stderr)
def test_create_snapshot(self):
""" Create snapshot. """
# When
out = create_snapshot(self.filesystem, self.snapshot_name)
self.assertEqual(0, out.returncode, msg=out.stderr)
# Then
out = list_snapshots()
self.assertIn(f'{self.filesystem}@{self.snapshot_name}',
list(out.keys()))
def test_create_incremental_snapshot(self):
""" Create incremental snapshot. """
# When
out = create_snapshot(self.filesystem, self.snapshot_name)
self.assertEqual(0, out.returncode, msg=out.stderr)
with open(self.test_file, 'a') as f:
f.write('append')
out = create_snapshot(self.filesystem, 'snap_2')
self.assertEqual(0, out.returncode, msg=out.stderr)
# Then
with open_snapshot_stream(self.filesystem, self.snapshot_name,
'r') as f:
snapshot = f.stdout.read()
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
self.assertIn(b'1, 2', snapshot)
self.assertNotIn(b'append', snapshot)
with open_snapshot_stream_inc(self.filesystem, self.snapshot_name,
'snap_2') as f:
snapshot = f.stdout.read()
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
self.assertIn(b'append', snapshot)
self.assertNotIn(b'1, 2', snapshot)
def test_restore_filesystem(self):
""" Restore filesystem from snapshot stream. """
# Given
out = create_snapshot(self.filesystem, self.snapshot_name)
self.assertEqual(0, out.returncode, msg=out.stderr)
# When
with open_snapshot_stream(self.filesystem, self.snapshot_name,
'r') as f:
snapshot = f.stdout.read()
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
out = destroy_filesystem(self.filesystem)
self.assertEqual(0, out.returncode, msg=out.stderr)
# Then
with open_snapshot_stream(self.filesystem, self.snapshot_name,
'w') as f:
f.stdin.write(snapshot)
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
with open(self.test_file, 'r') as f:
out = f.read()
self.assertEqual(self.test_data, out)
def test_restore_filesystem_with_increment(self):
""" Restore filesystem from initial and increment snapshot stream. """
# Given
out = create_snapshot(self.filesystem, self.snapshot_name)
self.assertEqual(0, out.returncode, msg=out.stderr)
with open(self.test_file, 'a') as f:
f.write('append')
out = create_snapshot(self.filesystem, 'snap_2')
self.assertEqual(0, out.returncode, msg=out.stderr)
# When
with open_snapshot_stream(self.filesystem, self.snapshot_name,
'r') as f:
snapshot_initial = f.stdout.read()
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
with open_snapshot_stream_inc(self.filesystem, self.snapshot_name,
'snap_2') as f:
snapshot_increment = f.stdout.read()
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
out = destroy_filesystem(self.filesystem)
self.assertEqual(0, out.returncode, msg=out.stderr)
# Then
with open_snapshot_stream(self.filesystem, self.snapshot_name,
'w') as f:
f.stdin.write(snapshot_initial)
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
with open_snapshot_stream(self.filesystem, 'snap_2', 'w') as f:
f.stdin.write(snapshot_increment)
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
with open(self.test_file, 'r') as f:
out = f.read()
self.assertEqual(self.test_data + 'append', out)
def test_destroy_filesystem(self):
""" Destroy filesystem. """
out = destroy_filesystem(self.filesystem)
self.assertEqual(0, out.returncode, msg=out.stderr)
self.assertFalse(os.path.isfile(self.test_file))
def test_destroy_snapshot(self):
""" Destroy snapshot. """
# Given
out = create_snapshot(self.filesystem, self.snapshot_name)
self.assertEqual(0, out.returncode, msg=out.stderr)
out = create_snapshot(self.filesystem, 'snap_2')
self.assertEqual(0, out.returncode, msg=out.stderr)
# When
out = destroy_snapshot(self.filesystem, self.snapshot_name)
self.assertEqual(0, out.returncode, msg=out.stderr)
# Then
out = list_snapshots()
self.assertNotIn(f'{self.filesystem}@{self.snapshot_name}',
list(out.keys()))
self.assertIn(f'{self.filesystem}@snap_2', list(out.keys()))
```
|
{
"source": "jdemel/GFDM-PHY-Reference",
"score": 3
}
|
#### File: GFDM-PHY-Reference/sdr_utils/waveform.py
```python
import numpy as np
import logging
logging.basicConfig(format='%(levelname)s:%(message)s',level=logging.INFO)
def psd(iq):
return np.fft.fftshift(20*np.log10(np.abs(np.fft.fft(iq))))
def get_tone( tone_freq = 1 * 10 ** 6, sampling_rate = 10 * 10 ** 6 ,samples = 2048):
'''returns complex baseband tone with given parameters'''
t = np.arange(samples)/sampling_rate
# q = np.sin(2*np.pi*tone_freq*t)
# i = np.cos(2*np.pi*tone_freq*t)
#return i + 1j*q
return np.exp(1j*2*np.pi*tone_freq * t)
def get_chirp(sampling_rate = 10*10**6,f_start = 1*10**6, f_stop = 2 * 10**6, samples = 2048):
t = np.arange(samples)/sampling_rate
f = np.linspace(f_start,f_stop,samples)
return np.exp(1j*2*np.pi*f*t)
def get_random(samples = 2048):
"""Returns sequence of random comples samples """
return 2*(np.random.sample((samples,)) + 1j*np.random.sample((samples,))) - (1+1j)
def plot_transmission(tx_signal, rx_signal):
'''Plots two given signals in time and frequency domains'''
from matplotlib.pyplot import figure, show
freq = np.linspace(-1/2,1/2,len(tx_signal))
fig = figure(1)
ax1 = fig.add_subplot(211)
ax1.plot(freq,psd(tx_signal))
ax1.grid(True)
ax1.set_xlim((-1/2, 1/2))
ax1.set_ylabel('psd tx signal')
freq = np.linspace(-1/2,1/2,len(rx_signal))
ax2 = fig.add_subplot(212)
ax2.plot(freq,psd(rx_signal))
ax2.grid(True)
ax2.set_xlim((-1/2, 1/2))
ax2.set_xlabel('frequency')
ax2.set_ylabel('psd rx signal')
fig = figure(2)
ax1 = fig.add_subplot(211)
ax1.plot(tx_signal.real)
ax1.plot(tx_signal.imag)
ax1.grid(True)
ax1.set_ylabel('amplitude tx signal')
ax2 = fig.add_subplot(212)
ax2.plot(rx_signal.real)
ax2.plot(rx_signal.imag)
ax2.grid(True)
ax2.set_xlabel('sample')
ax2.set_ylabel('amplitude rx signal')
show()
def plot_two_signals(signal_1, signal_2, same_axis = False, show_plot = True):
from matplotlib.pyplot import figure, show
if same_axis:
fig = figure(1)
ax_1 = fig.add_subplot(1,1,1)
ax_1.plot(signal_1.real)
if isinstance(signal_1[0], complex):
ax_1.plot(signal_1.imag)
ax_1.plot(signal_2.real, dashes = [6,2])
if isinstance(signal_2[0], complex):
ax_1.plot(signal_2.imag, dashes = [6,2])
if show_plot: show()
return ax_1
else:
fig = figure(1)
ax_1 = fig.add_subplot(2,1,1)
ax_1.plot(signal_1.real)
if isinstance(signal_1[0], complex):
ax_1.plot(signal_1.imag)
ax_2 = fig.add_subplot(2,1,2)
ax_2.plot(signal_2.real)
if isinstance(signal_2[0], complex):
ax_2.plot(signal_2.imag)
if show_plot: show()
return ax_1, ax_2
```
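Since the helpers above take plain keyword arguments and return NumPy arrays, a minimal usage sketch looks like this (the `sdr_utils.waveform` import path is assumed from the file header):
```python
# Minimal sketch using the waveform helpers above; the import path is assumed.
import numpy as np
from sdr_utils.waveform import get_tone, get_chirp, get_random, psd

tone = get_tone(tone_freq=1e6, sampling_rate=10e6, samples=2048)
chirp = get_chirp(sampling_rate=10e6, f_start=1e6, f_stop=2e6, samples=2048)
noise = get_random(samples=2048)

# All generators return complex baseband vectors of the requested length.
assert tone.shape == chirp.shape == noise.shape == (2048,)
spectrum = psd(tone)  # frequency-shifted power spectral density in dB
```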
#### File: GFDM-PHY-Reference/simulation/example_waveform_test.py
```python
import numpy as np
import matplotlib.pyplot as plt
from modem.util.channel import Channel
from modem import Modem
from modem import get_gfdm_preamble_based
# from modem import get_ofdm_pilot_based
# from modem import get_ofdm_preamble_based
class Session():
def __init__(self, modem):
self.modem = modem
self.modem.param.qam_order = 16
self.modem.save_intern_data = True
self.channel = Channel()
self.channel.impulse_response = self.channel.test_impulse_response
self.nof_transmissions = 100
self.stop_flag = False
self._prepare_plot()
def _handle_plot_close(self, evt):
self.stop_flag = True
def _prepare_plot(self):
self.fig_constell = plt.figure(figsize=(2 * 6.4, 4.8))
self.fig_constell.canvas.mpl_connect(
'close_event', self._handle_plot_close)
self.ax_constell = self.fig_constell.add_subplot(1, 2, 1)
self.ax_sig = self.fig_constell.add_subplot(1, 2, 2)
def _update_plot(self, data):
# plot constellation
iq_data = data["phase_corrector"]
if iq_data.size > 0:
self.ax_constell.scatter(iq_data.real, iq_data.imag, marker=".")
self.ax_constell.grid(True)
else:
self.ax_constell.cla()
# plot received samples
rx_samples = data["receiver"]
if rx_samples.size > 0:
self.ax_sig.cla()
self.ax_sig.plot(rx_samples.real)
self.ax_sig.plot(rx_samples.imag)
plt.draw()
plt.pause(0.01)
def run(self):
self.stop_flag = False
for _ in range(self.nof_transmissions):
data = self._get_transmission()
self._update_plot(data)
if self.stop_flag:
return
def _get_transmission(self):
tx_samples = self.modem.transmitter()
rx_samples = self.channel.multipath(
np.pad(tx_samples, (100, 100), 'constant'), snr_db=70) * 0.1
# rx_samples = tx_samples
self.modem.receiver(rx_samples)
data = self.modem.intern_data
return data
def main():
# modem = Modem(get_ofdm_preamble_based())
modem = Modem(get_gfdm_preamble_based())
session = Session(modem)
session.run()
if __name__ == "__main__":
main()
```
#### File: modem/util/channel.py
```python
import commpy
import numpy as np
# TODO: make this code importable:
class _frozen_bounded_class(object):
# TODO: This code is copied from another project, so it should be imported
__is_frozen = False
def __setattr__(self, key, value):
if hasattr(self.__class__, "bounds"):
bounds = getattr(self.__class__,"bounds")
if key in bounds:
if value > bounds[key][1] or value < bounds[key][0]:
raise ValueError (str(value)+" is out of bounds: ("+
str(bounds[key][0])+", "+str(bounds[key][1])+")" )
if self.__is_frozen and not hasattr(self, key):
raise TypeError( "%r is a frozen class" % self )
object.__setattr__(self, key, value)
def _freeze(self):
self.__is_frozen = True
class Channel(_frozen_bounded_class):
def __init__(self):
self.impulse_response = []
self.last_impulse_response = None
test_impulse_response = np.array([1+1j,0,0,0,0,1+1j,0,0,0,0,1+1j,0,0,0,0,1+1j,0,0,0,0])
test_impulse_response *= np.exp(np.linspace(0,-3,len(test_impulse_response)))
test_impulse_response /= np.linalg.norm(test_impulse_response)
self.test_impulse_response = test_impulse_response
self._freeze()
def awgn(self, data, snr_db):
"""
Additive White Gaussian Noise (AWGN) channel.
:param data: 1D ndarray of complex floats
Input signal to the channel.
:param snr_db: Output SNR required in dB.
:type snr_db: float
:return output_signal: 1D ndarray of complex floats. Output signal from the channel with the specified SNR.
"""
return commpy.channels.awgn(data, snr_db)
def multipath(self, data, taps = 8, distrib = "exp", snr_db = None):
"""
Multipath channel with the specified impulse_response. There are two ways to specify the impulse_response.
First option: define `taps` and `distrib` -> the impulse_response will be generated automatically.
Second option: define `Channel.impulse_response` before using Channel.multipath. If `Channel.impulse_response` is defined,
it is used for the channel.
The last used impulse_response is stored in `Channel.last_impulse_response`.
:param data: 1D ndarray of complex floats
Input signal to the channel.
:param taps: Number of channel taps, default: 8
:param distrib: Impulse response distribution. "exp" - exponential, "uniform" - uniform,
default: "exp"
:param snr_db: If defined, the signal will additionally be passed through the AWGN channel with the SNR specified in dB.
:type snr_db: float
:return output_signal: 1D ndarray of complex floats. Output signal from the channel with the specified SNR.
"""
if len(self.impulse_response) > 0:
self.last_impulse_response = self.impulse_response
else:
self.last_impulse_response = 1/2*(np.random.randn((taps)) + 1j*np.random.randn((taps)))
if distrib == "exp" : self.last_impulse_response *= np.exp(np.linspace(0,-3,taps))
self.last_impulse_response = self.last_impulse_response/np.linalg.norm(self.last_impulse_response)
if snr_db:
return self.awgn(np.convolve(self.last_impulse_response,data, mode= 'full'), snr_db)
else:
return np.convolve(self.last_impulse_response, data, mode= 'full')
```
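A rough sketch of how this channel might be driven, assuming the `modem.util.channel` import path from the file header and that `commpy` is installed:
```python
# Hedged usage sketch for the Channel class above; import path assumed.
import numpy as np
from modem.util.channel import Channel

channel = Channel()
channel.impulse_response = channel.test_impulse_response  # use the built-in test response
tx = np.exp(1j * 2 * np.pi * 0.05 * np.arange(256))       # arbitrary complex test signal
rx = channel.multipath(tx, snr_db=30)

# 'full' convolution lengthens the signal by the channel length minus one.
assert len(rx) == len(tx) + len(channel.last_impulse_response) - 1
```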
#### File: modem/util/resource_mapper.py
```python
import numpy as np
class ResourceMapper():
def __init__(self, K, M, Kset = [0], Mset = [0], pilot_pos = []):
self.K = K
self.M = M
self.Kset = Kset
self.Mset = Mset
self.pilot_pos = pilot_pos
@property
def _Kset(self):
return np.delete( self.Kset,
np.where((np.array(self.Kset)>=self.K)|(np.array(self.Kset)<0))
)
@property
def _Mset(self):
return np.delete( self.Mset,
np.where((np.array(self.Mset)>=self.M)|(np.array(self.Mset)<0))
)
@property
def resource_map(self):
r_map = np.full((self.K,self.M),'-')
idx = np.array(np.meshgrid(self._Kset, self._Mset)).T.reshape(-1,2)
r_map[(idx[:,0],idx[:,1])] = 'D'
r_map[self.pilot_pos] = 'P'
return r_map
@property
def nof_pilots_per_block(self):
return (self.resource_map == 'P').sum()
@property
def nof_symbols_per_block(self):
return (self.resource_map == 'D').sum()
def mapper(self, data, pilots):
mapped_res = np.zeros((self.K,self.M),dtype=type(data[0]))
mapped_res[np.where(self.resource_map=='D')] = data
mapped_res[np.where(self.resource_map=='P')] = pilots
return mapped_res
def demapper(self, mapped_data):
data = mapped_data[np.where(self.resource_map=='D')]
pilots = mapped_data[np.where(self.resource_map=='P')]
return (data, pilots)
```
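A small round-trip sketch for the mapper, with illustrative K/M/set values (the import path is assumed from the file header):
```python
# Hedged round-trip sketch for ResourceMapper above; parameters are illustrative.
import numpy as np
from modem.util.resource_mapper import ResourceMapper

rm = ResourceMapper(K=4, M=3, Kset=[0, 1, 2], Mset=[0, 1], pilot_pos=(0, 0))
data = np.arange(rm.nof_symbols_per_block, dtype=complex)
pilots = np.ones(rm.nof_pilots_per_block, dtype=complex)

grid = rm.mapper(data, pilots)          # K x M grid with 'D'/'P' positions filled
rx_data, rx_pilots = rm.demapper(grid)  # inverse operation on the same resource map
assert np.array_equal(rx_data, data) and np.array_equal(rx_pilots, pilots)
```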
#### File: modem/util/vector.py
```python
import scipy.signal
import numpy as np
def find_peaks(x, max_num = -1, height = None, distance = None):
''' Find peak positions and their heights in a 1-d array of real data. (Based on `scipy.signal.find_peaks`)
:param x: 1-d array of real data
:param max_num: `optional` defines the maximum number of peaks to return. Ignored if not positive.
:param height: `optional` Required height of peaks. If `height` is not defined, it is calculated as `height = np.mean(x) + 2*np.std(x)`
:param distance: `optional` Required minimal horizontal distance (>= 1) in samples between
neighbouring peaks. See: `scipy.signal.find_peaks`
:returns: tuple of arrays with peak positions and their heights. Arrays are sorted by peak heights.'''
if not height:
height = np.mean(x) + 2*np.std(x)
locs, props = scipy.signal.find_peaks( x,
height = height,
distance = distance,
)
locs = np.array(locs)
heights = np.array(props['peak_heights'])
sorted_idx = np.flip(heights.argsort(),axis = 0)
locs = locs[sorted_idx]
heights = heights[sorted_idx]
if max_num > 0:
if len(locs) > max_num:
return (locs[0:max_num], heights[0:max_num])
else :
return (locs, heights)
else:
return (locs, heights)
def shift(arr, num, mode = 'same', fill_value = 0):
''' Shifts a 1D vector right if num > 0, and left if num < 0 '''
if mode == "same":
# from: https://stackoverflow.com/questions/30399534/shift-elements-in-a-numpy-array
result = np.empty_like(arr)
if num > 0:
result[:num] = fill_value
result[num:] = arr[:-num]
elif num < 0:
result[num:] = fill_value
result[:num] = arr[-num:]
else:
result = arr
return result
else:
raise ValueError(f"mode \"{mode}\" is not implemented")
def norm_complex(data):
return data / max(max(data.real), max(data.imag))
```
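For reference, the two helpers behave roughly as follows (import path assumed from the file header):
```python
# Hedged sketch of find_peaks and shift above; import path assumed.
import numpy as np
from modem.util.vector import find_peaks, shift

x = np.zeros(100)
x[[20, 60]] = [1.0, 2.0]
locs, heights = find_peaks(x, max_num=2)
assert list(locs) == [60, 20]            # sorted by peak height, highest first

assert list(shift(np.array([1, 2, 3, 4]), 2)) == [0, 0, 1, 2]
assert list(shift(np.array([1, 2, 3, 4]), -2)) == [3, 4, 0, 0]
```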
#### File: modem/util/windowing.py
```python
import numpy as np
class Windowing:
def __init__(self, Ncp, Ncs, alpha):
self.alpha = alpha
self.Ncp = Ncp
self.Ncs = Ncs
raise_window_len = int(self.Ncp * self.alpha)
fall_window_len = int(self.Ncs * self.alpha)
self.raise_window = np.blackman(
raise_window_len * 2)[:raise_window_len]
self.fall_window = np.blackman(fall_window_len * 2)[-fall_window_len:]
def apply_window(self, samples):
window = np.concatenate(
[self.raise_window, np.ones(len(samples) - len(self.raise_window) - len(self.fall_window)), self.fall_window])
return samples * window
```
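A quick sketch of the tapering behaviour, with illustrative parameter values (import path assumed from the file header):
```python
# Hedged sketch of the Windowing class above; values are illustrative.
import numpy as np
from modem.util.windowing import Windowing

win = Windowing(Ncp=32, Ncs=16, alpha=0.5)
samples = np.ones(256, dtype=complex)
windowed = win.apply_window(samples)

# Only the first int(Ncp*alpha) and last int(Ncs*alpha) samples are tapered.
flat = windowed[len(win.raise_window):-len(win.fall_window)]
assert np.allclose(flat, 1.0)
```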
#### File: simulation/unittest/test_modem.py
```python
import unittest
import numpy as np
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from modem import Modem, ModemParameter, get_gfdm_preamble_based, get_ofdm_pilot_based, get_ofdm_preamble_based
import modem.util as util
def norm_complex(data):
max_val = max( max(abs(data.real)),max(abs(data.imag)) )
if max_val > 0:
return data / max_val
else:
return data
class TestModem(unittest.TestCase):
def test_gfdm_b_1(self):
p = get_gfdm_preamble_based()
m = Modem(p)
self._test_modem(m)
def test_ofdm_b_1(self):
p = get_ofdm_preamble_based()
m = Modem(p)
self._test_modem(m)
def test_ofdm_b_2(self):
p = get_ofdm_preamble_based()
p.B = 2
m = Modem(p)
self._test_modem(m)
def test_ofdm_b_1_pilot(self):
p = get_ofdm_pilot_based()
p.B = 1
m = Modem(p)
self._test_modem(m)
def _test_modem(self,modem):
modem.save_intern_data = True
modem.receiver(modem.transmitter())
p = modem.param
data = modem.intern_data
# TX
self.assertEqual(np.shape(data["bytes_in"]),(modem.bytes_per_frame,) )
self.assertEqual(np.shape(data["encoder"]),(modem.bits_per_frame,) )
self.assertEqual(np.shape(data["qam_mapper"]),(modem.symbols_per_frame,) )
self.assertEqual(np.shape(data["resource_mapper"]),(p.B,p.K,p.M) )
self.assertEqual(np.shape(data["modulator"]),(p.B,p.K*p.M) )
self.assertEqual(np.shape(data["add_cp_cs"]),(p.B*(p.Ncp+p.Ncs+p.Npayload),) )
self.assertEqual(np.shape(data["frame_multiplexer"]),(p.B*(p.Ncp+p.Ncs+p.Npayload)+(p.Ncp+p.Ncs+p.Npreamble),) )
# RX
self.assertEqual(np.shape(data["sync"][0]),(p.B,p.Npayload))
self.assertEqual(np.shape(data["sync"][1]),(p.Npreamble,))
self.assertEqual(np.shape(data["channel_est_eq"]),(p.B,p.Npayload) )
self.assertEqual(np.shape(data["demodulator"]),(p.B,p.K,p.M) )
self.assertEqual(np.shape(data["resource_demapper"]), (p.B, 2) )
self.assertEqual(np.shape(data["phase_corrector"]),(modem.symbols_per_frame,) )
self.assertEqual(np.shape(data["qam_demapper"]),(modem.bits_per_frame,) )
self.assertEqual(np.shape(data["decoder"]),(modem.bytes_per_frame,) )
np.testing.assert_array_equal(data["sync"][1], p.fullpreamble)
for b in range(p.B):
np.testing.assert_array_equal(
data["sync"][0][b], data["frame_multiplexer"][p.Ncp +p.Npreamble_cp_cs +np.arange(p.Npayload)+b*p.Npayload_cp_cs]
)
np.testing.assert_array_almost_equal(
norm_complex(data["channel_est_eq"][b]),
norm_complex(np.fft.fft(data["frame_multiplexer"][p.Ncp +p.Npreamble_cp_cs +np.arange(p.Npayload)+b*p.Npayload_cp_cs]))
)
np.testing.assert_array_almost_equal(
data["phase_corrector"], data["qam_mapper"]
)
np.testing.assert_array_equal(data["bytes_in"], data["decoder"])
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jdement/ibmsecurity",
"score": 2
}
|
#### File: network/felb/ha.py
```python
import ibmsecurity.utilities.tools
module_uri = "/isam/felb/configuration/ha"
requires_module = None
requires_version = None
def delete(isamAppliance, check_mode=False, force=False):
"""
Disables High Availability Configuration
"""
return isamAppliance.invoke_delete("Disabling High Availability", "{0}".format(module_uri))
def add(isamAppliance, is_primary, interface, remote, port, health_check_interval,
health_check_timeout, check_mode=False, force=False):
"""
enables HA
"""
return isamAppliance.invoke_post("Enabling High Availability Configuration", "{0}".format(module_uri),
{
"is_primary": is_primary,
"interface": interface,
"remote": remote,
"port": port,
"health_check_interval": health_check_interval,
"health_check_timeout": health_check_timeout
}, requires_version=requires_version, requires_modules=requires_module)
def get(isamAppliance, check_mode=False, force=False):
"""
Receives configuration
"""
return isamAppliance.invoke_get("Receiving Configuration", "{0}".format(module_uri))
def update(isamAppliance, is_primary, interface, remote, port, health_check_interval,
health_check_timeout, check_mode=False, force=False):
update_required = _check(isamAppliance, is_primary=is_primary, interface=interface, remote=remote,
port=port, health_check_interval=health_check_interval,
health_check_timeout=health_check_timeout)
if force is True or update_required is True:
return isamAppliance.invoke_put("Updating High Availability", module_uri,
{
"is_primary": is_primary,
"interface": interface,
"remote": remote,
"port": port,
"health_check_interval": health_check_interval,
"health_check_timeout": health_check_timeout
}, requires_modules=requires_module, requires_version=requires_version)
elif check_mode is True:
return isamAppliance.create_return_object(changed=False)
def _check(isamAppliance, is_primary, interface, remote, port, health_check_interval,
health_check_timeout, check_mode=False, force=False):
"""
idempotency test for each parameter
"""
ret_obj = get(isamAppliance)
if ret_obj['data']['enabled'] != True:
return True
elif ret_obj['data']['is_primary'] != is_primary:
return True
elif ret_obj['data']['interface'] != interface:
return True
elif ret_obj['data']['remote'] != remote:
return True
elif ret_obj['data']['port'] != port:
return True
elif ret_obj['data']['health_check_interval'] != health_check_interval:
return True
elif ret_obj['data']['health_check_timeout'] != health_check_timeout:
return True
else:
return False
```
#### File: jdement/ibmsecurity/testisds.py
```python
import logging.config
import pprint
from ibmsecurity.appliance.isdsappliance import ISDSAppliance
from ibmsecurity.user.applianceuser import ApplianceUser
import pkgutil
import importlib
def import_submodules(package, recursive=True):
"""
Import all submodules of a module, recursively, including subpackages
:param package: package (name or actual module)
:type package: str | module
:rtype: dict[str, types.ModuleType]
"""
if isinstance(package, str):
package = importlib.import_module(package)
results = {}
for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if recursive and is_pkg:
results.update(import_submodules(full_name))
return results
import ibmsecurity
# Import all packages within ibmsecurity - recursively
# Note: Advisable to replace this code with specific imports for production code
import_submodules(ibmsecurity)
# Setup logging to send to stdout, format and set log level
# logging.getLogger(__name__).addHandler(logging.NullHandler())
logging.basicConfig()
# Valid values are 'DEBUG', 'INFO', 'ERROR', 'CRITICAL'
logLevel = 'DEBUG'
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '[%(asctime)s] [PID:%(process)d TID:%(thread)d] [%(levelname)s] [%(name)s] [%(funcName)s():%(lineno)s] %(message)s'
},
},
'handlers': {
'default': {
'level': logLevel,
'formatter': 'standard',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'': {
'level': logLevel,
'handlers': ['default'],
'propagate': True
},
'requests.packages.urllib3.connectionpool': {
'level': 'ERROR',
'handlers': ['default'],
'propagate': True
}
}
}
logging.config.dictConfig(DEFAULT_LOGGING)
# Function to pretty print JSON data
def p(jdata):
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(jdata)
if __name__ == "__main__":
"""
This test program should not execute when imported, which would otherwise
cause problems when generating the documentation.
"""
# Create a user credential for ISDS appliance
u = ApplianceUser(username="admin", password="<PASSWORD>")
# Create an ISDS appliance with above credential
isds_server = ISDSAppliance(hostname="192.168.203.116", user=u, lmi_port=443)
################ ACTIVE TEST ################
p(ibmsecurity.isds.host_records.get(isdsAppliance=isds_server))
################ ACTIVE TEST ################
#
# Successfully tested
#
# Any changes needed to the isam code that this is based on is documented,
# or new functions added that will flow to the isam code;
# see lines starting with "Note:".
#
# Lines starting with "TBD:" are work not done yet.
#
####
#
# APPLIANCE.PY
#
# p(ibmsecurity.isds.appliance.shutdown(isdsAppliance=isds_server))
# p(ibmsecurity.isds.appliance.reboot(isdsAppliance=isds_server))
# p(ibmsecurity.isds.firmware.swap(isdsAppliance=isds_server))
# Note: changed method from PUT to GET
# Note: changed URI
# Note: disabled check for pending changes (not supported)
# p(ibmsecurity.isds.appliance.commit(isdsAppliance=isds_server))
# Note: changed method from DELETE to GET
# Note: changed URI
# p(ibmsecurity.isds.appliance.rollback(isdsAppliance=isds_server))
# Note: disabled check for pending changes (not supported)
# p(ibmsecurity.isds.appliance.commit_and_restart(isdsAppliance=isds_server))
#
# FIRMWARE.PY
#
# p(ibmsecurity.isds.firmware.get(isdsAppliance=isds_server))
# p(ibmsecurity.isds.firmware.set(isdsAppliance=isds_server, id="1", comment="NEW COMMENT"))
# p(ibmsecurity.isds.firmware.backup(isdsAppliance=isds_server))
# p(ibmsecurity.isds.firmware.swap(isdsAppliance=isds_server))
#
# SNAPSHOTS.PY
#
# p(ibmsecurity.isds.snapshots.get(isdsAppliance=isds_server))
# p(ibmsecurity.isds.snapshots.create(isdsAppliance=isds_server, comment="COMMENT"))
# p(ibmsecurity.isds.snapshots.delete(isdsAppliance=isds_server, id="82a69dda50c51854db1d83d80100267e"))
# p(ibmsecurity.isds.snapshots.download(isdsAppliance=isds_server, filename="jeff.zip", id="f908e2f7ec4a3e1cb60ca7fc8bfa24fd"))
# p(ibmsecurity.isds.snapshots.download_latest(isdsAppliance=isds_server))
# p(ibmsecurity.isds.snapshots.modify(isdsAppliance=isds_server, id="f908e2f7ec4a3e1cb60ca7fc8bfa24fd", comment="NEW COMMENT"))
# p(ibmsecurity.isds.snapshots.upload(isdsAppliance=isds_server, filename="jeff.zip"))
# TBD: p(ibmsecurity.isds.snapshots.apply - needs authentication token
#
# SUPPORT.PY
#
# p(ibmsecurity.isds.support.get(isdsAppliance=isds_server))
# p(ibmsecurity.isds.support.create(isdsAppliance=isds_server, comment="SECOND SUPPORT FILE COMMENT"))
# p(ibmsecurity.isds.support.delete(isdsAppliance=isds_server, id="<KEY>"))
# p(ibmsecurity.isds.support.download(isdsAppliance=isds_server, filename="jeff.zip", id="<KEY>"))
# p(ibmsecurity.isds.support.download_latest(isdsAppliance=isds_server))
# Note: REST API documentation says modify should require 4 arguments
# p(ibmsecurity.isds.support.modify(isdsAppliance=isds_server, id="<KEY>", filename="", comment="NEW COMMENT"))
#
# HOST_RECORDS.PY
#
# p(ibmsecurity.isds.host_records.get(isdsAppliance=isds_server))
# p(ibmsecurity.isds.host_records.set(isdsAppliance=isds_server, hostname="jeff", ip_addr="192.168.203.159"))
# p(ibmsecurity.isds.host_records.delete(isdsAppliance=isds_server, hostname="jeff", ip_addr="192.168.203.159"))
#
# DATE_TIME.PY
#
# p(ibmsecurity.isds.date_time.get(isdsAppliance=isds_server))
# p(ibmsecurity.isds.date_time.set(isdsAppliance=isds_server, ntpServers="host1,host2", timeZone="America/Phoenix"))
# p(ibmsecurity.isds.date_time.disable(isdsAppliance=isds_server))
# p(ibmsecurity.isds.date_time.compare(isdsAppliance1=isds_server, isdsAppliance2=isds_server2))
#
# SNMP_MONITORING.PY
#
# Note: changed URI
# p(ibmsecurity.isds.snmp_monitoring.get(isdsAppliance=isds_server))
# Note: changed URI
# p(ibmsecurity.isds.snmp_monitoring.disable(isdsAppliance=isds_server))
# Note: changed URI
# p(ibmsecurity.isds.snmp_monitoring.set_v1v2(isdsAppliance=isds_server, community="JLD"))
# Note: New function
# Note: Code changes to detect if snmpv1v2c or snmpv3 set or not (duplicated in ISAM version)
# p(ibmsecurity.isds.snmp_monitoring.set_v3(isdsAppliance=isds_server, securityLevel="authPriv", securityUser="JDEMENT", authPassword="<PASSWORD>", authProtocol="SHA", privacyPassword="<PASSWORD>", privacyProtocol="CBC-DES"))
#
# STATISTICS.PY`
#
# Note: changed URI
# Note: inconsistency between working code and documentation - had to remove ".json" from URI
# p(ibmsecurity.isds.statistics.get_cpu(isdsAppliance=isds_server, statistics_duration="1d"))
# Note: changed URI
# Note: inconsistency between working code and documentation - had to remove ".json" from URI
# p(ibmsecurity.isds.statistics.get_memory(isdsAppliance=isds_server, statistics_duration="1d"))
# Note: changed URI
# Note: inconsistency between working code and documentation - had to remove ".json" from URI
# p(ibmsecurity.isds.statistics.get_storage(isdsAppliance=isds_server, statistics_duration="1d"))
# Note: changed URI
# Note: inconsistency between working code and documentation - had to remove ".json" from URI
# p(ibmsecurity.isds.statistics.get_network(isdsAppliance=isds_server, application_interface="M.1", statistics_duration="1d"))
#
# FIXPACK.PY
#
# p(ibmsecurity.isds.fixpack.get(isdsAppliance=isds_server))
# Note: New function
# p(ibmsecurity.isds.fixpack.getfips(isdsAppliance=isds_server))
# Note: _check logic (sort of working) - needs more work
# Note: worked without "authentication token"
# p(ibmsecurity.isds.fixpack.install(isdsAppliance=isds_server, file="8.0.1.0-ISS-ISDS-IF0003.fixpack"))
# p(ibmsecurity.isds.fixpack.compare(isdsAppliance1=isds_server, isdsAppliance2=isds_server2"))
#
# INTERFACES.PY
#
# Note: changed URI
# p(ibmsecurity.isds.fixpack.get_all(isdsAppliance=isds_server))
# Note: New function
# p(ibmsecurity.isds.interfaces.get_all_app(isdsAppliance=isds_server))
# Note: changed URI
# p(ibmsecurity.isds.interfaces.get(isdsAppliance=isds_server, uuid=P.1))
# p(ibmsecurity.isds.interfaces.compare(isdsAppliance1=isds_server, isdsAppliance2=isds_server2))
#
# CONFIG.PY (NEW)
#
# p(ibmsecurity.isds.config.get(isdsAppliance=isds_server))
# p(ibmsecurity.isds.config.set(isdsAppliance=isds_server, serverType="RDBM"))
# p(ibmsecurity.isds.config.set(isdsAppliance=isds_server, serverType="PROXY"))
# p(ibmsecurity.isds.config.set(isdsAppliance=isds_server, serverType="VD"))
# p(ibmsecurity.isds.date_time.compare(isdsAppliance1=isds_server, isdsAppliance2=isds_server2))
#
# SERVER.PY (NEW)
#
# p(ibmsecurity.isds.server.start(...)
# p(ibmsecurity.isds.server.startconfig(...)
# p(ibmsecurity.isds.server.stop(...)
# p(ibmsecurity.isds.server.restart(...)
# p(ibmsecurity.isds.server.stop(isdsAppliance=isds_server, serverID="directoryserver"))
# p(ibmsecurity.isds.server.stop(isdsAppliance=isds_server, serverID="directoryadminserver"))
# p(ibmsecurity.isds.server.stop(isdsAppliance=isds_server, serverID="directorywat"))
# p(ibmsecurity.isds.server.stop(isdsAppliance=isds_server, serverID="directoryintegrator"))
# p(ibmsecurity.isds.server.stop(isdsAppliance=isds_server, serverID="directoryintegratorscimtarget"))
# p(ibmsecurity.isds.server.stop(isdsAppliance=isds_server, serverID="scimservice"))
#
# LOGS.PY (NEW)
#
# p(ibmsecurity.isds.logs.get_event_log((isdsAppliance=isds_server))
#
# TOKEN.PY (NEW)
#
# p(ibmsecurity.isds.token.get(isdsAppliance=isds_server))
```
|
{
"source": "jdemeule/vscode-ninja",
"score": 2
}
|
#### File: jdemeule/vscode-ninja/vscode-ninja.py
```python
import argparse
import glob
import json
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
import io
def extract_targets(build_path):
targets = []
p = subprocess.run(["ninja", "-C", build_path, "-t", "targets", "rule", "phony"],
stdout=subprocess.PIPE)
if p.returncode == 0:
output = io.StringIO(p.stdout.decode("utf-8"))
for line in output:
if not re.search("(cmake|edit_cache|rebuild_cache|install)", line, re.IGNORECASE):
targets.append(line.rstrip('\n'))
else:
print("return code is %s" % p.returncode)
print(p.stderr)
return targets
def gen_build_task(target, build_path):
return {
"label": "build " + target,
"group": "build",
"type": "shell",
"command": "ninja",
"args": ["-C", build_path, target],
"problemMatcher": {
"owner": "cpp",
"fileLocation": [
"absolute"
],
"pattern": {
"regexp": "^(.*):(\\d+):(\\d+):\\s+(warning|error):\\s+(.*)$",
"file": 1,
"line": 2,
"column": 3,
"severity": 4,
"message": 5
}
}
}
def gen_run_task(build_path, target):
args = []
if target.endswith("test") or target.endswith("tests") or target.endswith("Test") or target.endswith("Tests"):
args.append("--gtest_color=yes")
return {
"name": target,
"type": "lldb",
"program": "${{workspaceRoot}}/{}/bin/{}".format(build_path, target),
"args": args,
"env": {
"DYLD_LIBRARY_PATH": "${{workspaceRoot}}/{}/lib".format(build_path)
},
"cwd": "${{workspaceRoot}}",
"request": "launch",
}
def guess_executables(targets):
executables = []
for t in targets:
if not (t == "test" or t == "all" or t == "clean" or t.endswith(".dylib") or t.endswith(".dll") or t.endswith(".dll")):
executables.append(t)
return executables
def main():
# Call ninja -C build-dir -t targets rule phony
# Filter out cmake related stuff
# Generate (or enrich?) in .vscode folder
# - tasks.json
# - launch.json
parser = argparse.ArgumentParser(
description='generate VSCode tasks.json and launch.json over Ninja')
parser.add_argument('-p', dest='build_path',
help='Locate build folder')
parser.add_argument('-o', dest='output_path',
help='Locate output vscode folder')
args = parser.parse_args()
build_path = "."
if args.build_path is not None:
build_path = args.build_path
vscode_path = "."
if args.output_path is not None:
vscode_path = args.output_path
targets = extract_targets(build_path)
tasks = {
"version": "2.0.0",
"tasks": [gen_build_task(target, build_path) for target in targets]
}
# print(json.dumps(tasks, indent=4))
with open(os.path.join(vscode_path, "tasks.json"), "w") as task_json:
json.dump(tasks, task_json, indent=4)
executable_targets = guess_executables(targets)
launch = {
"version": "0.2.0",
"configurations": [gen_run_task(build_path, t) for t in executable_targets]
}
# print(json.dumps(launch, indent=4))
with open(os.path.join(vscode_path, "launch.json"), "w") as launch_json:
json.dump(launch, launch_json, indent=4)
pass
if __name__ == "__main__":
main()
```
|
{
"source": "jdemeyer/callbench",
"score": 2
}
|
#### File: src/callbench/__init__.py
```python
from callbench.callables import *
obj = Callable()
meth = obj.meth
umeth = Callable.meth
try:
fastmeth = obj.fastmeth
ufastmeth = Callable.fastmeth
except AttributeError:
pass
def have_PEP_580(obj):
return bool(type(obj).__flags__ & 2048)
def have_PEP_590(obj):
return bool(type(obj).__flags__ & 2)
```
|
{
"source": "jdenda/sensor.emby_upcoming_media",
"score": 2
}
|
#### File: custom_components/emby_upcoming_media/sensor.py
```python
import logging
import json
import time
import re
import requests
import dateutil.parser
from datetime import date, datetime
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.components import sensor
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT, CONF_SSL
from homeassistant.helpers.entity import Entity
from .client import EmbyClient
__version__ = "0.0.1"
DOMAIN = "emby_upcoming_media"
DOMAIN_DATA = f"{DOMAIN}_data"
ATTRIBUTION = "Data is provided by Emby."
# Configuration
CONF_SENSOR = "sensor"
CONF_ENABLED = "enabled"
CONF_NAME = "name"
CONF_INCLUDE = "include"
CONF_MAX = "max"
CONF_USER_ID = "user_id"
CONF_USE_BACKDROP = "use_backdrop"
CATEGORY_NAME = "CategoryName"
CATEGORY_ID = "CategoryId"
SCAN_INTERVAL_SECONDS = 3600 # Scan once per hour
TV_DEFAULT = {"title_default": "$title", "line1_default": "$episode", "line2_default": "$release", "line3_default": "$rating - $runtime", "line4_default": "$number - $studio", "icon": "mdi:arrow-down-bold"}
MOVIE_DEFAULT = {"title_default": "$title", "line1_default": "$release", "line2_default": "$genres", "line3_default": "$rating - $runtime", "line4_default": "$studio", "icon": "mdi:arrow-down-bold"}
OTHER_DEFAULT = {"title_default": "$title", "line1_default": "$number - $studio", "line2_default": "$aired", "line3_default": "$episode", "line4_default": "$rating - $runtime", "icon": "mdi:arrow-down-bold"}
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_API_KEY): cv.string,
vol.Optional(CONF_USER_ID): cv.string,
vol.Optional(CONF_HOST, default="localhost"): cv.string,
vol.Optional(CONF_PORT, default=8096): cv.port,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_INCLUDE, default=[]): vol.All(cv.ensure_list),
vol.Optional(CONF_MAX, default=5): cv.Number,
vol.Optional(CONF_USE_BACKDROP, default=False): cv.boolean
}
)
def setup_platform(hass, config, add_devices, discovery_info=None):
# Create DATA dict
hass.data[DOMAIN_DATA] = {}
# Get "global" configuration.
api_key = config.get(CONF_API_KEY)
host = config.get(CONF_HOST)
ssl = config.get(CONF_SSL)
port = config.get(CONF_PORT)
max_items = config.get(CONF_MAX)
user_id = config.get(CONF_USER_ID)
include = config.get(CONF_INCLUDE)
# Configure the client.
client = EmbyClient(host, api_key, ssl, port, max_items, user_id)
hass.data[DOMAIN_DATA]["client"] = client
categories = client.get_view_categories()
if include != []:
categories = filter(lambda el: el["Name"] in include, categories)
mapped = map(
lambda cat: EmbyUpcomingMediaSensor(
hass, {**config, CATEGORY_NAME: cat["Name"], CATEGORY_ID: cat["Id"]}
),
categories,
)
add_devices(mapped, True)
SCAN_INTERVAL = timedelta(seconds=SCAN_INTERVAL_SECONDS)
class EmbyUpcomingMediaSensor(Entity):
def __init__(self, hass, conf):
self._client = hass.data[DOMAIN_DATA]["client"]
self._state = None
self.data = []
self.use_backdrop = conf.get(CONF_USE_BACKDROP)
self.category_name = conf.get(CATEGORY_NAME)
self.category_id = conf.get(CATEGORY_ID)
self.friendly_name = "Emby Latest Media " + self.category_name
self.entity_id = sensor.ENTITY_ID_FORMAT.format(
"emby_latest_"
+ re.sub(
"\_$", "", re.sub("\W+", "_", self.category_name)
).lower() # remove special characters
)
@property
def name(self):
return "Latest {0} on Emby".format(self.category_name)
@property
def state(self):
return self._state
def handle_tv_show(self):
"""Return the state attributes."""
attributes = {}
default = TV_DEFAULT
card_json = []
card_json.append(default)
for show in self.data:
card_item = {}
card_item["title"] = show["SeriesName"]
card_item['episode'] = show.get('Name', '')
card_item["airdate"] = show.get("PremiereDate", datetime.now().isoformat())
if "RunTimeTicks" in show:
timeobject = timedelta(microseconds=show["RunTimeTicks"] / 10)
card_item["runtime"] = timeobject.total_seconds() / 60
else:
card_item["runtime"] = ""
if "ParentIndexNumber" and "IndexNumber" in show:
card_item["number"] = "S{:02d}E{:02d}".format(
show["ParentIndexNumber"], show["IndexNumber"]
)
if "ParentBackdropItemId" in show:
card_item["poster"] = self.hass.data[DOMAIN_DATA]["client"].get_image_url(
show["ParentBackdropItemId"], "Backdrop" if self.use_backdrop else "Primary"
)
if "CommunityRating" in show:
card_item["rating"] = "%s %s" % (
"\u2605", # Star character
show.get("CommunityRating", ""),
)
card_json.append(card_item)
attributes['data'] = card_json
attributes["attribution"] = ATTRIBUTION
return attributes
def handle_movie(self):
"""Return the state attributes."""
attributes = {}
default = MOVIE_DEFAULT
card_json = []
card_json.append(default)
for show in self.data:
card_item = {}
card_item["title"] = show["Name"]
card_item["officialrating"] = show.get("OfficialRating", "")
if "PremiereDate" in show:
card_item["release"] = dateutil.parser.isoparse(show.get("PremiereDate", "")).year
card_item["airdate"] = datetime.now().isoformat()
if "Studios" in show and len(show["Studios"]) > 0:
card_item["studio"] = show["Studios"][0]["Name"]
if "Genres" in show:
card_item["genres"] = ", ".join(show["Genres"])
if "RunTimeTicks" in show:
timeobject = timedelta(microseconds=show["RunTimeTicks"] / 10)
card_item["runtime"] = timeobject.total_seconds() / 60
else:
card_item["runtime"] = ""
card_item["poster"] = self.hass.data[DOMAIN_DATA]["client"].get_image_url(
show["Id"], "Backdrop" if self.use_backdrop else "Primary"
)
card_item["rating"] = "%s %s" % (
"\u2605", # Star character
show.get("CommunityRating", ""),
)
card_json.append(card_item)
attributes['data'] = card_json
attributes["attribution"] = ATTRIBUTION
return attributes
@property
def device_state_attributes(self):
"""Return the state attributes."""
attributes = {}
default = OTHER_DEFAULT
card_json = []
if len(self.data) == 0:
return attributes
elif self.data[0]["Type"] == "Episode":
return self.handle_tv_show()
elif self.data[0]["Type"] == "Movie":
return self.handle_movie()
else:
card_json.append(default)
# for show in self.data[self._category_id]:
for show in self.data:
card_item = {}
card_item["title"] = show["Name"]
card_item["episode"] = show.get("OfficialRating", "")
card_item["officialrating"] = show.get("OfficialRating", "")
card_item["airdate"] = show.get("PremiereDate", datetime.now().isoformat())
if "RunTimeTicks" in show:
timeobject = timedelta(microseconds=show["RunTimeTicks"] / 10)
card_item["runtime"] = timeobject.total_seconds() / 60
else:
card_item["runtime"] = ""
if "ParentIndexNumber" in show and "IndexNumber" in show:
card_item["number"] = "S{:02d}E{:02d}".format(
show["ParentIndexNumber"], show["IndexNumber"]
)
else:
card_item["number"] = show.get("ProductionYear", "")
card_item["poster"] = self.hass.data[DOMAIN_DATA]["client"].get_image_url(
show["Id"], "Backdrop" if self.use_backdrop else "Primary"
)
card_item["rating"] = "%s %s" % (
"\u2605", # Star character
show.get("CommunityRating", ""),
)
card_json.append(card_item)
attributes['data'] = card_json
attributes["attribution"] = ATTRIBUTION
return attributes
def update(self):
data = self._client.get_data(self.category_id)
if data is not None:
self._state = "Online"
self.data = data
else:
self._state = "error"
_LOGGER.error("ERROR")
```
|
{
"source": "jdenes/ETM",
"score": 2
}
|
#### File: jdenes/ETM/utils.py
```python
import torch
import numpy as np
from sklearn.svm import SVC
from sklearn.neighbors import NearestNeighbors
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from gensim.corpora.dictionary import Dictionary
from gensim.matutils import corpus2dense
from gensim.models import LdaModel, TfidfModel, word2vec
def get_topic_diversity(beta, topk):
num_topics = beta.shape[0]
list_w = np.zeros((num_topics, topk))
for k in range(num_topics):
idx = beta[k,:].argsort()[-topk:][::-1]
list_w[k,:] = idx
n_unique = len(np.unique(list_w))
TD = n_unique / (topk * num_topics)
print('Topic diversity is: {}'.format(TD))
def get_document_frequency(data, wi, wj=None):
if wj is None:
D_wi = 0
for l in range(len(data)):
doc = data[l].squeeze(0)
if len(doc) == 1:
continue
else:
doc = doc.squeeze()
if wi in doc:
D_wi += 1
return D_wi
D_wj = 0
D_wi_wj = 0
for l in range(len(data)):
doc = data[l].squeeze(0)
if len(doc) == 1:
doc = [doc.squeeze()]
else:
doc = doc.squeeze()
if wj in doc:
D_wj += 1
if wi in doc:
D_wi_wj += 1
return D_wj, D_wi_wj
def get_topic_coherence(beta, data, vocab):
D = len(data) ## number of docs...data is list of documents
print('D: ', D)
TC = []
num_topics = len(beta)
for k in range(num_topics):
print('k: {}/{}'.format(k, num_topics))
top_10 = list(beta[k].argsort()[-11:][::-1])
top_words = [vocab[a] for a in top_10]
TC_k = 0
counter = 0
for i, word in enumerate(top_10):
# get D(w_i)
D_wi = get_document_frequency(data, word)
j = i + 1
tmp = 0
while j < len(top_10) and j > i:
# get D(w_j) and D(w_i, w_j)
D_wj, D_wi_wj = get_document_frequency(data, word, top_10[j])
# get f(w_i, w_j)
if D_wi_wj == 0:
f_wi_wj = -1
else:
f_wi_wj = -1 + ( np.log(D_wi) + np.log(D_wj) - 2.0 * np.log(D) ) / ( np.log(D_wi_wj) - np.log(D) )
# update tmp:
tmp += f_wi_wj
j += 1
counter += 1
# update TC_k
TC_k += tmp
TC.append(TC_k)
print('counter: ', counter)
print('num topics: ', len(TC))
TC = np.mean(TC) / counter
print('Topic coherence is: {}'.format(TC))
def get_classif_perf(theta, tokens, labels, embeds, methods=['theta', 'lda', 's-bert', 'tfidf']):
# print('Checking inputs dim for classif:', len(theta), len(labels))
import pandas as pd
perf = []
if 'theta' in methods:
X = theta
perf.append(train_predict(X, labels))
if 'lda' in methods:
corpus = tokens.tolist()
corpus = [[str(w) for w in d[0]] for d in corpus]
dictionary = Dictionary(corpus)
bow_corpus = [dictionary.doc2bow(x) for x in corpus]
mod = LdaModel(bow_corpus, num_topics=theta.shape[1])
transcorp = mod[bow_corpus]
X = transcorp2matrix(transcorp, bow_corpus, theta.shape[1])
perf.append(train_predict(X, labels))
if 's-bert' in methods:
from sklearn.decomposition import PCA
X = PCA(n_components=theta.shape[1]).fit_transform(embeds)
perf.append(train_predict(X, labels))
if 'tfidf' in methods:
corpus = tokens.tolist()
corpus = [[str(w) for w in d[0]] for d in corpus]
dictionary = Dictionary(corpus)
dictionary.filter_extremes(keep_n=theta.shape[1])
bow_corpus = [dictionary.doc2bow(x) for x in corpus]
mod = TfidfModel(bow_corpus, dictionary=dictionary)
corpus_tfidf = mod[bow_corpus]
X = corpus2dense(corpus_tfidf, num_terms=theta.shape[1]).T
perf.append(train_predict(X, labels))
perf = pd.DataFrame(perf, index=methods)
print('Model performances on classification is:\n{}'.format(perf))
def train_predict(X, labels):
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import tensorflow as tf
X_train, X_test, y_train, y_test = train_test_split(X, labels, test_size=0.2, random_state=123)
train_data = tf.data.Dataset.from_tensor_slices((X_train, y_train)).cache().shuffle(1000).batch(1000).repeat()
val_data = tf.data.Dataset.from_tensor_slices((X_test, y_test)).batch(1000).repeat()
model = Sequential([
Dense(248, name='first', activation='relu', input_shape=(X_test.shape[1],)),
Dense(248, name='hidden', activation='relu'),
Dense(len(np.unique(y_train)), name='output', activation='softmax') ])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
checkpoint = tf.keras.callbacks.ModelCheckpoint('checkpoint.hdf5', monitor='val_loss', save_best_only=True)
earlystop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
model.fit(train_data, steps_per_epoch=1000, epochs=100, validation_steps=100, validation_data=val_data, callbacks=[checkpoint, earlystop])
perf = evaluate_model(model, X_train, X_test, y_train, y_test)
return perf
def evaluate_model(model, X_train, X_test, y_train, y_test):
y_pred = np.argmax(model.predict(X_test), axis=1)
acc = accuracy_score(y_test, y_pred, normalize=True)
pr = precision_score(y_test, y_pred, average='macro')
rec = recall_score(y_test, y_pred, average='macro')
f1 = f1_score(y_test, y_pred, average='macro')
return {'Accuracy': acc, 'Precision': pr, 'Recall': rec, 'F1-score':f1}
def nearest_neighbors(word, embeddings, vocab):
vectors = embeddings.data.cpu().numpy()
index = vocab.index(word)
# print('vectors: ', vectors.shape)
query = vectors[index]
# print('query: ', query.shape)
ranks = vectors.dot(query).squeeze()
denom = query.T.dot(query).squeeze()
denom = denom * np.sum(vectors**2, 1)
denom = np.sqrt(denom)
ranks = ranks / denom
mostSimilar = []
[mostSimilar.append(idx) for idx in ranks.argsort()[::-1]]
nearest_neighbors = mostSimilar[:12]
nearest_neighbors = [vocab[comp] for comp in nearest_neighbors]
return nearest_neighbors
# From a sparse transformed corpus of gensim, i.e. [(0, 12), (1, 15)], return matrix format: [12, 15].
def transcorp2matrix(transcorp, bow_corpus, vector_size):
x = np.zeros((len(bow_corpus), vector_size))
for i, doc in enumerate(transcorp):
for wpair in doc:
x[i][wpair[0]] = wpair[1]
return x
```
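As a rough illustration, `get_topic_diversity` can be exercised on random topic-word weights; the `utils` import path and the module's installed dependencies (torch, sklearn, gensim) are assumed:
```python
# Hedged sketch for get_topic_diversity above; the 'utils' import path is assumed.
import numpy as np
from utils import get_topic_diversity

num_topics, vocab_size = 10, 500
beta = np.random.rand(num_topics, vocab_size)   # fake topic-word weight matrix
get_topic_diversity(beta, topk=25)              # prints a value in (0, 1]
```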
|
{
"source": "jdenisgiguere/rasterframes",
"score": 2
}
|
#### File: python/geomesa_pyspark/types.py
```python
from pyspark.sql.types import UserDefinedType
from pyspark.sql import Row
from pyspark.sql.types import *
from pyrasterframes.context import RFContext
class GeometryUDT(UserDefinedType):
@classmethod
def sqlType(self):
# return StructField("wkb", BinaryType(), False)
return StructType([StructField("wkb", BinaryType(), True)])
@classmethod
def module(cls):
return 'geomesa_pyspark.types'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.jts.' + cls.__name__
def serialize(self, obj):
if (obj is None): return None
return Row(obj.toBytes)
def deserialize(self, datum):
return RFContext._jvm_mirror().generate_geometry(datum[0])
class PointUDT(GeometryUDT):
pass
class LineStringUDT(GeometryUDT):
pass
class PolygonUDT(GeometryUDT):
pass
class MultiPointUDT(GeometryUDT):
pass
class MultiLineStringUDT(GeometryUDT):
pass
class MultiPolygonUDT(GeometryUDT):
pass
class GeometryUDT(GeometryUDT):
pass
class GeometryCollectionUDT(GeometryUDT):
pass
```
#### File: python/pyrasterframes/context.py
```python
from pyspark import SparkContext
__all__ = ['RFContext']
class RFContext(object):
"""
Entrypoint to RasterFrames services
"""
def __init__(self, spark_session):
self._spark_session = spark_session
self._gateway = spark_session.sparkContext._gateway
self._jvm = self._gateway.jvm
jsess = self._spark_session._jsparkSession
self._jrfctx = self._jvm.astraea.spark.rasterframes.py.PyRFContext(jsess)
def list_to_seq(self, py_list):
conv = self.lookup('listToSeq')
return conv(py_list)
def lookup(self, function_name):
return getattr(self._jrfctx, function_name)
@staticmethod
def active():
"""
Get the active Python RFContext and throw an error if it is not enabled for RasterFrames.
"""
sc = SparkContext._active_spark_context
if not hasattr(sc, '_rf_context'):
raise AttributeError(
"RasterFrames have not been enabled for the active session. Call 'SparkSession.withRasterFrames()'.")
return sc._rf_context
@staticmethod
def _jvm_mirror():
"""
Get the active Scala PyRFContext and throw an error if it is not enabled for RasterFrames.
"""
return RFContext.active()._jrfctx
```
#### File: python/pyrasterframes/rasterfunctions.py
```python
from __future__ import absolute_import
from pyspark.sql.types import *
from pyspark.sql.column import Column, _to_java_column
from .context import RFContext
THIS_MODULE = 'pyrasterframes'
def _context_call(name, *args):
f = RFContext.active().lookup(name)
return f(*args)
def _celltype(cellTypeStr):
""" Convert the string cell type to the expected CellType object."""
return _context_call('cell_type', cellTypeStr)
def _create_assembleTile():
""" Create a function mapping to the Scala implementation."""
def _(colIndex, rowIndex, cellData, numCols, numRows, cellType):
jfcn = RFContext.active().lookup('assemble_tile')
return Column(jfcn(_to_java_column(colIndex), _to_java_column(rowIndex), _to_java_column(cellData), numCols, numRows, _celltype(cellType)))
_.__name__ = 'assemble_tile'
_.__doc__ = "Create a Tile from a column of cell data with location indices"
_.__module__ = THIS_MODULE
return _
def _create_arrayToTile():
""" Create a function mapping to the Scala implementation."""
def _(arrayCol, numCols, numRows):
jfcn = RFContext.active().lookup('array_to_tile')
return Column(jfcn(_to_java_column(arrayCol), numCols, numRows))
_.__name__ = 'array_to_tile'
_.__doc__ = "Convert array in `arrayCol` into a Tile of dimensions `numCols` and `numRows'"
_.__module__ = THIS_MODULE
return _
def _create_convertCellType():
""" Create a function mapping to the Scala implementation."""
def _(tileCol, cellType):
jfcn = RFContext.active().lookup('convert_cell_type')
return Column(jfcn(_to_java_column(tileCol), _celltype(cellType)))
_.__name__ = 'convert_cell_type'
_.__doc__ = "Convert the numeric type of the Tiles in `tileCol`"
_.__module__ = THIS_MODULE
return _
def _create_makeConstantTile():
""" Create a function mapping to the Scala implementation."""
def _(value, cols, rows, cellType):
jfcn = RFContext.active().lookup('make_constant_tile')
return Column(jfcn(value, cols, rows, cellType))
_.__name__ = 'make_constant_tile'
_.__doc__ = "Constructor for constant tile column"
_.__module__ = THIS_MODULE
return _
def _create_tileZeros():
""" Create a function mapping to the Scala implementation."""
def _(cols, rows, cellType = 'float64'):
jfcn = RFContext.active().lookup('tile_zeros')
return Column(jfcn(cols, rows, cellType))
_.__name__ = 'tile_zeros'
_.__doc__ = "Create column of constant tiles of zero"
_.__module__ = THIS_MODULE
return _
def _create_tileOnes():
""" Create a function mapping to the Scala implementation."""
def _(cols, rows, cellType = 'float64'):
jfcn = RFContext.active().lookup('tile_ones')
return Column(jfcn(cols, rows, cellType))
_.__name__ = 'tile_ones'
_.__doc__ = "Create column of constant tiles of one"
_.__module__ = THIS_MODULE
return _
def _create_rasterize():
""" Create a function mapping to the Scala rasterize function. """
def _(geometryCol, boundsCol, valueCol, numCols, numRows):
jfcn = RFContext.active().lookup('rasterize')
return Column(jfcn(_to_java_column(geometryCol), _to_java_column(boundsCol), _to_java_column(valueCol), numCols, numRows))
_.__name__ = 'rasterize'
_.__doc__ = 'Create a tile where cells in the grid defined by cols, rows, and bounds are filled with the given value.'
_.__module__ = THIS_MODULE
return _
def _create_reproject_geometry():
""" Create a function mapping to the Scala reproject_geometry function. """
def _(geometryCol, srcCRSName, dstCRSName):
jfcn = RFContext.active().lookup('reproject_geometry')
return Column(jfcn(_to_java_column(geometryCol), srcCRSName, dstCRSName))
_.__name__ = 'reproject_geometry'
_.__doc__ = """Reproject a column of geometry given the CRS names of the source and destination.
Currently supported registries are EPSG, ESRI, WORLD, NAD83, & NAD27.
An example of a valid CRS name is EPSG:3005.
"""
_.__module__ = THIS_MODULE
return _
def _create_explode_tiles():
""" Create a function mapping to Scala explode_tiles function """
def _(*args):
jfcn = RFContext.active().lookup('explode_tiles')
jcols = [_to_java_column(arg) for arg in args]
return Column(jfcn(RFContext.active().list_to_seq(jcols)))
_.__name__ = 'explode_tiles'
_.__doc__ = 'Create a row for each cell in Tile.'
_.__module__ = THIS_MODULE
return _
def _create_explode_tiles_sample():
""" Create a function mapping to Scala explode_tiles_sample function"""
def _(sample_frac, seed, *tile_cols):
jfcn = RFContext.active().lookup('explode_tiles_sample')
jcols = [_to_java_column(arg) for arg in tile_cols]
return Column(jfcn(sample_frac, seed, RFContext.active().list_to_seq(jcols)))
_.__name__ = 'explode_tiles_sample'
_.__doc__ = 'Create a row for a sample of cells in Tile columns.'
_.__module__ = THIS_MODULE
return _
def _create_maskByValue():
""" Create a function mapping to Scala mask_by_value function """
def _(data_tile, mask_tile, mask_value):
jfcn = RFContext.active().lookup('mask_by_value')
return Column(jfcn(_to_java_column(data_tile), _to_java_column(mask_tile), _to_java_column(mask_value)))
_.__name__ = 'mask_by_value'
_.__doc__ = 'Generate a tile with the values from the data tile, but where cells in the masking tile contain the masking value, replace the data value with NODATA.'
_.__module__ = THIS_MODULE
return _
_rf_unique_functions = {
'array_to_tile': _create_arrayToTile(),
'assemble_tile': _create_assembleTile(),
'cellTypes': lambda: _context_call('cellTypes'),
'convert_cell_type': _create_convertCellType(),
'explode_tiles': _create_explode_tiles(),
'explode_tiles_sample': _create_explode_tiles_sample(),
'make_constant_tile': _create_makeConstantTile(),
'mask_by_value': _create_maskByValue(),
'rasterize': _create_rasterize(),
'reproject_geometry': _create_reproject_geometry(),
'tile_ones': _create_tileOnes(),
'tile_zeros': _create_tileZeros(),
}
_rf_column_scalar_functions = {
'with_no_data': 'Assign a `NoData` value to the Tiles in the given Column.',
'local_add_scalar': 'Add a scalar to a Tile',
'local_add_scalar_int': 'Add a scalar to a Tile',
'local_subtract_scalar': 'Subtract a scalar from a Tile',
'local_subtract_scalar_int': 'Subtract a scalar from a Tile',
'local_multiply_scalar': 'Multiply a Tile by a scalar',
'local_multiply_scalar_int': 'Multiply a Tile by a scalar',
'local_divide_scalar': 'Divide a Tile by a scalar',
'local_divide_scalar_int': 'Divide a Tile by a scalar',
'local_less_scalar': 'Return a Tile with values equal 1 if the cell is less than a scalar, otherwise 0',
'local_less_scalar_int': 'Return a Tile with values equal 1 if the cell is less than a scalar, otherwise 0',
'local_less_equal_scalar': 'Return a Tile with values equal 1 if the cell is less than or equal to a scalar, otherwise 0',
'local_less_equal_scalar_int': 'Return a Tile with values equal 1 if the cell is less than or equal to a scalar, otherwise 0',
'local_greater_scalar': 'Return a Tile with values equal 1 if the cell is greater than a scalar, otherwise 0',
'local_greater_scalar_int': 'Return a Tile with values equal 1 if the cell is greater than a scalar, otherwise 0',
'local_greater_equal_scalar': 'Return a Tile with values equal 1 if the cell is greater than or equal to a scalar, otherwise 0',
'local_greater_equal_scalar_int': 'Return a Tile with values equal 1 if the cell is greater than or equal to a scalar, otherwise 0',
'local_equal_scalar': 'Return a Tile with values equal 1 if the cell is equal to a scalar, otherwise 0',
'local_equal_scalar_int': 'Return a Tile with values equal 1 if the cell is equal to a scalar, otherwise 0',
'local_unequal_scalar': 'Return a Tile with values equal 1 if the cell is not equal to a scalar, otherwise 0',
'local_unequal_scalar_int': 'Return a Tile with values equal 1 if the cell is not equal to a scalar, otherwise 0',
}
_rf_column_functions = {
# ------- RasterFrames functions -------
'tile_dimensions': 'Query the number of (cols, rows) in a Tile.',
'envelope': 'Extracts the bounding box (envelope) of the geometry.',
'tile_to_int_array': 'Flattens Tile into an array of integers.',
'tile_to_double_array': 'Flattens Tile into an array of doubles.',
'cell_type': 'Extract the Tile\'s cell type',
'agg_histogram': 'Compute the full column aggregate floating point histogram',
'agg_stats': 'Compute the full column aggregate floating point statistics',
'agg_mean': 'Computes the column aggregate mean',
'agg_data_cells': 'Computes the number of non-NoData cells in a column',
'agg_no_data_cells': 'Computes the number of NoData cells in a column',
'tile_histogram': 'Compute the Tile-wise histogram',
'tile_mean': 'Compute the Tile-wise mean',
'tile_sum': 'Compute the Tile-wise sum',
'tile_min': 'Compute the Tile-wise minimum',
'tile_max': 'Compute the Tile-wise maximum',
'tile_stats': 'Compute the Tile-wise floating point statistics',
'render_ascii': 'Render ASCII art of tile',
'no_data_cells': 'Count of NODATA cells',
'data_cells': 'Count of cells with valid data',
'local_add': 'Add two Tiles',
'local_subtract': 'Subtract two Tiles',
'local_multiply': 'Multiply two Tiles',
'local_divide': 'Divide two Tiles',
'normalized_difference': 'Compute the normalized difference of two tiles',
'local_agg_stats': 'Compute cell-local aggregate descriptive statistics for a column of Tiles.',
'local_agg_max': 'Compute the cell-wise/local max operation between Tiles in a column.',
'local_agg_min': 'Compute the cellwise/local min operation between Tiles in a column.',
'local_agg_mean': 'Compute the cellwise/local mean operation between Tiles in a column.',
'local_agg_data_cells': 'Compute the cellwise/local count of non-NoData cells for all Tiles in a column.',
'local_agg_no_data_cells': 'Compute the cellwise/local count of NoData cells for all Tiles in a column.',
'mask': 'Where the mask (second) tile contains NODATA, replace values in the source (first) tile with NODATA.',
'inverse_mask': 'Where the mask (second) tile DOES NOT contain NODATA, replace values in the source (first) tile with NODATA.',
'local_less': 'Cellwise less than comparison between two tiles',
'local_less_equal': 'Cellwise less than or equal to comparison between two tiles',
'local_greater': 'Cellwise greater than comparison between two tiles',
'local_greater_equal': 'Cellwise greater than or equal to comparison between two tiles',
'local_equal': 'Cellwise equality comparison between two tiles',
'local_unequal': 'Cellwise inequality comparison between two tiles',
# ------- JTS functions -------
# spatial constructors
'st_geomFromGeoHash': '',
'st_geomFromWKT': '',
'st_geomFromWKB': '',
'st_lineFromText': '',
'st_makeBox2D': '',
'st_makeBBox': '',
'st_makePolygon': '',
'st_makePoint': '',
'st_makeLine': '',
'st_makePointM': '',
'st_mLineFromText': '',
'st_mPointFromText': '',
'st_mPolyFromText': '',
'st_point': '',
'st_pointFromGeoHash': '',
'st_pointFromText': '',
'st_pointFromWKB': '',
'st_polygon': '',
'st_polygonFromText': '',
# spatial converters
'st_castToPoint': '',
'st_castToPolygon': '',
'st_castToLineString': '',
'st_byteArray': '',
# spatial accessors
'st_boundary': '',
'st_coordDim': '',
'st_dimension': '',
'st_envelope': '',
'st_exteriorRing': '',
'st_geometryN': '',
'st_geometryType': '',
'st_interiorRingN': '',
'st_isClosed': '',
'st_isCollection': '',
'st_isEmpty': '',
'st_isRing': '',
'st_isSimple': '',
'st_isValid': '',
'st_numGeometries': '',
'st_numPoints': '',
'st_pointN': '',
'st_x': '',
'st_y': '',
# spatial outputs
'st_asBinary': '',
'st_asGeoJSON': '',
'st_asLatLonText': '',
'st_asText': '',
'st_geoHash': '',
# spatial processors
'st_bufferPoint': '',
'st_antimeridianSafeGeom': '',
# spatial relations
'st_translate': '',
'st_contains': '',
'st_covers': '',
'st_crosses': '',
'st_disjoint': '',
'st_equals': '',
'st_intersects': '',
'st_overlaps': '',
'st_touches': '',
'st_within': '',
'st_relate': '',
'st_relateBool': '',
'st_area': '',
'st_closestPoint': '',
'st_centroid': '',
'st_distance': '',
'st_distanceSphere': '',
'st_length': '',
'st_aggregateDistanceSphere': '',
'st_lengthSphere': '',
}
__all__ = list(_rf_column_functions.keys()) + \
list(_rf_column_scalar_functions.keys()) + \
list(_rf_unique_functions.keys())
def _create_column_function(name, doc=""):
""" Create a mapping to Scala UDF for a column function by name"""
def _(*args):
jfcn = RFContext.active().lookup(name)
jcols = [_to_java_column(arg) for arg in args]
return Column(jfcn(*jcols))
_.__name__ = name
_.__doc__ = doc
_.__module__ = THIS_MODULE
return _
def _create_columnScalarFunction(name, doc=""):
""" Create a mapping to Scala UDF for a (column, scalar) -> column function by name"""
def _(col, scalar):
jfcn = RFContext.active().lookup(name)
return Column(jfcn(_to_java_column(col), scalar))
_.__name__ = name
_.__doc__ = doc
_.__module__ = THIS_MODULE
return _
def _register_functions():
""" Register each function in the scope"""
for name, doc in _rf_column_functions.items():
globals()[name] = _create_column_function(name, doc)
for name, doc in _rf_column_scalar_functions.items():
globals()[name] = _create_columnScalarFunction(name, doc)
for name, func in _rf_unique_functions.items():
globals()[name] = func
_register_functions()
```
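The module above never defines its public functions by hand: `_register_functions()` turns each name/docstring pair into a thin wrapper around the corresponding Scala UDF and injects it into `globals()`. The snippet below is a minimal, self-contained sketch of that dynamic-registration pattern; the toy `_toy_lookup` stands in for the JVM-side `RFContext.active().lookup(...)`, and all names in it are illustrative rather than part of the RasterFrames API.
```python
# Minimal, self-contained sketch of the dynamic-registration pattern used above.
# The Spark/JVM pieces are replaced by a toy lookup; every name below is
# illustrative and not part of the RasterFrames API.
_FUNCTION_DOCS = {
    'tile_sum': 'Compute the Tile-wise sum',
    'tile_mean': 'Compute the Tile-wise mean',
}


def _toy_lookup(name):
    # stands in for RFContext.active().lookup(name)
    return lambda *cols: '{fcn}({args})'.format(fcn=name, args=', '.join(map(str, cols)))


def _create_wrapper(name, doc=""):
    def _(*args):
        return _toy_lookup(name)(*args)
    _.__name__ = name
    _.__doc__ = doc
    return _


for _name, _doc in _FUNCTION_DOCS.items():
    globals()[_name] = _create_wrapper(_name, _doc)

print(tile_sum('col_a'))    # -> tile_sum(col_a)
print(tile_mean.__doc__)    # -> Compute the Tile-wise mean
```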
|
{
"source": "jdepoix/goto_cloud",
"score": 4
}
|
#### File: goto_cloud/cloud_management/cloud_adapter.py
```python
from abc import ABCMeta, abstractmethod
class CloudAdapter(metaclass=ABCMeta):
"""
defines an interface which should be used to implement functionality for a cloud provider
"""
class CloudConnectionException(Exception):
"""
raised if something goes wrong, while communicating with the cloud
"""
class InvalidCloudSettingsException(Exception):
"""
raised if the given cloud settings are not valid
"""
pass
def __init__(self, settings):
self._settings = settings
@abstractmethod
def create_target(self, name, bootstrapping_network_interface, network_interfaces, volumes, ram, cores):
"""
creates a target machine in the cloud
        :param name: the name of the target machine
        :type name: str
        :param bootstrapping_network_interface: the network interface which is used during the migration
        :type bootstrapping_network_interface: {'ip': str, 'network_id': str}
        :param network_interfaces: the network interfaces which should be created
        :type network_interfaces: [{'ip': str, 'network_id': str}]
:param volumes: a list of volume sizes in gb, which should be created
:type volumes: list[int]
:param ram: the ram size in mb as a multiple of 256
:type ram: int
:param cores: the number of cores the target machine should get
:type cores: int
:return: the created target
:rtype: dict
"""
pass
@abstractmethod
def delete_target(self, server_id):
"""
deletes the target with the given id
:param server_id: the cloud id of the target machine
:type server_id: str
"""
pass
@abstractmethod
def start_target(self, server_id):
"""
starts the target with the given id and waits for it to be started
:param server_id: the cloud id of the target machine
:type server_id: str
"""
pass
@abstractmethod
def stop_target(self, server_id):
"""
stops the target with the given id and waits for it to be stopped
:param server_id: the cloud id of the target machine
:type server_id: str
"""
pass
@abstractmethod
def delete_volume(self, volume_id):
"""
deletes the volume with the given id
:param volume_id: the volume id of the volume which should be deleted
:type volume_id: str
"""
pass
@abstractmethod
def make_volume_boot(self, server_id, volume_id):
"""
changes which device a target machine should boot from and waits for the change to be finished
:param server_id: the cloud id of the target machine
:type server_id: str
:param volume_id: the volume id of the volume which should become the new boot device
:type volume_id: str
:return:
"""
pass
@abstractmethod
def delete_nic(self, server_id, nic_id):
"""
deletes the network interface with the given id
:param server_id: the cloud id of the target machine
:type server_id: str
        :param nic_id: the id of the network interface which should be deleted
:type nic_id: str
"""
pass
```
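`CloudAdapter` is only a contract; each supported provider subclasses it and fills in the abstract methods. The following is a hedged sketch of what a minimal subclass could look like, using an in-memory dummy instead of real provider API calls; the import path is inferred from the file header above and the dummy's class and attribute names are hypothetical.
```python
# Hedged sketch of a concrete adapter: an in-memory dummy, useful for local
# experiments. The import path is inferred from the file header above; the
# provider-specific logic is intentionally trivial.
from cloud_management.cloud_adapter import CloudAdapter


class DummyCloudAdapter(CloudAdapter):
    """Keeps 'servers' in a dict instead of talking to a real cloud API."""

    def __init__(self, settings):
        super().__init__(settings)
        self._servers = {}

    def create_target(self, name, bootstrapping_network_interface, network_interfaces, volumes, ram, cores):
        server = {'id': name, 'volumes': list(volumes), 'ram': ram, 'cores': cores, 'running': False}
        self._servers[name] = server
        return server

    def delete_target(self, server_id):
        self._servers.pop(server_id, None)

    def start_target(self, server_id):
        self._servers[server_id]['running'] = True

    def stop_target(self, server_id):
        self._servers[server_id]['running'] = False

    def delete_volume(self, volume_id):
        pass  # nothing to clean up for the in-memory dummy

    def make_volume_boot(self, server_id, volume_id):
        self._servers[server_id]['boot_volume'] = volume_id

    def delete_nic(self, server_id, nic_id):
        pass  # nothing to clean up for the in-memory dummy
```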
#### File: goto_cloud/dict_utils/dict_utils.py
```python
import operator
from functools import reduce
class DictUtils():
"""
collection of static methods, helping with dicts
"""
@staticmethod
def flatten_child_elements(dic):
"""
flattens all child keys, and values of a tree-like dict
:param dic: the dict to get the children for
:type dic: dict
:return: flat child values
:rtype: list
"""
if isinstance(dic, dict):
return (
reduce(
operator.add,
[(DictUtils.flatten_child_elements(child)) for child in dic.values()] + [list(dic.keys())]
)
if dic else []
)
else:
return [dic]
@staticmethod
def find_sub_dict_by_key(dic, key):
"""
        traverses a tree-like dict structure in order and returns the first "sub-dict" whose key matches the
        given key
:param dic: the dict to search in
:type dic: dict
:param key: the key to look for
:type key: str
:return: the sub dict, or None if none was found
:rtype: dict
"""
if isinstance(dic, dict):
if key in dic.keys():
return dic[key]
else:
for sub_dict in dic.values():
sub_dict_return_value = DictUtils.find_sub_dict_by_key(sub_dict, key)
if sub_dict_return_value:
return sub_dict_return_value
else:
return None
@staticmethod
def merge_dicts(dominant_dict, other_dict):
"""
        returns a new dict which contains a merge of the given dicts. On key collisions, the values of the first
        (dominant) dict overwrite those of the second dict. If both colliding values are dicts themselves, they
        are merged recursively.
:param dominant_dict: the dominant dict
:type dominant_dict: dict
:param other_dict: the other dict
:type other_dict: dict
:return: merged dict
:rtype: dict
"""
dominant_dict_keys = dominant_dict.keys()
other_dict_keys = other_dict.keys()
common_keys = dominant_dict_keys & other_dict_keys
merged_dict = {
**DictUtils.filter_dict(other_dict, other_dict_keys - common_keys),
**DictUtils.filter_dict(dominant_dict, dominant_dict_keys - common_keys),
}
for common_key in common_keys:
if isinstance(dominant_dict[common_key], dict) and isinstance(other_dict[common_key], dict):
merged_dict[common_key] = DictUtils.merge_dicts(dominant_dict[common_key], other_dict[common_key])
else:
merged_dict[common_key] = dominant_dict[common_key]
return merged_dict
@staticmethod
def filter_dict(target_dict, keys):
"""
filters a dict, by a given set of keys
:param target_dict: the dict to filter
:type target_dict: dict
:param keys: the keys to filter for
:type keys: list or tuple or set
:return: filtered dict
:rtype: dict
"""
return {key: target_dict[key] for key in keys}
```
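A short usage sketch of the helpers above, illustrating the merge semantics (the dominant dict wins collisions, nested dicts are merged recursively); the import path is an assumption based on the file header and may differ in the actual package layout.
```python
# Usage sketch for DictUtils; the import path is an assumption based on the
# file header above.
from dict_utils.dict_utils import DictUtils

dominant = {'a': 1, 'nested': {'x': 1, 'y': 2}}
other = {'a': 99, 'b': 2, 'nested': {'y': 99, 'z': 3}}

# collisions are won by the dominant dict, nested dicts are merged recursively
merged = DictUtils.merge_dicts(dominant, other)
assert merged == {'a': 1, 'b': 2, 'nested': {'x': 1, 'y': 2, 'z': 3}}

# flatten_child_elements returns all keys and leaf values of the tree
assert sorted(DictUtils.flatten_child_elements({'k': {'l': 'v'}})) == ['k', 'l', 'v']
```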
#### File: goto_cloud/migration_commander/config_adjustment.py
```python
from command.public import SourceCommand
from remote_execution.public import RemoteHostExecutor
from .device_modification import DeviceModifyingCommand
from .remote_file_edit import RemoteFileEditor
from .source_file_location_resolving import SourceFileLocationResolver
class SshConfigAdjustmentCommand(SourceCommand):
"""
    Takes care of adjusting the ssh config of the copied data on the target machine before it goes live. This is
    needed to make sure that the target machine can still be reached via ssh after go-live.
"""
class SshConfigAdjustmentException(DeviceModifyingCommand.CommandExecutionException):
"""
raised in case something goes wrong, while adjusting the ssh config
"""
COMMAND_DOES = 'adjust ssh config'
ERROR_REPORT_EXCEPTION_CLASS = SshConfigAdjustmentException
SSHD_CONFIG_LOCATION = '/etc/ssh/sshd_config'
def _execute(self):
self._comment_out_listen_address()
def _comment_out_listen_address(self):
"""
comments out the ListenAddress line in the sshd_config file
"""
RemoteFileEditor(RemoteHostExecutor(self._target.remote_host)).edit(
SourceFileLocationResolver(self._source).resolve_path(self.SSHD_CONFIG_LOCATION),
'ListenAddress',
'# ListenAddress'
)
class FstabAdjustmentCommand(DeviceModifyingCommand):
"""
    Takes care of adjusting /etc/fstab, to make sure that the correct devices are mounted at the correct
    mountpoints and the machine will actually be able to boot after go-live.
    It will try to replace all occurrences of device ids with their UUIDs or labels.
"""
class FstabAdjustmentException(DeviceModifyingCommand.CommandExecutionException):
COMMAND_DOES = 'adjust fstab'
ERROR_REPORT_EXCEPTION_CLASS = FstabAdjustmentException
FSTAB_LOCATION = '/etc/fstab'
def _execute(self):
self._execute_on_every_partition(self._replace_partition_in_fstab)
self._execute_on_every_device(self._replace_disk_in_fstab)
def _replace_disk_in_fstab(
self, remote_executor, source_device, target_device
):
"""
replaces a disks information in fstab with the information of the new device
:param remote_executor: remote executor to use for execution
:type remote_executor: RemoteHostExecutor
:param source_device: the source device
:type source_device: (str, dict)
:param target_device: the target device
        :type target_device: (str, dict)
"""
self._replace_device_information(
remote_executor,
source_device[0],
target_device[0],
source_device[1]['uuid'],
source_device[1]['label'],
)
def _replace_partition_in_fstab(
self, remote_executor, source_device, target_device, partition_device, target_partition_device
):
"""
replaces a partitions information in fstab with the information of the new device
:param remote_executor: remote executor to use for execution
:type remote_executor: RemoteHostExecutor
:param source_device: the source device
:type source_device: (str, dict)
:param target_device: the target device
        :type target_device: (str, dict)
:param partition_device: the original partition device
:type partition_device: (str, dict)
:param target_partition_device: the target partition device
:type target_partition_device: (str, dict)
"""
self._replace_device_information(
remote_executor,
partition_device[0],
target_partition_device[0],
partition_device[1]['uuid'],
partition_device[1]['label'],
)
@DeviceModifyingCommand._collect_errors
def _replace_device_information(self, remote_executor, old_device_id, device_id, uuid=None, label=None):
RemoteFileEditor(remote_executor).edit(
SourceFileLocationResolver(self._source).resolve_path(self.FSTAB_LOCATION),
'/dev/{device_id}'.format(device_id=old_device_id),
'UUID={uuid}'.format(uuid=uuid) if uuid else 'LABEL={label}'.format(label=label),
)
class NetworkConfigAdjustmentCommand(SourceCommand):
"""
    takes care of applying a default dhcp network config, to make sure that networking works after going live
"""
class NetworkConfigAdjustmentException(SourceCommand.CommandExecutionException):
"""
raised if something goes wrong, while trying to adjust the network config
"""
COMMAND_DOES = 'adjust the network settings'
ERROR_REPORT_EXCEPTION_CLASS = NetworkConfigAdjustmentException
NETWORK_CONFIG_FILE_LOCATION = '/etc/network/interfaces'
NETWORK_CONFIG_ENTRY = 'auto {interface_name}\niface {interface_name} inet {interface_type}\n\n'
@SourceCommand._collect_errors
def _execute(self):
self._write_network_config(self._generate_network_config())
def _write_network_config(self, network_config):
"""
        writes a given network config to the network config file
:param network_config: the network config to persist
:type network_config: str
"""
RemoteFileEditor(RemoteHostExecutor(self._target.remote_host)).write(
SourceFileLocationResolver(self._source).resolve_path(self.NETWORK_CONFIG_FILE_LOCATION),
network_config
)
def _generate_network_config(self):
"""
generates the network config from the targets blueprint
:return: the generated network config
:rtype: str
"""
network_config = self._generate_network_config_entry('lo', 'loopback')
for network_interface_number in range(len(self._target.blueprint['network_interfaces'])):
network_config += self._generate_network_config_entry(
'eth{network_interface_number}'.format(
network_interface_number=network_interface_number
),
'dhcp'
)
return network_config
def _generate_network_config_entry(self, interface_name, interface_type):
"""
generates a single network config entry using the given network interface data
:param interface_name: the name of the interface
:type interface_name: str
:param interface_type: the type of the interface (dhcp, lo etc.)
:type interface_type: str
:return: the generated network config entry
:rtype: str
"""
return self.NETWORK_CONFIG_ENTRY.format(interface_name=interface_name, interface_type=interface_type)
```
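For reference, this is roughly the `/etc/network/interfaces` content that `NetworkConfigAdjustmentCommand` generates for a blueprint with two network interfaces. The snippet re-creates the template logic stand-alone instead of importing the command, so it can be run without the project or a remote host; the IPs are illustrative.
```python
# Stand-alone re-creation of the template logic in NetworkConfigAdjustmentCommand,
# showing the generated interfaces file for a blueprint with two NICs.
NETWORK_CONFIG_ENTRY = 'auto {interface_name}\niface {interface_name} inet {interface_type}\n\n'


def generate_network_config(network_interfaces):
    # always starts with the loopback device, then one dhcp entry per interface
    config = NETWORK_CONFIG_ENTRY.format(interface_name='lo', interface_type='loopback')
    for index in range(len(network_interfaces)):
        config += NETWORK_CONFIG_ENTRY.format(
            interface_name='eth{index}'.format(index=index),
            interface_type='dhcp',
        )
    return config


print(generate_network_config([{'ip': '10.17.34.100'}, {'ip': '10.17.35.100'}]))
# auto lo
# iface lo inet loopback
#
# auto eth0
# iface eth0 inet dhcp
#
# auto eth1
# iface eth1 inet dhcp
```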
#### File: goto_cloud/migration_commander/device_modification.py
```python
from command.public import SourceCommand
from remote_execution.public import RemoteHostExecutor
class DeviceModifyingCommand(SourceCommand):
"""
a command supplying utility methods for command which iterate over the devices of the source and target
"""
def _execute_on_every_device(self, executable_for_disks, executable_for_partitions=None, include_swap=False):
"""
execute the given executable with all devices.
:param executable_for_disks: a function which takes the parameters: remote_executor, source_device,
target_device
:type executable_for_disks: (self: Any, RemoteExecutor, (str, dict), (str, dict)) -> None
        :param executable_for_partitions: a function which takes the parameters: remote_executor, source_device,
target_device, source_partition_device, target_partition_device
:type executable_for_partitions: (self: Any, RemoteExecutor, (str, dict), (str, dict), (str, dict), (str, dict)
) -> None
:param include_swap: should a swap device also be iterated over
:type include_swap: bool
:return: the used remote_executor, for extended use
:rtype: RemoteHostExecutor
"""
remote_executor = RemoteHostExecutor(self._target.remote_host)
for source_device_id, target_device in self._target.device_mapping.items():
source_device = self._source.remote_host.system_info['block_devices'][source_device_id]
if executable_for_disks and (include_swap or not include_swap and source_device['fs'] != 'swap'):
executable_for_disks(
remote_executor,
(source_device_id, source_device),
(target_device['id'], target_device),
)
if executable_for_partitions:
for source_partition_id, target_partition in target_device['children'].items():
source_partition = source_device['children'][source_partition_id]
if (include_swap or not include_swap and source_partition['fs'] != 'swap'):
executable_for_partitions(
remote_executor,
(source_device_id, source_device),
(target_device['id'], target_device),
(
source_partition_id,
source_partition
),
(target_partition['id'], target_partition),
)
return remote_executor
def _execute_on_every_partition(self, executable):
"""
executes the given executable on all partitions
:param executable: a function which takes the parameters: remote_executor, source_device,
target_device, source_partition_device, target_partition_device
:type executable: (self: Any, RemoteExecutor, (str, dict), (str, dict), (str, dict), (str, dict)) -> None
"""
return self._execute_on_every_device(None, executable)
```
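The callbacks passed to `_execute_on_every_device` receive the remote executor plus `(device_id, device_info)` tuples. Below is a sketch of compatible callbacks; the functions are free-standing and illustrative here, whereas in the real commands they are bound methods, so `self` is supplied implicitly.
```python
# Illustrative callbacks compatible with _execute_on_every_device; in the real
# commands these are bound methods, so `self` is supplied implicitly.
def print_disk(remote_executor, source_device, target_device):
    # each *_device argument is a (device_id, device_info_dict) tuple
    source_id, source_info = source_device
    target_id, _ = target_device
    print('disk {src} ({size} bytes) maps to {dst}'.format(
        src=source_id, size=source_info.get('size'), dst=target_id,
    ))


def print_partition(remote_executor, source_device, target_device, source_partition, target_partition):
    print('partition {src} maps to {dst}'.format(src=source_partition[0], dst=target_partition[0]))


# a concrete command would then call:
#   self._execute_on_every_device(print_disk, print_partition)
```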
#### File: goto_cloud/migration_commander/mountpoint_mapping.py
```python
import sys
class MountpointMapper(object):
@staticmethod
def map_mountpoint(parent_directory, mountpoint):
"""
maps a mountpoint onto a hashed directory name
:param parent_directory: the directory the mapped directory should reside in
:type parent_directory: str
:param mountpoint: the mountpoint which will be mapped
:type mountpoint: str
:return: the mapped mountpoint
:rtype: str
"""
return '{parent_directory}{mountpoint_hash}'.format(
parent_directory=''
if not parent_directory
else (parent_directory + '/' if parent_directory[-1] != '/' else parent_directory),
mountpoint_hash=str(hash(mountpoint) + sys.maxsize + 1)
)
```
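Usage sketch for `MountpointMapper`; the import path is inferred from the file header. Note that the directory name is derived from Python's built-in `hash()`, which is randomized per interpreter run for strings unless `PYTHONHASHSEED` is fixed, so the mapping is only stable within a single process.
```python
# Usage sketch for MountpointMapper; the import path is inferred from the file header.
from migration_commander.mountpoint_mapping import MountpointMapper

temp_mount = MountpointMapper.map_mountpoint('/tmp', '/mnt/vdc1')
print(temp_mount)  # e.g. /tmp/49394857... (parent dir plus a non-negative hash of the mountpoint)

# a trailing slash on the parent directory makes no difference
assert temp_mount == MountpointMapper.map_mountpoint('/tmp/', '/mnt/vdc1')
```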
#### File: goto_cloud/migration_commander/remote_file_edit.py
```python
from remote_host_command.public import RemoteHostCommand
class RemoteFileEditor():
"""
takes care of editing a RemoteHosts files
"""
_READ_FILE = RemoteHostCommand('sudo cat {FILE}')
_WRITE_FILE = RemoteHostCommand('sudo bash -c "echo -e \\"{FILE_CONTENT}\\" > {FILE}"')
_APPEND_FILE = RemoteHostCommand('sudo bash -c "echo -e \\"{FILE_CONTENT}\\" >> {FILE}"')
def __init__(self, remote_executor):
"""
is initialized with the remote executor the files are edited with
:param remote_executor: remote executor the files are edited with
:type remote_executor: remote_execution.public.RemoteHostExecutor
"""
self.remote_executor = remote_executor
def edit(self, file, text_to_replace, text_to_replace_with):
"""
replaces a given string in a file by another one
:param file: the file which will be edited
:type file: str
:param text_to_replace: the text to replace
:type text_to_replace: str
:param text_to_replace_with: the text to replace the text with
:type text_to_replace_with: str
"""
file_content = self.remote_executor.execute(self._READ_FILE.render(file=file))
if text_to_replace in file_content:
self.write(
file,
file_content.replace(text_to_replace, text_to_replace_with),
)
def append(self, file, text_to_append):
"""
append something to the given file
:param file: the file which will be edited
:type file: str
:param text_to_append: the text to append
:type text_to_append: str
"""
self.remote_executor.execute(
self._APPEND_FILE.render(file=file, file_content=self._make_string_echo_safe(text_to_append))
)
def write(self, file, text_to_write):
"""
Writes a file. Current content will be overwritten
:param file: the file which will be edited
:type file: str
        :param text_to_write: the text to write
:type text_to_write: str
"""
self.remote_executor.execute(
self._WRITE_FILE.render(file=file, file_content=self._make_string_echo_safe(text_to_write))
)
def _make_string_echo_safe(self, string):
return string.replace('"', '\\"')
```
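A rough usage sketch of `RemoteFileEditor` with a stub executor instead of a real ssh connection. It assumes the import path from the file header and that `RemoteHostCommand.render` substitutes the uppercase placeholders with the keyword arguments, which is how the surrounding tests use it.
```python
# Rough usage sketch of RemoteFileEditor with a stub executor instead of ssh.
# Import path inferred from the file header; RemoteHostCommand.render is assumed
# to substitute the uppercase placeholders, as the surrounding tests do.
from migration_commander.remote_file_edit import RemoteFileEditor


class StubExecutor():
    """Collects the rendered commands instead of running them remotely."""

    def __init__(self, file_content):
        self.file_content = file_content
        self.executed = []

    def execute(self, command):
        self.executed.append(command)
        return self.file_content


executor = StubExecutor('ListenAddress 10.17.32.4:22\n')
RemoteFileEditor(executor).edit('/etc/ssh/sshd_config', 'ListenAddress', '# ListenAddress')
# executed[0] is roughly: sudo cat /etc/ssh/sshd_config
# executed[1] is roughly: sudo bash -c "echo -e \"# ListenAddress 10.17.32.4:22\n\" > /etc/ssh/sshd_config"
```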
#### File: goto_cloud/migration_commander/syncing.py
```python
from commander.public import Commander
from remote_execution.public import RemoteHostExecutor
from remote_host_command.public import RemoteHostCommand
from .device_modification import DeviceModifyingCommand
from .default_remote_host_commands import DefaultRemoteHostCommand
from .mountpoint_mapping import MountpointMapper
class SyncCommand(DeviceModifyingCommand):
"""
    does the actual sync and makes sure that temp mounts are created and used, to avoid problems introduced by
    overlapping mountpoints.
"""
class SyncingException(DeviceModifyingCommand.CommandExecutionException):
"""
raised if an error occurs during the sync
"""
COMMAND_DOES = 'do the sync'
ERROR_REPORT_EXCEPTION_CLASS = SyncingException
ACCEPTED_EXIT_CODES = (24,)
def _execute(self):
self.source_remote_executor = RemoteHostExecutor(self._source.remote_host)
self._execute_on_every_device(self._sync_disk, self._sync_partition)
self.source_remote_executor.close()
self.source_remote_executor = None
return Commander.Signal.SLEEP
def _sync_disk(self, remote_executor, source_device, target_device):
self._sync_device(self.source_remote_executor, source_device[1]['mountpoint'], target_device[1]['mountpoint'])
def _sync_partition(
self, remote_executor, source_device, target_device, source_partition_device, target_partition_device
):
self._sync_device(
self.source_remote_executor,
source_partition_device[1]['mountpoint'],
target_partition_device[1]['mountpoint'],
)
@DeviceModifyingCommand._collect_errors
def _sync_device(self, remote_executor, source_directory, target_directory):
if source_directory:
remote_executor.execute(
RemoteHostCommand(self._target.blueprint['commands']['sync']).render(
source_dir=self._create_temp_bind_mount(remote_executor, source_directory),
target_dir='{user}{remote_host_address}:{target_directory}'.format(
user=('{username}@'.format(username=self._target.remote_host.username))
if self._target.remote_host.username else '',
remote_host_address=self._target.remote_host.address,
target_directory=target_directory,
)
),
accepted_exit_codes=self.ACCEPTED_EXIT_CODES
)
def _create_temp_bind_mount(self, remote_executor, source_directory):
temp_mountpoint = MountpointMapper.map_mountpoint('/tmp', source_directory)
remote_executor.execute(
DefaultRemoteHostCommand.MAKE_DIRECTORY.render(directory=temp_mountpoint)
)
try:
remote_executor.execute(DefaultRemoteHostCommand.CHECK_MOUNTPOINT.render(directory=temp_mountpoint))
except RemoteHostExecutor.ExecutionException:
remote_executor.execute(
DefaultRemoteHostCommand.BIND_MOUNT.render(
directory=source_directory,
mountpoint=temp_mountpoint
)
)
return temp_mountpoint
class FinalSyncCommand(SyncCommand):
"""
does a sync like the SyncCommand, but does not return a sleep signal
"""
def _execute(self):
super()._execute()
```
#### File: migration_commander/tests/test_cloud_commanding.py
```python
from unittest.mock import patch
from django.test import TestCase
from test_assets.public import TestAsset
from migration_plan_parsing.public import MigrationPlanParser
from source.public import Source
from remote_host.public import RemoteHost
from ..cloud_commanding import \
CreateTargetCommand, \
DeleteBootstrapNetworkInterfaceCommand,\
DeleteBootstrapVolumeCommand, \
ConfigureBootDeviceCommand, \
StopTargetCommand, \
StartTargetCommand
from ..device_identification import DeviceIdentificationCommand
from ..target_system_info_inspection import GetTargetSystemInfoCommand
class CloudCommandTestCase(TestCase, metaclass=TestAsset.PatchTrackedRemoteExecutionMeta):
HOSTNAME = 'ubuntu16'
BOOTSTRAP_VOLUME_ID = 'BOOTSTRAP_VOLUME_ID'
BOOTSTRAP_INTERFACE_ID = 'BOOTSTRAP_INTERFACE_ID'
NEW_BOOT_VOLUME_ID = 'NEW_BOOT_VOLUME_ID'
CLOUD_DATA = {
'id': 'ID',
'volumes': [
{
'id': BOOTSTRAP_VOLUME_ID,
'size': 10,
'device_number': 1,
'name': '{hostname}.bootstrap'.format(hostname=HOSTNAME),
},
{
'id': NEW_BOOT_VOLUME_ID,
'size': 10,
'device_number': 2,
'name': '{hostname}.clone-0'.format(hostname=HOSTNAME),
},
{
'id': 'ID',
'size': 10,
'device_number': 3,
'name': '{hostname}.clone-1'.format(hostname=HOSTNAME),
},
{
'id': 'ID',
'size': 10,
'device_number': 4,
'name': '{hostname}.clone-2'.format(hostname=HOSTNAME),
},
],
'network_interfaces': [
{
'id': BOOTSTRAP_INTERFACE_ID,
'ip': '10.17.32.50',
'lan': 2,
'name': '{hostname}.bootstrap'.format(hostname=HOSTNAME),
},
{
'id': 'ID',
'ip': '10.17.34.100',
'lan': 4,
'name': None,
},
]
}
def _init_test_data(self):
self.executed_commands = set()
# MOCKCEPTION!!!
with patch.dict(TestAsset.MIGRATION_PLAN_MOCK, {'sources': [{'address': 'ubuntu16', 'blueprint': 'default'}]}):
self.source = MigrationPlanParser().parse(TestAsset.MIGRATION_PLAN_MOCK).sources.first()
class TestCreateTarget(CloudCommandTestCase):
@patch('cloud_management.public.CloudManager.create_target', return_value=CloudCommandTestCase.CLOUD_DATA)
def test_execute__create_target_executed(self, mocked_create_target):
self._init_test_data()
CreateTargetCommand(self.source).execute()
mocked_create_target.assert_called_with(
TestCreateTarget.HOSTNAME,
{
'ip': '10.17.32.50',
'gateway': '10.17.32.1',
'net_mask': '255.255.255.0',
'network_id': 'LAN 2',
},
[
{
'ip': '10.17.34.100',
'gateway': '10.17.34.1',
'net_mask': '255.255.255.0',
'network_id': 'LAN 4',
'source_interface': 'eth0',
},
],
[10, 10, 10,],
1024,
1,
)
@patch(
'cloud_management.public.CloudManager.create_target',
lambda *args, **kwargs: TestCreateTarget.CLOUD_DATA
)
def test_execute__target_remote_host_created(self):
self._init_test_data()
CreateTargetCommand(self.source).execute()
self.assertIsNotNone(self.source.target.remote_host)
@patch(
'cloud_management.public.CloudManager.create_target',
lambda *args, **kwargs: TestCreateTarget.CLOUD_DATA
)
def test_execute__target_remote_host_cloud_data_set(self):
self._init_test_data()
CreateTargetCommand(self.source).execute()
self.assertEquals(self.source.target.remote_host.cloud_metadata, TestCreateTarget.CLOUD_DATA)
@patch(
'cloud_management.public.CloudManager.create_target',
lambda *args, **kwargs: TestCreateTarget.CLOUD_DATA
)
def test_execute__target_remote_host_ip_set(self):
self._init_test_data()
CreateTargetCommand(self.source).execute()
self.assertEquals(
self.source.target.remote_host.address,
next(
network_interface['ip']
for network_interface in TestCreateTarget.CLOUD_DATA['network_interfaces']
if network_interface['name'] and '.bootstrap' in network_interface['name']
)
)
@patch(
'cloud_management.public.CloudManager.create_target',
lambda *args, **kwargs: TestCreateTarget.CLOUD_DATA
)
def test_execute__target_remote_host_credentials_set(self):
self._init_test_data()
CreateTargetCommand(self.source).execute()
self.assertEquals(
self.source.target.remote_host.private_key,
TestAsset.MIGRATION_PLAN_MOCK['target_cloud']['bootstrapping']['ssh']['private_key']
)
self.assertEquals(
self.source.target.remote_host.private_key_file_path,
TestAsset.MIGRATION_PLAN_MOCK['target_cloud']['bootstrapping']['ssh']['private_key_file_path']
)
self.assertEquals(
self.source.target.remote_host.username,
TestAsset.MIGRATION_PLAN_MOCK['target_cloud']['bootstrapping']['ssh']['username']
)
self.assertEquals(
self.source.target.remote_host.password,
TestAsset.MIGRATION_PLAN_MOCK['target_cloud']['bootstrapping']['ssh']['password']
)
self.assertEquals(
self.source.target.remote_host.port,
TestAsset.MIGRATION_PLAN_MOCK['target_cloud']['bootstrapping']['ssh']['port']
)
class AfterCreationCoudCommandTestCase(CloudCommandTestCase):
@patch('cloud_management.public.CloudManager.create_target', lambda *args, **kwargs: TestCreateTarget.CLOUD_DATA)
def _init_test_data(self):
super()._init_test_data()
CreateTargetCommand(self.source).execute()
class TestStopTargetCommand(AfterCreationCoudCommandTestCase):
@patch('cloud_management.public.CloudManager.stop_target')
def test_execute__stop_server_called(self, mocked_stop_target):
self._init_test_data()
self.source.target.remote_host.address = 'ubuntu16'
self.source.target.remote_host.save()
StopTargetCommand(self.source).execute()
mocked_stop_target.assert_called_with(self.CLOUD_DATA['id'])
class TestDeleteBootstrapVolumeCommand(AfterCreationCoudCommandTestCase):
@patch('cloud_management.public.CloudManager.delete_volume')
def test_execute(self, mocked_delete_volume):
self._init_test_data()
DeleteBootstrapVolumeCommand(self.source).execute()
mocked_delete_volume.assert_called_with(self.BOOTSTRAP_VOLUME_ID)
def test_execute__no_bootstrapping_volume_found(self):
self._init_test_data()
self.source.target.remote_host.refresh_from_db()
self.source.target.remote_host.cloud_metadata['volumes'].pop(0)
self.source.target.remote_host.save()
with self.assertRaises(DeleteBootstrapVolumeCommand.NoBootstrapVolumeFoundException):
DeleteBootstrapVolumeCommand(self.source).execute()
class TestDeleteBootstrapNetworkInterfaceCommand(AfterCreationCoudCommandTestCase):
@patch('cloud_management.public.CloudManager.delete_nic')
def test_execute(self, mocked_delete_nic):
self._init_test_data()
DeleteBootstrapNetworkInterfaceCommand(self.source).execute()
mocked_delete_nic.assert_called_with(self.CLOUD_DATA['id'], self.BOOTSTRAP_INTERFACE_ID)
@patch('cloud_management.public.CloudManager.delete_nic', lambda *args, **kwargs: None)
def test_execute__remote_host_updated(self):
self._init_test_data()
cloud_metadata = self.source.target.remote_host.cloud_metadata
old_remote_host_id = self.source.target.remote_host.id
DeleteBootstrapNetworkInterfaceCommand(self.source).execute()
self.source.refresh_from_db()
self.assertEqual(
self.source.target.remote_host.address,
self.source.target.blueprint['network_interfaces'][0]['ip']
)
self.assertEqual(
self.source.target.remote_host.cloud_metadata,
cloud_metadata
)
self.assertEqual(
self.source.target.remote_host.username,
self.source.remote_host.username
)
self.assertEqual(
self.source.target.remote_host.os,
self.source.remote_host.os
)
self.assertEqual(
self.source.target.remote_host.version,
self.source.remote_host.version
)
self.assertEqual(
self.source.target.remote_host.port,
self.source.remote_host.port
)
self.assertEqual(
self.source.target.remote_host.password,
self.source.remote_host.password
)
self.assertEqual(
self.source.target.remote_host.private_key,
self.source.remote_host.private_key
)
self.assertFalse(RemoteHost.objects.filter(id=old_remote_host_id).exists())
def test_execute__no_bootstrapping_interface_found(self):
self._init_test_data()
self.source.target.remote_host.refresh_from_db()
self.source.target.remote_host.cloud_metadata['network_interfaces'].pop(0)
self.source.target.remote_host.save()
with self.assertRaises(DeleteBootstrapNetworkInterfaceCommand.NoBootstrapNetworkInterfaceFoundException):
DeleteBootstrapNetworkInterfaceCommand(self.source).execute()
class TestConfigureBootDeviceCommand(AfterCreationCoudCommandTestCase):
@patch('cloud_management.public.CloudManager.create_target', lambda *args, **kwargs: TestCreateTarget.CLOUD_DATA)
def _init_test_data(self):
super()._init_test_data()
self.source = Source.objects.get(remote_host__address='ubuntu16')
self.source.target.remote_host = RemoteHost.objects.create(address='target__device_identification')
self.source.target.save()
GetTargetSystemInfoCommand(self.source).execute()
DeviceIdentificationCommand(self.source).execute()
CreateTargetCommand(self.source).execute()
@patch('cloud_management.public.CloudManager.make_volume_boot')
def test_execute(self, mocked_make_volume_boot):
self._init_test_data()
ConfigureBootDeviceCommand(self.source).execute()
mocked_make_volume_boot.assert_called_with(self.CLOUD_DATA['id'], self.NEW_BOOT_VOLUME_ID)
class TestStartTargetCommand(AfterCreationCoudCommandTestCase):
@patch('cloud_management.public.CloudManager.start_target')
def test_execute(self, mocked_start_target):
self._init_test_data()
StartTargetCommand(self.source).execute()
mocked_start_target.assert_called_with(self.CLOUD_DATA['id'])
```
#### File: migration_commander/tests/test_config_adjustment.py
```python
from migration_commander.source_file_location_resolving import SourceFileLocationResolver
from test_assets.public import TestAsset
from ..config_adjustment import SshConfigAdjustmentCommand, FstabAdjustmentCommand, NetworkConfigAdjustmentCommand
from ..remote_file_edit import RemoteFileEditor
from ..tests.utils import MigrationCommanderTestCase
class TestSshConfigAdjustment(MigrationCommanderTestCase):
SSHD_CONFIG = (
'ListenAddress 10.17.32.4:22\n'
'\n'
'PermitRootLogin No\n'
'Protocol 2\n'
'AuthorizedKeysFile .ssh/authorized_keys\n'
'GSSAPIAuthentication no\n'
'HostbasedAuthentication no\n'
'KerberosAuthentication no\n'
'PasswordAuthentication no\n'
'PubkeyAuthentication yes\n'
'RhostsRSAAuthentication no\n'
'RSAAuthentication no\n'
'UsePAM no\n'
'UseDNS no\n'
'\n'
'PrintMotd no\n'
'\n'
'MaxSessions 512\n'
'MaxStartups 512:30:768\n'
'\n'
'Subsystem sftp internal-sftp\n'
'\n'
'Match Group sftp\n'
'ChrootDirectory %h\n'
'ForceCommand internal-sftp\n'
'AllowTcpForwarding no\n'
'PasswordAuthentication no'
)
def _init_test_data(self, source_host, target_host):
super()._init_test_data(source_host, target_host)
TestAsset.REMOTE_HOST_MOCKS['target__device_identification'].add_command(
'sudo cat {mountpoint}{config_path}'.format(
mountpoint=self.source.target.device_mapping['vda']['children']['vda1']['mountpoint'],
config_path=SshConfigAdjustmentCommand.SSHD_CONFIG_LOCATION,
),
self.SSHD_CONFIG
)
def test_execute__sshd_config_edited(self):
self._init_test_data('ubuntu16', 'target__device_identification')
SshConfigAdjustmentCommand(self.source).execute()
self.assertIn(
RemoteFileEditor._WRITE_FILE.render(
file=self.source.target.device_mapping['vda']['children']['vda1']['mountpoint']
+ SshConfigAdjustmentCommand.SSHD_CONFIG_LOCATION,
file_content=self.SSHD_CONFIG.replace(
'ListenAddress',
'# ListenAddress'
),
),
self.executed_commands
)
class TestFstabAdjustment(MigrationCommanderTestCase):
FSTAB = (
'/dev/vda1 / ext4 errors=remount-ro 0 1\n'
'/dev/vdc1 /mnt/vdc1 ext4 defaults 0 2\n'
'/dev/vdc2 /mnt/vdc2 ext4 defaults 0 2'
)
def _init_test_data(self, source_host, target_host):
super()._init_test_data(source_host, target_host)
TestAsset.REMOTE_HOST_MOCKS['target__device_identification'].add_command(
'sudo cat {mountpoint}{config_path}'.format(
mountpoint=self.source.target.device_mapping['vda']['children']['vda1']['mountpoint'],
config_path=FstabAdjustmentCommand.FSTAB_LOCATION,
),
self.FSTAB
)
def test_execute(self):
self._init_test_data('ubuntu16', 'target__device_identification')
FstabAdjustmentCommand(self.source).execute()
self.assertIn(
RemoteFileEditor._WRITE_FILE.render(
file=self.source.target.device_mapping['vda']['children']['vda1']['mountpoint']
+ FstabAdjustmentCommand.FSTAB_LOCATION,
file_content=self.FSTAB.replace(
'/dev/vda1', 'UUID=549c8755-2757-446e-8c78-f76b50491f21'
)
),
self.executed_commands
)
self.assertIn(
RemoteFileEditor._WRITE_FILE.render(
file=self.source.target.device_mapping['vda']['children']['vda1']['mountpoint']
+ FstabAdjustmentCommand.FSTAB_LOCATION,
file_content=self.FSTAB.replace(
'/dev/vdc1', 'UUID=53ad2170-488d-481a-a6ab-5ce0e538f247'
)
),
self.executed_commands
)
self.assertIn(
RemoteFileEditor._WRITE_FILE.render(
file=self.source.target.device_mapping['vda']['children']['vda1']['mountpoint']
+ FstabAdjustmentCommand.FSTAB_LOCATION,
file_content=self.FSTAB.replace(
'/dev/vdc2', 'UUID=bcab224c-8407-4783-8cea-f9ea4be3fabf'
)
),
self.executed_commands
)
class TestNetworkConfigAdjustment(MigrationCommanderTestCase):
def test_execute(self):
self._init_test_data('ubuntu16', 'target__device_identification')
NetworkConfigAdjustmentCommand(self.source).execute()
self.assertIn(
(
'sudo bash -c "echo -e \\"'
'auto lo\n'
'iface lo inet loopback\n'
'\n'
'auto eth0\n'
'iface eth0 inet dhcp\n'
'\n'
'\\" > {file}"'.format(file=SourceFileLocationResolver(self.source).resolve_path('/etc/network/interfaces'))
),
self.executed_commands
)
```
#### File: migration_commander/tests/test_filesystem_mounting.py
```python
from unittest.mock import patch, Mock
from remote_host_event_logging.public import RemoteHostEventLogger
from ..filesystem_mounting import FilesystemMountCommand
from ..device_identification import DeviceIdentificationCommand
from .utils import MigrationCommanderTestCase
class TestFilesystemMountCommand(MigrationCommanderTestCase):
def test_execute__mount_applied(self):
self._init_test_data('ubuntu16', 'target__device_identification')
FilesystemMountCommand(self.source).execute()
self.assertIn('sudo mount -a', self.executed_commands)
def test_execute__fstab_edited(self):
self._init_test_data('ubuntu16', 'target__device_identification')
FilesystemMountCommand(self.source).execute()
self.assertIn(
(
'sudo bash -c "echo -e \\"'
'UUID=549c8755-2757-446e-8c78-f76b50491f21\t'
+ DeviceIdentificationCommand._map_mountpoint('/')
+ '\text4\tdefaults\t0\t2'
'\\" >> /etc/fstab"'
),
self.executed_commands
)
self.assertIn(
(
'sudo bash -c "echo -e \\"'
'UUID=53ad2170-488d-481a-a6ab-5ce0e538f247\t'
+ DeviceIdentificationCommand._map_mountpoint('/mnt/vdc1')
+ '\text4\tdefaults\t0\t2'
'\\" >> /etc/fstab"'
),
self.executed_commands
)
self.assertIn(
(
'sudo bash -c "echo -e \\"'
'UUID=bcab224c-8407-4783-8cea-f9ea4be3fabf\t'
+ DeviceIdentificationCommand._map_mountpoint('/mnt/vdc2')
+ '\text4\tdefaults\t0\t2'
'\\" >> /etc/fstab"'
),
self.executed_commands
)
def test_execute__mount_dirs_created(self):
self._init_test_data('ubuntu16', 'target__device_identification')
FilesystemMountCommand(self.source).execute()
self.assertIn(
'sudo mkdir -p ' + DeviceIdentificationCommand._map_mountpoint('/'),
self.executed_commands
)
self.assertIn(
'sudo mkdir -p ' + DeviceIdentificationCommand._map_mountpoint('/mnt/vdc1'),
self.executed_commands
)
self.assertIn(
'sudo mkdir -p ' + DeviceIdentificationCommand._map_mountpoint('/mnt/vdc2'),
self.executed_commands
)
@patch(
'migration_commander.remote_file_edit.RemoteFileEditor.append',
Mock(side_effect=Exception())
)
def test_execute__failed(self):
self._init_test_data('ubuntu16', 'target__device_identification')
with RemoteHostEventLogger.DisableLoggingContextManager():
with self.assertRaises(FilesystemMountCommand.MountingException):
FilesystemMountCommand(self.source).execute()
def test_execute__with_swap(self):
self._init_test_data('ubuntu12', 'target__device_identification')
FilesystemMountCommand(self.source).execute()
```
#### File: migration_commander/tests/test_migration_commander.py
```python
from unittest.mock import patch
from remote_host.public import RemoteHost
from migration_plan_parsing.public import MigrationPlanParser
from source.public import Source
from test_assets.public import TestAsset
from command.public import SourceCommand
from migration_commander.migration_commander import MigrationCommander
from .utils import MigrationCommanderTestCase
class CreateTargetCommandMock(SourceCommand):
def _execute(self):
self._target.remote_host = RemoteHost.objects.create(address='target__device_identification')
self._target.save()
class NoopCommand(SourceCommand):
def _execute(self):
pass
class TestMigrationCommander(MigrationCommanderTestCase):
def _init_test_data(self, source_host, target_host):
MigrationPlanParser().parse(TestAsset.MIGRATION_PLAN_MOCK)
self.source = Source.objects.get(remote_host__address=source_host)
TestAsset.REMOTE_HOST_MOCKS['ubuntu16'].add_command(
'sudo sfdisk -d /dev/vdb',
'PART "TABLE"'
)
TestAsset.REMOTE_HOST_MOCKS['ubuntu16'].add_command(
'sudo sfdisk -d /dev/vdc',
'PART "TABLE"'
)
TestAsset.REMOTE_HOST_MOCKS[target_host].add_command(
'sfdisk',
''
)
@patch.dict(MigrationCommander._COMMAND_DRIVER, {
Source.Status.CREATE_TARGET: CreateTargetCommandMock,
Source.Status.STOP_TARGET: NoopCommand,
Source.Status.DELETE_BOOTSTRAP_VOLUME: NoopCommand,
Source.Status.DELETE_BOOTSTRAP_NETWORK_INTERFACE: NoopCommand,
Source.Status.CONFIGURE_BOOT_DEVICE: NoopCommand,
Source.Status.START_TARGET: NoopCommand,
})
def test_execute(self):
self._init_test_data('ubuntu16', 'target__device_identification')
MigrationCommander(self.source).execute()
self.assertEqual(self.source.status, Source.Status.SYNC)
MigrationCommander(self.source).increment_status_and_execute()
self.assertEqual(self.source.status, Source.Status.LIVE)
```
#### File: migration_commander/tests/utils.py
```python
from django.test import TestCase
from migration_plan_parsing.public import MigrationPlanParser
from remote_host.public import RemoteHost
from source.public import Source
from test_assets.public import TestAsset
from ..device_identification import DeviceIdentificationCommand
from ..target_system_info_inspection import GetTargetSystemInfoCommand
class MigrationCommanderTestCase(TestCase, metaclass=TestAsset.PatchTrackedRemoteExecutionMeta):
def setUp(self):
self.executed_commands.clear()
def _init_test_data(self, source_host, target_host):
MigrationPlanParser().parse(TestAsset.MIGRATION_PLAN_MOCK)
self.source = Source.objects.get(remote_host__address=source_host)
self.source.target.remote_host = RemoteHost.objects.create(address=target_host)
self.source.target.save()
GetTargetSystemInfoCommand(self.source).execute()
DeviceIdentificationCommand(self.source).execute()
self.executed_commands.clear()
```
#### File: migration_plan_parsing/tests/test_migration_plan_parsing.py
```python
from django.test import TestCase
from migration_plan.public import MigrationPlan
from migration_run.public import MigrationRun
from source.public import Source
from target.public import Target
from test_assets.public import TestAsset
from ..migration_plan_parsing import MigrationPlanParser
def failing_method(*args, **kwargs):
raise Exception()
class TestMigrationPlanParser(TestCase, metaclass=TestAsset.PatchRemoteHostMeta):
def test_parse(self):
returned_migration_run = MigrationPlanParser().parse(TestAsset.MIGRATION_PLAN_MOCK)
migration_plan = MigrationPlan.objects.first()
migration_run = MigrationRun.objects.first()
self.assertEquals(returned_migration_run, migration_run)
self.assertEquals(migration_plan.plan, TestAsset.MIGRATION_PLAN_MOCK)
self.assertEquals(migration_run.plan, migration_plan)
self.assertEquals(Source.objects.count(), len(TestAsset.MIGRATION_PLAN_MOCK['sources']))
for source in Source.objects.all():
self.assertIsNotNone(source.target)
self.assertIsNotNone(source.remote_host)
self.assertEquals(source.migration_run, migration_run)
self.assertEquals(Target.objects.count(), len(TestAsset.MIGRATION_PLAN_MOCK['sources']))
def test_parse__delete(self):
migration_plan_parser = MigrationPlanParser()
migration_plan_parser.parse(TestAsset.MIGRATION_PLAN_MOCK)
migration_plan_parser.delete()
self.assertEquals(MigrationPlan.objects.count(), 0)
self.assertEquals(MigrationRun.objects.count(), 0)
self.assertEquals(Source.objects.count(), 0)
self.assertEquals(Target.objects.count(), 0)
def test_parse__delete_on_failure(self):
migration_plan_parser = MigrationPlanParser()
migration_plan_parser._create_migration_run = failing_method
with self.assertRaises(Exception):
migration_plan_parser.parse(TestAsset.MIGRATION_PLAN_MOCK)
self.assertEquals(MigrationPlan.objects.count(), 0)
self.assertEquals(MigrationRun.objects.count(), 0)
self.assertEquals(Source.objects.count(), 0)
self.assertEquals(Target.objects.count(), 0)
```
#### File: goto_cloud/remote_host_event_logging/remote_host_event_logging.py
```python
import datetime
import logging
class RemoteHostEventLogger():
"""
    Logs events based on a given RemoteHost. Supports all log levels of the default Logger.
"""
class DisableLoggingContextManager():
"""
context manager which disables logging in the managed context
"""
def __enter__(self):
logging.disable(logging.CRITICAL)
def __exit__(self, exc_type, exc_val, exc_tb):
logging.disable(logging.NOTSET)
def __init__(self, remote_host):
"""
initialized with the remote host which the events are logged for
:param remote_host: the remote host to log for
:type remote_host: remote_host.public.RemoteHost
"""
self.logger = logging.getLogger(__name__)
self.remote_host = remote_host
def debug(self, msg, *args, **kwargs):
self.logger.debug(self._format_message(msg), *args, **kwargs)
def info(self, msg, *args, **kwargs):
self.logger.info(self._format_message(msg), *args, **kwargs)
def warning(self, msg, *args, **kwargs):
self.logger.warning(self._format_message(msg), *args, **kwargs)
def error(self, msg, *args, **kwargs):
self.logger.error(self._format_message(msg), *args, **kwargs)
def critical(self, msg, *args, **kwargs):
self.logger.critical(self._format_message(msg), *args, **kwargs)
@property
def _logging_message_prefix(self):
return '[{timestamp}] <{source_address}>'.format(
source_address=self.remote_host.address if self.remote_host else 'unknown host',
timestamp=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
)
def _format_message(self, message):
formatted_message = '+------------- {message_prefix} -------------'.format(
message_prefix=self._logging_message_prefix
)
if message:
for line in message.split('\n'):
formatted_message += '\n| {logging_line}'.format(logging_line=line)
formatted_message += '\n+--------------{sized_gap}--------------\n'.format(
sized_gap='-' * len(self._logging_message_prefix)
)
return formatted_message
```
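For illustration, the block-style message produced by `_format_message` looks roughly like the output below; timestamps and the exact separator width vary, the import path is inferred from the file header, and any object with an `address` attribute works as the remote host.
```python
# Illustration of the block-style message produced by _format_message; the import
# path is inferred from the file header, and any object with an `address`
# attribute can stand in for the RemoteHost.
from types import SimpleNamespace

from remote_host_event_logging.remote_host_event_logging import RemoteHostEventLogger

logger = RemoteHostEventLogger(SimpleNamespace(address='10.17.32.6'))
print(logger._format_message('syncing /mnt/vdc1\ndone'))
# +------------- [2017-01-01 12:00:00] <10.17.32.6> -------------
# | syncing /mnt/vdc1
# | done
# +-------------------------------------------------------------
```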
#### File: remote_script_execution/tests/test_remote_script_execution.py
```python
import base64
from django.test import TestCase
from test_assets.public import TestAsset
from remote_host.public import RemoteHost
from ..remote_script_execution import RemoteScriptExecutor
class TestRemoteScriptExecutor(TestCase, metaclass=TestAsset.PatchTrackedRemoteExecutionMeta):
def test_execute(self):
remote_script_executor = RemoteScriptExecutor(RemoteHost.objects.create(address='ubuntu16'))
env = {
'1': 1,
'2': 2,
'3': 3,
'4': '{"test_\'new\'_line": "\n"}'
}
remote_script_executor.execute('script', env)
self.assertIn(
'python -c "import base64;exec(base64.b64decode({encoded_script}))"'.format(
encoded_script=base64.b64encode(
RemoteScriptExecutor.REMOTE_SCRIPT_BASE_TEMPLATE.format(
env_string=str(env),
script_string='script'
).encode()
)
),
self.executed_commands
)
def test_execute__no_env(self):
remote_script_executor = RemoteScriptExecutor(RemoteHost.objects.create(address='ubuntu16'))
remote_script_executor.execute('script')
'python -c "import base64;exec(base64.b64decode({encoded_script}))"'.format(
encoded_script=base64.b64encode(
RemoteScriptExecutor.REMOTE_SCRIPT_BASE_TEMPLATE.format(
env_string=str({}),
script_string='script'
).encode()
)
),
def test_execute__sudo(self):
remote_script_executor = RemoteScriptExecutor(RemoteHost.objects.create(address='ubuntu16'))
env = {
'1': 1,
'2': 2,
'3': 3,
'4': '{"test_\'new\'_line": "\n"}'
}
remote_script_executor.execute('script', env=env, sudo=True)
self.assertIn(
'sudo python -c "import base64;exec(base64.b64decode({encoded_script}))"'.format(
encoded_script=base64.b64encode(
RemoteScriptExecutor.REMOTE_SCRIPT_BASE_TEMPLATE.format(
env_string=str(env),
script_string='script'
).encode()
)
),
self.executed_commands
)
```
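The tests above exercise the pattern `RemoteScriptExecutor` uses to ship a script to a remote host: the script (plus its env dict) is embedded in a template, base64-encoded, and then decoded and exec'd by a one-liner on the other side. Below is a self-contained local illustration of that encode-then-exec round trip, run via `subprocess` here instead of ssh; the `python3` binary and the inline script are assumptions for the demo.
```python
# Self-contained illustration of the encode-then-exec pattern used by
# RemoteScriptExecutor: the script is base64-encoded so it can be passed safely
# on a shell command line and decoded again on the receiving side.
import base64
import subprocess

script = "env = {'greeting': 'hello'}\nprint(env['greeting'])"
encoded = base64.b64encode(script.encode()).decode()
command = 'python3 -c "import base64;exec(base64.b64decode(\'{encoded}\'))"'.format(encoded=encoded)

# locally this prints "hello"; in goto_cloud the equivalent command is sent over ssh
print(subprocess.run(command, shell=True, capture_output=True, text=True).stdout)
```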
#### File: goto_cloud/source/models.py
```python
from django.db import models
from migration_run.public import MigrationRun
from remote_host.public import RemoteHost
from target.public import Target
from status_model.public import StatusModel
class Source(StatusModel):
"""
represents a source system, which will be migrated to a Target during the migration
"""
class Status(StatusModel.Status):
DRAFT = 'DRAFT'
CREATE_TARGET = 'CREATE_TARGET'
GET_TARGET_SYSTEM_INFORMATION = 'GET_TARGET_SYSTEM_INFORMATION'
IDENTIFY_DEVICES = 'IDENTIFY_DEVICES'
CREATE_PARTITIONS = 'CREATE_PARTITIONS'
CREATE_FILESYSTEMS = 'CREATE_FILESYSTEMS'
MOUNT_FILESYSTEMS = 'MOUNT_FILESYSTEMS'
SYNC = 'SYNC'
FINAL_SYNC = 'FINAL_SYNC'
ADJUST_NETWORK_CONFIG = 'ADJUST_NETWORK_CONFIG'
ADJUST_SSH_CONFIG = 'ADJUST_SSH_CONFIG'
ADJUST_FSTAB = 'ADJUST_FSTAB'
REINSTALL_BOOTLOADER = 'REINSTALL_BOOTLOADER'
STOP_TARGET = 'STOP_TARGET'
DELETE_BOOTSTRAP_VOLUME = 'DELETE_BOOTSTRAP_VOLUME'
DELETE_BOOTSTRAP_NETWORK_INTERFACE = 'DELETE_BOOTSTRAP_NETWORK_INTERFACE'
CONFIGURE_BOOT_DEVICE = 'CONFIGURE_BOOT_DEVICE'
START_TARGET = 'START_TARGET'
LIVE = 'LIVE'
_LIFECYCLE = (
Status.DRAFT,
Status.CREATE_TARGET,
Status.GET_TARGET_SYSTEM_INFORMATION,
Status.IDENTIFY_DEVICES,
Status.CREATE_PARTITIONS,
Status.CREATE_FILESYSTEMS,
Status.MOUNT_FILESYSTEMS,
Status.SYNC,
Status.FINAL_SYNC,
Status.ADJUST_NETWORK_CONFIG,
Status.ADJUST_SSH_CONFIG,
Status.ADJUST_FSTAB,
Status.REINSTALL_BOOTLOADER,
Status.STOP_TARGET,
Status.DELETE_BOOTSTRAP_VOLUME,
Status.DELETE_BOOTSTRAP_NETWORK_INTERFACE,
Status.CONFIGURE_BOOT_DEVICE,
Status.START_TARGET,
Status.LIVE,
)
@property
def lifecycle(self):
return self._LIFECYCLE
migration_run = models.ForeignKey(MigrationRun, related_name='sources', null=True,)
target = models.OneToOneField(Target, related_name='source', null=True,)
remote_host = models.ForeignKey(RemoteHost, related_name='sources')
```
#### File: source/tests/test_source.py
```python
from unittest.mock import patch
from django.test import TestCase
from source.public import Source
from remote_host.public import RemoteHost
TEST_LIFECYCLE = (
'FIRST',
'SECOND',
'THIRD',
)
class TestSource(TestCase):
@patch('source.models.Source._LIFECYCLE', TEST_LIFECYCLE)
def setUp(self):
self.test_source = Source.objects.create(remote_host=RemoteHost.objects.create())
def test_init__default_value(self):
self.assertEquals(self.test_source.status, 'FIRST')
def test_init__choices(self):
with self.assertRaises(Source.InvalidStatusException):
Source.objects.create(remote_host=RemoteHost.objects.create(), status='status_does_not_exist')
def test_increment_status(self):
self.assertEquals(self.test_source.status, 'FIRST')
self.test_source.increment_status()
self.assertEquals(self.test_source.status, 'SECOND')
def test_decrement_status(self):
self.test_source.status = 'SECOND'
self.test_source.save()
self.assertEquals(self.test_source.status, 'SECOND')
self.test_source.decrement_status()
self.assertEquals(self.test_source.status, 'FIRST')
```
#### File: goto_cloud/status_model/lifecycle_management.py
```python
class StatusLifecycle():
"""
This class represents a lifecycle of statuses.
"""
class InvalidLifecycleException(Exception):
"""
raised if a lifecycle is invalid
"""
pass
def __init__(self, *lifecycle_statuses):
"""
:param lifecycle_statuses: a set of unique statuses, in the correct order, in which they should reside in this
lifecycle
:raises StatusLifecycle.InvalidLifecycleException: if the provided statuses aren't unique
"""
if len(lifecycle_statuses) < 1:
raise StatusLifecycle.InvalidLifecycleException('at least one status must be provided')
if len(lifecycle_statuses) != len(set(lifecycle_statuses)):
raise StatusLifecycle.InvalidLifecycleException('Lifecycle statuses need to be unique!')
self.statuses = lifecycle_statuses
self.index_lookup = {status: index for index, status in enumerate(self.statuses)}
class ObjectStatusLifecycleManager():
"""
This class manages the StatusLifecycle belonging to a specific object, to be able to provide additional
functionality, like getting a next of previous status.
"""
class InvalidStatusException(Exception):
"""
        raised if a status is invalid
"""
pass
def __init__(self, status_lifecycle, object_with_status, status_attribute):
"""
:param status_lifecycle: the StatusLifecycle which should be managed
:type status_lifecycle: StatusLifecycle
:param object_with_status: the Object to manage the Lifecycle for
:type object_with_status: Any
:param status_attribute: the attribute of the status which contains the current status
:type status_attribute: str
"""
self.status_lifecycle = status_lifecycle
self._object_with_status = object_with_status
self._status_attribute = status_attribute
def get_status_by_offset(self, offset, raise_exception=True):
"""
gets a status by a given offset
:param offset: the offset
:type offset: int
:param raise_exception: if False None will be returned in case the offset points to an invalid status, if True
an InvalidStatusException will be thrown
:type raise_exception: bool
:raises ObjectStatusLifecycleManager.InvalidStatusException: in case raise_exception == True and offset is
invalid
:return: the status relative to the provided status, by the given offset
"""
return self._execute_method_and_handle_index_exceptions(
raise_exception,
self._get_status_by_offset,
offset
)
def get_next_status(self, raise_exception=True):
"""
gets the next status, relative to the given status
:param raise_exception: if False None will be returned in case there is no next status, if True
an InvalidStatusException will be thrown
:type raise_exception: bool
        :raises ObjectStatusLifecycleManager.InvalidStatusException: in case raise_exception == True and there is no
        next status
:return: the next status, relative to the given status
"""
return self._execute_method_and_handle_index_exceptions(
raise_exception,
self._get_next_status
)
def get_previous_status(self, raise_exception=True):
"""
gets the previous status, relative to the given status
:param raise_exception: if False None will be returned in case there is no previous status, if True
an InvalidStatusException will be thrown
:type raise_exception: bool
        :raises ObjectStatusLifecycleManager.InvalidStatusException: in case raise_exception == True and there is no
        previous status
:return: the previous status, relative to the given status
"""
return self._execute_method_and_handle_index_exceptions(
raise_exception,
self._get_previous_status
)
def is_status_valid(self, status, raise_exception=False):
"""
evaluates whether a status is valid or not
:param status: the status to check
:type status: str
:param raise_exception: if True an ObjectStatusLifecycleManager.InvalidStatusException will be raised in case
the status is invalid
:type raise_exception: bool
:raises ObjectStatusLifecycleManager.InvalidStatusException: in case raise_exception == True and invalid
:return: is the status valid
:rtype: bool
"""
valid = status in self.status_lifecycle.statuses
if not valid and raise_exception:
raise ObjectStatusLifecycleManager.InvalidStatusException()
else:
return valid
def compare_status_to(self, status_to_compare_to):
"""
compares the position of current status to another one, to evaluate whether a status is before or after another
one in the lifecycle.
:param status_to_compare_to: the status which the current one is compared to
        :type status_to_compare_to: str
:return: returns
1 -> status > status_to_compare_to
-1 -> status < status_to_compare_to
0 -> status == status_to_compare_to
:rtype int in (-1, 0, 1)
"""
status_index = self._get_position_of_current_status()
status_to_compare_to_index = self._get_position_of_status(status_to_compare_to)
if status_index > status_to_compare_to_index:
return 1
if status_index < status_to_compare_to_index:
return -1
return 0
def _get_current_status(self):
return getattr(self._object_with_status, self._status_attribute)
def _get_status_by_offset(self, offset):
index = self._get_position_of_current_status() + offset
if index < 0:
raise ObjectStatusLifecycleManager.InvalidStatusException()
return self.status_lifecycle.statuses[index]
def _get_next_status(self):
return self._get_status_by_offset(1)
def _get_previous_status(self):
return self._get_status_by_offset(-1)
def _get_position_of_status(self, status):
return self.status_lifecycle.index_lookup[status]
def _get_position_of_current_status(self):
return self._get_position_of_status(self._get_current_status())
def _execute_method_and_handle_index_exceptions(self, raise_exception, method, *args):
try:
return method(*args)
except Exception as e:
if raise_exception:
raise ObjectStatusLifecycleManager.InvalidStatusException(str(e))
else:
return None
```
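A small usage sketch of the two classes above with a plain Python object standing in for a model; the import path is inferred from the file header and `Machine` is hypothetical.
```python
# Usage sketch for StatusLifecycle and ObjectStatusLifecycleManager; the import
# path is inferred from the file header, and Machine is a stand-in for a model.
from status_model.lifecycle_management import ObjectStatusLifecycleManager, StatusLifecycle


class Machine():
    def __init__(self):
        self.status = 'DRAFT'


lifecycle = StatusLifecycle('DRAFT', 'SYNC', 'LIVE')
manager = ObjectStatusLifecycleManager(lifecycle, Machine(), 'status')

assert manager.get_next_status() == 'SYNC'
assert manager.compare_status_to('LIVE') == -1                       # DRAFT comes before LIVE
assert manager.get_previous_status(raise_exception=False) is None    # nothing before DRAFT
assert not manager.is_status_valid('UNKNOWN')
```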
#### File: goto_cloud/status_model/models.py
```python
from abc import abstractmethod
from django.db import models
from enums.public import StringEnum
from tracked_model.public import TrackedModel
from .lifecycle_management import ObjectStatusLifecycleManager, StatusLifecycle
class StatusModel(TrackedModel):
"""
This Model can be inherited by models which have a status in a lifecycle. The property model.lifecycle_manager
returns a ObjectStatusLifecycleManager containing the relevant lifecycle.
"""
class InvalidStatusException(Exception):
"""
        raised if a status is invalid
"""
pass
class Status(StringEnum):
pass
@property
@abstractmethod
def lifecycle(self):
"""
:return: the lifecycle of statuses this StatusModel relies on
:rtype: tuple
"""
raise NotImplementedError('implement abstractproperty lifecycle!')
def __init__(self, *args, **kwargs):
self._status_lifecycle = StatusLifecycle(*self.lifecycle)
self._lifecycle_manager = ObjectStatusLifecycleManager(self._status_lifecycle, self, 'status')
self._meta.get_field('status').default = self._status_lifecycle.statuses[0]
self._meta.get_field('status').choices = self.Status.get_django_choices()
super().__init__(*args, **kwargs)
def save(self, *args, **kwargs):
if not self._lifecycle_manager.is_status_valid(self.status):
raise StatusModel.InvalidStatusException('status: {status} is not valid'.format(
status=self.status
))
return super().save(*args, **kwargs)
status = models.CharField(max_length=255)
def increment_status(self):
"""
increments the status of this StatusModel
:raises: ObjectStatusLifecycleManager.InvalidStatusException in case there is no next status
"""
self.status = self._lifecycle_manager.get_next_status()
self.save()
def decrement_status(self):
"""
decrements the status of this StatusModel
:raises: ObjectStatusLifecycleManager.InvalidStatusException in case there is no previous status
"""
self.status = self._lifecycle_manager.get_previous_status()
self.save()
class Meta:
abstract = True
```
#### File: goto_cloud/test_assets/remote_host_mocks.py
```python
import os
from settings.base import BASE_DIR
class RemoteHostMock(object):
def __init__(self, commands, expected_config):
self.commands = commands
self.expected_config = expected_config
def execute(self, command):
matching_commands = [known_command for known_command in self.commands if known_command in command]
if matching_commands:
return {
'exit_code': 0,
'stdout': self.commands[matching_commands[0]].strip() if self.commands[matching_commands[0]] else '',
'stderr': '',
}
return {
'exit_code': 1,
'stdout': '',
'stderr': 'Command {command_name} not known!'.format(command_name=command).encode(),
}
def get_config(self):
return self.expected_config
def add_command(self, command_name, command_output):
self.commands[command_name] = command_output
@staticmethod
def create_from_file(
commands_root_directory_path,
filename,
command_directory_map,
expected_config,
):
commands_root_directory = os.path.realpath(commands_root_directory_path)
commands = {}
for command in command_directory_map:
if command_directory_map[command]:
with open(
os.path.join(os.path.join(commands_root_directory, command_directory_map[command]), filename)
) as command_output:
commands[command] = command_output.read()
else:
commands[command] = None
return RemoteHostMock(commands, expected_config)
COMMANDS_OUTPUT_ROOT_DIRECTORY_PATH = BASE_DIR + '/test_assets/static_assets/test_vms_command_output'
COMMAND_DIRECTORY_MAP = {
'cat /proc/cpuinfo': 'cpuinfo',
'sudo fdisk -l': 'fdisk',
'ifconfig': 'ifconfig',
'lsblk -bPo NAME,FSTYPE,LABEL,UUID,MOUNTPOINT,TYPE,SIZE': 'lsblk',
'cat /proc/meminfo | grep MemTotal:': 'meminfo',
'cat /etc/os-release': 'os-release',
'route -n': 'route',
'hostname': 'hostname',
'lsblk -no NAME': 'lsblkl',
'| sudo fdisk': None,
'sudo mkfs': None,
'rsync': None,
'mount': None,
'grub-install': None,
'echo -e': None,
'mkdir': None,
'(': None,
'&': None,
}
UBUNTU_12_04 = RemoteHostMock.create_from_file(COMMANDS_OUTPUT_ROOT_DIRECTORY_PATH, 'ubuntu-12.04', COMMAND_DIRECTORY_MAP, {
'block_devices': {
'vda': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {
'vda1': {
'type': 'part',
'fs': 'ext4',
'uuid': 'fbea8976-8be3-475d-a97b-9507ff21be36',
'label': '',
'mountpoint': '/',
'size': 8588886016,
'start': 2048,
'end': 16777215,
'bootable': True,
'children': {},
},
'vda2': {
'type': 'part',
'fs': 'swap',
'uuid': '9b75df97-41ff-4475-b753-24584671b2b2',
'label': '',
'mountpoint': '[SWAP]',
'size': 2147483648,
'start': 16777216,
'end': 20971519,
'bootable': False,
'children': {},
}
}
}
},
'network': {
'hostname': 'ubuntu12',
'interfaces' : {
'lo': {
'ip': '127.0.0.1',
'net_mask': '255.0.0.0',
'routes': []
},
'eth0': {
'ip': '10.17.32.6',
'net_mask': '255.255.255.0',
'routes': [
{
'net': '0.0.0.0',
'gateway': '10.17.32.1',
'net_mask': '0.0.0.0'
},
{
'net': '10.17.32.0',
'gateway': '0.0.0.0',
'net_mask': '255.255.255.0'
}
]
}
}
},
'os': {
'name': 'Ubuntu',
'version': '12.04'
},
'hardware': {
'ram': {
'size': 1010504000
},
'cpus': [
{
'model': 'AMD Opteron 62xx class CPU',
'mhz': 2799.948
},
{
'model': 'AMD Opteron 62xx class CPU',
'mhz': 2799.948
},
{
'model': 'AMD Opteron 62xx class CPU',
'mhz': 2799.948
},
]
}
})
UBUNTU_14_04 = RemoteHostMock.create_from_file(COMMANDS_OUTPUT_ROOT_DIRECTORY_PATH, 'ubuntu-14.04', COMMAND_DIRECTORY_MAP, {
'block_devices': {
'sda': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 42949672960,
'children': {
'sda1': {
'type': 'part',
'fs': 'ext4',
'uuid': '53baba04-22c7-4928-94d5-34f5737c025b',
'label': 'cloudimg-rootfs',
'mountpoint': '/',
'size': 42948624384,
'start': 2048,
'end': 83886079,
'bootable': True,
'children': {},
},
},
},
'sdb': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 5368709120,
'children': {},
}
},
'network': {
'hostname': 'ubuntu14',
'interfaces' : {
'lo': {
'ip': '127.0.0.1',
'net_mask': '255.0.0.0',
'routes': []
},
'eth0': {
'ip': '10.0.2.15',
'net_mask': '255.255.255.0',
'routes': [
{
'net': '0.0.0.0',
'gateway': '10.0.2.2',
'net_mask': '0.0.0.0'
},
{
'net': '10.0.2.0',
'gateway': '0.0.0.0',
'net_mask': '255.255.255.0'
},
]
},
'eth1': {
'ip': '192.168.33.10',
'net_mask': '255.255.255.0',
'routes': [
{
'net': '192.168.33.0',
'gateway': '0.0.0.0',
'net_mask': '255.255.255.0',
},
]
},
}
},
'os': {
'name': 'Ubuntu',
'version': '14.04'
},
'hardware': {
'ram': {
'size': 1017796000
},
'cpus': [
{
'model': 'Intel(R) Core(TM) i7-5557U CPU @ 3.10GHz',
'mhz': 3099.790
}
]
}
})
UBUNTU_16_04__LVM = RemoteHostMock.create_from_file(
COMMANDS_OUTPUT_ROOT_DIRECTORY_PATH,
'ubuntu-16.04',
COMMAND_DIRECTORY_MAP,
{
'block_devices': {
'vda': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {
'vda1': {
'type': 'part',
'fs': 'ext4',
'uuid': '549c8755-2757-446e-8c78-f76b50491f21',
'label': '',
'mountpoint': '/',
'size': 10736369664,
'start': 2048,
'end': 20971519,
'bootable': True,
'children': {},
},
},
},
'vdb': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {
'vdb1': {
'type': 'part',
'fs': 'LVM2_member',
'uuid': '25fyrr-TMlE-KoqC-mlFS-BHtQ-oSat-orQK1v',
'label': '',
'mountpoint': '',
'size': 10736369664,
'bootable': False,
'start': 2048,
'end': 20971519,
'children': {
'vol1-lvol1': {
'type': 'lvm',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 83886080,
'children': {},
},
},
},
},
},
'vdc': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {
'vdc1': {
'type': 'part',
'fs': 'ext4',
'uuid': 'f52fdfe8-d862-44f9-b9b7-e35c0ada68cf',
'label': '',
'mountpoint': '',
'size': 5368709120,
'start': 2048,
'end': 10487807,
'bootable': False,
'children': {},
},
'vdc2': {
'type': 'part',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 5367660544,
'start': 10487808,
'end': 20971519,
'bootable': False,
'children': {},
},
}
},
},
'network': {
'hostname': 'ubuntu16__lvm',
'interfaces' : {
'lo': {
'ip': '127.0.0.1',
'net_mask': '255.0.0.0',
'routes': []
},
'eth0': {
'ip': '10.17.32.4',
'net_mask': '255.255.255.0',
'routes': [
{
'net': '0.0.0.0',
'gateway': '10.17.32.1',
'net_mask': '0.0.0.0',
},
{
'net': '10.0.0.0',
'gateway': '10.17.32.1',
'net_mask': '255.0.0.0',
},
{
'net': '10.17.32.0',
'gateway': '0.0.0.0',
'net_mask': '255.255.255.0',
},
]
},
}
},
'os': {
'name': 'Ubuntu',
'version': '16.04'
},
'hardware': {
'ram': {
'size': 1007264000
},
'cpus': [
{
'model': 'AMD Opteron 62xx class CPU',
'mhz': 2799.980
}
]
}
}
)
UBUNTU_16_04 = RemoteHostMock.create_from_file(COMMANDS_OUTPUT_ROOT_DIRECTORY_PATH, 'ubuntu16__lvm', COMMAND_DIRECTORY_MAP, {
'block_devices': {
'vda': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {
'vda1': {
'type': 'part',
'fs': 'ext4',
'uuid': '549c8755-2757-446e-8c78-f76b50491f21',
'label': '',
'mountpoint': '/',
'size': 10736369664,
'bootable': True,
'start': 2048,
'end': 20971519,
'children': {}
}
}
},
'vdb': {
'type': 'disk',
'fs': 'ext3',
'uuid': 'd04ba532-cd2d-4406-a5ef-114acf019cc8',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {}
},
'vdc': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {
'vdc1': {
'type': 'part',
'fs': 'ext4',
'uuid': '53ad2170-488d-481a-a6ab-5ce0e538f247',
'label': '',
'mountpoint': '/mnt/vdc1',
'size': 5368709120,
'bootable': False,
'start': 2048,
'end': 10487807,
'children': {}
},
'vdc2': {
'type': 'part',
'fs': 'ext4',
'uuid': 'bcab224c-8407-4783-8cea-f9ea4be3fabf',
'label': '',
'mountpoint': '/mnt/vdc2',
'size': 5367660544,
'bootable': False,
'start': 10487808,
'end': 20971519,
'children': {}
}
}
},
},
'network': {
'hostname': 'ubuntu16',
'interfaces': {
'eth0': {
'ip': '10.17.32.4',
'net_mask': '255.255.255.0',
'routes': [
{
'net': '0.0.0.0',
'gateway': '10.17.32.1',
'net_mask': '0.0.0.0'
},
{
'net': '10.17.32.0',
'gateway': '0.0.0.0',
'net_mask': '255.255.255.0'
}
]
},
'lo': {
'ip': '127.0.0.1',
'net_mask': '255.0.0.0',
'routes': []
}
}
},
'os': {
'name': 'Ubuntu',
'version': '16.04'
},
'hardware': {
'cpus': [
{
'model': 'AMD Opteron 62xx class CPU',
'mhz': 2799.98
}
],
'ram': {
'size': 1007168000
}
}
})
TARGET__DEVICE_IDENTIFICATION = RemoteHostMock.create_from_file(
COMMANDS_OUTPUT_ROOT_DIRECTORY_PATH,
'target__device_identification',
COMMAND_DIRECTORY_MAP,
{
'block_devices': {
'vda': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {
'vda1': {
'type': 'part',
'fs': 'ext4',
'uuid': '549c8755-2757-446e-8c78-f76b50491f21',
'label': '',
'mountpoint': '/',
'size': 3219128320,
'bootable': True,
'start': 2048,
'end': 6289407,
'children': {}
}
}
},
'vdb': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {}
},
'vdc': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {}
},
'vdd': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {}
}
},
'network': {
'hostname': 'target__device_identification',
'interfaces': {
'eth0': {
'ip': '10.17.32.15',
'net_mask': '255.255.255.0',
'routes': [
{
'net': '0.0.0.0',
'gateway': '10.17.32.1',
'net_mask': '0.0.0.0',
},
{
'net': '10.0.0.0',
'gateway': '10.17.32.1',
'net_mask': '255.0.0.0'
},
{
'net': '10.17.32.0',
'gateway': '0.0.0.0',
'net_mask': '255.255.255.0'
}
]
},
'lo': {
'ip': '127.0.0.1',
'net_mask': '255.0.0.0',
'routes': []
}
}
},
'os': {
'name': 'Ubuntu',
'version': '16.04'
},
'hardware': {
'cpus': [
{
'model': 'AMD Opteron 62xx class CPU',
'mhz': 2799.998
}
],
'ram': {
'size': 1007256000
}
}
}
)
TARGET__FILESYSTEM_CREATION = RemoteHostMock.create_from_file(
COMMANDS_OUTPUT_ROOT_DIRECTORY_PATH,
'target__filesystem_creation',
COMMAND_DIRECTORY_MAP,
{
'block_devices': {
'vda': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {
'vda1': {
'type': 'part',
'fs': 'ext4',
'uuid': '549c8755-2757-446e-8c78-f76b50491f21',
'label': '',
'mountpoint': '/',
'size': 3219128320,
'bootable': True,
'start': 2048,
'end': 6289407,
'children': {}
}
}
},
'vdb': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {
'vdb1': {
'type': 'part',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10736369664,
'bootable': True,
'start': 2048,
'end': 20971519,
'children': {}
}
}
},
'vdc': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {}
},
'vdd': {
'type': 'disk',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 10737418240,
'children': {
'vdd1': {
'type': 'part',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 5368709120,
'bootable': False,
'start': 2048,
'end': 10487807,
'children': {}
},
'vdd2': {
'type': 'part',
'fs': '',
'uuid': '',
'label': '',
'mountpoint': '',
'size': 5367660544,
'bootable': False,
'start': 10487808,
'end': 20971519,
'children': {}
}
}
},
},
'network': {
'hostname': 'target__filesystem_creation',
'interfaces': {
'eth0': {
'ip': '10.17.32.15',
'net_mask': '255.255.255.0',
'routes': [
{
'net': '0.0.0.0',
'gateway': '10.17.32.1',
'net_mask': '0.0.0.0',
},
{
'net': '10.0.0.0',
'gateway': '10.17.32.1',
'net_mask': '255.0.0.0'
},
{
'net': '10.17.32.0',
'gateway': '0.0.0.0',
'net_mask': '255.255.255.0'
}
]
},
'lo': {
'ip': '127.0.0.1',
'net_mask': '255.0.0.0',
'routes': []
}
}
},
'os': {
'name': 'Ubuntu',
'version': '16.04'
},
'hardware': {
'cpus': [
{
'model': 'AMD Opteron 62xx class CPU',
'mhz': 2799.998
}
],
'ram': {
'size': 1007256000
}
}
}
)
```
|
{
"source": "jdepoix/spectre-real-time-detection",
"score": 3
}
|
#### File: spectre-real-time-detection/spectre_real_time_detection/detector.py
```python
from keras.models import load_model
from sklearn.preprocessing import StandardScaler
import numpy as np
from bcolors import bcolors
class Detector(object):
def __init__(self, recv_conn, send_conn):
self.recv_conn = recv_conn
self.send_conn = send_conn
self.scaler = self._create_scaler()
def _create_scaler(self):
scaler = StandardScaler()
scaler.mean_ = np.array([1.22685548e+08, 6.15609944e+05, 2.55416063e+06])
scaler.scale_ = np.array([2.94523441e+08, 1.39027033e+06, 6.15530731e+06])
return scaler
def start(self):
model = load_model("model.h5")
while True:
data = self.recv_conn.recv()
pids = data[0]
readings = data[1]
scaled_readings = self.scaler.transform(readings)
res = model.predict(scaled_readings)
for i in range(res.size):
if res[i][0] > 0.5:
print(f'{bcolors.FAIL}{pids[i]}: {readings[i]} {res[i][0]}')
#self.send_conn.send(res)
```
#### File: spectre-real-time-detection/spectre_real_time_detection/watcher.py
```python
from pypapi import papi_low as papi
import time
import os
import numpy as np
from pypapi.exceptions import PapiError
import proc_events
class Watcher(object):
def __init__(self, events, precision, recv_conn, send_conn):
self.events = events
self.precision = precision
self.recv_conn = recv_conn
self.send_conn = send_conn
self.eventsets = {}
def _attach_process(self, pid):
if pid not in self.eventsets:
try:
eventset = papi.create_eventset()
papi.add_events(eventset, self.events)
papi.attach(eventset, pid)
papi.start(eventset)
self.eventsets[pid] = eventset
except PapiError as err:
#print(err)
pass
def _detach_process(self, pid):
if pid in self.eventsets:
eventset = self.eventsets[pid]
papi.stop(eventset)
papi.cleanup_eventset(eventset)
papi.destroy_eventset(eventset)
del self.eventsets[pid]
def _close(self):
for eventset in self.eventsets.values():
papi.stop(eventset)
papi.cleanup_eventset(eventset)
papi.destroy_eventset(eventset)
self.eventsets = {}
def start(self, processes_to_track):
values = [0 for _ in self.events]
papi.library_init()
own_pid = os.getpid()
if own_pid in processes_to_track:
processes_to_track.remove(own_pid)
for pid in processes_to_track:
self._attach_process(pid)
while True:
pids = []
readings = []
for pid, eventset in self.eventsets.items():
res = papi.accum(eventset, values)
if res != [0,0,0]:
pids.append(pid)
readings.append(res)
# print('{0}: {1}'.format(pid, res))
if readings:
self.send_conn.send((pids, np.array(readings)))
if self.recv_conn.poll():
event, pid = self.recv_conn.recv()
if event == proc_events.PROC_START:
self._attach_process(pid)
elif event == proc_events.PROC_END:
self._detach_process(pid)
elif event == proc_events.EXIT:
break
time.sleep(self.precision)
self._close()
```
|
{
"source": "jderam/assh-char-gen",
"score": 3
}
|
#### File: jderam/assh-char-gen/app.py
```python
from pc import PlayerCharacter
from flask import Flask, jsonify, request
app = Flask(__name__)
@app.route("/assh-character", methods=["GET"])
def assh_char():
posted_data = request.get_json()
# name = posted_data['name']
# return jsonify(" Hope you are having a good time " + name + "!!!")
player_character = PlayerCharacter(magician_spell_src='dying_earth')
return player_character.to_dict()
if __name__=='__main__':
app.run(debug=True)
```
#### File: assh-char-gen/test/test_dice_rolls.py
```python
import pytest
import dice
def test_dice_roll():
"""
Verify that repeated 1d6 rolls always yield values between 1 and 6.
"""
rolls = []
for i in range(1000):
rolls.append(dice.roll_ndn(1, 6))
assert len([x for x in rolls if x < 1]) == 0
assert len([x for x in rolls if x > 6]) == 0
```
|
{
"source": "jderam/hyperborea3",
"score": 3
}
|
#### File: hyperborea3/hyperborea3/chargen.py
```python
from importlib.resources import path
import random
import sqlite3
from typing import Any, Dict, List, Optional
from hyperborea3.valid_data import VALID_ALIGMENTS_SHORT, VALID_SQL_TABLES
with path("hyperborea3", "hyperborea.sqlite3") as p:
DBPATH = p
URI = f"file:{str(DBPATH)}?mode=ro"
con = sqlite3.connect(URI, check_same_thread=False, uri=True)
con.row_factory = sqlite3.Row
cur = con.cursor()
def list_tables() -> List[str]:
"""List all tables in sqlite database."""
cur.execute(
"""
SELECT name
FROM sqlite_schema
WHERE type = 'table'
AND name NOT LIKE 'sqlite_%'
ORDER BY name;
"""
)
tables: List[str] = [dict(x)["name"] for x in cur.fetchall()]
return tables
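# Compatibility note (added; not in the original module): the sqlite_schema view
# queried here and in list_views() exists in SQLite 3.33 and newer; on older
# library versions the equivalent table is sqlite_master.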
def list_views() -> List[str]:
"""List all views in sqlite database."""
cur.execute(
"""
SELECT name
FROM sqlite_schema
WHERE type = 'view'
ORDER BY name;
"""
)
views: List[str] = [dict(x)["name"] for x in cur.fetchall()]
return views
def get_count_from_table(table_name: str) -> int:
"""Get the row count of a table in sqlite database."""
assert table_name in VALID_SQL_TABLES
cur.execute(
f"""
SELECT Count(1) AS row_count
FROM {table_name};
"""
)
row_count: int = cur.fetchone()["row_count"]
return row_count
def roll_dice(qty: int, sides: int) -> int:
result = 0
for i in range(qty):
result += random.randint(1, sides)
return result
def roll_ndn_drop_lowest(qty: int, sides: int, drop_qty: int) -> int:
result = []
for i in range(qty):
result.append(roll_dice(1, sides))
result.sort()
return sum(result[drop_qty:])
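# Illustrative example (added; not in the original module): the classic
# "4d6, drop the lowest" attribute roll discards only the single lowest die,
# so rolls of [2, 5, 3, 6] with drop_qty=1 keep [3, 5, 6] and return 14.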
def get_class_id_map():
"""Get mapping between class_id and class_name"""
sql = """
SELECT class_id
, class_name
FROM classes
"""
cur.execute(f"{sql};")
result = [dict(x) for x in cur.fetchall()]
class_map = {}
for r in result:
class_map[r["class_id"]] = r["class_name"]
return class_map
def class_id_to_name(class_id: int) -> str:
cur.execute("SELECT class_name FROM classes WHERE class_id = ?;", (class_id,))
class_name = str(cur.fetchone()["class_name"])
return class_name
def get_class_requirements(class_id: int):
cur.execute("SELECT * FROM class_attr_req WHERE class_id = ?;", (class_id,))
return [dict(x) for x in cur.fetchall()]
def roll_stats(method: int = 3, class_id: int = 0) -> Dict[str, Dict[str, int]]:
"""Roll stats using the various methods in the Player's Manual"""
attr = {
"st": {
"score": 0,
},
"dx": {
"score": 0,
},
"cn": {
"score": 0,
},
"in": {
"score": 0,
},
"ws": {
"score": 0,
},
"ch": {
"score": 0,
},
}
# Ensure scores at least qualify for one of the principal classes
while (
attr["st"]["score"] < 9
and attr["dx"]["score"] < 9
and attr["in"]["score"] < 9
and attr["ws"]["score"] < 9
):
if method == 1:
"""Roll 3d6 for each attribute in order of strength, dexterity,
constitution, intelligence, wisdom, and charisma; the results
are your character's attribute scores.
"""
for stat in attr.keys():
attr[stat]["score"] = roll_dice(qty=3, sides=6)
elif method == 2:
"""Roll 3d6 for each attribute in order of strength, dexterity,
constitution, intelligence, wisdom, and charisma. Repeat
these steps twice more, producing three sets of scores. Choose
the set that best suits the type of character you would like
to play.
"""
max_total = 0
for s in range(3):
scores = [roll_dice(qty=3, sides=6) for x in range(6)]
# print(s, scores, sum(scores)) # debug
if sum(scores) > max_total:
max_total = sum(scores)
best_set = scores
for stat in attr.keys():
attr[stat]["score"] = best_set.pop(0)
elif method == 3:
"""Roll 4d6 and discard the lowest die roll. Generate six scores
using this method. Assign scores to attributes as desired.
"""
for stat in attr.keys():
attr[stat]["score"] = roll_ndn_drop_lowest(qty=4, sides=6, drop_qty=1)
elif method == 4:
"""Roll 3d6 three times for each attribute in order of
strength, dexterity, constitution, intelligence, wisdom,
and charisma. Select the best result for each attribute.
"""
for stat in attr.keys():
attr[stat]["score"] = max([roll_dice(qty=3, sides=6) for i in range(3)])
elif method == 5:
"""Roll 2d6+6 for each attribute in order of strength, dexterity,
constitution, intelligence, wisdom, and charisma; the results
are your character's attribute scores.
"""
for stat in attr.keys():
attr[stat]["score"] = roll_dice(qty=2, sides=6) + 6
elif method == 6:
"""Choose your character class first (see Chapter 4: Classes),
and then use the following technique:
* Roll 3d6 for each attribute that does not have a
required minimum score.
* Roll 4d6 (discard low die result) for each attribute
that does have a minimum requirement score, rerolling
until you achieve the requisite minimum.
"""
if class_id == 0:
raise ValueError(
"If rolling with Method VI, you must select a specific class"
)
class_req = get_class_requirements(class_id)
for stat in attr.keys():
req = [x["min_score"] for x in class_req if x["attr"] == stat]
if len(req) == 0:
attr[stat]["score"] = roll_dice(qty=3, sides=6)
else:
min_score = req[0]
score = 0
while score < min_score:
score = roll_ndn_drop_lowest(qty=4, sides=6, drop_qty=1)
attr[stat]["score"] = score
else:
raise ValueError(f"Invalid value for method: {method}")
return attr
def get_attr_mod(stat: str, score: int) -> Dict[str, int]:
"""Get the mods for a given stat."""
if stat.lower() not in ["st", "dx", "cn", "in", "ws", "ch"]:
raise ValueError(f"Invalid value for stat: {stat}")
stat = stat.lower()
tbl_map = {
"st": "t001_strength",
"dx": "t002_dexterity",
"cn": "t003_constitution",
"in": "t004_intelligence",
"ws": "t005_wisdom",
"ch": "t006_charisma",
}
tbl = tbl_map[stat]
cur.execute(f"SELECT * FROM {tbl} WHERE score = ?;", (score,))
result = dict(cur.fetchone())
return result
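# Note (added for clarity; not in the original module): the returned dict is the
# complete lookup-table row, so it carries the 'score' column alongside the
# modifiers; this is why get_attr() below can simply replace its per-stat dict
# with this result and later code can still read attr[stat]['score'].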
def get_attr(method: int = 3, class_id: int = 0) -> Dict[str, Dict[str, int]]:
attr = roll_stats(method, class_id)
for stat in attr.keys():
score = attr[stat]["score"]
mods = get_attr_mod(stat, score)
attr[stat] = mods
return attr
def get_qualifying_classes(
attr: Dict[str, Dict[str, int]], subclasses: int
) -> List[int]:
"""Return list of class_ids that can be used given the attr."""
# principal classes, subclasses, and sub-subclasses
if subclasses == 2:
cur.execute("SELECT * FROM class_attr_req;")
# principal classes and subclasses
elif subclasses == 1:
cur.execute(
"""
SELECT car.*
FROM classes c
JOIN class_attr_req car
ON c.class_id = car.class_id
WHERE c.class_type IN ('P', 'S');
"""
)
# principal classes only
elif subclasses == 0:
cur.execute(
"""
SELECT car.*
FROM classes c
JOIN class_attr_req car
ON c.class_id = car.class_id
WHERE c.class_type = 'P';
"""
)
class_req = [dict(x) for x in cur.fetchall()]
not_met = list(
set(
[
x["class_id"]
for x in class_req
if x["min_score"] > attr[x["attr"]]["score"]
]
)
)
qual_classes = list(
set([x["class_id"] for x in class_req if x["class_id"] not in not_met])
)
assert len(qual_classes) > 0, "There are no qualifying classes to choose from"
return qual_classes
def select_random_class(attr: Dict[str, Dict[str, int]], subclasses: int) -> int:
"""Given a set of stats, determine an appropriate class.
1. Find all qualifying classes by checking stat requirements.
2. Randomly choose from among them.
TODO: Might decide to add weighting based on primary attributes.
"""
qual_classes = get_qualifying_classes(attr, subclasses)
class_id = random.choice(qual_classes)
return class_id
def get_level(class_id: int, xp: int) -> int:
cur.execute(
"""
SELECT Max(level) as level
FROM class_level
WHERE class_id = ?
AND xp <= ?
""",
(class_id, xp),
)
level: int = cur.fetchone()["level"]
return level
def get_xp_to_next(class_id: int, level: int) -> Optional[int]:
"""Get XP need to reach next level."""
# if level is 12, there is no "next level"
if level == 12:
return None
next_level = level + 1
cur.execute(
"SELECT xp FROM class_level WHERE class_id = ? AND level = ?;",
(class_id, next_level),
)
xp_to_next: int = cur.fetchone()["xp"]
return xp_to_next
def get_xp_bonus(class_id: int, attr: Dict[str, Dict[str, int]]) -> bool:
"""Determine if character qualifies for +10% XP bonus."""
cur.execute(
"SELECT attr FROM class_prime_attr WHERE class_id = ?;",
(class_id,),
)
prime_attrs = [dict(x)["attr"] for x in cur.fetchall()]
xp_bonus = all([attr[p]["score"] >= 16 for p in prime_attrs])
return xp_bonus
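# Note (added for clarity; not in the original module): because of the all()
# call, the +10% XP bonus is only granted when every prime attribute of the
# class is 16 or higher; a class with prime attributes ST and WS needs both.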
def get_save_bonuses(class_id: int) -> Dict[str, int]:
cur.execute(
"""
SELECT death
, transformation
, device
, avoidance
, sorcery
FROM classes
WHERE class_id = ?
""",
(class_id,),
)
sv_bonus = dict(cur.fetchone())
return sv_bonus
def get_class_level_data(class_id: int, level: int) -> Dict[str, Any]:
cur.execute(
"""
SELECT *
FROM classes c
JOIN class_level cl
ON c.class_id = cl.class_id
WHERE c.class_id = ?
AND cl.level = ?
""",
(class_id, level),
)
result = dict(cur.fetchone())
return result
def get_hd(class_id: int, level: int) -> str:
"""Returns string form of HD, e.g. '4d8' or '9d10+3'"""
cl_data = get_class_level_data(class_id, level)
hd_qty = cl_data["hd_qty"]
hd_size = cl_data["hd_size"]
hp_plus = cl_data["hp_plus"]
hd = f"{hd_qty}d{hd_size}"
if hp_plus > 0:
hd += f"+{hp_plus}"
return hd
def roll_hit_points(class_id: int, level: int, hp_adj: int) -> int:
"""Roll hit points for the PC.
Minimum 1 hp per level.
"""
cl_data = get_class_level_data(class_id, level)
hd_qty = cl_data["hd_qty"]
hd_size = cl_data["hd_size"]
hp_plus = cl_data["hp_plus"]
hp: int = roll_dice(hd_qty, hd_size) + hp_plus + (level * hp_adj)
# TODO: If we want to get pedantic about this, it should actually be a minimum
# of 1 hp on each die roll. We can do an accumulator instead, although this
# is likely an edge case where no one would actually be playing a PC this bad.
if hp < level:
hp = level
return hp
def get_combat_matrix(fa: int) -> Dict[int, int]:
"""Return combat matrix based on FA."""
combat_matrix = {}
for k in range(-9, 10):
combat_matrix[k] = 20 - k - fa
return combat_matrix
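# Worked example (added; not in the original module): with fa=1 the matrix maps
# a target's AC k to the d20 roll needed, 20 - k - fa, so hitting AC 0 needs a
# 19, AC -9 needs a 28, and AC 9 needs a 10.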
def get_alignment(class_id: int) -> Dict[str, Any]:
"""Choose a random alignment based on the options available to a given class."""
cur.execute(
"""
SELECT a.*
FROM class_alignment ca
JOIN alignment a
ON ca.align_id = a.align_id
WHERE ca.class_id = ?
""",
(class_id,),
)
allowed_alignments = [dict(x) for x in cur.fetchall()]
alignment = random.choice(allowed_alignments)
return alignment
def get_deity(short_alignment: str) -> Dict[str, Any]:
"""Randomly select a deity based on alignment."""
assert (
short_alignment in VALID_ALIGMENTS_SHORT
), f"Invalid alignment: {short_alignment}"
if short_alignment[0] == "C":
lkp_align = "Chaotic"
elif short_alignment[0] == "L":
lkp_align = "Lawful"
elif short_alignment[0] == "N":
lkp_align = "Neutral"
cur.execute(
"""
SELECT *
FROM deities
WHERE primary_alignment = ?;
""",
(lkp_align,),
)
deities = [dict(x) for x in cur.fetchall()]
if short_alignment in ["CE", "LE"]:
lkp_align = "Evil"
cur.execute(
"""
SELECT *
FROM deities
WHERE primary_alignment = ?;
""",
(lkp_align,),
)
deities.extend([dict(x) for x in cur.fetchall()])
deity = random.choice(deities)
return deity
def get_race_id() -> int:
"""Roll on race tables to get a randomly selected race."""
d100_roll = roll_dice(1, 100)
cur.execute(
"""SELECT race_id
FROM t066_primary_races
WHERE ? BETWEEN d100_min AND d100_max;
""",
(d100_roll,),
)
race_id: int = cur.fetchone()["race_id"]
if race_id == 99:
d12_roll = roll_dice(1, 12)
cur.execute(
"""SELECT race_id
FROM t067_ancillary_races
WHERE d12_roll = ?;
""",
(d12_roll,),
)
race_id = cur.fetchone()["race_id"]
if race_id not in range(1, 25):
raise ValueError(f"Unexpected race_id value: {race_id}. d100_roll={d100_roll}")
return race_id
def get_race(race_id: int) -> str:
cur.execute(
"""SELECT race
FROM v_race_lkp
WHERE race_id = ?;
""",
(race_id,),
)
race: str = cur.fetchone()["race"]
return race
def get_gender():
genders = ["Male", "Female", "Non-Binary"]
gender = random.choices(genders, weights=[47.5, 47.5, 5.0])[0]
return gender
def get_starting_armour(class_id: int) -> Dict[str, Any]:
"""Get starting armour by class.
The SQL should always return one and only one result.
"""
cur.execute(
"""
SELECT a.*
FROM starting_armour s
JOIN t074_armour a
ON s.armour_id = a.armour_id
WHERE s.class_id = ?
""",
(class_id,),
)
armour = dict(cur.fetchone())
return armour
def get_starting_shield(class_id: int) -> Optional[Dict[str, Any]]:
"""Get starting shield by class.
SQL should return one or zero results.
"""
cur.execute(
"""
SELECT ts.*
FROM starting_shield ss
JOIN t075_shields ts
ON ss.shield_id = ts.shield_id
WHERE ss.class_id = ?
""",
(class_id,),
)
result = cur.fetchone()
shield = dict(result) if result is not None else result
return shield
def get_starting_weapons_melee(class_id: int) -> List[Dict[str, Any]]:
"""Get starting melee weapons by class."""
cur.execute(
"""
SELECT w.*
, sw.qty
FROM starting_weapons_melee sw
JOIN t076_melee_weapons w
ON sw.weapon_id = w.weapon_id
WHERE sw.class_id = ?;
""",
(class_id,),
)
melee_weapons = [dict(x) for x in cur.fetchall()]
for mw in melee_weapons:
mw["hurlable"] = bool(mw["hurlable"])
mw["atk_rate"] = "1/1"
mw["melee_atk"] = 0
mw["hurled_atk"] = 0 if mw["hurlable"] else None
mw["dmg_adj"] = 0
mw["mastery"] = False
return melee_weapons
def get_starting_weapons_missile(class_id: int) -> List[Dict[str, Any]]:
"""Get starting missile weapons by class."""
cur.execute(
"""
SELECT w.*
, sw.qty
, sw.ammunition
FROM starting_weapons_missile sw
JOIN t077_missile_weapons w
ON sw.weapon_id = w.weapon_id
WHERE sw.class_id = ?;
""",
(class_id,),
)
missile_weapons = [dict(x) for x in cur.fetchall()]
for mw in missile_weapons:
mw["hurled"] = bool(mw["hurled"])
mw["launched"] = bool(mw["launched"])
mw["missile_atk"] = 0
mw["dmg_adj"] = 0
mw["mastery"] = False
return missile_weapons
def get_unskilled_weapon_penalty(class_id: int) -> int:
"""Get penalty when using a weapon not in the favoured list."""
cur.execute(
"""
SELECT attack_penalty
FROM t134_unskilled_weapon_attack_penalty
WHERE class_id = ?;
""",
(class_id,),
)
unskilled_penalty: int = cur.fetchone()["attack_penalty"]
return unskilled_penalty
def get_favoured_weapons(class_id: int) -> Dict[str, Any]:
"""Get list of favoured weapons for a given class_id."""
# get favoured melee weapons
cur.execute(
"""
SELECT tmw.*
FROM class_favoured_weapons_melee cfwm
JOIN t076_melee_weapons tmw
ON cfwm.weapon_id = tmw.weapon_id
WHERE cfwm.class_id = ?
ORDER BY tmw.weapon_id;
""",
(class_id,),
)
fav_wpns_melee: List[Dict[str, Any]] = [dict(x) for x in cur.fetchall()]
# get favoured missile weapons
cur.execute(
"""
SELECT tmw.*
FROM class_favoured_weapons_missile cfwm
JOIN t077_missile_weapons tmw
ON cfwm.weapon_id = tmw.weapon_id
WHERE cfwm.class_id = ?
ORDER BY tmw.weapon_id;
""",
(class_id,),
)
fav_wpns_missile: List[Dict[str, Any]] = [dict(x) for x in cur.fetchall()]
# get unskilled penalty
unskilled_penalty = get_unskilled_weapon_penalty(class_id)
# get "any" (set True for classes proficient in any/all weapons)
favoured_any: bool = unskilled_penalty == 0
favoured_weapons = {
"any": favoured_any,
"weapons_melee": fav_wpns_melee,
"weapons_missile": fav_wpns_missile,
"unskilled_penalty": unskilled_penalty,
}
return favoured_weapons
def get_starting_gear(class_id: int) -> List[str]:
"""Get starting equipment items by class."""
cur.execute(
"""
SELECT item
FROM starting_gear
WHERE class_id = ?;
""",
(class_id,),
)
equipment = [x["item"] for x in cur.fetchall()]
return equipment
def get_starting_money() -> Dict[str, int]:
"""Get starting money."""
gp = roll_dice(1, 4) + 1
money = {
"pp": 0,
"gp": gp,
"ep": 0,
"sp": 0,
"cp": 0,
}
return money
def calculate_ac(armour_ac: int, shield_def_mod: int, dx_def_adj: int) -> int:
ac = armour_ac
ac -= shield_def_mod
ac -= dx_def_adj
return ac
def get_next_atk_rate(atk_rate: str) -> str:
atk_progression = [
"1/1",
"3/2",
"2/1",
"5/2",
"3/1",
]
atk_prog_idx = atk_progression.index(atk_rate)
atk_prog_idx += 1
return atk_progression[atk_prog_idx]
def ac_to_aac(ac: int) -> int:
aac = 19 - ac
return aac
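# Worked example (added; not in the original module): armour with AC 6, a shield
# defence mod of 1 and a DX defence adjustment of 1 give
# calculate_ac(6, 1, 1) == 4, which ac_to_aac() converts to an ascending AC of
# 19 - 4 == 15.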
def get_thief_skills(
class_id: int,
level: int,
dx_score: int,
in_score: int,
ws_score: int,
) -> Optional[List[Dict[str, Any]]]:
"""Returns a list of dictionaries of thief skills.
thief_skill (str): The key value for the skill used in db lookups
skill_name (str): The user-friendly name of the skill for display
skill_roll (int): The x in 12 chance of success
stat (str): The associated ability, which grats a +1 bonus for 16+
"""
# input validation
if class_id not in range(1, 34):
raise ValueError(f"Invalid class_id: {class_id}")
if level not in range(1, 13):
raise ValueError(f"Invalid value for level: {level}")
if dx_score not in range(1, 19):
raise ValueError(f"Invalid value for dx_score: {dx_score}")
if in_score not in range(1, 19):
raise ValueError(f"Invalid value for in_score: {in_score}")
if ws_score not in range(1, 19):
raise ValueError(f"Invalid value for ws_score: {ws_score}")
# get the skills for this class
cur.execute(
"""SELECT thief_skill
FROM class_thief_abilities
WHERE class_id = ?;
""",
(class_id,),
)
skills_list = [dict(x) for x in cur.fetchall()]
if len(skills_list) == 0:
return None
# get friendly skill names, with special rule for Huntsman
# ("Manipulate Traps" becomes "Wilderness Traps" for Huntsman only)
for sk in skills_list:
if class_id == 8 and sk["thief_skill"] == "manipulate_traps":
skill_name = "Wilderness Traps"
else:
skill_name = sk["thief_skill"].replace("_", " ").title()
sk.update({"skill_name": skill_name})
# get thief skill scores
for sk in skills_list:
sql = f"SELECT {sk['thief_skill']} FROM t016_thief_abilities WHERE level = ?;"
cur.execute(sql, (level,))
skill_roll = dict(cur.fetchone())[sk["thief_skill"]]
sk.update({"skill_roll": skill_roll})
# apply bonuses (if any)
for sk in skills_list:
sql = "SELECT stat FROM thief_ability_bonuses WHERE thief_skill = ?;"
cur.execute(sql, (sk["thief_skill"],))
stat = dict(cur.fetchone())["stat"]
sk.update({"stat": stat})
if stat == "dx" and dx_score >= 16:
sk["skill_roll"] += 1
if stat == "in" and in_score >= 16 and sk["skill_roll"] is not None:
sk["skill_roll"] += 1
if stat == "ws" and ws_score >= 16:
sk["skill_roll"] += 1
return skills_list
def get_turn_undead_matrix(ta: int, turn_adj: int) -> Optional[Dict[str, str]]:
"""Get turn undead matrix. Apply CH turning adjustment if applicable."""
if ta == 0:
return None
cur.execute(
"""
SELECT undead_type_00
, undead_type_01
, undead_type_02
, undead_type_03
, undead_type_04
, undead_type_05
, undead_type_06
, undead_type_07
, undead_type_08
, undead_type_09
, undead_type_10
, undead_type_11
, undead_type_12
, undead_type_13
FROM t013_turn_undead
WHERE ta = ?;
""",
(ta,),
)
turn_undead_matrix = dict(cur.fetchone())
if turn_adj != 0:
for k, v in turn_undead_matrix.items():
if ":" in v:
turn_roll = int(v.split(":")[0])
turn_roll += turn_adj
if turn_roll > 0:
turn_undead_matrix[k] = f"{turn_roll}:12"
else:
turn_undead_matrix[k] = "NT"
return turn_undead_matrix
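# Illustrative example (added; not in the original module): with turn_adj=+1 an
# entry of '10:12' becomes '11:12'; with turn_adj=-1 an entry of '1:12' drops to
# zero and is reported as 'NT' (presumably "no turn"). Entries without a ':'
# are left untouched.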
def get_caster_schools(class_id: int) -> List[str]:
"""Get the school(s) the character will get their spells known from."""
cur.execute(
"SELECT school_code FROM classes WHERE class_id = ?;",
(class_id,),
)
school_code: Optional[str] = cur.fetchone()["school_code"]
if school_code is None:
return []
schools = [x.strip() for x in school_code.split(",")]
# need to make a random school selection for shaman
if len(schools) > 1:
for i in range(len(schools)):
school_choices = schools[i].split("/")
if len(school_choices) > 1:
schools[i] = random.choice(school_choices)
return schools
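# Illustrative example (added; not in the original module): a hypothetical
# school_code of 'clr, mag/nec' splits into ['clr', 'mag/nec'] and then resolves
# the slash-separated choice at random, yielding ['clr', 'mag'] or ['clr', 'nec'].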
def get_random_spell(
school: str,
spell_level: int,
d100_roll: Optional[int] = None,
) -> Dict[str, Any]:
"""Get a randomly rolled-for spell."""
if d100_roll is None:
d100_roll = roll_dice(1, 100)
assert d100_roll in range(1, 101)
cur.execute(
"""
SELECT school
, spell_level
, spell_id
, spell_name
, rng as 'range'
, dur as duration
, reversible
, pp
, spell_desc
FROM v_complete_spell_list
WHERE school = ?
AND spell_level = ?
AND ? BETWEEN d100_min AND d100_max;
""",
(school, spell_level, d100_roll),
)
try:
result = dict(cur.fetchone())
except TypeError:
print(f"Got no result back. {school=} {spell_level=} {d100_roll=}")
raise
if result["reversible"] is not None:
result["reversible"] = bool(result["reversible"])
return result
def get_spells(class_id: int, level: int, ca: int) -> Optional[Dict[str, Any]]:
"""Return the list of spells known for the character."""
if ca == 0:
return None
schools = get_caster_schools(class_id)
if len(schools) == 0:
return None
else:
spells: Dict[str, Any] = {}
for school in schools:
spells[school] = {}
cur.execute(
"""
SELECT *
FROM class_spells_by_level
WHERE class_id = ?
AND level = ?
AND school = ?;
""",
(class_id, level, school),
)
result = cur.fetchone()
if result is None:
continue
try:
class_spells = dict(result)
except TypeError:
print(
"No entry found in class_spells_by_level."
f" {class_id=} {level=} {school=}"
)
raise
spells[school]["spells_per_day"] = {
"lvl1": class_spells["spells_per_day1"],
"lvl2": class_spells["spells_per_day2"],
"lvl3": class_spells["spells_per_day3"],
"lvl4": class_spells["spells_per_day4"],
"lvl5": class_spells["spells_per_day5"],
"lvl6": class_spells["spells_per_day6"],
}
spells[school]["spells_known"] = []
for k in [
"spells_known1",
"spells_known2",
"spells_known3",
"spells_known4",
"spells_known5",
"spells_known6",
]:
spell_level = int(k[-1])
spell_qty = class_spells[k]
added_counter = 0
while added_counter < spell_qty:
# Make a 1-99 roll for Runegravers so we don't have one of the 3
# runes having a 1% greater chance of getting selected.
if class_id == 20:
d100_roll = roll_dice(1, 99)
random_spell = get_random_spell(
school, spell_level, d100_roll=d100_roll
)
else:
random_spell = get_random_spell(school, spell_level)
already_known = [x["spell_id"] for x in spells[school]["spells_known"]]
if random_spell["spell_id"] not in already_known:
spells[school]["spells_known"].append(random_spell)
added_counter += 1
return spells
def apply_spells_per_day_bonus(
spells: Optional[Dict[str, Any]],
bonus_spells_in: int,
bonus_spells_ws: int,
) -> Optional[Dict[str, Any]]:
"""Increase spells per day for high IN/WS scores. Must already have at least
one spell per day for the given level.
"""
if spells is None:
return None
for school in spells.keys():
if school in ["clr", "drd"]:
for i in range(bonus_spells_ws, 0, -1):
lvl_key = f"lvl{i}"
# if spells[school]["spells_per_day"][lvl_key] > 0:
if spells[school].get("spells_per_day", {}).get(lvl_key, 0) > 0:
spells[school]["spells_per_day"][lvl_key] += 1
elif school in [
"mag",
"cry",
"ill",
"nec",
"pyr",
"wch",
]:
for i in range(bonus_spells_in, 0, -1):
lvl_key = f"lvl{i}"
# if spells[school]["spells_per_day"][lvl_key] > 0:
if spells[school].get("spells_per_day", {}).get(lvl_key, 0) > 0:
spells[school]["spells_per_day"][lvl_key] += 1
elif school == "run":
# no bonus for runegravers
continue
else:
raise ValueError(f"Invalid value for school: {school}")
return spells
def get_class_abilities(class_id: int, level: int) -> List[Dict[str, Any]]:
"""Get class abilities from class abilities table."""
cur.execute(
"""
SELECT *
FROM class_abilities
WHERE class_id = ?
AND level <= ?
ORDER BY level, ability_title;
""",
(class_id, level),
)
class_abilities = [dict(x) for x in cur.fetchall()]
return class_abilities
def get_random_familiar() -> str:
"""Roll 2d8 to get a random familiar."""
roll = roll_dice(2, 8)
cur.execute(
"""
SELECT animal
FROM t010_familiars
WHERE roll_2d8 = ?;
""",
(roll,),
)
animal: str = cur.fetchone()["animal"]
return animal
def get_priest_abilities(deity_id: int, level: int) -> List[Dict[str, Any]]:
"""Get priest Specialized Faith abilities."""
cur.execute(
"""
SELECT *
FROM t047_priest_abilities
WHERE deity_id = ?
AND level <= ?
ORDER BY level;
""",
(deity_id, level),
)
priest_abilities = [dict(x) for x in cur.fetchall()]
return priest_abilities
```
|
{
"source": "jdereus/LabControl",
"score": 2
}
|
#### File: handlers/process_handlers/pooling_process.py
```python
from datetime import datetime
from tornado.web import authenticated, HTTPError
from tornado.escape import json_decode, json_encode
import numpy as np
from labcontrol.gui.handlers.base import BaseHandler, BaseDownloadHandler
from labcontrol.db.process import PoolingProcess, QuantificationProcess
from labcontrol.db.plate import Plate
from labcontrol.db.equipment import Equipment
from labcontrol.db.composition import (PoolComposition,
LibraryPrep16SComposition,
LibraryPrepShotgunComposition)
from labcontrol.db.exceptions import LabControlUnknownIdError
POOL_FUNCS = {
'equal': {'function': PoolingProcess.compute_pooling_values_eqvol,
'parameters': [('total_vol', 'volume-'),
('size', 'lib-size-'),
('robot', 'robot-'),
('destination', 'dest-tube-'),
('blank_vol', 'blank-vol-'),
('blank_num', 'blank-number-')]},
'min': {'function': PoolingProcess.compute_pooling_values_minvol,
'parameters': [('floor_vol', 'floor-vol-'),
('floor_conc', 'floor-conc-'),
('total', 'total-'),
('size', 'lib-size-'),
('robot', 'robot-'),
('destination', 'dest-tube-'),
('blank_vol', 'blank-vol-'),
('blank_num', 'blank-number-')]}}
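# Note (added for clarity; not part of the original handler): each POOL_FUNCS
# entry pairs a PoolingProcess computation with (argument name, form-field
# prefix) tuples; BasePoolHandler._compute_pools appends the plate id to each
# prefix (e.g. 'floor-vol-<plate id>') to pull the value out of the submitted
# plate info.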
HTML_POOL_PARAMS_SHOTGUN = {
'min': [{'prefix': 'floor-vol-', 'value': '100',
'desc': 'volume for low conc samples (nL):', 'min': '1',
'step': '1'},
{'prefix': 'floor-conc-', 'value': '20',
'desc': 'minimum value for pooling at real estimated value (nM):',
'min': '0.1', 'step': '0.1'},
{'prefix': 'total-', 'value': '0.002',
'desc': 'total number of nM to have in pool (nM):',
'min': '0.00001', 'step': '0.00001'},
{'prefix': 'lib-size-', 'value': '500',
'desc': 'Average library molecule size (bp):', 'min': '1',
'step': '1'},
{'prefix': 'robot-'}, {'prefix': 'dest-tube-'},
{'prefix': 'blank-number-', 'value': '',
'desc': 'Pool only highest N blanks, N=', 'min': 0,
'step': 1},
{'prefix': 'blank-vol-', 'value': '',
'desc': 'Pool all blanks at volume (nL):', 'min': 0,
'step': 2.5}],
'equal': [{'prefix': 'volume-', 'value': '200',
'desc': 'volume to pool per sample (nL):', 'min': '1',
'step': '1'},
{'prefix': 'lib-size-', 'value': '500',
'desc': 'Average library molecule size (bp):', 'min': '1',
'step': '1'},
{'prefix': 'robot-'}, {'prefix': 'dest-tube-'},
{'prefix': 'blank-number-', 'value': '',
'desc': 'Pool only highest N blanks, N=', 'min': 0,
'step': 1},
{'prefix': 'blank-vol-', 'value': '',
'desc': 'Pool all blanks at volume (nL):', 'min': 0,
'step': 2.5}]}
HTML_POOL_PARAMS_16S = {
'min': [{'prefix': 'floor-vol-', 'value': '2',
'desc': 'volume for low conc samples (µL):', 'min': '1',
'step': '1'},
{'prefix': 'floor-conc-', 'value': '16',
'desc': 'minimum value for pooling at real estimated value '
'(ng/µL):',
'min': '0.1', 'step': '0.1'},
{'prefix': 'total-', 'value': '240',
'desc': 'total quantity of DNA to pool per sample (ng):',
'min': '1', 'step': '0.1'},
{'prefix': 'lib-size-', 'value': '390',
'desc': 'Average library molecule size (bp):', 'min': '1',
'step': '1'},
{'prefix': 'robot-'}, {'prefix': 'dest-tube-'},
{'prefix': 'blank-number-', 'value': 2,
'desc': 'Pool only highest N blanks, N=', 'min': 0,
'step': 1},
{'prefix': 'blank-vol-', 'value': 5,
'desc': 'Pool all blanks at volume (µL):', 'min': 0,
'step': 0.1}],
'equal': [{'prefix': 'volume-', 'value': '5',
'desc': 'volume to pool per sample (µL):', 'min': '1',
'step': '1'},
{'prefix': 'lib-size-', 'value': '390',
'desc': 'Average library molecule size (bp):', 'min': '1',
'step': '1'},
{'prefix': 'robot-'}, {'prefix': 'dest-tube-'},
{'prefix': 'blank-number-', 'value': 2,
'desc': 'Pool only highest N blanks, N=', 'min': 0,
'step': 1},
{'prefix': 'blank-vol-', 'value': 5,
'desc': 'Pool all blanks at volume (µL):', 'min': 0,
'step': 0.1}]}
HTML_POOL_PARAMS = {'16S library prep': HTML_POOL_PARAMS_16S,
'shotgun library prep': HTML_POOL_PARAMS_SHOTGUN}
PLATE_TYPES = {LibraryPrep16SComposition: '16S library prep',
LibraryPrepShotgunComposition: 'shotgun library prep'}
PLATE_TYPE_TO_POOL_TYPE = {'16S library prep': 'amplicon_sequencing',
'shotgun library prep': 'shotgun_plate'}
POOL_TYPE_TO_PLATE_TYPE = {value: key for key, value in
PLATE_TYPE_TO_POOL_TYPE.items()}
POOL_TYPE_PARAMS = {
'amplicon_sequencing': {'abbreviation': 'amplicon',
'template': 'library_pooling_16S.html'},
'shotgun_plate': {'abbreviation': 'shotgun',
'template': 'library_pooling_shotgun.html'}}
# quick function to create 2D representation of well-associated numbers
def make_2D_arrays(plate, quant_process):
"""Returns 2D arrays of the quantification values
Parameters
----------
plate: Plate
The quantified plate
quant_process: QuantificationProcess
The quantification process that quantified 'plate'
Returns
-------
(np.array, np.array, np.array, np.array)
Four 2D np.arrays containing the raw concentration values,
the computed concentration values, a boolean array indicating whether
each well is a blank, and an array of str with the name of the sample
in each well.
"""
layout = plate.layout
raw_concs = np.zeros_like(layout, dtype=float)
comp_concs = np.zeros_like(layout, dtype=float)
comp_is_blank = np.zeros_like(layout, dtype=bool)
plate_names = np.empty_like(layout, dtype='object')
for comp, raw_conc, conc in quant_process.concentrations:
well = comp.container
row = well.row - 1
column = well.column - 1
raw_concs[row][column] = raw_conc
comp_concs[row][column] = conc
# cache the sample compositions to avoid extra intermediate queries
if isinstance(comp, LibraryPrep16SComposition):
smp = comp.gdna_composition.sample_composition
comp_is_blank[row][column] = smp.sample_composition_type == 'blank'
plate_names[row][column] = smp.sample_id
elif isinstance(comp, LibraryPrepShotgunComposition):
smp = comp.normalized_gdna_composition \
.compressed_gdna_composition\
.gdna_composition.sample_composition
comp_is_blank[row][column] = smp.sample_composition_type == 'blank'
plate_names[row][column] = smp.sample_id
return raw_concs, comp_concs, comp_is_blank, plate_names
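# Note (added for clarity; not part of the original handlers): all four arrays
# share the shape of plate.layout (e.g. 8x12 for a 96-well plate), and wells are
# addressed zero-based, so row B column 3 lands at index [1][2] in each array.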
class BasePoolHandler(BaseHandler):
def _compute_pools(self, plate_info):
self.plate_id = plate_info['plate-id']
self.func_name = plate_info['pool-func']
self.plate_type = plate_info['plate-type']
self.quant_process_id = plate_info['quant-process-id']
func_info = POOL_FUNCS[self.func_name]
self.function = func_info['function']
plate = Plate(self.plate_id)
quant_process = QuantificationProcess(self.quant_process_id)
# make params dictionary for function
params = {}
for arg, pfx in func_info['parameters']:
param_key = '%s%s' % (pfx, self.plate_id)
if param_key not in plate_info:
raise HTTPError(
400, reason='Missing parameter %s' % param_key)
# empty strings are sent when we have disabled inputs.
# we are testing for them explicitly where expected.
if plate_info[param_key] != '':
params[arg] = float(plate_info[param_key])
else:
params[arg] = plate_info[param_key]
self.params = params
# compute molar concentrations
quant_process.compute_concentrations(size=params['size'])
# calculate pooled values
self.raw_concs, self.comp_concs, self.comp_blanks, \
self.plate_names = make_2D_arrays(plate, quant_process)
class PoolPoolProcessHandler(BaseHandler):
@authenticated
def get(self):
pool_ids = self.get_arguments('pool_id')
process_id = self.get_argument('process_id', None)
pool_comp_info = None
pool_name = None
if process_id is not None:
try:
process = PoolingProcess(process_id)
except LabControlUnknownIdError:
raise HTTPError(404, reason="Pooling process %s doesn't exist"
% process_id)
pool_comp_info = [[p.id, p.raw_concentration]
for p, _ in process.components]
pool_name = process.pool.container.external_id
self.render('pool_pooling.html', pool_ids=pool_ids,
process_id=process_id, pool_comp_info=pool_comp_info,
pool_name=pool_name)
@authenticated
def post(self):
pool_name = self.get_argument('pool_name')
pools_info = json_decode(self.get_argument('pools_info'))
concentrations = []
input_compositions = []
for p_info in pools_info:
pool_comp = PoolComposition(p_info['pool_id'])
concentrations.append({'composition': pool_comp,
'concentration': p_info['concentration']})
input_compositions.append(
{'composition': pool_comp, 'input_volume': p_info['volume'],
'percentage_of_output': p_info['percentage']})
# Create the quantification process (DNA conc)
q_process = QuantificationProcess.create_manual(
self.current_user, concentrations)
# Create the pool - Magic number 5 - > the volume for this pooling
# is always 5 according to the wet lab.
p_process = PoolingProcess.create(
self.current_user, q_process, pool_name, 5, input_compositions,
{"function": "amplicon_pool", "parameters": {}})
self.write({'process': p_process.id})
class LibraryPool16SProcessHandler(BasePoolHandler):
@authenticated
def get(self):
pool_type = 'amplicon_sequencing'
plate_ids = self.get_arguments('plate_id')
process_id = self.get_argument('process_id', None)
input_plate = None
pool_func_data = None
pool_values = []
pool_blanks = []
plate_names = []
if process_id is not None:
try:
process = PoolingProcess(process_id)
except LabControlUnknownIdError:
raise HTTPError(404, reason="Pooling process %s doesn't exist"
% process_id)
plate = process.components[0][0].container.plate
input_plate = plate.id
pool_func_data = process.pooling_function_data
content_type = type(plate.get_well(1, 1).composition)
id_plate_type = PLATE_TYPES[content_type]
plate_type_mapped = PLATE_TYPE_TO_POOL_TYPE[id_plate_type]
if plate_type_mapped != pool_type:
raise HTTPError(400, reason='Pooling process type does not '
'match pooling type')
_, pool_values, pool_blanks, plate_names = \
make_2D_arrays(plate, process.quantification_process)
pool_values = pool_values.tolist()
pool_blanks = pool_blanks.tolist()
plate_names = plate_names.tolist()
elif len(plate_ids) > 0:
content_types = {type(Plate(pid).get_well(1, 1).composition)
for pid in plate_ids}
if len(content_types) > 1:
raise HTTPError(400, reason='Plates contain different types '
'of compositions')
# check if the observed plates are the same type as the pooling
# type (i.e., no shotgun plates for 16S pooling)
content_type = content_types.pop()
id_plate_type = PLATE_TYPES[content_type]
plate_type_mapped = PLATE_TYPE_TO_POOL_TYPE[id_plate_type]
if plate_type_mapped != pool_type:
raise HTTPError(400, reason='Plate type does not match '
'pooling type')
pool_type_stripped = POOL_TYPE_PARAMS[pool_type]['abbreviation']
plate_type = POOL_TYPE_TO_PLATE_TYPE[pool_type]
robots = (Equipment.list_equipment('EpMotion') +
Equipment.list_equipment('echo'))
template = POOL_TYPE_PARAMS[pool_type]['template']
self.render(template, plate_ids=plate_ids,
robots=robots, pool_params=HTML_POOL_PARAMS,
input_plate=input_plate, pool_func_data=pool_func_data,
process_id=process_id, pool_values=pool_values,
plate_type=plate_type, pool_blanks=pool_blanks,
plate_names=plate_names, pool_type=pool_type_stripped)
def _compute_pools(self, plate_info):
super()._compute_pools(plate_info)
# for 16S, we calculate each sample independently
self.params['total_each'] = True
self.params['vol_constant'] = 1
pool_vals = self.function(self.raw_concs, **self.params)
# if adjust blank volume, do that
if self.params['blank_vol'] != '':
pool_vals = PoolingProcess.adjust_blank_vols(
pool_vals, self.comp_blanks, self.params['blank_vol'])
# if only pool some blanks, do that
if self.params['blank_num'] != '':
pool_vals = PoolingProcess.select_blanks(
pool_vals, self.raw_concs, self.comp_blanks,
int(self.params['blank_num']))
# estimate pool volume and concentration
total_c, total_v = PoolingProcess.estimate_pool_conc_vol(
pool_vals, self.comp_concs)
# store output values
output = {}
output['func_data'] = {'function': self.func_name,
'parameters': self.params}
output['raw_vals'] = self.raw_concs
output['comp_vals'] = self.comp_concs
output['pool_vals'] = pool_vals
output['pool_blanks'] = self.comp_blanks.tolist()
output['plate_names'] = self.plate_names.tolist()
output['plate_id'] = self.plate_id
output['destination'] = self.params['destination']
output['robot'] = self.params['robot']
output['blank_vol'] = self.params['blank_vol']
output['blank_num'] = self.params['blank_num']
output['total_conc'] = total_c
output['total_vol'] = total_v
output['quant-process-id'] = self.quant_process_id
return output
@authenticated
def post(self):
plates_info = json_decode(self.get_argument('plates-info'))
results = []
for pinfo in plates_info:
plate_result = self._compute_pools(pinfo)
plate = Plate(plate_result['plate_id'])
# calculate estimated molar fraction for each element of pool
amts = plate_result['comp_vals'] * plate_result['pool_vals']
pcts = amts / amts.sum()
quant_process = QuantificationProcess(
plate_result['quant-process-id'])
pool_name = 'Pool from plate %s (%s)' % (
plate.external_id,
datetime.now().strftime(quant_process.get_date_format()))
input_compositions = []
for comp, _, _ in quant_process.concentrations:
well = comp.container
row = well.row - 1
column = well.column - 1
input_compositions.append(
{'composition': comp,
'input_volume': plate_result['pool_vals'][row][column],
'percentage_of_output': pcts[row][column]})
robot = (Equipment(plate_result['robot'])
if plate_result['robot'] is not None else None)
process = PoolingProcess.create(
self.current_user, quant_process, pool_name,
plate_result['pool_vals'].sum(), input_compositions,
plate_result['func_data'], robot=robot,
destination=plate_result['destination'])
results.append({'plate-id': plate.id, 'process-id': process.id})
self.write(json_encode(results))
class LibraryPoolShotgunProcessHandler(BasePoolHandler):
@authenticated
def get(self):
pool_type = 'shotgun_plate'
plate_ids = self.get_arguments('plate_id')
process_id = self.get_argument('process_id', None)
input_plate = None
pool_func_data = None
pool_values = []
pool_blanks = []
plate_names = []
if process_id is not None:
try:
process = PoolingProcess(process_id)
except LabControlUnknownIdError:
raise HTTPError(404, reason="Pooling process %s doesn't exist"
% process_id)
plate = process.components[0][0].container.plate
input_plate = plate.id
pool_func_data = process.pooling_function_data
content_type = type(plate.get_well(1, 1).composition)
id_plate_type = PLATE_TYPES[content_type]
plate_type_mapped = PLATE_TYPE_TO_POOL_TYPE[id_plate_type]
if plate_type_mapped != pool_type:
raise HTTPError(400, reason='Pooling process type does not '
'match pooling type')
_, pool_values, pool_blanks, plate_names = \
make_2D_arrays(plate, process.quantification_process)
pool_values = pool_values.tolist()
pool_blanks = pool_blanks.tolist()
plate_names = plate_names.tolist()
elif len(plate_ids) > 0:
content_types = {type(Plate(pid).get_well(1, 1).composition)
for pid in plate_ids}
if len(content_types) > 1:
raise HTTPError(400, reason='Plates contain different types '
'of compositions')
# check if the observed plates are the same type as the pooling
# type (i.e., no shotgun plates for 16S pooling)
content_type = content_types.pop()
id_plate_type = PLATE_TYPES[content_type]
plate_type_mapped = PLATE_TYPE_TO_POOL_TYPE[id_plate_type]
if plate_type_mapped != pool_type:
raise HTTPError(400, reason='Plate type does not match '
'pooling type')
pool_type_stripped = POOL_TYPE_PARAMS[pool_type]['abbreviation']
plate_type = POOL_TYPE_TO_PLATE_TYPE[pool_type]
robots = (Equipment.list_equipment('EpMotion') +
Equipment.list_equipment('echo'))
template = POOL_TYPE_PARAMS[pool_type]['template']
self.render(template, plate_ids=plate_ids,
robots=robots, pool_params=HTML_POOL_PARAMS,
input_plate=input_plate, pool_func_data=pool_func_data,
process_id=process_id, pool_values=pool_values,
plate_type=plate_type, pool_blanks=pool_blanks,
plate_names=plate_names, pool_type=pool_type_stripped)
def _compute_pools(self, plate_info):
super()._compute_pools(plate_info)
self.params['total_each'] = False
self.params['vol_constant'] = 10 ** 9
pool_vals = self.function(self.raw_concs, **self.params)
# if adjust blank volume, do that
if self.params['blank_vol'] != '':
bv = self.params['blank_vol']
pool_vals = PoolingProcess.adjust_blank_vols(pool_vals,
self.comp_blanks,
bv)
# if only pool some blanks, do that
if self.params['blank_num'] != '':
bn = int(self.params['blank_num'])
pool_vals = PoolingProcess.select_blanks(pool_vals,
self.raw_concs,
self.comp_blanks,
bn)
# estimate pool volume and concentration
cs = self.comp_concs
total_c, total_v = PoolingProcess.estimate_pool_conc_vol(pool_vals, cs)
# store output values
output = {}
output['func_data'] = {'function': self.func_name,
'parameters': self.params}
output['raw_vals'] = self.raw_concs
output['comp_vals'] = self.comp_concs
output['pool_vals'] = pool_vals
output['pool_blanks'] = self.comp_blanks.tolist()
output['plate_names'] = self.plate_names.tolist()
output['plate_id'] = self.plate_id
output['destination'] = self.params['destination']
output['robot'] = self.params['robot']
output['blank_vol'] = self.params['blank_vol']
output['blank_num'] = self.params['blank_num']
output['total_conc'] = total_c
output['total_vol'] = total_v
output['quant-process-id'] = self.quant_process_id
return output
@authenticated
def post(self):
plates_info = json_decode(self.get_argument('plates-info'))
results = []
for pinfo in plates_info:
plate_result = self._compute_pools(pinfo)
plate = Plate(plate_result['plate_id'])
# calculate estimated molar fraction for each element of pool
amts = plate_result['comp_vals'] * plate_result['pool_vals']
pcts = amts / amts.sum()
quant_process = QuantificationProcess(
plate_result['quant-process-id'])
pool_name = 'Pool from plate %s (%s)' % (
plate.external_id,
datetime.now().strftime(quant_process.get_date_format()))
input_compositions = []
for comp, _, _ in quant_process.concentrations:
well = comp.container
row = well.row - 1
column = well.column - 1
input_compositions.append(
{'composition': comp,
'input_volume': plate_result['pool_vals'][row][column],
'percentage_of_output': pcts[row][column]})
robot = (Equipment(plate_result['robot'])
if plate_result['robot'] is not None else None)
process = PoolingProcess.create(
self.current_user, quant_process, pool_name,
plate_result['pool_vals'].sum(), input_compositions,
plate_result['func_data'], robot=robot,
destination=plate_result['destination'])
results.append({'plate-id': plate.id, 'process-id': process.id})
self.write(json_encode(results))
# The ComputeLibraryPoolValuesHandler calculates the results from
# the pooling process and displays them for user approval.
class ComputeLibraryPoolValuesHandler(BasePoolHandler):
@authenticated
def post(self):
plate_info = json_decode(self.get_argument('plate-info'))
output = self._compute_pools(plate_info)
# we need to make sure the values are serializable
output['pool_vals'] = output['pool_vals'].tolist()
output['pool_blanks'] = output['pool_blanks']
self.write(output)
def _compute_pools(self, plate_info):
super()._compute_pools(plate_info)
# pool_vals is needed in the output returned below
pool_vals = self.function(self.raw_concs, **self.params)
# if adjust blank volume, do that
if self.params['blank_vol'] != '':
pool_vals = PoolingProcess.adjust_blank_vols(
pool_vals, self.comp_blanks, self.params['blank_vol'])
# if only pool some blanks, do that
if self.params['blank_num'] != '':
pool_vals = PoolingProcess.select_blanks(
pool_vals, self.raw_concs, self.comp_blanks,
int(self.params['blank_num']))
# estimate pool volume and concentration
total_c, total_v = PoolingProcess.estimate_pool_conc_vol(
pool_vals, self.comp_concs)
# store output values
output = {}
output['pool_vals'] = pool_vals
output['pool_blanks'] = self.comp_blanks.tolist()
output['plate_names'] = self.plate_names.tolist()
output['plate_id'] = self.plate_id
output['destination'] = self.params['destination']
output['robot'] = self.params['robot']
output['blank_vol'] = self.params['blank_vol']
output['blank_num'] = self.params['blank_num']
output['total_conc'] = total_c
output['total_vol'] = total_v
output['quant-process-id'] = self.quant_process_id
return output
class DownloadPoolFileHandler(BaseDownloadHandler):
@authenticated
def get(self, process_id):
try:
process = PoolingProcess(int(process_id))
except LabControlUnknownIdError:
raise HTTPError(404, reason='PoolingProcess %s does not exist'
% process_id)
text = process.generate_pool_file()
plate_names_set = {x[0].container.plate.external_id for x in
process.components}
# Note that PoolingProcess objects (what `process` is above) definitely
# *can* validly have components from multiple plates: a user could
# chose to make a "plate pool" from more than one amplicon library prep
# plate. However, as of 10/09/2018, the wet lab apparently currently
# chooses not to do this, and instead chooses to make one pool per
# library prep plate. Given this self-imposed limitation, they expect
# to be able to have the (one-and-only) library plate name embedded in
# the name of the resulting normpool file. This
# *file naming convention* won't work--or at least, won't work as they
# expect it to--if there are multiple plates represented in the pool,
# so if that happens we generate an error below, at the point where the
# *file name* is generated. If they decide they want to allow
# themselves to make plate pools from multiple plates, all they need to
# do is decide on a more flexible naming convention, and
# we can change this naming code and remove this error condition.
if len(plate_names_set) > 1:
raise ValueError("Unable to generate normpool file name for pool "
"based on more than one plate: " +
", ".join(str(x) for x in plate_names_set))
plate_name = plate_names_set.pop()
name_pieces = [plate_name, "normpool"]
self.deliver_text(name_pieces, process, text, extension="csv")
```
|
{
"source": "jderiu/ParlAI",
"score": 3
}
|
#### File: parlai/core/opt.py
```python
import copy
import json
import pickle
import traceback
class Opt(dict):
"""
Class for tracking options.
Functions like a dict, but allows us to track the history of arguments as they are
set.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.history = []
self.deepcopies = []
def __setitem__(self, key, val):
loc = traceback.format_stack()[-2]
self.history.append((key, val, loc))
super().__setitem__(key, val)
def __getstate__(self):
return (self.history, self.deepcopies, dict(self))
def __setstate__(self, state):
self.history, self.deepcopies, data = state
self.update(data)
def __reduce__(self):
return (Opt, (), self.__getstate__())
def __deepcopy__(self, memo):
"""
Override deepcopy so that history is copied over to new object.
"""
# track location of deepcopy
loc = traceback.format_stack()[-3]
self.deepcopies.append(loc)
# copy all our children
memo = Opt({k: copy.deepcopy(v) for k, v in self.items()})
# deepcopy the history. history is only tuples, so we can do it shallow
memo.history = copy.copy(self.history)
# deepcopy the list of deepcopies. also shallow bc only strings
memo.deepcopies = copy.copy(self.deepcopies)
return memo
def display_deepcopies(self):
"""
Display all deepcopies.
"""
if len(self.deepcopies) == 0:
return 'No deepcopies performed on this opt.'
return '\n'.join(f'{i}. {loc}' for i, loc in enumerate(self.deepcopies, 1))
def display_history(self, key):
"""
Display the history for an item in the dict.
"""
changes = []
i = 0
for key_, val, loc in self.history:
if key != key_:
continue
i += 1
changes.append(f'{i}. {key} was set to {val} at:\n{loc}')
if changes:
return '\n'.join(changes)
else:
return f'No history for {key}'
def load_opt_file(optfile: str) -> Opt:
"""
Load an Opt from disk.
"""
try:
# try json first
with open(optfile, 'rt', encoding='utf-8') as t_handle:
opt = json.load(t_handle)
except UnicodeDecodeError:
# oops it's pickled
with open(optfile, 'rb') as b_handle:
opt = pickle.load(b_handle)
return Opt(opt)
```
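A minimal usage sketch of the `Opt` history tracking defined above (illustrative only; the option names and values are assumptions, not taken from the repository):

```python
import copy

opt = Opt({'task': 'self_chat'})
opt['batchsize'] = 16    # recorded in opt.history together with the call site
opt['batchsize'] = 32    # recorded again

print(opt.display_history('batchsize'))  # both assignments, with stack locations
print(opt.display_history('model'))      # "No history for model"

opt_copy = copy.deepcopy(opt)            # the call site is recorded in deepcopies
print(opt_copy.display_deepcopies())
```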
#### File: parlai/scripts/self_chat.py
```python
from parlai.core.params import ParlaiParser
from parlai.core.agents import create_agent
from parlai.core.worlds import create_task
from parlai.utils.world_logging import WorldLogger
from parlai.utils.misc import TimeLogger
from parlai.core.message import Message
from pymongo import MongoClient
import math
import random
DATABASE_NAME = 'auto_judge'
COLLECTION_NAME = 'sampled-dialogues-amt-test3'
def setup_args(parser=None):
if parser is None:
parser = ParlaiParser(True, True, 'Self chat with a model')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('-d', '--display-examples', type='bool', default=True)
parser.add_argument('-n', '-ne', '--num-examples', type=int, default=10)
parser.add_argument('-nd', '--num-dialogues', type=int, default=10)
parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
parser.add_argument('-host', '--mongo-host', type=str)
parser.add_argument('-port', '--mongo-port', type=int)
parser.add_argument('-user', '--user-name', type=str)
parser.add_argument('-pw', '--password', type=str)
parser.add_argument(
'--display-ignore-fields',
type=str,
default='label_candidates,text_candidates',
help='Do not display these fields',
)
parser.add_argument(
'-st',
'--selfchat-task',
type='bool',
default=True,
help='Create a self chat version of the task',
)
parser.add_argument(
'--num-self-chats', type=int, default=1, help='Number of self chats to run'
)
parser.add_argument(
'--selfchat-max-turns',
type=int,
default=6,
help='The number of dialogue turns before self chat ends',
)
parser.add_argument(
'--seed-messages-from-task',
action='store_true',
help='Automatically seed conversation with messages from task dataset.',
)
parser.add_argument(
'--outfile', type=str, default=None, help='File to save self chat logs'
)
parser.add_argument(
'--save-format',
type=str,
default='conversations',
choices=['conversations', 'parlai'],
help='Format to save logs in. conversations is a jsonl format, parlai is a text format.',
)
parser.set_defaults(interactive_mode=True, task='self_chat')
WorldLogger.add_cmdline_args(parser)
return parser
# All supported domains drop the first two (seed/context) turns;
# an unrecognised domain falls through and returns None.
def cap_context(turn_list, domain):
if domain == 'dailydialog':
return turn_list[2:]
elif domain == 'personachat':
return turn_list[2:]
elif domain == 'wizard_of_wikipedia':
return turn_list[2:]
elif domain == 'empathetic_dialogues':
return turn_list[2:]
def self_chat(opt, print_parser=None):
client = MongoClient(
opt['mongo_host'],
opt['mongo_port'],
username=opt['user_name'],
password=opt['password'],
#authSource=DATABASE_NAME
)
db = client[DATABASE_NAME]
collection = db[COLLECTION_NAME]
if print_parser is not None:
if print_parser is True and isinstance(opt, ParlaiParser):
print_parser = opt
elif print_parser is False:
print_parser = None
if isinstance(opt, ParlaiParser):
print('[ Deprecated Warning: self_chat should be passed opt not Parser ]')
opt = opt.parse_args()
# Create agents
agent1 = create_agent(opt, requireModelExists=True)
agent2 = agent1.clone()
# Set IDs
model_id = agent1.id
agent1.id = model_id + "_1"
agent2.id = model_id + "_2"
world = create_task(opt, user_agents=[agent1, agent2])
# Set up world logging
logger = WorldLogger(opt)
log_time = TimeLogger()
# Run some self chats.
max_dial_cnt = opt['num_dialogues']
dial_cnt = 0
while dial_cnt < max_dial_cnt:
world.max_turn_cnt = world.sample_episode_length()
world.turn_cnt = 0
print('Dialogue Number: {}, Max Turn: {}\n'.format(dial_cnt, world.max_turn_cnt))
while True:
world.parley()
logger.log(world)
if opt.get('display_examples'):
print(world.display())
if world.episode_done():
break
print('\n\n')
dial_cnt += 1
if opt.get('display_examples'):
print('-- end of episode --')
    logger.write(opt['outfile'], opt['save_format'])
for convo in logger._logs:
convo_data = {}
convo_data['system_name0'] = opt['model_file']
convo_data['system_name1'] = opt['model_file']
convo_data['system_type0'] = opt['model_file'].split('/')[2]
convo_data['system_type1'] = opt['model_file'].split('/')[2]
convo_data['is_human0'] = False
convo_data['is_human1'] = False
convo_data['domain_name'] = opt['task'].split(':')[0]
turn_list = []
for eid, exchange in enumerate(convo):
turn0 = exchange[0]
turn1 = exchange[1]
turn0['exchange_nr'] = eid
turn1['exchange_nr'] = eid
if type(turn0) == Message:
turn0.force_set('episode_done', bool(turn0['episode_done']))
else:
turn0['episode_done'] = bool(turn0['episode_done'])
            if type(turn1) == Message:
turn1.force_set('episode_done', bool(turn1['episode_done']))
else:
turn1['episode_done'] = bool(turn1['episode_done'])
turn_list.append(turn0)
turn_list.append(turn1)
convo_data['convo'] = cap_context(turn_list, convo_data['domain_name'])
collection.insert_one(convo_data)
print(len(convo_data['convo']))
if __name__ == '__main__':
parser = setup_args()
self_chat(parser.parse_args(print_args=False))
```
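An illustrative way to drive the script above programmatically (the model file, output path and MongoDB credentials are placeholder assumptions, and the module path assumes the file is importable as `parlai.scripts.self_chat`):

```python
from parlai.scripts.self_chat import setup_args, self_chat

parser = setup_args()
opt = parser.parse_args([
    '--model-file', '/path/to/model',   # hypothetical model checkpoint
    '--num-dialogues', '2',
    '--outfile', '/tmp/selfchat_log',
    '--mongo-host', 'localhost', '--mongo-port', '27017',
    '--user-name', 'writer', '--password', 'secret',
], print_args=False)
self_chat(opt)
```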
|
{
"source": "jderoo/megaind-rpi",
"score": 2
}
|
#### File: python/megaind/__init__.py
```python
import smbus
import struct
import time
__HW_ADD_BASE = 0x50
VOLT_TO_MILIVOLT = 1000.0
def checkStack(stack):
if stack < 0 or stack > 7:
raise ValueError('Invalid stack level!')
return __HW_ADD_BASE + stack
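# Illustrative: checkStack(0) returns 0x50 and checkStack(7) returns 0x57,
# i.e. the I2C addresses of the stacked cards starting at __HW_ADD_BASE.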
def checkChannel(ch, limit=4):
if ch < 1 or ch > limit:
raise ValueError('Invalid channel number!')
def getWord(bus, hwAdd, add):
retry = 0
try:
buff = bus.read_i2c_block_data(hwAdd, add, 2)
val = bytearray(buff)[0] + 256 * bytearray(buff)[1]
except Exception as e:
bus.close()
raise Exception("Fail to read with exception " + str(e))
return val
# Diagnostic functions
I2C_MEM_DIAG_TEMPERATURE = 114
I2C_MEM_DIAG_24V = 115
I2C_MEM_DIAG_5V = 117
I2C_MEM_REVISION_MAJOR = 120
I2C_MEM_REVISION_MINOR = 121
def getFwVer(stack):
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
major = bus.read_byte_data(hwAdd, I2C_MEM_REVISION_MAJOR)
minor = bus.read_byte_data(hwAdd, I2C_MEM_REVISION_MINOR)
except Exception as e:
bus.close()
raise Exception("Fail to read with exception " + str(e))
bus.close()
return major + minor / 100.0
def getRaspVolt(stack):
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
val = bus.read_word_data(hwAdd, I2C_MEM_DIAG_5V)
except Exception as e:
bus.close()
raise Exception("Fail to read with exception " + str(e))
bus.close()
return val / VOLT_TO_MILIVOLT
def getPowerVolt(stack):
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
val = getWord(bus, hwAdd, I2C_MEM_DIAG_24V)
bus.close()
return val / VOLT_TO_MILIVOLT
def getCpuTemp(stack):
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
val = bus.read_byte_data(hwAdd, I2C_MEM_DIAG_TEMPERATURE)
except Exception as e:
bus.close()
raise Exception("Fail to read with exception " + str(e))
bus.close()
return val
# 0 to 10 volts input and output functions
U0_10_IN_VAL1_ADD = 28
U_PM_10_IN_VAL1_ADD = 36
U_0_10_OUT_VAL1_ADD = 4
def get0_10In(stack, channel):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
val = getWord(bus, hwAdd, U0_10_IN_VAL1_ADD + (2 * (channel - 1)))
bus.close()
return val / VOLT_TO_MILIVOLT
def getpm10In(stack, channel):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
val = getWord(bus, hwAdd, U_PM_10_IN_VAL1_ADD + (2 * (channel - 1)))
bus.close()
return val / VOLT_TO_MILIVOLT - 10
def get0_10Out(stack, channel):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
val = getWord(bus, hwAdd, U_0_10_OUT_VAL1_ADD + (2 * (channel - 1)))
bus.close()
return val / VOLT_TO_MILIVOLT
def set0_10Out(stack, channel, value):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
if value < 0 or value > 10:
raise ValueError("Invalid value!")
try:
bus.write_word_data(hwAdd, U_0_10_OUT_VAL1_ADD + (2 * (channel - 1)), int(value * 1000))
except Exception as e:
bus.close()
raise Exception("Fail to Write 0-10V output with exception " + str(e))
bus.close()
# 4 - 20 mA in/out functions
I4_20_IN_VAL1_ADD = 44
I4_20_OUT_VAL1_ADD = 12
MILLIAMP_TO_MICROAMP = 1000.0
def get4_20In(stack, channel):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
val = getWord(bus, hwAdd, I4_20_IN_VAL1_ADD + (2 * (channel - 1)))
bus.close()
return val / 1000.0
def get4_20Out(stack, channel):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
val = getWord(bus, hwAdd, I4_20_OUT_VAL1_ADD + (2 * (channel - 1)))
bus.close()
return val / 1000.0
def set4_20Out(stack, channel, value):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
if value < 4 or value > 20:
raise ValueError("Invalid value!")
try:
bus.write_word_data(hwAdd, I4_20_OUT_VAL1_ADD + (2 * (channel - 1)), int(value * 1000))
except Exception as e:
bus.close()
raise Exception("Fail to Write 4-20mA output with exception " + str(e))
bus.close()
# digital in/out functions
I2C_MEM_RELAY_VAL = 0
I2C_MEM_RELAY_SET = 1
I2C_MEM_RELAY_CLR = 2
I2C_MEM_OPTO_IN_VAL = 3
I2C_MEM_OD_PWM1 = 20
I2C_MEM_OPTO_RISING_ENABLE = 103
I2C_MEM_OPTO_FALLING_ENABLE = 104
I2C_MEM_OPTO_CH_CONT_RESET = 105
I2C_MEM_OPTO_COUNT1 = 106
def getOptoCh(stack, channel):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
val = bus.read_byte_data(hwAdd, I2C_MEM_OPTO_IN_VAL)
except Exception as e:
bus.close()
raise Exception("Fail to read with exception " + str(e))
bus.close()
if (1 << (channel - 1)) & val:
return 1
return 0
def getOpto(stack):
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
val = bus.read_byte_data(hwAdd, I2C_MEM_OPTO_IN_VAL)
except Exception as e:
bus.close()
raise Exception("Fail to read with exception " + str(e))
bus.close()
return val
def getOptoCount(stack, channel):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
val = bus.read_word_data(hwAdd, I2C_MEM_OPTO_COUNT1 + (2 * (channel - 1)))
except Exception as e:
bus.close()
raise Exception("Fail to read with exception " + str(e))
bus.close()
return val
def rstOptoCount(stack, channel):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
bus.write_byte_data(hwAdd, I2C_MEM_OPTO_CH_CONT_RESET, int(channel))
except Exception as e:
bus.close()
raise Exception("Fail to write with exception " + str(e))
bus.close()
def getOptoRisingCountEnable(stack, channel):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
val = bus.read_byte_data(hwAdd, I2C_MEM_OPTO_RISING_ENABLE)
except Exception as e:
bus.close()
raise Exception("Fail to read with exception " + str(e))
bus.close()
if (1 << (channel - 1)) & val != 0:
return 1
return 0
def setOptoRisingCountEnable(stack, channel, state):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
val = bus.read_byte_data(hwAdd, I2C_MEM_OPTO_RISING_ENABLE)
except Exception as e:
bus.close()
raise Exception("Fail to read with exception " + str(e))
if state == 0:
val &= ~(1 << (channel - 1))
else:
val |= 1 << (channel - 1)
try:
bus.write_byte_data(hwAdd, I2C_MEM_OPTO_RISING_ENABLE, val)
except Exception as e:
bus.close()
raise Exception("Fail to write with exception " + str(e))
bus.close()
def getOptoFallingCountEnable(stack, channel):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
val = bus.read_byte_data(hwAdd, I2C_MEM_OPTO_FALLING_ENABLE)
except Exception as e:
bus.close()
raise Exception("Fail to read with exception " + str(e))
bus.close()
if (1 << (channel - 1)) & val != 0:
return 1
return 0
def setOptoFallingCountEnable(stack, channel, state):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
val = bus.read_byte_data(hwAdd, I2C_MEM_OPTO_FALLING_ENABLE)
except Exception as e:
bus.close()
raise Exception("Fail to read with exception " + str(e))
if state == 0:
val &= ~(1 << (channel - 1))
else:
val |= 1 << (channel - 1)
try:
bus.write_byte_data(hwAdd, I2C_MEM_OPTO_FALLING_ENABLE, val)
except Exception as e:
bus.close()
raise Exception("Fail to write with exception " + str(e))
bus.close()
def setOdPWM(stack, channel, value):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
    if value < 0 or value > 100:  # percent
raise ValueError("Invalid value!")
try:
bus.write_word_data(hwAdd, I2C_MEM_OD_PWM1 + (2 * (channel - 1)), int(value * 100))
except Exception as e:
bus.close()
raise Exception("Fail to Write Open-Drain output PWM with exception " + str(e))
bus.close()
def getOdPWM(stack, channel):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
val = getWord(bus, hwAdd, I2C_MEM_OD_PWM1 + (2 * (channel - 1)))
bus.close()
return val / 100.0
def setLed(stack, channel, val):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
out = channel + 4
try:
if val != 0:
bus.write_byte_data(hwAdd, I2C_MEM_RELAY_SET, out)
else:
bus.write_byte_data(hwAdd, I2C_MEM_RELAY_CLR, out)
except Exception as e:
bus.close()
raise Exception("Fail to Write LED's with exception " + str(e))
bus.close()
def setLedAll(stack, val):
if val < 0 or val > 15:
raise ValueError("Invalid value!")
val = val << 4
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
bus.write_byte_data(hwAdd, I2C_MEM_RELAY_VAL, val)
except Exception as e:
bus.close()
raise Exception("Fail to Write LED's with exception " + str(e))
bus.close()
def getLed(stack, channel):
checkChannel(channel)
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
mask = 1 << (channel + 3)
try:
val = bus.read_byte_data(hwAdd, I2C_MEM_RELAY_VAL)
except Exception as e:
bus.close()
raise Exception("Fail to Write LED's with exception " + str(e))
bus.close()
if val & mask:
return 1
return 0
# watchdog functions
I2C_MEM_WDT_RESET_ADD = 83
I2C_MEM_WDT_INTERVAL_SET_ADD = 84
I2C_MEM_WDT_INTERVAL_GET_ADD = I2C_MEM_WDT_INTERVAL_SET_ADD + 2
I2C_MEM_WDT_INIT_INTERVAL_SET_ADD = I2C_MEM_WDT_INTERVAL_GET_ADD + 2
I2C_MEM_WDT_INIT_INTERVAL_GET_ADD = I2C_MEM_WDT_INIT_INTERVAL_SET_ADD + 2
I2C_MEM_WDT_RESET_COUNT_ADD = I2C_MEM_WDT_INIT_INTERVAL_GET_ADD + 2
I2C_MEM_WDT_CLEAR_RESET_COUNT_ADD = I2C_MEM_WDT_RESET_COUNT_ADD + 2
I2C_MEM_WDT_POWER_OFF_INTERVAL_SET_ADD = I2C_MEM_WDT_CLEAR_RESET_COUNT_ADD + 1
I2C_MEM_WDT_POWER_OFF_INTERVAL_GET_ADD = I2C_MEM_WDT_POWER_OFF_INTERVAL_SET_ADD + 4
WDT_MAX_POWER_OFF_INTERVAL = 4147200
RELOAD_KEY = 202
def wdtGetPeriod(stack):
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
val = bus.read_word_data(hwAdd, I2C_MEM_WDT_INTERVAL_GET_ADD)
except Exception as e:
bus.close()
raise ValueError(e)
bus.close()
return val
def wdtSetPeriod(stack, val):
ret = 1
hwAdd = checkStack(stack)
if val < 10 or val > 65000:
raise ValueError('Invalid interval value [10..65000]')
bus = smbus.SMBus(1)
try:
bus.write_word_data(hwAdd, I2C_MEM_WDT_INTERVAL_SET_ADD, val)
except Exception as e:
bus.close()
raise ValueError(e)
bus.close()
return ret
def wdtReload(stack):
ret = 1
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
bus.write_byte_data(hwAdd, I2C_MEM_WDT_RESET_ADD, RELOAD_KEY)
except Exception as e:
bus.close()
raise ValueError(e)
bus.close()
return ret
def wdtSetDefaultPeriod(stack, val):
ret = 1
hwAdd = checkStack(stack)
if val < 10 or val > 64999:
raise ValueError('Invalid interval value [10..64999]')
bus = smbus.SMBus(1)
try:
bus.write_word_data(hwAdd, I2C_MEM_WDT_INIT_INTERVAL_SET_ADD, val)
    except Exception as e:
bus.close()
raise ValueError(e)
bus.close()
return ret
def wdtGetDefaultPeriod(stack):
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
val = bus.read_word_data(hwAdd, I2C_MEM_WDT_INIT_INTERVAL_GET_ADD)
except Exception as e:
bus.close()
raise ValueError(e)
bus.close()
return val
def wdtSetOffInterval(stack, val):
ret = 1
hwAdd = checkStack(stack)
    if val < 10 or val > WDT_MAX_POWER_OFF_INTERVAL:
        raise ValueError('Invalid interval value [10..4147200]')
bus = smbus.SMBus(1)
buff = [0, 0, 0, 0]
buff[0] = 0xff & val
buff[1] = 0xff & (val >> 8)
buff[2] = 0xff & (val >> 16)
buff[3] = 0xff & (val >> 24)
try:
bus.write_i2c_block_data(hwAdd, I2C_MEM_WDT_POWER_OFF_INTERVAL_SET_ADD, buff)
except Exception as e:
bus.close()
raise ValueError(e)
bus.close()
return ret
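# Illustrative for wdtSetOffInterval() above: a 300 s interval is packed
# little-endian as buff = [44, 1, 0, 0], since 300 = 0x012C.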
def wdtGetOffInterval(stack):
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
buff = bus.read_i2c_block_data(hwAdd, I2C_MEM_WDT_POWER_OFF_INTERVAL_GET_ADD, 4)
val = buff[0] + (buff[1] << 8) + (buff[2] << 16) + (buff[3] << 24)
except Exception as e:
bus.close()
raise ValueError(e)
bus.close()
return val
def wdtGetResetCount(stack):
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
val = bus.read_word_data(hwAdd, I2C_MEM_WDT_RESET_COUNT_ADD)
except Exception as e:
bus.close()
raise ValueError(e)
bus.close()
return val
I2C_RTC_YEAR_ADD = 70
I2C_RTC_MONTH_ADD = 71
I2C_RTC_DAY_ADD = 72
I2C_RTC_HOUR_ADD = 73
I2C_RTC_MINUTE_ADD = 74
I2C_RTC_SECOND_ADD = 75
I2C_RTC_SET_YEAR_ADD = 76
I2C_RTC_SET_MONTH_ADD = 77
I2C_RTC_SET_DAY_ADD = 78
I2C_RTC_SET_HOUR_ADD = 79
I2C_RTC_SET_MINUTE_ADD = 80
I2C_RTC_SET_SECOND_ADD = 81
I2C_RTC_CMD_ADD = 82
def rtcGet(stack):
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
try:
buff = bus.read_i2c_block_data(hwAdd, I2C_RTC_YEAR_ADD, 6)
except Exception as e:
bus.close()
raise ValueError(e)
bus.close()
t = (2000 + buff[0], buff[1], buff[2], buff[3], buff[4], buff[5])
return t
def rtcSet(stack, y, mo, d, h, m, s):
if y > 2000:
y -= 2000
if y < 0 or y > 255:
raise ValueError("Invalid year!")
if mo > 12 or mo < 1:
raise ValueError("Invalid month!")
if d < 1 or d > 31:
raise ValueError("Invalid day!")
if h < 0 or h > 23:
raise ValueError("Invalid hour!")
if m < 0 or m > 59:
raise ValueError("Invalid minute!")
if s < 0 or s > 59:
raise ValueError("Invalid seconds!")
hwAdd = checkStack(stack)
bus = smbus.SMBus(1)
buff = [int(y), int(mo), int(d), int(h), int(m), int(s), 0xaa]
# buff[0] = int(y)
# buff[1] = int(mo)
# buff[2] = int(d)
# buff[3] = int(h)
# buff[4] = int(m)
# buff[5] = int(s)
# buff[6] = 0xaa
try:
bus.write_i2c_block_data(hwAdd, I2C_RTC_SET_YEAR_ADD, buff)
except Exception as e:
bus.close()
raise ValueError(e)
bus.close()
```
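A minimal usage sketch for the library above (illustrative; it assumes one card at stack level 0 with signals wired to the channels used here):

```python
import megaind as ind

# analog I/O on the card at stack level 0
print('0-10 V input, channel 1: %.3f V' % ind.get0_10In(0, 1))
ind.set0_10Out(0, 2, 5.0)  # drive 0-10 V output channel 2 to 5 V

# current-loop input, returned in mA
print('4-20 mA input, channel 1: %.2f mA' % ind.get4_20In(0, 1))

# opto-coupled inputs and on-board LEDs
print('opto input bitmask: %d' % ind.getOpto(0))
ind.setLed(0, 1, 1)  # turn LED 1 on
```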
#### File: python/tests/flash_leds.py
```python
import megaind as ind
import time
def flash_seq(n, delay=0.1, stack=0):
for i in range(n):
for j in range(4):
ind.setLed(stack, j + 1, 1)
time.sleep(delay)
for j in range(4):
ind.setLed(stack, j + 1, 0)
time.sleep(delay)
def flash(n, delay=0.2, stack=0):
for i in range(n):
ind.setLedAll(stack, 15)
time.sleep(delay)
ind.setLedAll(stack, 0)
time.sleep(delay)
if __name__ == "__main__":
flash_seq(10)
flash(10)
```
#### File: python/tests/test_digital.py
```python
import megaind as ind
import sys
def test_getOptoCh(v):
for i in range (1, 5):
val = ind.getOptoCh(0, i)
if v :
            print ('Opto coupled input channel ' + str(i) + " read value " + str(val))
assert val >= 0, "Should be greater than or equal to 0"
assert val <= 1, "Should be less than or equal to 1"
def test_getOpto(v):
val = ind.getOpto(0)
if v :
print("Opto input read " + str(val))
assert val >= 0, "Should be greater than or equal to 0"
assert val <= 15, "Should be less than or equal to 15"
def test_getOptoCount(v):
for i in range (1, 5):
val = ind.getOptoCount(0, i)
if v :
            print ('Opto coupled input counter channel ' + str(i) + " read value " + str(val))
assert val >= 0, "Should be greater than or equal to 0"
def test_rstOptoCount(v):
for i in range (1, 5):
        ind.rstOptoCount(0, i)
val = ind.getOptoCount(0, i)
if v :
            print ('Opto coupled input counter channel ' + str(i) + " read value " + str(val))
assert val == 0, "Should be equal to 0"
def test_getOptoRisingCountEnable(v):
for i in range (1, 5):
val = ind.getOptoRisingCountEnable(0, i)
if v :
            print ('Opto coupled input rising edge enable channel ' + str(i) + " read value " + str(val))
assert val >= 0, "Should be greater than or equal to 0"
if __name__ == "__main__":
verb = False
if len(sys.argv) > 1:
if sys.argv[1] == '-v':
verb = True
test_getOptoCh(verb)
print("getOptoCh() passed")
test_getOpto(verb)
print("getOpto() passed")
test_getOptoCount(verb)
print ("getOptoCount() passed")
test_rstOptoCount(verb)
print ("rstOptoCount() passed")
```
|
{
"source": "jder/tools-for-curiousity-app",
"score": 2
}
|
#### File: tools_for_curiousity_app/t4c/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from pprint import pprint
def index(request):
print("t4c/views.py @ index()")
pprint(request)
context = {}
return render(request, "t4c/index.html", context)
def aframedemo(request):
print("t4c/views.py @ aframedemo()")
pprint(request)
context = {}
return render(request, "t4c/aframedemo.html", context)
def minigame1(request):
print("t4c/views.py @ minigame1()")
pprint(request)
context = {}
return render(request, "t4c/minigame1.html", context)
def minigame2(request):
print("t4c/views.py @ minigame2()")
pprint(request)
context = {}
return render(request, "t4c/minigame2.html", context)
def minigame3(request):
print("t4c/views.py @ minigame3()")
pprint(request)
context = {}
return render(request, "t4c/minigame3.html", context)
def minigame4(request):
print("t4c/views.py @ minigame4()")
pprint(request)
context = {}
return render(request, "t4c/minigame4.html", context)
def OBSOLETEindex(request):
return HttpResponse("Hello, world. You're at the t4c index.")
```
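A hypothetical URLconf wiring for the views above (the repository's actual `t4c/urls.py` is not shown here, so the paths and route names are assumptions):

```python
from django.urls import path

from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('aframedemo/', views.aframedemo, name='aframedemo'),
    path('minigame1/', views.minigame1, name='minigame1'),
    path('minigame2/', views.minigame2, name='minigame2'),
    path('minigame3/', views.minigame3, name='minigame3'),
    path('minigame4/', views.minigame4, name='minigame4'),
]
```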
|
{
"source": "jderuiter/rpki-scripts",
"score": 2
}
|
#### File: jderuiter/rpki-scripts/dump_json.py
```python
from asn1crypto.cms import ContentInfo
from asn1crypto.crl import CertificateList
import rpki.roa
import rpki.manifest
from rpki.certificate import RPKICertificate
import os
import sys
import socket
import json
from datetime import datetime
ADDRESS_FAMILY_IPV4 = b'\x00\x01'
ADDRESS_FAMILY_IPV6 = b'\x00\x02'
# Turn a tuple of bits into a byte string. The number of bits needs to be a
# multiple of 8.
def bits_to_bytes(bits):
if len(bits) % 8 != 0:
raise ValueError("Number of bits not a multiple of 8")
out = []
for i in range(0, len(bits) >> 3):
v = 0
for j in range(0, 8):
v |= bits[i*8+j] << j
out.append(v)
return bytes(out)
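# Illustrative: packing is least-significant-bit first within each byte, so
# bits_to_bytes((1, 0, 0, 0, 0, 0, 0, 0)) == b'\x01'
# and bits_to_bytes((0, 0, 0, 0, 0, 0, 0, 1)) == b'\x80'.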
# Print bits as IPv4 prefix in CIDR notation
def ipv4_prefix_to_string(bits):
if len(bits) > 32:
raise ValueError("Too many bits for IPv4 prefix")
# Extend bits to full IPv4 length
prefix = bits + tuple(0 for _ in range(32 - len(bits)))
b = bits_to_bytes(prefix)
str_prefix = socket.inet_ntop(socket.AF_INET, b) + "/" + str(len(bits))
return str_prefix
# Print bits as IPv6 prefix in CIDR notation
def ipv6_prefix_to_string(bits):
if len(bits) > 128:
raise ValueError("Too many bits for IPv6 prefix")
# Extend bits to full IPv6 length
prefix = bits + tuple(0 for _ in range(128 - len(bits)))
b = bits_to_bytes(prefix)
str_prefix = socket.inet_ntop(socket.AF_INET6, b) + "/" + str(len(bits))
return str_prefix
# Rewrite ipAddrBlocks in native format to readable prefixes
def rewrite_ipAddrBlocks(ipAddrBlocks):
for ipAddrBlock in ipAddrBlocks:
if ipAddrBlock['addressFamily'] == ADDRESS_FAMILY_IPV4:
ipAddrBlock['addressFamily'] = 'IPv4'
for k in range(0, len(ipAddrBlock['addresses'])):
# Rewrite IP prefix from bits to readable string
ipAddrBlock['addresses'][k]['address'] = ipv4_prefix_to_string(ipAddrBlock['addresses'][k]['address'])
# TODO Check max_length is consistent with prefix length?
elif ipAddrBlock['addressFamily'] == ADDRESS_FAMILY_IPV6:
ipAddrBlock['addressFamily'] = 'IPv6'
for k in range(0, len(ipAddrBlock['addresses'])):
# Rewrite IP prefix from bits to readable string
ipAddrBlock['addresses'][k]['address'] = ipv6_prefix_to_string(ipAddrBlock['addresses'][k]['address'])
# TODO Check max_length is consistent with prefix length?
else:
raise ValueError("Invalid addressFamily")
# Return version of object that can be converted to JSON.
# Byte strings are converted to hex, datetime to isoformat, sets to lists.
def jsonize_object(obj):
if isinstance(obj, dict):
return dict(map(lambda i: (i[0], jsonize_object(i[1])), obj.items()))
elif isinstance(obj, list) or isinstance(obj, set):
return list(map(jsonize_object, obj))
elif type(obj) == bytes:
return obj.hex()
elif type(obj) == datetime:
return obj.isoformat()
else:
return obj
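# Illustrative: jsonize_object({'hash': b'\x01\x02', 'names': {'a', 'b'}})
# returns {'hash': '0102', 'names': ['a', 'b']} (set ordering not guaranteed).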
def process_roa(roa):
# Rewrite the IP addresses in the ipAddrBlocks to readable prefixes
rewrite_ipAddrBlocks(roa['ipAddrBlocks'])
def process_manifest(manifest):
# Rewrite hashes to hex/bytes
for fileHash in manifest['fileList']:
fileHash['hash'] = bits_to_bytes(fileHash['hash']).hex()
def process_certificate(certificate):
# Rewrite ipAddressChoice
for ext in certificate['tbs_certificate']['extensions']:
if ext['extn_id'] == 'id-pe-ipAddrBlocks':
for ipAddrFamily in ext['extn_value']:
if ipAddrFamily['addressFamily'] == ADDRESS_FAMILY_IPV4:
ipAddrFamily['addressFamily'] = 'IPv4'
if ipAddrFamily['ipAddressChoice']:
for k in range(0, len(ipAddrFamily['ipAddressChoice'])):
# Rewrite IP prefix from bits to readable string
ipAddrFamily['ipAddressChoice'][k] = ipv4_prefix_to_string(ipAddrFamily['ipAddressChoice'][k])
elif ipAddrFamily['addressFamily'] == ADDRESS_FAMILY_IPV6:
ipAddrFamily['addressFamily'] = 'IPv6'
if ipAddrFamily['ipAddressChoice']:
for k in range(0, len(ipAddrFamily['ipAddressChoice'])):
# Rewrite IP prefix from bits to readable string
ipAddrFamily['ipAddressChoice'][k] = ipv6_prefix_to_string(ipAddrFamily['ipAddressChoice'][k])
def main():
if len(sys.argv) < 2:
sys.exit("Not enough arguments")
path = sys.argv[1]
# TODO Add flag to override detection based on filetype
# Try to determine type based on extension
file, ext = os.path.splitext(path)
ext = ext.lower()
if ext == '.roa':
ext_class = ContentInfo
elif ext == '.mft':
ext_class = ContentInfo
elif ext == '.crl':
ext_class = CertificateList
elif ext == '.cer':
ext_class = RPKICertificate
else:
sys.exit("Unknown filetype: " + ext)
# Read file
try:
file = open(path, "rb")
der_byte_string = file.read()
except Exception as e:
sys.exit("Could not read file.\n" + str(e))
# Parse ASN.1 data using previously picked type
try:
parsed = ext_class.load(der_byte_string)
except Exception as e:
sys.exit("Could not parse file.\n" + str(e))
# TODO Sanity check of resulting data
try:
# Convert to readable JSON output
data = parsed.native
if type(parsed) is ContentInfo:
for cert in data['content']['certificates']:
process_certificate(cert)
if data['content']['encap_content_info']['content_type'] == 'routeOriginAuthz':
process_roa(data['content']['encap_content_info']['content'])
elif data['content']['encap_content_info']['content_type'] == 'rpkiManifest':
process_manifest(data['content']['encap_content_info']['content'])
elif type(parsed) is RPKICertificate:
process_certificate(data)
elif type(parsed) is CertificateList:
pass
else:
sys.exit("Unkown content type")
print(json.dumps(jsonize_object(data), indent=2))
except Exception as e:
sys.exit("Something went wrong:\n" + str(e))
if __name__ == "__main__":
main()
```
|
{
"source": "jderuiter/SURFdnssec",
"score": 2
}
|
#### File: SURFdnssec/lib/rabbitdnssec.py
```python
import sys
import socket
import time
import os.path
import importlib
import ssl
import json
import syslog
import atexit
import configparser
import pika
import pika.spec
import pika.credentials
# Setup configuration, such as settings and application name
#
homedir = os.path.expanduser ('~')
appdir = homedir + '/ods-amqp'
appname = os.path.basename (sys.argv [0])
appcfg = configparser.ConfigParser ()
appcfg.read ([appdir + '/config', '/etc/opendnssec/ods-amqp.config'])
# Recreate the prefix from sys.argv [0] and add to to $PATH
#
prefix = os.path.dirname (sys.argv [0])
os.environ ['PATH'] = prefix + ':' + os.environ.get ('PATH')
# Open syslog, using standard settings
#
def cleanup_syslog ():
syslog.syslog (syslog.LOG_INFO, 'Program exiting')
syslog.closelog ()
syslog.openlog (appname,
(syslog.LOG_PERROR if sys.stderr.isatty () else 0) |
syslog.LOG_PID,
syslog.LOG_USER)
syslog.syslog (syslog.LOG_INFO, 'Program starting')
atexit.register (cleanup_syslog)
# Setup the RabbitMQ client
#
this_machine = socket.gethostname ().split ('.') [0]
this_port = int (appcfg ['rabbitmq'] ['port'])
vhost = appcfg ['rabbitmq'] ['vhost']
signer_cluster = appcfg ['rabbitmq'] ['signer_cluster']
signer_machines = appcfg ['rabbitmq'] ['signer_machines'].split ()
backup_machines = appcfg ['rabbitmq'] ['backup_machines'].split ()
plugindir = appcfg ['rabbitmq'] ['plugindir']
ca_certs = appcfg ['rabbitmq'] ['ca_certs']
backend = appcfg ['rabbitmq'] ['backend']
#
assert ((this_machine in signer_machines) or (this_machine in backup_machines))
assert (len (signer_machines) >= 2)
# Setup for TLS
#
wrap_tls = True
conf_tls = {
'ssl_version': ssl.PROTOCOL_TLSv1_2,
'ca_certs': ca_certs,
'certfile': appdir + '/ssl/certs/' + this_machine + '.pem',
'keyfile': appdir + '/ssl/private/' + this_machine + '.pem',
'server_side': False,
}
# Setup PKCS #11
#
pkcs11_libfile = appcfg ['pkcs11'] ['libfile']
pkcs11_token_label = appcfg ['pkcs11'] ['token_label']
pkcs11_pinfile_path = appcfg ['pkcs11'] ['pinfile']
pkcs11_curve_name = appcfg ['pkcs11'] ['curve_name']
# Send messages at various levels to syslog
#
def log_debug (msg, *args):
for a in args:
msg = msg + ' ' + unicode (str (a), 'utf-8')
msg = msg.encode ('ascii', 'replace')
syslog.syslog (syslog.LOG_DEBUG, msg)
def log_info (msg, *args):
for a in args:
msg = msg + ' ' + unicode (str (a), 'utf-8')
# msg = msg % tuple (map (str, args))
msg = msg.encode ('ascii', 'replace')
syslog.syslog (syslog.LOG_INFO, msg)
def log_notice (msg, *args):
for a in args:
msg = msg + ' ' + unicode (str (a), 'utf-8')
# msg = msg % tuple (map (str, args))
msg = msg.encode ('ascii', 'replace')
syslog.syslog (syslog.LOG_NOTICE, msg)
def log_warning (msg, *args):
for a in args:
msg = msg + ' ' + unicode (str (a), 'utf-8')
# msg = msg % tuple (map (str, args))
msg = msg.encode ('ascii', 'replace')
syslog.syslog (syslog.LOG_WARNING, msg)
def log_error (msg, *args):
for a in args:
msg = msg + ' ' + unicode (str (a), 'utf-8')
# msg = msg % tuple (map (str, args))
msg = msg.encode ('ascii', 'replace')
syslog.syslog (syslog.LOG_ERR, msg)
def log_critical (msg, *args):
for a in args:
msg = msg + ' ' + unicode (str (a), 'utf-8')
# msg = msg % tuple (map (str, args))
msg = msg.encode ('ascii', 'replace')
syslog.syslog (syslog.LOG_CRIT, msg)
# Return the name of a queue on the current machine (prefix by hostname)
#
def my_queue (queue):
return this_machine + '_' + queue
# Return the name of an exchange on the current machine (prefix by hostname)
#
def my_exchange (exchange='signer'):
return this_machine + '_' + exchange
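# Illustrative (hostname assumed): on a host whose name starts with "signer1",
# my_queue ('zonedata') returns 'signer1_zonedata' and
# my_exchange () returns 'signer1_signer'.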
# Return configuration dict for the current app from config section [APPNAME]
# (Use ovr_appname to override the application name to something else)
#
def my_config (ovr_appname=None):
global appcfg, appname
assert (ovr_appname != 'accounts')
if ovr_appname is None:
ovr_appname = appname
return appcfg [ovr_appname]
# Return the backend module name used for signing DNS zone data.
#
def my_backend ():
return backend
# Return the plugin directory for this program.
#
def my_plugindir (ovr_appname=None):
return plugindir + '/' + (ovr_appname or appname)
# Return the backend module used for signing DNS zone data.
# By default, a possible loading location is the plugin directory's
# subdirectory named by sys.argv [0], but ovr_appname can be used to
# override this default name for the application subdirectory under
# the plugin directory.
#
def my_backendmod (modname_prefix, modname_postfix='', ovr_appname=None):
sys.path.append (my_plugindir (ovr_appname=ovr_appname))
backendmod = importlib.import_module (
modname_prefix + backend + modname_postfix )
sys.path.pop ()
return backendmod
# Retrieve a PlainCredentials object based on the current appname.
# Overrides exist for appname and username.
#
def my_credentials (ovr_appname=None, ovr_username=None):
global appcfg, appname
if ovr_username is None:
username = appcfg [ovr_appname or appname] ['username']
else:
username = ovr_username
password = appcfg ['accounts'] [username]
return pika.PlainCredentials (username, password)
# Retrieve a ConnectionParameters object. This is based on settings
# in the [rabbitmq] configuration section, which applies to all appnames
# under this UNIX account, except for the credentials which can be
# supplied here as a parameter, and may well be derived with
# my_credentials().
#
def my_connectionparameters (my_creds, host=this_machine, port=this_port, **params):
return pika.ConnectionParameters (
host,
port,
virtual_host=vhost,
ssl=wrap_tls,
ssl_options=conf_tls,
credentials=my_creds,
**params)
# Construct a BasicProperties object, based on standard available
# information and optional headers. There are options for overriding
# the username.
#
def my_basicproperties (headers=None, ovr_appname=None, ovr_username=None):
return pika.spec.BasicProperties (
timestamp=time.time (),
user_id=(ovr_username or appcfg [
ovr_appname or appname] ['username']),
cluster_id=signer_cluster,
headers=headers)
def pkcs11_pin ():
"""Load the PKCS #11 PIN from the OpenDNSSEC configuration.
"""
return open (pkcs11_pinfile_path).read ().strip ()
def pkcs11_pinfile ():
"""Return the PKCS #11 PIN file from the OpenDNSSEC configuration.
"""
return pkcs11_pinfile_path
class MessageCollector (object):
"""MessageCollector synchronously loads at least one message,
but more when they are immediately available. This helps
to speed up operations when work accumulates and batch-mode
operation is possible. At the same time, it does not slow
down operations when messages drip in one at a time.
This is probably best combined with transactions, as in
chan.tx_select ()
clx = MessageCollector (chan)
clx.collect ()
...
for m in clx.messages ():
...inner.loop...
...
if ...we are happy...:
clx.ack ()
else:
clx.nack ()
chan.tx_commit ()
"""
def __init__ (self, chan, queue=None):
self.chan = chan
self.queue = queue
self.msgtags = []
self.msglist = []
self.gotempty = False
def messages (self):
"""Return the list of messages collected.
"""
return self.msglist
def count (self):
"""Return the number of messages collected.
"""
return len (self.msglist)
def ack (self):
"""Send a basic_ack() on all collected messages.
"""
for tag in self.msgtags:
self.chan.basic_ack (delivery_tag=tag)
self.msgtags = []
def nack (self, requeue=True):
"""Send a basic_nack() on all collected messages.
"""
for tag in self.msgtags:
self.chan.basic_nack (delivery_tag=tag, requeue=requeue)
self.msgtags = []
def more_to_collect (self):
"""Call this to see if we should proceed; it means that
we collected at least one message, and nothing more
is available for immediate processing.
"""
# return len (self.msglist) == 0 or not self.empty
#FAIL# print 'Length of collected messages:', len (self.msglist)
#FAIL# print 'Number of waiting messages:', self.chan.get_waiting_message_count ()
qhdl = self.chan.queue_declare (queue=self.queue, passive=True)
# print 'qhdl.method.message_count =', qhdl.method.message_count
#FAIL# return len (self.msglist) == 0 or self.chan.get_waiting_message_count () > 0
return len (self.msglist) == 0 or qhdl.method.message_count > 0
def collect (self, queue=None):
"""Collect at least one message; if more can be collected
without waiting, then do so. This method is not
re-entrant. The queue defaults to the value that was
optionally set when this object was instantiated.
"""
regcb = False
self.empty = False
tout = None
while self.more_to_collect ():
# print 'There is more to collect...'
# Note: self.chan is an instance of
# pika.adapters.blocking_connection.BlockingChannel
# which returns (None,None,None) for an empty queue
# or (mth,props,body) otherwise
#FAIL# (mth, props, body) = self.chan.consume (
#FAIL# queue=(queue or self.queue),
#FAIL# inactivity_timeout=tout)
(mth,props,body) = self.chan.basic_get (
queue=(queue or self.queue))
# print 'Class MTH =', type (mth)
#TODO# No timeout... and bad reponses when empty!
if type (mth) != pika.spec.Basic.GetOk:
#TODO# raise Exception ('Unexpectedly found empty queue "' + (queue or self.queue) + '"')
# print 'Unexpectedly found empty queue "' + (queue or self.queue) + '"'
time.sleep (60)
continue
self.msgtags.append (mth.delivery_tag)
self.msglist.append (body)
# The next looping is not blocking
tout = 10
#TODO#FROMHERE#
#TODO# self.callback_GetOk (self, self.chan, mth, props, body)
#DROP# self.chan.basic_get (callback=self.callback_GetOk,
#DROP# queue=(queue or self.queue))
#DROP# if not regcb:
#DROP# self.chan.add_callback (clx.callback_GetEmpty,
#DROP# pika.spec.Basic.GetEmpty,
#DROP# one_shot=True)
#DROP# regcb = True
pass # print 'There is nothing more to collect'
def callback_GetEmpty (self, frame):
"""Take note that no messages are currently available.
"""
self.gotempty = True
def callback_GetOk (self, chan, mth, props, body):
"""Take note of a new message. Store its delivery_tag
for future use with self.ack() or self.nack().
"""
self.msgtags.append (mth.delivery_tag)
self.msglist.append (body)
def open_client_connection (username=None, hostname='localhost'):
"""Return a connection as an AMQP client, with the given
username. A password is determined locally. When
no username is provided, guest / guest will be used.
The default host to connect to is localhost, but
another value may be passed in.
The returned value is a connection, to be used as in
cnx = open_client_connection (...)
chan = cnx.channel ()
...
cnx.close ()
Exceptions that might be raised include
pika.exceptions.AMQPChannelError
pika.exceptions.AMQPError
See amqp_client_channel() for a "with" form.
"""
if username is not None:
password = appcfg ['accounts'] [username]
creds = pika.PlainCredentials (username, password)
else:
# Construct ConnectionParameters for guest / guest
creds = None
cnxparm = pika.ConnectionParameters (
host=hostname,
port=this_port,
virtual_host=vhost,
ssl=wrap_tls,
ssl_options=conf_tls,
credentials=creds
)
cnx = pika.BlockingConnection (cnxparm)
return cnx
class amqp_client_channel ():
"""Use this class in the "with" form:
with amqp_client_channel (...) as chan:
chan.basic_publish (...)
Set username to login in another way than guest / guest.
Set hostname to connect to another host than localhost.
Set transactional to request transactional behaviour.
Any AMQP exceptions will be caught, printed and fatally exited.
In the transactional variety, the channel is setup accordingly
and calls to tx_commit() and/or tx_rollback() are supported.
When normally ending the "with" clause, any remaining work will
be committed, and any failure to that end will be reported along
with the AMQP exceptions. When the "with" clause is left early
due to an exception, than the transaction will be rolled back.
"""
def __init__ (self, username=None, hostname='localhost', transactional=False):
self.username = username
self.hostname = hostname
self.transact = transactional
def __enter__ (self):
self.cnx = open_client_connection (self.username, self.hostname)
self.chan = self.cnx.channel ()
if self.transact:
self.chan.tx_select ()
return self.chan
def __exit__ (self, typ, val, tbk):
txfail = False
if self.transact:
if val is not None:
self.chan.tx_rollback ()
else:
frame_method = self.chan.tx_commit ()
txfail = type (frame_method.method) != pika.spec.Tx.CommitOk
self.cnx.close ()
if isinstance (val, pika.exceptions.AMQPChannelError):
log_error ('AMQP Channel Error:', val)
sys.exit (1)
if isinstance (val, pika.exceptions.AMQPConnectionError):
log_error ('AMQP Connection Error:', val)
sys.exit (1)
if isinstance (val, pika.exceptions.AMQPError):
log_error ('AMQP Error:', val)
sys.exit (1)
if self.transact:
if txfail:
log_error ('AMQP Transaction Failure')
sys.exit (1)
```
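A minimal sketch of publishing through the helpers above (the username, exchange suffix, routing key and message body are assumptions made for illustration):

```python
import rabbitdnssec

# Transactional publish: commits on a clean exit, rolls back on an exception.
with rabbitdnssec.amqp_client_channel (username='parenting', transactional=True) as chan:
	chan.basic_publish (
		exchange=rabbitdnssec.my_exchange ('parenting'),
		routing_key='example.nl',
		body='3600 IN DNSKEY ...')
```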
#### File: SURFdnssec/ods-registry/ods-registry-local-parent.py
```python
import os
import sys
import socket
import ssl
import pika
import rabbitdnssec
from rabbitdnssec import log_debug, log_info, log_notice, log_warning, log_error, log_critical
exchange_name = rabbitdnssec.my_exchange ('parenting')
#
# Return a socket / handle for the connection to the local parent
#
def connect ():
#
# Create the queueing infrastructure for the parent exchange.
#
creds = rabbitdnssec.my_credentials (ovr_username='parenting')
cnxparm = rabbitdnssec.my_connectionparameters (creds)
intcnx = None
chan = None
try:
intcnx = pika.BlockingConnection (cnxparm)
chan = intcnx.channel ()
#TODO:CLASS# chan.basic_consume (process_msg, queue=queue_name)
#TODO:NOTHERE# chan.tx_select ()
#TODO:CLASS# chan.start_consuming ()
return (intcnx,chan)
except pika.exceptions.AMQPChannelError, e:
log_error ('AMQP Channel Error:', e)
sys.exit (1)
except pika.exceptions.AMQPError, e:
log_error ('AMQP Error:', e)
sys.exit (1)
#
# Terminate any outstanding connection to the local parent
#
def disconnect (cnx):
(intcnx,chan) = cnx
#
# To end this program, unwind all instances and unregister our own
# callback to cb_uploaded_hint().
#
chan.basic_cancel (uploaded_hints_tag)
for pex in parenting_exchange.values ():
pex.close ()
chan = None
intcnx.close ()
intcnx = None
#
# Pass a set of DNSSEC keys to the parent
#
def update_keys (cnx, domain, keys):
(intcnx,chan) = cnx
dnskeys = map (lambda k: '3600 IN DNSKEY ' + k.to_text (), keys)
msg = ''.join (dnskeys).strip ()
domnodot = domain.to_text ()
if domnodot.endswith ('.'):
domnodot = domnodot [:-1]
log_info ('Local "registry" update with zone', domnodot, 'keys', msg)
chan.basic_publish (exchange=exchange_name,
routing_key=domnodot,
body=msg)
```
#### File: SURFdnssec/ods-registry/ods-registry-sidn.py
```python
import sys
import rabbitdnssec
from rabbitdnssec import log_debug, log_info, log_notice, log_warning, log_error, log_critical
cfg = rabbitdnssec.my_config ('ods-registry')
# Read configuration
#
sidn_host = cfg ['registry_sidn_host']
sidn_port = int (cfg ['registry_sidn_port'])
sidn_user = cfg ['registry_sidn_account']
sidn_pass = cfg ['registry_sidn_password']
sidn_root = cfg ['registry_sidn_calist']
sidn_lock = cfg ['registry_sidn_epplock']
# Check invocation when called as main script
#
#TODO# Perhaps skip configuration file parsing for main script?
#
server_tuple = None
if __name__ == '__main__':
if len (sys.argv) > 3:
log_error ('Usage: ' + sys.argv [0] + ' [<registry> [<port>]]\n')
sys.exit (1)
try:
if len (sys.argv) >= 2:
# Override hostname
sidn_host = sys.argv [1]
if len (sys.argv) >= 3:
# Override port
sidn_port = int (sys.argv [2])
except:
log_error ('Registry ' + sys.argv [1] + ':' + sys.argv [2] + ' is unknown\n')
sys.exit (1)
#
# A few oft-used strings as an easy-to-use (constant-value) variable
#
xml_head = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n'
epp_open = '<epp xmlns="urn:ietf:params:xml:ns:epp-1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="urn:ietf:params:xml:ns:epp-1.0 epp-1.0.xsd">\n'
epp_clos = '</epp>\n'
eppns = 'urn:ietf:params:xml:ns:epp-1.0'
dnssecns = 'urn:ietf:params:xml:ns:secDNS-1.1'
sidnresultns = 'http://rxsd.domain-registry.nl/sidn-ext-epp-1.0'
#
# The number of arguments for each recognised command, grouped by the
# shellname. The shellname is this script's basename, so through links
# the available command set can be altered.
#
action_argcount = {
'registry_shell': {
'keysync': 1,
'eppkeys': 1,
'exit': 0,
'quit': 0,
'help': 0,
},
}
import os
import os.path
import re
import time
from syslog import *
import base64
import dns
import dns.name
import dns.resolver
import dns.rdataclass
import dns.rdatatype
import dns.rdtypes.ANY
import dns.rdtypes.ANY.DNSKEY
from dns.rdtypes import dnskeybase
import socket
import ssl
import struct
import fcntl
#
# Report an error and quit with an error code
#
def fatal (errstr):
log_error ('Fatal error:', errstr, '-- Closing shell with force')
closelog ()
sys.exit (1)
#
# Run a command; show "OK" unless "more" is set to indicate more commands follow
# Commands are run through sudo, to obtain the right privileges.
#
def runcmd (cmdline, more=False):
syslog (LOG_INFO, 'Running: ' + cmdline)
retval = os.system ('sudo ' + cmdline)
if retval != 0:
fatal ('Error: ' + str (retval) + '\n')
elif not more:
log_debug ('OK\n')
#
# Print a prompt if the standard input is an interactive terminal:
#
def prompt ():
if os.isatty (sys.stdin.fileno ()):
log_debug (shellname + '$ ')
from lxml import etree
#
# Globals
#
# Greeting from server, result of last <hello/>
greetz = None
hostname = None
#
# Create a TLS-wrapped connection to the registration server
#
def connect ():
global sidn_host, sidn_port, sidn_root
try:
sox = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
soxplus = ssl.wrap_socket (sox, ca_certs=sidn_root, cert_reqs=ssl.CERT_REQUIRED)
soxplus.connect ( (sidn_host,sidn_port) )
hello (soxplus)
login (soxplus)
return soxplus
except:
log_error ('Failed to securely connect to server %s:%d\n' % (sidn_host,sidn_port))
raise
#
# Drop a TLS-wrapped connection to the registration server
#
def disconnect (sox):
logout (sox)
#
# Send a message, await the reply synchronously and return it
#
def syncio (sox, query):
try:
if query:
#DEBUG_SHOWS_PASSWORD# sys.stdout.write (query)
query = struct.pack ('>L', 4 + len (query)) + query
sox.send (query)
else:
log_debug ('Picking up response without sending a query\n')
except:
log_error ('Failed to send message to registry server\n')
raise
try:
resplen = struct.unpack ('>L', sox.read (4)) [0] - 4
# syslog (LOG_DEBUG, 'Receiving %d response bytes from registry' % resplen)
xmltext = ''
while len (xmltext) < resplen:
xmltext = xmltext + sox.read (resplen - len (xmltext))
#DEBUG_SHOWS_ANYTHING# sys.stdout.write (xmltext)
except:
log_error ('Failed to receive reply from registry server\n')
raise
try:
xmltree = etree.fromstring (xmltext)
return xmltree
except:
log_error ('Failed to parse XML:\n| ' + xmltext.replace ('\n', '\n| '))
raise
#
# Check if a response (to a command) is OK.
# Note that some error codes are quite acceptable, such as
# "object not found" in response to deletion. Such codes
# can be added through the "extra" string parameter.
#
# Return True for okay, or False otherwise.
#
def response_ok (resp, extra=None):
if resp.tag != '{' + eppns + '}epp':
return False
result = resp.find ('{' + eppns + '}response/{' + eppns + '}result')
if result is None:
return False
if not result.attrib.has_key ('code'):
return False
rescode = result.attrib ['code']
if rescode [:1] != '1' and rescode != extra:
return False
return True
#
# Raise an exception that incorporates an error message
# and an XML text.
#
def raise_xml (errstr, xml):
try:
rescode = xml.find ('{' + eppns + '}response/{' + eppns + '}result').attrib ['code']
resmesg = xml.find ('{' + eppns + '}response/{' + eppns + '}result/{' + eppns + '}msg').text
errstr = errstr + ': ' + rescode + ' ' + resmesg
try:
for cond in xml.find ('{' + eppns + '}response/{' + eppns + '}extension/{' + sidnresultns + '}ext/{' + sidnresultns + '}response/{' + sidnresultns + '}msg'):
rescode = rescode + '.' + cond.attrib ['code']
resdetl = cond.text
				errstr = errstr + ' -- ' + rescode + ': ' + resmesg + ' (' + resdetl + ')'
except:
pass
except Exception, e:
errstr = errstr + ':\n| ' + etree.tostring (xml).replace ('\n', '\n| ')
if errstr [-3:] == '\n| ':
errstr = errstr [:-2]
errstr = errstr + 'Plus, ' + str (e) + '\n'
syslog (LOG_CRIT, errstr)
raise Exception (errstr)
#
# Check if a response is ok, just as with response_ok()
# but raise an error string if the result is False.
#
def require_ok (errstr, resp, extra=None):
if not response_ok (resp, extra):
raise_xml (errstr, resp)
#
# Return a keyData XML string for the given key
#
def keydata_xmlstring (key):
key64 = base64.standard_b64encode (key.key)
return ("""
<secDNS:keyData>
<secDNS:flags>""" + str (key.flags) + """</secDNS:flags>
<secDNS:protocol>""" + str (key.protocol) + """</secDNS:protocol>
<secDNS:alg>""" + str (key.algorithm) + """</secDNS:alg>
<secDNS:pubKey>""" + key64 + """</secDNS:pubKey>
</secDNS:keyData>
""")
#
# Return a dsData XML string for the given key
# The zone is a dns.name object; the key is a dns.rdtypes.ANY.DNSKEY object.
# The algorithm is mentioned by name. The embedkeydata option can be
# provided to indicate if an optional <secDNS:keyData/> should be embedded.
#
def dsdata_xmlstring (zone, key, alg='SHA1', embedkeydata=True):
keyds = dns.dnssec.make_ds (zone, key, alg)
hexdigest = ''
for c in keyds.digest:
hexdigest = hexdigest + ('%02x' % ord (c))
return ("""
<secDNS:dsData>
<secDNS:keyTag>""" + str (keyds.key_tag) + """</secDNS:keyTag>
<secDNS:alg>""" + str (keyds.algorithm) + """</secDNS:alg>
<secDNS:digestType>""" + str (keyds.digest_type) + """</secDNS:digestType>
<secDNS:digest>""" + hexdigest + """</secDNS:digest>
""" + (keydata_xmlstring (key) if embedkeydata else '') + """
</secDNS:dsData>
""")
#
# Send a "hello" greeting and store the reply in greetz
#
def hello (sox):
global greetz, hostname
greetz = syncio (sox,
xml_head +
epp_open +
""" <hello/>
""" +
epp_clos)
syncio (sox, None)
hostname = greetz.find ('{' + eppns + '}greeting/{' + eppns + '}svID').text
#
# Send a keepalive message while nothing else is going on
#
keepalive = hello
#
# Login to the server, after the usual greetings phase
#
def login (sox):
# print 'Login in progress:'
resp = syncio (sox,
xml_head +
epp_open +
""" <command>
<login>
<clID>""" + sidn_user + """</clID>
<pw>""" + sidn_pass + """</pw>
<options>
<version>1.0</version>
<lang>en</lang>
</options>
<svcs>
<objURI>urn:ietf:params:xml:ns:contact-1.0</objURI>
<objURI>urn:ietf:params:xml:ns:host-1.0</objURI>
<objURI>urn:ietf:params:xml:ns:domain-1.0</objURI>
<svcExtension>
<extURI>http://rxsd.domain-registry.nl/sidn-ext-epp-1.0</extURI>
</svcExtension>
</svcs>
</login>
</command>
""" +
epp_clos)
require_ok ('Failed to login to EPP server ' + hostname, resp)
#
# Logout from the server
#
def logout (sox):
# print 'Logging out...'
resp = syncio (sox,
xml_head +
epp_open +
""" <command>
<logout/>
</command>
""" +
epp_clos)
require_ok ('Failed to logout from EPP server ' + hostname, resp, '1500')
# print 'Logged out.'
#
# Update from the old to the new set of keys for a given zone.
# Either key set may be empty, to signify moving from/to an
# unsigned zone reference in the parent zone.
#
def update_keys (sox, zone, newkeys):
worktodo = False
zonestr = zone.to_text ()
if zonestr [-1:] == '.':
zonestr = zonestr [:-1]
#
# Retrieve the old/current keys over EPP
oldkeys = eppkeys (sox, zonestr)
#
# Start construction of the EPP command
query = (
xml_head +
epp_open +
""" <command>
<update>
<domain:update xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>""" + zonestr + """</domain:name>
</domain:update>
</update>
<extension>
<secDNS:update xmlns:secDNS=\"""" + dnssecns + """\">
""")
#
# Remove any old keys that are missing in the new set
toberemoved = ''
for key in oldkeys:
found = False
for key2 in newkeys:
found = found or key._cmp (key2) == 0
if not found:
# toberemoved = toberemoved + dsdata_xmlstring (zone, key, embedkeydata=False)
toberemoved = toberemoved + keydata_xmlstring (key)
worktodo = True
if toberemoved != '':
query = query + '<secDNS:rem>\n' + toberemoved + '</secDNS:rem>\n'
#
# Add any new keys that are not in the old set
tobeadded = ''
for key in newkeys:
found = False
for key2 in oldkeys:
found = found or key._cmp (key2) == 0
if not found:
# tobeadded = tobeadded + dsdata_xmlstring (zone, key, embedkeydata=False)
tobeadded = tobeadded + keydata_xmlstring (key)
worktodo = True
if tobeadded != '':
query = query + '<secDNS:add>\n' + tobeadded + '</secDNS:add>\n'
#
# Finish construction of the EPP command
query = (query +
""" </secDNS:update>
</extension>
</command>
""" + epp_clos)
#
# Execute the EPP command
if worktodo:
resp = syncio (sox, query)
require_ok ('Failed to update the key set in the parent', resp)
#
# Setup a resolver instance for localhost
#
rescf = os.popen ('echo nameserver 127.0.0.1', 'r')
local_resolver = dns.resolver.Resolver (configure=False)
local_resolver.read_resolv_conf (rescf)
local_resolver.use_edns (0, 0, 4096)
rescf.close ()
#
# Obtain the list of keys in use according to EPP
#
def eppkeys (sox, zonestr):
# print 'EPP download of current keyset in progress:'
resp = syncio (sox,
xml_head +
epp_open +
""" <command>
<info>
<domain:info xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name hosts="all">""" + zonestr + """</domain:name>
</domain:info>
</info>
</command>
""" +
epp_clos)
require_ok ('Failed to obtain domain info for ' + zonestr, resp)
eppkeys = []
for xk in resp.findall ('{' + eppns + '}response/{' + eppns + '}extension/{' + dnssecns + '}infData/{' + dnssecns + '}keyData'):
flags = int (xk.find ('{' + dnssecns + '}flags').text)
if flags & dnskeybase.SEP != 0:
proto = int (xk.find ('{' + dnssecns + '}protocol').text)
alg = int (xk.find ('{' + dnssecns + '}alg').text)
key = base64.standard_b64decode (xk.find ('{' + dnssecns + '}pubKey').text)
k = dns.rdtypes.ANY.DNSKEY.DNSKEY (dns.rdataclass.IN, dns.rdatatype.DNSKEY, flags, proto, alg, key)
eppkeys.append (k)
return eppkeys
#
# Obtain the list of keys for a domain, and add them
#
# Note: This assumes that the SEP bit is not just a hint, but actually
# used if and only if a key fulfills the role of secure entry point,
# also known as a key signing key. This is the case with OpenDNSSEC,
# but it may or may not hold for additional keys imported.
#
def keysync (sox, zonestr):
newkeys = []
#TODO# Handle empty lists if none present, but beware of timeouts
zone = dns.name.from_text (zonestr)
#CLASS# keys = local_resolver.query (zone, rdtype=dns.rdtypes.ANY.DNSKEY)
keys = local_resolver.query (zone, rdtype=48) # DNSKEY
for k in keys:
if k.flags & dnskeybase.SEP != 0:
newkeys.append (k)
#TMP# update_keys (sox, zone, [], newkeys)
    # update_keys() retrieves the current key set over EPP itself,
    # so only the new key set needs to be passed in
    update_keys (sox, zone, newkeys)
#
# The main program for the shell
#
def shell_session (cnx):
global shellname, action_argcount, sidn_host, sidn_port, sidn_user, sidn_pass
shellname = 'registry_shell'
openlog ('registry_shell', LOG_PID | (LOG_PERROR if sys.stderr.isatty () else 0), LOG_DAEMON)
syslog (LOG_INFO, 'Opening new shell to ' + sidn_host + ':' + str (sidn_port))
loggedin = False
last_contact = None
last_user = None
try:
login (cnx)
loggedin = True
moretodo = True
while moretodo:
prompt ()
cmd = sys.stdin.readline ()
if cmd == '':
log_debug ('exit\nOK\n')
break
if cmd == '\n' or cmd [:1] == '#':
continue
cmd = cmd.strip ()
syslog (LOG_INFO, 'Received: ' + cmd)
while cmd.find (' ') != -1:
cmd = cmd.replace (' ', ' ')
argv = cmd.split (' ')
if not action_argcount [shellname].has_key (argv [0]):
fatal ('Command not allowed')
if len (argv) != 1 + action_argcount [shellname] [argv [0]]:
fatal ('Wrong args')
elif argv [0] == 'keysync':
keysync (cnx, argv [1])
elif argv [0] == 'eppkeys':
keyset = eppkeys (cnx, argv [1])
ctr = 0
for key in keyset:
# print key.to_text ()
ctr = ctr + 1
log_debug ('Number of KSK keys found: ', ctr)
elif argv [0] == 'help' and os.isatty (sys.stdin.fileno ()):
prefix = 'Supported commands: '
for cmd in action_argcount [shellname].keys ():
log_debug (prefix + cmd)
prefix = ', '
log_debug ('\nOK\n')
elif argv [0] == 'exit' or argv [0] == 'quit':
log_debug ('OK\n')
moretodo = False
else:
fatal ('Unknown command')
except SystemExit:
raise
except Exception, e:
syslog ('Shell exception: ' + str (e))
fatal ('You hurt my feelings -- this is goodbye')
sys.exit (1)
finally:
if loggedin:
logout (cnx)
syslog (LOG_INFO, 'Closing shell regularly')
closelog ()
#
# Main program -- running inside flock()
#
if __name__ == '__main__':
cnx = connect ()
# print 'Connected to %s:%d' % server_tuple
syslog (LOG_INFO, 'Registry server date:' + greetz.find ('{' + eppns + '}greeting/{' + eppns + '}svDate').text)
lockf = open (sidn_lock, 'w')
try:
fcntl.flock (lockf, fcntl.LOCK_EX)
shell_session (cnx)
finally:
os.unlink (sidn_lock)
lockf.close ()
disconnect (cnx)
```
#### File: SURFdnssec/ods-zonedata/ods-zonedata-recv-knot.py
```python
import os
import stat
import fcntl
import rabbitdnssec
from rabbitdnssec import log_debug, log_info, log_notice, log_warning, log_error, log_critical
def addzone (zone, zonedata):
# Ensure that a zone is served by Knot DNS.
# Note: Key setup and DNSSEC signing is orthogonally setup;
# it defaults to being off, so an unsigned zone is delivered.
#
# Note: This procedure is idempotent, zone additions are neutral
# for already-existing zones.
#
# Note: Zone addition is not done in the parenting procedure,
# as it makes little sense there without actual zone data (with,
# at minimum, the SOA record). The parenting exchange will get
# a hint when we add a zone though, so it can append any child
# name server records as soon as we add the zone.
#
global_lock = open ('/tmp/knotc-global-lock', 'w')
fcntl.lockf (global_lock, fcntl.LOCK_EX)
rv0 = os.system ('/usr/sbin/knotc conf-begin')
rv1 = 0
rv2 = 0
if rv0==0:
os.system ('/usr/sbin/knotc conf-set zone.domain "' + zone + '"')
# Ignore the result; the zone may already exist; check that
rv1 = os.system ('/usr/sbin/knotc conf-get "zone[' + zone + ']"')
if rv0==0 and rv1==0:
try:
knot_signed = '/var/opendnssec/signed/' + zone + '.txt'
shared = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP
fd = open (knot_signed, 'w')
fd.write (zonedata)
fd.close ()
os.chmod (knot_signed, shared)
rv2 = os.system ('/usr/sbin/knotc conf-set "zone[' + zone + '].file" "' + knot_signed + '"')
except:
rv2 = 2
if rv0==0 and rv1==0 and rv2==0:
os.system ('/usr/sbin/knotc conf-commit')
log_debug ('CMD> ods-keyops-knot-sharekey "' + zone + '"')
os.system ('ods-keyops-knot-sharekey "' + zone + '"')
else:
if rv0==0:
os.system ('/usr/sbin/knotc conf-abort')
log_error ('Knot DNS could not add zone', zone, '(%d,%d,%d)' % (rv0,rv1,rv2))
global_lock.close ()
def delzone (zone):
# Remove a zone from Knot DNS, so it is no longer served.
# Note: The removal is even done when key material still
# exists. In this case, the zone is no longer delivered
# but the key material is assumed to be cleaned up by an
# orthogonal process [that will shrug if the zone has
# been removed already].
#
# Note: Zone deletion is not done in the parenting procedure,
# as it can silently ignore the case of a deleted zone (for which
# we need, at minimum, the SOA record). The parenting exchange
# needs no hint when we delete a zone.
#
global_lock = open ('/tmp/knotc-global-lock', 'w')
fcntl.lockf (global_lock, fcntl.LOCK_EX)
rv0 = os.system ('/usr/sbin/knotc conf-begin')
rv1 = 0
rv2 = 0
if rv0==0:
rv1 = os.system ('/usr/sbin/knotc conf-unset zone.domain "' + zone + '"')
if rv0==0 and rv1==0:
rv2 = os.system ('/usr/sbin/knotc -f zone-purge "' + zone + '"')
if rv0==0 and rv1==0 and rv2==0:
os.system ('/usr/sbin/knotc conf-commit')
else:
if rv0==0:
os.system ('/usr/sbin/knotc conf-abort')
log_error ('Knot DNS could not delete zone', zone, '(%d,%d,%d)' % (rv0,rv1,rv2))
global_lock.close ()
```
|
{
"source": "jdesai840/MentalabPi",
"score": 3
}
|
#### File: src/explorepy/bt_client.py
```python
from explorepy._exceptions import *
import bluetooth
import time
import sys
class BtClient:
""" Responsible for Connecting and reconnecting explore devices via bluetooth"""
def __init__(self):
self.is_connected = False
self.lastUsedAddress = None
self.socket = None
self.host = None
self.port = None
self.name = None
def init_bt(self, device_name=None, device_addr=None):
"""
Initialize Bluetooth connection
Args:
device_name(str): Name of the device (either device_name or device address should be given)
device_addr(str): Devices MAC address
"""
if (device_addr is None) and (device_name is None):
raise InputError("Missing name or address")
if device_name is not None:
if device_addr is None:
if not self.find_mac_addr(device_name):
raise DeviceNotFoundError("Error: Couldn't find the device! Restart your device and run the code "
"again and check if MAC address/name is entered correctly.")
if not self._check_mac_address(device_name=device_name, mac_address=self.lastUsedAddress):
raise ValueError("MAC address does not match the expected value!")
else:
self.lastUsedAddress = device_addr
if not self._check_mac_address(device_name=device_name, mac_address=self.lastUsedAddress):
raise ValueError("MAC address does not match the expected value!")
else:
# No need to scan if we have the address
self.lastUsedAddress = device_addr
device_name = "Explore_"+str(device_addr[-5:-3])+str(device_addr[-2:])
address_known = True
service_matches = self.find_explore_service()
if service_matches is None:
raise DeviceNotFoundError("SSP service for the device %s, with MAC address %s could not be found. Please "
"restart the device and try again" %(device_name, self.lastUsedAddress))
for services in service_matches:
self.port = services["port"]
self.name = services["name"]
self.host = services["host"]
# Checking if "Explore_ABCD" matches "XX:XX:XX:XX:AB:CD"
if self._check_mac_address(device_name=device_name, mac_address=self.host):
break
if not self._check_mac_address(device_name=device_name, mac_address=self.host):
raise ValueError("MAC address does not match the expected value on the SSP service!!")
def bt_connect(self):
"""Creates the socket
"""
timeout = 0
while True:
try:
self.socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
print("Connecting to %s with address %s" % (self.name, self.host))
self.socket.connect((self.host, self.port))
break
except bluetooth.BluetoothError as error:
self.socket.close()
print("Could not connect; Retrying in 2s...")
time.sleep(2)
timeout += 1
if timeout > 5:
raise DeviceNotFoundError("Could not find the device! Please make sure the device is on and in "
"advertising mode.")
return self.socket
def reconnect(self):
"""
        Try to reopen the Bluetooth socket using the last known host and port.
        Raises DeviceNotFoundError if the connection does not succeed after
        several attempts.
"""
timeout = 1
while timeout < 5:
try:
self.socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
self.socket.connect((self.host, self.port))
break
except bluetooth.BluetoothError as error:
print("Bluetooth Error: Probably timeout, attempting reconnect. Error: ", error)
time.sleep(5)
timeout += 1
        if timeout >= 5:
self.socket.close()
raise DeviceNotFoundError("Could not find the device! Please make sure the device is on and in "
"advertising mode.")
def find_mac_addr(self, device_name):
i = 0
while i < 5:
nearby_devices = bluetooth.discover_devices(lookup_names=True, flush_cache=True )
for address, name in nearby_devices:
if name == device_name:
self.lastUsedAddress = address
return True
i += 1
print("No device found with the name: %s, searching again..." % device_name)
time.sleep(0.1)
return False
def find_explore_service(self):
uuid = "1101" # Serial Port Profile (SPP) service
i = 0
while i < 5:
service_matches = bluetooth.find_service(uuid=uuid, address=self.lastUsedAddress)
if len(service_matches) > 0:
return service_matches
i += 1
return None
@staticmethod
def _check_mac_address(device_name, mac_address):
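        # Example: _check_mac_address("Explore_12AB", "00:13:43:A1:12:AB") is True,
        # because the name suffix "12AB" matches the last two MAC octets "12:AB".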
return (device_name[-4:-2] == mac_address[-5:-3]) and (device_name[-2:] == mac_address[-2:])
```
#### File: explorepy/dashboard/dashboard.py
```python
import numpy as np
import time
from functools import partial
from threading import Thread
from explorepy.tools import HeartRateEstimator
from bokeh.layouts import widgetbox, row, column
from bokeh.models import ColumnDataSource, ResetTool, PrintfTickFormatter, Panel, Tabs
from bokeh.plotting import figure
from bokeh.server.server import Server
from bokeh.palettes import Colorblind
from bokeh.models.widgets import Select, DataTable, TableColumn, RadioButtonGroup
from bokeh.models import SingleIntervalTicker
from bokeh.core.property.validation import validate, without_property_validation
from tornado import gen
from bokeh.transform import dodge
ORN_SRATE = 20 # Hz
WIN_LENGTH = 10 # Seconds
MODE_LIST = ['EEG', 'ECG']
CHAN_LIST = ['Ch1', 'Ch2', 'Ch3', 'Ch4', 'Ch5', 'Ch6', 'Ch7', 'Ch8']
DEFAULT_SCALE = 10 ** -3 # Volt
N_MOVING_AVERAGE = 60
V_TH = [10**-5, 5 * 10 ** -3] # Noise threshold for ECG (Volt)
ORN_LIST = ['accX', 'accY', 'accZ', 'gyroX', 'gyroY', 'gyroZ', 'magX', 'magY', 'magZ']
SCALE_MENU = {"1 uV": 6., "5 uV": 5.3333, "10 uV": 5., "100 uV": 4., "200 uV": 3.6666, "500 uV": 3.3333, "1 mV": 3., "5 mV": 2.3333,
"10 mV": 2., "100 mV": 1.}
TIME_RANGE_MENU = {"10 s": 10., "5 s": 5., "20 s": 20.}
LINE_COLORS = ['green', '#42C4F7', 'red']
FFT_COLORS = Colorblind[8]
class Dashboard:
"""Explorepy dashboard class"""
def __init__(self, n_chan, exg_fs, mode="signal", firmware_version="NA"):
"""
Args:
n_chan (int): Number of channels
exg_fs (int): Sampling rate of ExG signal
mode (str): Visualization mode {'signal', 'impedance'}
firmware_version:
"""
self.n_chan = n_chan
self.y_unit = DEFAULT_SCALE
self.offsets = np.arange(1, self.n_chan + 1)[:, np.newaxis].astype(float)
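        # Each channel is drawn around its own integer offset (1..n_chan) so the
        # traces stack vertically in the ExG plot; incoming samples are divided
        # by y_unit to express them in the currently selected display scale.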
self.chan_key_list = ['Ch' + str(i + 1) for i in range(self.n_chan)]
self.exg_mode = 'EEG'
self.rr_estimator = None
self.win_length = WIN_LENGTH
self.mode = mode
self.exg_fs = exg_fs
# Init ExG data source
exg_temp = np.zeros((n_chan, 2))
exg_temp[:, 0] = self.offsets[:, 0]
exg_temp[:, 1] = np.nan
init_data = dict(zip(self.chan_key_list, exg_temp))
init_data['t'] = np.array([0., 0.])
self.exg_source = ColumnDataSource(data=init_data)
# Init ECG R-peak source
init_data = dict(zip(['r_peak', 't'], [np.array([None], dtype=np.double), np.array([None], dtype=np.double)]))
self.r_peak_source = ColumnDataSource(data=init_data)
# Init marker source
init_data = dict(zip(['marker', 't'], [np.array([None], dtype=np.double), np.array([None], dtype=np.double)]))
self.marker_source = ColumnDataSource(data=init_data)
# Init ORN data source
init_data = dict(zip(ORN_LIST, np.zeros((9, 1))))
init_data['t'] = [0.]
self.orn_source = ColumnDataSource(data=init_data)
# Init table sources
self.heart_rate_source = ColumnDataSource(data={'heart_rate': ['NA']})
self.firmware_source = ColumnDataSource(data={'firmware_version': [firmware_version]})
self.battery_source = ColumnDataSource(data={'battery': ['NA']})
self.temperature_source = ColumnDataSource(data={'temperature': ['NA']})
self.light_source = ColumnDataSource(data={'light': ['NA']})
self.battery_percent_list = []
self.server = None
# Init fft data source
init_data = dict(zip(self.chan_key_list, np.zeros((self.n_chan, 1))))
init_data['f'] = np.array([0.])
self.fft_source = ColumnDataSource(data=init_data)
# Init impedance measurement source
init_data = {'channel': [CHAN_LIST[i] for i in range(0, self.n_chan)],
'impedance': ['NA' for i in range(self.n_chan)],
'row': ['1' for i in range(self.n_chan)],
'color': ['black' for i in range(self.n_chan)]}
self.imp_source = ColumnDataSource(data=init_data)
def start_server(self):
"""Start bokeh server"""
validation = validate(False)
self.server = Server({'/': self._init_doc}, num_procs=1)
self.server.start()
def start_loop(self):
"""Start io loop and show the dashboard"""
self.server.io_loop.add_callback(self.server.show, "/")
self.server.io_loop.start()
def _init_doc(self, doc):
self.doc = doc
self.doc.title = "Explore Dashboard"
# Create plots
self._init_plots()
# Create controls
m_widgetbox = self._init_controls()
# Create tabs
exg_tab = Panel(child=self.exg_plot, title="ExG Signal")
orn_tab = Panel(child=column([self.acc_plot, self.gyro_plot, self.mag_plot], sizing_mode='fixed'),
title="Orientation")
fft_tab = Panel(child=self.fft_plot, title="Spectral analysis")
imp_tab = Panel(child=self.imp_plot, title="Impedance")
if self.mode == "signal":
self.tabs = Tabs(tabs=[exg_tab, orn_tab, fft_tab], width=1200)
elif self.mode == "impedance":
self.tabs = Tabs(tabs=[imp_tab], width=1200)
self.doc.add_root(row([m_widgetbox, self.tabs]))
self.doc.add_periodic_callback(self._update_fft, 2000)
self.doc.add_periodic_callback(self._update_heart_rate, 2000)
@gen.coroutine
@without_property_validation
def update_exg(self, time_vector, ExG):
"""update_exg()
Update ExG data in the visualization
Args:
time_vector (list): time vector
ExG (np.ndarray): array of new data
"""
# Update ExG data
ExG = self.offsets + ExG / self.y_unit
new_data = dict(zip(self.chan_key_list, ExG))
new_data['t'] = time_vector
self.exg_source.stream(new_data, rollover=2 * self.exg_fs * WIN_LENGTH)
@gen.coroutine
@without_property_validation
def update_orn(self, timestamp, orn_data):
"""Update orientation data
Args:
timestamp (float): timestamp of the sample
orn_data (float vector): Vector of orientation data with shape of (9,)
"""
if self.tabs.active != 1:
return
new_data = dict(zip(ORN_LIST, np.array(orn_data)[:, np.newaxis]))
new_data['t'] = [timestamp]
self.orn_source.stream(new_data, rollover=2 * WIN_LENGTH * ORN_SRATE)
@gen.coroutine
@without_property_validation
def update_info(self, new):
"""Update device information in the dashboard
Args:
new(dict): Dictionary of new values
"""
for key in new.keys():
data = {key: new[key]}
if key == 'firmware_version':
self.firmware_source.stream(data, rollover=1)
elif key == 'battery':
self.battery_percent_list.append(new[key][0])
if len(self.battery_percent_list) > N_MOVING_AVERAGE:
del self.battery_percent_list[0]
value = int(np.mean(self.battery_percent_list) / 5) * 5
if value < 1:
value = 1
self.battery_source.stream({key: [value]}, rollover=1)
elif key == 'temperature':
self.temperature_source.stream(data, rollover=1)
elif key == 'light':
data[key] = [int(data[key][0])]
self.light_source.stream(data, rollover=1)
else:
print("Warning: There is no field named: " + key)
@gen.coroutine
@without_property_validation
def _update_fft(self):
""" Update spectral frequency analysis plot
"""
# Check if the tab is active and if EEG mode is active
if (self.tabs.active != 2) or (self.exg_mode != 'EEG'):
return
exg_data = np.array([self.exg_source.data[key] for key in self.chan_key_list])
if exg_data.shape[1] < self.exg_fs * 4.5:
return
fft_content, freq = get_fft(exg_data, self.exg_fs)
data = dict(zip(self.chan_key_list, fft_content))
data['f'] = freq
self.fft_source.data = data
@gen.coroutine
@without_property_validation
def _update_heart_rate(self):
"""Detect R-peaks and update the plot and heart rate"""
if self.exg_mode == 'EEG':
self.heart_rate_source.stream({'heart_rate': ['NA']}, rollover=1)
return
if self.rr_estimator is None:
self.rr_estimator = HeartRateEstimator(fs=self.exg_fs)
# Init R-peaks plot
self.exg_plot.circle(x='t', y='r_peak', source=self.r_peak_source,
fill_color="red", size=8)
ecg_data = (np.array(self.exg_source.data['Ch1'])[-500:] - self.offsets[0]) * self.y_unit
time_vector = np.array(self.exg_source.data['t'])[-500:]
# Check if the peak2peak value is bigger than threshold
if (np.ptp(ecg_data) < V_TH[0]) or (np.ptp(ecg_data) > V_TH[1]):
print("P2P value larger or less than threshold!")
return
peaks_time, peaks_val = self.rr_estimator.estimate(ecg_data, time_vector)
peaks_val = (np.array(peaks_val)/self.y_unit) + self.offsets[0]
if peaks_time:
data = dict(zip(['r_peak', 't'], [peaks_val, peaks_time]))
self.r_peak_source.stream(data, rollover=50)
# Update heart rate cell
estimated_heart_rate = self.rr_estimator.heart_rate
data = {'heart_rate': [estimated_heart_rate]}
self.heart_rate_source.stream(data, rollover=1)
@gen.coroutine
@without_property_validation
def update_marker(self, timestamp, code):
if self.mode == "impedance":
return
new_data = dict(zip(['marker', 't', 'code'], [np.array([0.01, self.n_chan+0.99, None], dtype=np.double),
np.array([timestamp, timestamp, None], dtype=np.double)]))
self.marker_source.stream(new_data=new_data, rollover=100)
@gen.coroutine
@without_property_validation
def update_imp(self, imp):
if self.mode == "impedance":
color = []
imp_str = []
for x in imp:
if x > 500:
color.append("black")
imp_str.append("Open")
elif x > 100:
color.append("red")
imp_str.append(str(round(x, 0))+" K\u03A9")
elif x > 50:
color.append("orange")
imp_str.append(str(round(x, 0))+" K\u03A9")
elif x > 10:
color.append("yellow")
imp_str.append(str(round(x, 0))+" K\u03A9")
elif x > 5:
imp_str.append(str(round(x, 0)) + " K\u03A9")
color.append("green")
else:
color.append("green")
imp_str.append("<5K\u03A9") # As the ADS is not precise in low values.
data = {"impedance": imp_str,
'channel': [CHAN_LIST[i] for i in range(0, self.n_chan)],
'row': ['1' for i in range(self.n_chan)],
'color': color
}
self.imp_source.stream(data, rollover=self.n_chan)
else:
raise RuntimeError("Trying to compute impedances while the dashboard is not in Impedance mode!")
@gen.coroutine
@without_property_validation
def _change_scale(self, attr, old, new):
"""Change y-scale of ExG plot"""
new, old = SCALE_MENU[new], SCALE_MENU[old]
old_unit = 10 ** (-old)
self.y_unit = 10 ** (-new)
for ch, value in self.exg_source.data.items():
if ch in CHAN_LIST:
temp_offset = self.offsets[CHAN_LIST.index(ch)]
self.exg_source.data[ch] = (value - temp_offset) * (old_unit / self.y_unit) + temp_offset
self.r_peak_source.data['r_peak'] = (np.array(self.r_peak_source.data['r_peak'])-self.offsets[0]) *\
(old_unit / self.y_unit) + self.offsets[0]
@gen.coroutine
@without_property_validation
def _change_t_range(self, attr, old, new):
"""Change time range"""
self._set_t_range(TIME_RANGE_MENU[new])
@gen.coroutine
def _change_mode(self, new):
"""Set EEG or ECG mode"""
self.exg_mode = MODE_LIST[new]
def _init_plots(self):
"""Initialize all plots in the dashboard"""
self.exg_plot = figure(y_range=(0.01, self.n_chan + 1 - 0.01), y_axis_label='Voltage', x_axis_label='Time (s)',
title="ExG signal",
plot_height=600, plot_width=1270,
y_minor_ticks=int(10),
tools=[ResetTool()], active_scroll=None, active_drag=None,
active_inspect=None, active_tap=None)
self.mag_plot = figure(y_axis_label='Magnetometer [mgauss/LSB]', x_axis_label='Time (s)',
plot_height=230, plot_width=1270,
tools=[ResetTool()], active_scroll=None, active_drag=None,
active_inspect=None, active_tap=None)
self.acc_plot = figure(y_axis_label='Accelerometer [mg/LSB]',
plot_height=190, plot_width=1270,
tools=[ResetTool()], active_scroll=None, active_drag=None,
active_inspect=None, active_tap=None)
self.acc_plot.xaxis.visible = False
self.gyro_plot = figure(y_axis_label='Gyroscope [mdps/LSB]',
plot_height=190, plot_width=1270,
tools=[ResetTool()], active_scroll=None, active_drag=None,
active_inspect=None, active_tap=None)
self.gyro_plot.xaxis.visible = False
self.fft_plot = figure(y_axis_label='Amplitude (uV)', x_axis_label='Frequency (Hz)', title="FFT",
x_range=(0, 70), plot_height=600, plot_width=1270, y_axis_type="log")
self.imp_plot = self._init_imp_plot()
# Set yaxis properties
self.exg_plot.yaxis.ticker = SingleIntervalTicker(interval=1, num_minor_ticks=10)
# Initial plot line
for i in range(self.n_chan):
self.exg_plot.line(x='t', y=CHAN_LIST[i], source=self.exg_source,
line_width=1.5, alpha=.9, line_color="#42C4F7")
self.fft_plot.line(x='f', y=CHAN_LIST[i], source=self.fft_source, legend_label=CHAN_LIST[i] + " ",
line_width=2, alpha=.9, line_color=FFT_COLORS[i])
self.exg_plot.line(x='t', y='marker', source=self.marker_source,
line_width=1, alpha=.8, line_color='#7AB904', line_dash="4 4")
for i in range(3):
self.acc_plot.line(x='t', y=ORN_LIST[i], source=self.orn_source, legend_label=ORN_LIST[i] + " ",
line_width=1.5, line_color=LINE_COLORS[i], alpha=.9)
self.gyro_plot.line(x='t', y=ORN_LIST[i + 3], source=self.orn_source, legend_label=ORN_LIST[i + 3] + " ",
line_width=1.5, line_color=LINE_COLORS[i], alpha=.9)
self.mag_plot.line(x='t', y=ORN_LIST[i + 6], source=self.orn_source, legend_label=ORN_LIST[i + 6] + " ",
line_width=1.5, line_color=LINE_COLORS[i], alpha=.9)
# Set x_range
self.plot_list = [self.exg_plot, self.acc_plot, self.gyro_plot, self.mag_plot]
self._set_t_range(WIN_LENGTH)
self.exg_plot.ygrid.minor_grid_line_color = 'navy'
self.exg_plot.ygrid.minor_grid_line_alpha = 0.05
# Set the formatting of yaxis ticks' labels
self.exg_plot.yaxis[0].formatter = PrintfTickFormatter(format="Ch %i")
# Autohide toolbar/ Legend location
for plot in self.plot_list:
plot.toolbar.autohide = True
plot.background_fill_color = "#fafafa"
if len(plot.legend) != 0:
plot.legend.location = "bottom_left"
plot.legend.orientation = "horizontal"
plot.legend.padding = 2
def _init_imp_plot(self):
p = figure(plot_width=600, plot_height=200, x_range=CHAN_LIST[0:self.n_chan],
y_range=[str(1)], toolbar_location=None)
p.circle(x='channel', y="row", radius=.3, source=self.imp_source, fill_alpha=0.6, color="color",
line_color='color', line_width=2)
text_props = {"source": self.imp_source, "text_align": "center",
"text_color": "black", "text_baseline": "middle", "text_font": "helvetica",
"text_font_style": "bold"}
x = dodge("channel", -0.1, range=p.x_range)
r = p.text(x=x, y=dodge('row', -.4, range=p.y_range), text="impedance", **text_props)
r.glyph.text_font_size = "10pt"
r = p.text(x=x, y=dodge('row', -.3, range=p.y_range), text="channel", **text_props)
r.glyph.text_font_size = "12pt"
p.outline_line_color = None
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_standoff = 0
p.axis.visible = False
return p
def _init_controls(self):
"""Initialize all controls in the dashboard"""
# EEG/ECG Radio button
self.mode_control = RadioButtonGroup(labels=MODE_LIST, active=0)
self.mode_control.on_click(self._change_mode)
self.t_range = Select(title="Time window", value="10 s", options=list(TIME_RANGE_MENU.keys()), width=210)
self.t_range.on_change('value', self._change_t_range)
self.y_scale = Select(title="Y-axis Scale", value="1 mV", options=list(SCALE_MENU.keys()), width=210)
self.y_scale.on_change('value', self._change_scale)
# Create device info tables
columns = [TableColumn(field='heart_rate', title="Heart Rate (bpm)")]
self.heart_rate = DataTable(source=self.heart_rate_source, index_position=None, sortable=False,
reorderable=False,
columns=columns, width=200, height=50)
columns = [TableColumn(field='firmware_version', title="Firmware Version")]
self.firmware = DataTable(source=self.firmware_source, index_position=None, sortable=False, reorderable=False,
columns=columns, width=200, height=50)
columns = [TableColumn(field='battery', title="Battery (%)")]
self.battery = DataTable(source=self.battery_source, index_position=None, sortable=False, reorderable=False,
columns=columns, width=200, height=50)
columns = [TableColumn(field='temperature', title="Temperature (C)")]
self.temperature = DataTable(source=self.temperature_source, index_position=None, sortable=False,
reorderable=False, columns=columns, width=200, height=50)
columns = [TableColumn(field='light', title="Light (Lux)")]
self.light = DataTable(source=self.light_source, index_position=None, sortable=False, reorderable=False,
columns=columns, width=200, height=50)
# Add widgets to the doc
m_widgetbox = widgetbox([self.mode_control, self.y_scale, self.t_range, self.heart_rate,
self.battery, self.temperature, self.light, self.firmware], width=220)
return m_widgetbox
def _set_t_range(self, t_length):
"""Change time range of ExG and orientation plots"""
for plot in self.plot_list:
self.win_length = int(t_length)
plot.x_range.follow = "end"
plot.x_range.follow_interval = t_length
plot.x_range.range_padding = 0.
plot.x_range.min_interval = t_length
def get_fft(exg, EEG_SRATE):
"""Compute FFT"""
n_chan, n_sample = exg.shape
L = n_sample / EEG_SRATE
n = 1024
freq = EEG_SRATE * np.arange(int(n / 2)) / n
fft_content = np.fft.fft(exg, n=n) / n
fft_content = np.abs(fft_content[:, range(int(n / 2))])
return fft_content[:, 1:], freq[1:]
if __name__ == '__main__':
print('Opening Bokeh application on http://localhost:5006/')
    m_dashboard = Dashboard(n_chan=8, exg_fs=250)  # exg_fs is required; the demo loop streams 50 samples per 0.2 s = 250 Hz
m_dashboard.start_server()
def my_loop():
T = 0
time.sleep(2)
while True:
time_vector = np.linspace(T, T + .2, 50)
T += .2
EEG = (np.random.randint(0, 2, (8, 50)) - .5) * .0002 # (np.random.rand(8, 50)-.5) * .0005
m_dashboard.doc.add_next_tick_callback(partial(m_dashboard.update_exg, time_vector=time_vector, ExG=EEG))
device_info_attr = ['firmware_version', 'battery', 'temperature', 'light']
device_info_val = [['2.0.4'], [95], [21], [13]]
new_data = dict(zip(device_info_attr, device_info_val))
m_dashboard.doc.add_next_tick_callback(partial(m_dashboard.update_info, new=new_data))
m_dashboard.doc.add_next_tick_callback(
partial(m_dashboard.update_orn, timestamp=T, orn_data=np.random.rand(9)))
time.sleep(0.2)
thread = Thread(target=my_loop)
thread.start()
m_dashboard.start_loop()
```
#### File: src/explorepy/__main__.py
```python
import sys
import argparse
from explorepy.cli import CLI
def main():
parser = argparse.ArgumentParser(
description='Python package for the Mentalab Explore',
usage='''explorepy <command> [args]
Available Commands
find_device: Scans for nearby explore-devices. Prints out Name and MAC address of the found devices
acquire: Connects to device and streams data. needs either MAC or Name of the desired device as input
-a --address Device MAC address (Form XX:XX:XX:XX:XX:XX).
-n --name Device name (e.g. "Explore_12AB").
record_data: Connects to a device and records ExG and orientation data live to separate files
-a --address Device MAC address (Form XX:XX:XX:XX:XX:XX).
-n --name Device name (e.g. Explore_12AB). Either device name or MAC address is needed.
-f --filename The prefix of the files.
-t --type File type (either edf or csv).
-ow --overwrite Overwrite already existing files with the same name.
-d --duration Recording duration in seconds
push2lsl Streams Data to Lab Streaming Layer (LSL).
-a --address Device MAC address (Form XX:XX:XX:XX:XX:XX).
-n --name Device name (e.g. Explore_12AB). Either device name or MAC address is needed.
visualize Visualizes real-time data in a browser-based dashboard
-a --address Device MAC address (Form XX:XX:XX:XX:XX:XX).
-n --name Device name (e.g. Explore_12AB). Either device name or MAC address is needed.
-nf --notchfreq Frequency of applied notch filter (By default, no notch filter is applied)
-lf --lowfreq Low cutoff frequency of bandpass filter
-hf --highfreq High cutoff frequency of bandpass filter
impedance Show electrode impedances
-a --address Device MAC address (Form XX:XX:XX:XX:XX:XX).
-n --name Device name (e.g. Explore_12AB). Either device name or MAC address is needed.
-nf --notchfreq Frequency of applied notch filter (By default, no notch filter is applied)
bin2csv Takes a Binary file and converts it to 3 CSV files (ExG, orientation and marker files)
-i --inputfile Name of the input file
-ow --overwrite Overwrite already existing files with the same name.
bin2edf Takes a Binary file and converts it to 2 BDF+ files (ExG and orientation, markers are saved in ExG file)
-i --inputfile Name of the input file
-ow --overwrite Overwrite already existing files with the same name.
format_memory This command formats the memory
-a --address Device MAC address (Form XX:XX:XX:XX:XX:XX).
-n --name Device name (e.g. Explore_12AB).
set_sampling_rate This command sets the sampling rate of ExG input (yet in alpha state)
-a --address Device MAC address (Form XX:XX:XX:XX:XX:XX).
-n --name Device name (e.g. Explore_12AB).
-sr --sampling_rate Sampling rate of ExG channels, it can be 250 or 500.
soft_reset This command does a soft reset of the device. All the settings (e.g. sampling rate, channel mask) return to the default values.
-a --address Device MAC address (Form XX:XX:XX:XX:XX:XX).
-n --name Device name (e.g. Explore_12AB).
''')
parser.add_argument('command', help='Command to run.')
args = parser.parse_args(sys.argv[1:2])
if not hasattr(CLI, args.command):
print('Incorrect usage. See help below.')
parser.print_help()
        sys.exit(1)
cli = CLI(args.command)
if __name__ == "__main__":
main()
```
|
{
"source": "jdesai840/PHYS234-Project2",
"score": 2
}
|
#### File: jdesai840/PHYS234-Project2/brody.py
```python
import jetson.inference
import jetson.utils
import asyncio
from bleak import BleakClient
from bleak import discover
from neosensory_python import NeoDevice
from time import sleep
net = jetson.inference.detectNet('ssd-mobilenet-v2', threshold=0.8)
camera = jetson.utils.gstCamera(1280, 720, '/dev/video0')
display = jetson.utils.glDisplay()
# For haptic link debugging
# def notification_handler(sender, data):
# print("{0}: {1}".format(sender, data))
async def run(loop):
buzz_addr = "C7:6D:2E:06:83:39"
devices = await discover()
async with BleakClient(buzz_addr,loop=loop) as client:
my_buzz = NeoDevice(client)
await asyncio.sleep(1)
x = await client.is_connected()
print("Connection State: {0}\r\n".format(x))
# await my_buzz.enable_notifications(notification_handler)
await asyncio.sleep(1)
await my_buzz.request_developer_authorization()
await my_buzz.accept_developer_api_terms()
await my_buzz.pause_device_algorithm()
motor_vibrate_frame = [0, 0, 0, 0]
motor_pattern_index = 0
motor_pattern_value = 0
intensity = 255
light_intensity = 51
delay = 0.01
qdelay = 0.002
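        # Each vibration frame below is a list of four per-motor intensities
        # (0 = off, 255 = strongest used here), one value per motor of the Buzz
        # wristband; delay/qdelay are the on/off durations of the patterns.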
async def motor_vehicle_pattern():
await my_buzz.vibrate_motors([intensity, 0, 0, intensity])
sleep(delay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
async def busy_intersection_pattern():
await my_buzz.vibrate_motors([intensity, intensity, intensity, intensity])
sleep(delay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
await my_buzz.vibrate_motors([0, intensity, intensity, 0])
sleep(delay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
async def person_pattern():
await my_buzz.vibrate_motors([intensity, intensity, 0, 0])
sleep(delay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
await my_buzz.vibrate_motors([0, 0, intensity, intensity])
sleep(delay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
async def toilet_pattern():
await my_buzz.vibrate_motors([light_intensity, 0, light_intensity, 0])
sleep(qdelay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
await my_buzz.vibrate_motors([0, light_intensity, 0, light_intensity])
sleep(qdelay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
async def computer_pattern():
await my_buzz.vibrate_motors([intensity, 0, 0, intensity])
sleep(delay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
async def kitchen_utensils_pattern():
await my_buzz.vibrate_motors([intensity, intensity, intensity, intensity])
sleep(delay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
await my_buzz.vibrate_motors([0, intensity, intensity, 0])
sleep(delay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
async def diningware_pattern():
await my_buzz.vibrate_motors([intensity, intensity, 0, 0])
sleep(delay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
await my_buzz.vibrate_motors([0, 0, intensity, intensity])
sleep(delay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
async def rest_place_pattern():
await my_buzz.vibrate_motors([light_intensity, 0, light_intensity, 0])
sleep(qdelay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
await my_buzz.vibrate_motors([0, light_intensity, 0, light_intensity])
sleep(qdelay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
sleep(qdelay)
async def cell_phone_pattern():
await my_buzz.vibrate_motors([light_intensity,light_intensity,light_intensity,light_intensity])
await my_buzz.vibrate_motors([light_intensity,light_intensity,light_intensity,light_intensity])
sleep(delay)
await my_buzz.vibrate_motors([0, 0, 0, 0])
await my_buzz.vibrate_motors([0, 0, 0, 0])
try:
while True:
await asyncio.sleep(0.1)
while display.IsOpen():
img, width, height = camera.CaptureRGBA()
detections = net.Detect(img, width, height)
display.RenderOnce(img, width, height)
                    display.SetTitle("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
# Frame rate limiter to avoid excessive haptic feedback triggering
sleep(0.5)
# Similar objects are grouped together until the user's brain has adapted,
# upon which more distinct, specific groupings can be made
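                    # The numeric ClassID values below follow the COCO label map used by
                    # the ssd-mobilenet-v2 detector (e.g. 1 = person, 3 = car, 10 = traffic light).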
for detection in detections:
if detection.ClassID == 1:
print('human')
await person_pattern()
elif detection.ClassID == 3:
print('car')
await motor_vehicle_pattern()
elif detection.ClassID == 4:
print('motorcycle')
await motor_vehicle_pattern()
elif detection.ClassID == 6:
print('bus')
await motor_vehicle_pattern()
elif detection.ClassID == 8:
print('truck')
await motor_vehicle_pattern()
elif detection.ClassID == 10:
print('traffic light')
await busy_intersection_pattern()
elif detection.ClassID == 13:
print('stop sign')
await busy_intersection_pattern()
elif detection.ClassID == 72:
print('monitor')
await computer_pattern()
elif detection.ClassID == 73:
print('laptop')
await computer_pattern()
elif detection.ClassID == 74:
print('mouse')
await computer_pattern()
elif detection.ClassID == 76:
print('keyboard')
await computer_pattern()
elif detection.ClassID == 77:
print('cell phone')
await cell_phone_pattern()
elif detection.ClassID == 48:
print('fork')
await kitchen_utensils_pattern()
elif detection.ClassID == 49:
print('knife')
await kitchen_utensils_pattern()
elif detection.ClassID == 50:
print('spoon')
await kitchen_utensils_pattern()
elif detection.ClassID == 45:
print('plate')
await diningware_pattern()
elif detection.ClassID == 47:
print('cup')
await diningware_pattern()
elif detection.ClassID == 51:
print('bowl')
await diningware_pattern()
elif detection.ClassID == 15:
print('bench')
await rest_place_pattern()
elif detection.ClassID == 62:
print('chair')
await rest_place_pattern()
elif detection.ClassID == 63:
print('couch')
await rest_place_pattern()
elif detection.ClassID == 65:
print('bed')
await rest_place_pattern()
elif detection.ClassID == 70:
print('toilet')
await toilet_pattern()
except KeyboardInterrupt:
await my_buzz.resume_device_algorithm()
pass
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(run(loop))
```
|
{
"source": "jdeschamps/N2V_fiji",
"score": 3
}
|
#### File: ImageJ2/N2V/BatchPredict.py
```python
from java.io import File
import sys
from de.csbdresden.n2v.command import N2VPredictCommand
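# Note: this is an ImageJ (Jython) script; `io`, `command`, `input`, `output`
# and `modelFile` are assumed to be supplied as script parameters (e.g. via
# #@ annotations) in the full script -- they are not defined in this excerpt.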
def getFileName(path):
fileparts = path.split("/")
return fileparts[len(fileparts)-1]
def runNetwork(inputFile, outputFile):
print("input: " + inputFile.getAbsolutePath() + ", output: " + outputFile.getAbsolutePath())
img = io.open(inputFile.getAbsolutePath())
mymod = (command.run(N2VPredictCommand, False,
"input", img,
"modelFile", modelFile,
"showProgressDialog", False)).get()
myoutput = mymod.getOutput("output")
io.save(myoutput, outputFile.getAbsolutePath())
if(output == input):
print("ERROR: please provide an output directory that is not the same as the input directory")
sys.exit()
for file in input.listFiles():
if file.toString().endswith(".tif"):
runNetwork(file, File(output, getFileName(file.toString())))
```
|
{
"source": "jdeschamps/napari-sklearn-decomposition",
"score": 2
}
|
#### File: napari_sklearn_decomposition/_tests/test_widget.py
```python
from typing import Callable
import napari
import numpy as np
import pytest
from napari_sklearn_decomposition import decomposition
from napari_sklearn_decomposition._widget import NMF, PCA, FastICA
RESULT_NAMES = {"PCA": "PCA Eigenvectors", "FastICA": "Independent Components", "NMF": "Non-negative Components"}
RESULT_COLORMAPS = {"PCA": "PiYG", "FastICA": "PiYG", "NMF": "viridis"}
def test_plugin_widget_added(make_napari_viewer: Callable[..., napari.Viewer]):
viewer = make_napari_viewer()
viewer.window.add_plugin_dock_widget(
plugin_name="napari-sklearn-decomposition", widget_name="Decomposition Widget"
)
assert len(viewer.window._dock_widgets) == 1
def test_widget_added(make_napari_viewer: Callable[..., napari.Viewer]) -> None:
# Make a viewer with an image
viewer = make_napari_viewer()
viewer.add_image(np.random.random((100, 50, 50)), name="test")
widget = decomposition()
viewer.window.add_dock_widget(widget)
# Check widget was added
assert len(viewer.window._dock_widgets) == 1
# Check that the default choice is correct
assert widget.choice.get_value() == "PCA"
# Check that the PCA widget was created properly
assert widget.PCA.n_components.get_value() == 6
assert widget.PCA.svd_solver.get_value() == "auto"
assert widget.PCA.whiten.get_value()
assert (widget.PCA.image.get_value() == viewer.layers["test"].data).all()
@pytest.mark.parametrize(
"method",
[
PCA,
NMF,
FastICA,
],
)
def test_decompositions(
make_napari_viewer: Callable[..., napari.Viewer], method
) -> None:
# Make a viewer with an image
viewer = make_napari_viewer()
viewer.add_image(np.random.random((100, 50, 50)))
# Launch the widget for the decomposition method
widget = method()
viewer.window.add_dock_widget(widget)
# Run the widget with default settings
widget()
# Check a new image was added
assert len(viewer.layers) == 2
# Check that the name & shape of the new layer is correct
assert viewer.layers[-1].name == RESULT_NAMES[method.__name__]
assert viewer.layers[-1].data.shape == (6, 50, 50)
# Check that the colormap is correct
assert viewer.layers[-1].colormap.name == RESULT_COLORMAPS[method.__name__]
# # make_napari_viewer is a pytest fixture that returns a napari viewer object
# # capsys is a pytest fixture that captures stdout and stderr output streams
# def test_example_q_widget(make_napari_viewer, capsys):
# # make viewer and add an image layer using our fixture
# viewer = make_napari_viewer()
# viewer.add_image(np.random.random((100, 100)))
# # create our widget, passing in the viewer
# my_widget = ExampleQWidget(viewer)
# # call our widget method
# my_widget._on_click()
# # read captured output and check that it's as we expected
# captured = capsys.readouterr()
# assert captured.out == "napari has 1 layers\n"
# def test_example_magic_widget(make_napari_viewer, capsys):
# viewer = make_napari_viewer()
# layer = viewer.add_image(np.random.random((100, 100)))
# # this time, our widget will be a MagicFactory or FunctionGui instance
# my_widget = example_magic_widget()
# # if we "call" this object, it'll execute our function
# my_widget(viewer.layers[0])
# # read captured output and check that it's as we expected
# captured = capsys.readouterr()
# assert captured.out == f"you have selected {layer}\n"
```
|
{
"source": "jdesilosmd/SEIRV-Model-for-COVID-19-in-a-University-Setting",
"score": 2
}
|
#### File: jdesilosmd/SEIRV-Model-for-COVID-19-in-a-University-Setting/idmcomp.py
```python
import numpy as np
class IdmComp:
def __init__(self, N, time, R0, t_inc, t_inf, eff):
self.N = N
self.time = np.arange(1, time+1, 1)
self.R0 = R0
self.t_inc = t_inc
self.t_inf = t_inf
self.eff = eff
def idm_rates(self):
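        # Standard SEIR(V) parameterization: alpha = 1/incubation period
        # (E -> I rate), gamma = 1/infectious period (I -> R rate),
        # beta = R0 * gamma (transmission rate), and c_s = 1 - efficacy,
        # presumably the residual susceptibility factor applied to vaccinated
        # individuals elsewhere in the model.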
alpha = 1/self.t_inc
gamma = 1/self.t_inf
beta = self.R0*gamma
c_s = 1-self.eff
duration = self.time
return duration, alpha, beta, gamma, c_s
def herd_im(self):
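        # Herd immunity threshold adjusted for vaccine efficacy:
        # p_c = (1 - 1/R0) / eff.  Example with hypothetical numbers: R0 = 3 and
        # eff = 0.8 give (1 - 1/3) / 0.8 ~= 0.83, i.e. roughly 83% coverage needed.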
        # Check the degenerate cases first to avoid division by zero
        if self.R0 == 0 or self.eff == 0:
            return np.nan
        solution = (1 - (1 / self.R0)) / self.eff
        if solution >= 1.0:
            return 1.0
        return solution
def initial_state_seir(self, E_in, I_in, R_in):
S_in = self.N-1
E_in = E_in
I_in = I_in
R_in = R_in
return S_in, E_in, I_in, R_in
def initial_state_seirv(self, E_in, I_in, R_in, p):
S_in = ((1-p)*self.N)
E_in = E_in
I_in = I_in
R_in = R_in
V_in = p*self.N
return S_in, E_in, I_in, R_in, V_in
if __name__ == "__main__":
IdmComp
```
|