from tank import Tank, Direction
from field import Field
from util import ArmedTimer, GameObject
import random
from itertools import cycle
class TankAI:
SPAWNING_DELAY = 1.5
FIRE_TIMER = 1.0
@staticmethod
def dir_delay():
return random.uniform(0.3, 3.0)
def pick_direction(self):
c, r = self.field.map.col_row_from_coords(*self.tank.position)
prohibited_dir = set()
# prohibited_dir.add(self.tank.direction)
if c <= 1:
prohibited_dir.add(Direction.LEFT)
if r <= 1:
prohibited_dir.add(Direction.UP)
if c >= self.field.map.width - 2:
prohibited_dir.add(Direction.RIGHT)
if r >= self.field.map.height - 2:
prohibited_dir.add(Direction.DOWN)
return random.choice(list(Direction.all() - prohibited_dir))
def __init__(self, tank: Tank, field: Field):
self.tank = tank
self.field = field
self.fire_timer = ArmedTimer(delay=self.FIRE_TIMER)
self.dir_timer = ArmedTimer(delay=self.dir_delay())
self.spawn_timer = ArmedTimer(delay=self.SPAWNING_DELAY)
def _destroy(self):
self.tank.to_destroy = True
def _degrade(self):
if self.tank.color == Tank.Color.PLAIN:
self.tank.color = Tank.Color.GREEN
else:
self._destroy()
def update(self):
if self.tank.is_spawning:
if self.spawn_timer.tick():
if self.field.oc_map.test_rect(self.tank.bounding_rect, good_values=(None, self.tank)):
self.tank.is_spawning = False
else:
return
else:
return
if self.tank.hit:
if self.tank.tank_type == Tank.Type.ENEMY_HEAVY:
self._degrade()
else:
self._destroy()
self.tank.hit = False
if self.fire_timer.tick():
self.tank.fire()
self.fire_timer.start()
if self.dir_timer.tick():
self.tank.direction = self.pick_direction()
self.dir_timer.delay = self.dir_delay()
self.dir_timer.start()
self.tank.move_tank(self.tank.direction)
def reset(self):
self.tank.direction = Direction.random()
class EnemyFractionAI:
MAX_ENEMIES = 5
RESPAWN_TIMER = 5.0
def __init__(self, field: Field, tanks: GameObject):
self.tanks = tanks
self.field = field
self.spawn_points = {
(x, y): None for x, y in field.respawn_points(True)
}
self.spawn_timer = ArmedTimer(self.RESPAWN_TIMER)
self.enemy_queue = cycle([
Tank.Type.ENEMY_SIMPLE,
Tank.Type.ENEMY_FAST,
Tank.Type.ENEMY_MIDDLE,
Tank.Type.ENEMY_HEAVY,
])
self._enemy_queue_iter = iter(self.enemy_queue)
self.try_to_spawn_tank()
@property
def all_enemies(self):
return [t for t in self.tanks if t.fraction == Tank.ENEMY]
def get_next_enemy(self, pos):
t_type = next(self._enemy_queue_iter)
new_tank = Tank(Tank.ENEMY, Tank.Color.PLAIN, t_type)
new_tank.is_spawning = True
new_tank.ai = TankAI(new_tank, self.field)
if random.uniform(0, 1) > 0.35:
new_tank.is_bonus = True
new_tank.place(self.field.get_center_of_cell(*pos))
return new_tank
def try_to_spawn_tank(self):
free_locations = list()
for loc, tank in self.spawn_points.items():
if isinstance(tank, Tank):
if not tank.is_spawning:
self.spawn_points[loc] = None
else:
free_locations.append(loc)
if free_locations and len(self.all_enemies) < self.MAX_ENEMIES:
pos = random.choice(free_locations)
tank = self.get_next_enemy(pos)
self.spawn_points[pos] = tank
self.tanks.add_child(tank)
def stop_all_moving(self):
for t in self.all_enemies:
t.stop()
def update(self):
if self.spawn_timer.tick():
self.spawn_timer.start()
self.try_to_spawn_tank()
for enemy_tank in self.all_enemies:
self.update_one_tank(enemy_tank)
def update_one_tank(self, t: Tank):
t.to_destroy = False
t.ai.update()
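# The AI code above relies on a tick()/start() contract from util.ArmedTimer.
# Below is a minimal sketch of that contract for illustration only (the name
# SketchedTimer and the update(dt) hook are assumptions; the real util.ArmedTimer
# may be implemented differently):
class SketchedTimer:
    def __init__(self, delay):
        self.delay = delay
        self._elapsed = 0.0

    def update(self, dt):
        # advanced once per frame by the game loop
        self._elapsed += dt

    def tick(self):
        # True once the configured delay has elapsed since the last start()
        return self._elapsed >= self.delay

    def start(self):
        # re-arm the timer for the next interval
        self._elapsed = 0.0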
|
from hbr_client import ApiClient, UserClient
from time import sleep
import pprint
'''
client = ApiClient(token="token")
for i in range(10):
r = client.save({'x':i})
print(r.status_code)
print(r.text)
sleep(1)
'''
client = UserClient('email', 'pass')
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(client.get_data())
|
premio = float(input("Digite o valor do premio: "))
imposto = (7 / 100)
premio2 = (premio * imposto)
premiofinal = (premio - premio2)
primeirodescont = (32/100)
amg1 = (premiofinal * primeirodescont)
segundodescont = (46/100)
amg2 = (premiofinal * segundodescont)
terceirodescont = (22/100)
amg3 = (premiofinal * terceirodescont)
print("O premio com o imposto descontado ficará:", premiofinal,".O primeiro amigo receberá:", amg1,".O segundo amigo receberá:", amg2,".e o terceiro:", amg3)
|
#!/usr/bin/python3
import os
import sys
import math
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import data_utils
load_fn = data_utils.load_cls_train_val
balance_fn = None
map_fn = None
keep_remainder = True
save_ply_fn = None
num_class = 40
batch_size = 128
sample_num = 512
num_epochs = 1024
step_val = 500
learning_rate_base = 0.01
decay_steps = 8000
decay_rate = 0.5
learning_rate_min = 1e-6
weight_decay = 1e-5
jitter = 0.0
jitter_val = 0.0
jitter_test = 0.0
rotation_range = [0, 0, 0, 'u']
rotation_range_val = [0, 0, 0, 'u']
rotation_range_test = [0, 0, 0, 'u']
rotation_order = 'rxyz'
scaling_range = [0, 0, 0, 'g']
scaling_range_val = [0, 0, 0, 'u']
scaling_range_test = [0, 0, 0, 'u']
sample_num_variance = 1.0 / 8
sample_num_clip = 1.0 / 4
x = 3
xconv_param_name = ('K', 'D', 'P', 'C', 'links')
xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in
[(8, 1, -1, 16 * x, []),
(12, 2, 384, 32 * x, []),
(16, 2, 128, 64 * x, []),
(16, 3, 128, 128 * x, [])]]
with_global = True
fc_param_name = ('C', 'dropout_rate')
fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in
[(128 * x, 0.0),
(64 * x, 0.8)]]
sampling = 'random'
optimizer = 'adam'
epsilon = 1e-2
data_dim = 6
use_extra_features = False
with_X_transformation = True
sorting_method = None
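# A small sanity check of what the xconv_params comprehension above produces for
# the first layer (values derived from this file; only the name _first_xconv is new):
_first_xconv = dict(zip(xconv_param_name, (8, 1, -1, 16 * x, [])))
assert _first_xconv == {'K': 8, 'D': 1, 'P': -1, 'C': 48, 'links': []}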
|
from .education_field_counter import EducationFieldCounter
|
import pickle
import unittest
import pandas as pd
from ..src.pdf_table_extractions import run_table_extraction
class TestTableExtractions(unittest.TestCase):
tables = {}
def create_test_metadata(self, db, buffer_size, tables_per_job):
metadata1 = {'pdf_name': 'test/test_docs/foo1.pdf', 'page_num': '1', 'coords': '', 'camelot_coords': '170, 370, 560, 270'}
metadata2 = {'pdf_name': 'test/test_docs/foo2.pdf', 'page_num': '1', 'coords': '', 'camelot_coords': '316, 499, 566, 337'}
metadata3 = {'pdf_name': 'test/test_docs/foo3.pdf', 'page_num': '1', 'coords': '', 'camelot_coords': '46, 704, 521, 546'}
return [metadata1, metadata2, metadata3]
def insert_test_tables_local(self, coll, detected_tables):
self.tables.update({detected_tables['pdf_name']: pickle.loads(detected_tables['table_df'])})
def easy_table(self):
df1 = self.tables['foo1.pdf']
self.assertEqual((6, 4), df1.shape)
def medium_table(self):
df2 = self.tables['foo2.pdf']
self.assertEqual((11, 2), df2.shape)
def hard_table(self):
df3 = self.tables['foo3.pdf']
self.assertEqual((16, 4), df3.shape)
def test_tables(self):
run_table_extraction(self.create_test_metadata, self.insert_test_tables_local, 1, False)
self.easy_table()
self.medium_table()
self.hard_table()
if __name__ == '__main__':
unittest.main()
|
import dlvhex
import hexlite.ast.shallowparser as shp
from hexlite.modelcallback import JSONModelCallback
import logging, sys
def id(p):
for x in dlvhex.getTrueInputAtoms():
tup = x.tuple()
if len(tup) != 2:
raise Exception("this external atom processes only arity 1 predicate inputs")
dlvhex.output( (tup[1],) )
def idc(c):
dlvhex.output((c,))
def testZeroArity0():
pass
def testZeroArity1():
dlvhex.output(())
def testA(pred):
if len(dlvhex.getTrueInputAtoms()) == 0:
dlvhex.output(('foo',))
else:
dlvhex.output(('bar',))
def testB(pred1, pred2):
if len(dlvhex.getTrueInputAtoms()) <= 1:
dlvhex.output(('bar',))
else:
dlvhex.output(('foo',))
def testC(pred):
for atom in dlvhex.getTrueInputAtoms():
for x in atom.tuple()[1:]:
# output arguments of input predicate
dlvhex.output((x.value(),))
def testArity1OneTupleA():
dlvhex.output( ('one',) )
def testArity1OneTupleB():
dlvhex.output( (dlvhex.storeConstant('one'),) )
def testArity1TwoTuples():
dlvhex.output( ('one',) )
dlvhex.output( ('two',) )
def testEven(pred1, pred2):
true = [x for x in dlvhex.getTrueInputAtoms()]
num = len(true)
if num % 2 == 0:
dlvhex.output(())
def testSubstr(string, start, length):
stringv = string.value()
needquote = '"' in stringv
startv = start.intValue()
lengthv = length.intValue()
unquoted = stringv.strip('"')
endv = min(startv+lengthv, len(unquoted)+1)
out = unquoted[startv:endv]
if needquote:
out = '"'+out+'"'
logging.debug('testSubstr with string={} start={} length={} creates out={}'.format(stringv, startv, lengthv, out))
dlvhex.output((out,))
def testStrlen(string):
stringv = string.value()
unquoted = stringv.strip('"')
dlvhex.output( (len(unquoted),) )
def testSmallerThan(int1, int2):
if int1.intValue() < int2.intValue():
dlvhex.output( () )
def testConcat(strs):
#logging.debug('testConcat got '+repr(strs)+' '+str(strs.__class__)+' '+str(strs[0].__class__))
values = [s.value() for s in strs]
#logging.debug('testConcat values '+repr(values))
needquote = any(['"' in s for s in values])
#logging.debug('testConcat needquote '+repr(needquote))
unquoted = [s.strip('"') for s in values]
result = ''.join(unquoted)
if needquote:
result = '"'+result+'"'
#logging.debug('testConcat returns '+repr(result))
dlvhex.output( (result,) )
def isFunctionTerm(term):
logging.debug('isFunctionTerm got '+repr(term))
pinp = shp.parseTerm(term.value())
logging.debug('parseTerm {}'.format(repr(pinp)))
if len(pinp) > 1:
# yes it is
dlvhex.output( () )
def functionCompose(args):
logging.debug('functionCompose got '+repr(args))
if len(args) == 1:
dlvhex.output((args[0],))
else:
apred = args[0].value()
avalues = [a.value() for a in args[1:]]
dlvhex.output(("{}({})".format(apred, ','.join(avalues)),))
def functionDecompose(term, narg):
logging.debug('functionDecompose got {} and {}'.format(repr(term), narg))
pinp = shp.parseTerm(term.value())
logging.debug('parseTerm {}'.format(repr(pinp)))
argidx = narg.intValue()
if argidx < len(pinp):
dlvhex.output( (shp.shallowprint(pinp[argidx]),) )
def functionDecomposeN(inp, N):
logging.debug('functionDecomposeN got {} and {}'.format(repr(inp), N))
pinp = shp.parseTerm(inp.value())
logging.debug('parseTerm {}'.format(repr(pinp)))
if len(pinp) == N+1:
otuple = [ shp.shallowprint(x) for x in pinp ]
dlvhex.output( tuple(otuple) )
def functionDecompose1(inp):
functionDecomposeN(inp, 1)
def functionDecompose2(inp):
functionDecomposeN(inp, 2)
def functionDecompose3(inp):
functionDecomposeN(inp, 3)
def getArity(term):
logging.debug('getArity got {}'.format(repr(term)))
pinp = shp.parseTerm(term.value())
logging.debug('parseTerm {}'.format(repr(pinp)))
dlvhex.output( (len(pinp)-1,) )
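# Illustration of the term helpers above (assuming shp.parseTerm("f(a,b)") yields
# a [functor, arg, ...] structure, as the code relies on): functionCompose called
# with arguments (f, a, b) outputs the term "f(a,b)", functionDecompose with
# narg=1 returns "a", and getArity outputs 2 for "f(a,b)".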
def isEmpty(assignment):
true = 0
false = 0
unknown = 0
premisse = ()
for x in dlvhex.getInputAtoms():
if x.isTrue():
true = true + 1
elif x.isFalse():
false = false + 1
else:
unknown = unknown + 1
if true > 0:
# external atom is true
dlvhex.output(())
elif (true + unknown) > 0:
# external atom can be true
dlvhex.outputUnknown(())
else:
# else case applies: there are no true and no unknown input atoms
#
# external atom is certainly not true
v = 0
def numberOfBalls(assignment, min, max):
true = 0
false = 0
unknown = 0
premisse = ()
for x in dlvhex.getInputAtoms():
if x.isTrue():
true = true + 1
elif x.isFalse():
false = false + 1
else:
unknown = unknown + 1
v = 0
if true >= min.intValue() and (true + unknown) <= max.intValue():
# external atom is true
dlvhex.output(())
elif (true + unknown) >= min.intValue() and true <= max.intValue():
# external atom can be true
dlvhex.outputUnknown(())
else:
# else case applies: (true + unknown) < min.intValue() or true > max.intValue()
#
# external atom is certainly not true
v = 0
def numberOfBallsSE(assignment, max):
true = 0
false = 0
unknown = 0
premisse = ()
for x in dlvhex.getInputAtoms():
if x.isTrue():
true = true + 1
elif x.isFalse():
false = false + 1
else:
unknown = unknown + 1
v = 0
if (true + unknown) <= max.intValue():
# external atom is true
dlvhex.output(())
elif true <= max.intValue():
# external atom can be true
dlvhex.outputUnknown(())
else:
# else case applies: if true > max.intValue()
#
# external atom is certainly not true
v = 0
def numberOfBallsGE(assignment, min):
true = 0
false = 0
unknown = 0
premisse = ()
for x in dlvhex.getInputAtoms():
if x.isTrue():
true = true + 1
elif x.isFalse():
false = false + 1
else:
unknown = unknown + 1
v = 0
if true >= min.intValue():
# external atom is true
dlvhex.output(())
elif (true + unknown) >= min.intValue():
# external atom can be true
dlvhex.outputUnknown(())
else:
# else case applies: if (true + unknown) < min.intValue()
#
# external atom is certainly not true
v = 0
# no native implementations for these, so let's use the non-native ones
testIsEmpty = isEmpty
testNumberOfBalls = numberOfBalls
testNumberOfBallsSE = numberOfBallsSE
testNumberOfBallsGE = numberOfBallsGE
def partialTest(assignment):
# returns true if the predicate input is true for more than 1 constant
true = 0
false = 0
unknown = 0
premisse = ()
for x in dlvhex.getInputAtoms():
if x.isTrue():
true = true + 1
# premisse = premisse + (x, )
# print "true input atom:", x.value()
elif x.isFalse():
false = false + 1
# premisse = premisse + (x.negate(), )
# print "false input atom:", x.value()
else:
unknown = unknown + 1
# print "unknown input atom:", x.value()
v = 0
if true > 1:
# dlvhex.learn(premisse + (dlvhex.storeOutputAtom((), False).negate(), ))
dlvhex.output(())
elif true + unknown > 1:
dlvhex.outputUnknown(())
def someSelected(selected):
for x in dlvhex.getInputAtoms():
if x.tuple()[0] == selected and x.isTrue():
dlvhex.output(())
def someSelectedPartial(selected):
unknown = False
for x in dlvhex.getInputAtoms():
if x.tuple()[0] == selected and x.isTrue():
dlvhex.output( () )
return
elif not x.isFalse():
unknown = True
if unknown:
dlvhex.outputUnknown(())
def someSelectedLearning(selected):
for x in dlvhex.getInputAtoms():
if x.tuple()[0] == selected and x.isTrue():
dlvhex.output(())
nogood = [x, dlvhex.storeOutputAtom(()).negate()]
dlvhex.learn(nogood)
def secondArgByFirstArg(predicate, first_arg):
for x in dlvhex.getTrueInputAtoms():
xtup = x.tuple()
if xtup[0] == predicate and xtup[1] == first_arg:
outtup = (xtup[2],)
dlvhex.output(outtup)
try:
logging.info("trying to add nogood for %s and %s", x, outtup)
nogood = [x, dlvhex.storeOutputAtom(outtup).negate()]
dlvhex.learn(nogood)
except dlvhex.StoreAtomException as e:
logging.warning("could not store atom: %s", e)
def secondArgByFirstArgMoreLearning(predicate, first_arg):
for x in dlvhex.getTrueInputAtoms():
xtup = x.tuple()
if xtup[0] == predicate and xtup[1] == first_arg:
outtup = (xtup[2],)
dlvhex.output(outtup)
# we simply produce all relevant nogoods
# this is unrealistic, but helps to test several parts of hexlite (only one eatom call will be sufficient)
for output in dlvhex.getInstantiatedOutputAtoms():
aux, pred, selector, out = output.tuple()
logging.debug("processing instantiated output atom &secondArgByFirstArgMoreLearning[%s,%s](%s)", pred, selector, out)
try:
relevantinput = dlvhex.storeAtom((pred, selector, out))
nogood1 = [relevantinput, output.negate()]
dlvhex.learn(nogood1)
nogood2 = [relevantinput.negate(), output]
dlvhex.learn(nogood2)
except dlvhex.StoreAtomException as e:
logging.debug("relevant input atom not storable")
def testSetMinus(p, q):
# is true for all constants in extension of p but not in extension of q
pset, qset = set(), set()
for x in dlvhex.getTrueInputAtoms():
tup = x.tuple()
if tup[0].value() == p.value():
pset.add(tup[1].value())
elif tup[0].value() == q.value():
qset.add(tup[1].value())
rset = pset - qset
for r in rset:
dlvhex.output( (r,) )
def testSetMinusLearn(p, q):
# is true for all constants in extension of p but not in extension of q
# (same as testSetMinus)
# uses learning
pe = p.extension()
#logging.error("ATOM got pe {}".format(repr(pe)))
#for y in pe:
# logging.error("ATOM y in pe {} {} {}".format(str(y), repr(y[0].symlit.lit), hash(y)))
qe = q.extension()
#logging.error("ATOM got qe {}".format(repr(qe)))
#for y in qe:
# logging.error("ATOM y in qe {} {} {}".format(str(y), repr(y[0].symlit.lit), hash(y)))
for x in pe:
logging.debug("ATOM x = {} {} in pe {}(.)".format(repr(x), x.__class__, p))
if x not in qe:
logging.debug("ATOM {} not in qe {}(.)".format(x, q))
# learn that it is not allowed that p(x) and -q(x) and this atom is false for x
try:
p_x = dlvhex.storeAtom((p, ) + x)
q_x = dlvhex.storeAtom((q, ) + x)
output_x = dlvhex.storeOutputAtom(x)
nogood = (p_x, q_x.negate(), output_x.negate())
logging.debug("ATOM nogood {} for p_x {} q_x {} output_x {}".format(repr(nogood), repr(p_x), repr(q_x), repr(output_x)))
dlvhex.learn(nogood)
except dlvhex.StoreAtomException as e:
logging.warning("StoreAtomException %s", e)
pass
#logging.error("ATOM output {}".format(repr(x)))
dlvhex.output(x)
def testNonmon(p):
pset = set()
for x in dlvhex.getTrueInputAtoms():
tup = x.tuple()
pset.add(tup[1].intValue())
mapping = {
frozenset([]): [2],
frozenset([1]): [1],
frozenset([2]): [1],
frozenset([1,2]): [1,2],
}
pset = frozenset(pset)
if pset not in mapping:
raise Exception("testNonmon is supposed to handle only input domain {1,2}")
for o in mapping[pset]:
dlvhex.output( (o,) )
def testNonmon2(p):
pset = set()
for x in dlvhex.getTrueInputAtoms():
tup = x.tuple()
pset.add(tup[1].intValue())
mapping = {
frozenset([]): [2],
frozenset([1]): [2],
frozenset([2]): [ ],
frozenset([1,2]): [1,2],
}
pset = frozenset(pset)
if pset not in mapping:
raise Exception("testNonmon2 is supposed to handle only input domain {1,2}")
for o in mapping[pset]:
dlvhex.output( (o,) )
def rdf(uri):
logging.warning('TODO implement &rdf (and #namespace)')
dlvhex.output(('s', 'p', 'o'))
def issue_2_num(a):
n = 0
for x in dlvhex.getInputAtoms():
if x.tuple()[0] == a and x.isTrue():
n += 1
dlvhex.output((n, ))
def testStoreParseable(term):
newterm = "foo(bar({}),baz)".format(term.value())
dlvhex.output( (dlvhex.storeParseable(newterm),) )
def register(arguments=None):
if arguments == ['foo', '3']:
sys.stdout.write("testplugin loaded with arguments %s" % str(arguments))
if arguments == ['jsonout']:
dlvhex.registerModelCallbackClass(JSONModelCallback)
#XFAIL = expected failure
dlvhex.addAtom("testA", (dlvhex.PREDICATE,), 1)
dlvhex.addAtom("testB", (dlvhex.PREDICATE, dlvhex.PREDICATE), 1)
dlvhex.addAtom("testC", (dlvhex.PREDICATE,), 1)
dlvhex.addAtom("testZeroArity0", tuple(), 0)
dlvhex.addAtom("testZeroArity1", tuple(), 0)
#XFAIL unused dlvhex.addAtom("testConcatAll", (dlvhex.PREDICATE,), 1)
#unused dlvhex.addAtom("testListDomain", (dlvhex.TUPLE,), 1)
#unused dlvhex.addAtom("testListConcat", (dlvhex.TUPLE,), 1)
#unused dlvhex.addAtom("testListLength", (dlvhex.CONSTANT,dlvhex.CONSTANT), 1)
#unused dlvhex.addAtom("testListSplit", (dlvhex.CONSTANT,dlvhex.CONSTANT), 2)
#unused dlvhex.addAtom("testListHalf", (dlvhex.CONSTANT,), 2)
#unused dlvhex.addAtom("testListMerge", (dlvhex.CONSTANT,dlvhex.CONSTANT,dlvhex.CONSTANT), 2)
dlvhex.addAtom("testSubstr", (dlvhex.CONSTANT,dlvhex.CONSTANT,dlvhex.CONSTANT), 1)
dlvhex.addAtom("testStrlen", (dlvhex.CONSTANT,), 1)
dlvhex.addAtom("testSmallerThan", (dlvhex.CONSTANT,dlvhex.CONSTANT), 0)
dlvhex.addAtom("testEven", (dlvhex.PREDICATE,dlvhex.PREDICATE), 0)
#unused dlvhex.addAtom("testOdd", (dlvhex.PREDICATE,dlvhex.PREDICATE), 0)
#unused dlvhex.addAtom("testLessThan", (dlvhex.PREDICATE,dlvhex.PREDICATE), 0)
#unused dlvhex.addAtom("testEqual", (dlvhex.PREDICATE,dlvhex.PREDICATE), 0)
dlvhex.addAtom("id", (dlvhex.PREDICATE,), 1)
#XFAIL partial dlvhex.addAtom("idp", (dlvhex.PREDICATE,), 1)
dlvhex.addAtom("idc", (dlvhex.CONSTANT,), 1)
#TODO testCautiousQuery
dlvhex.addAtom("testSetMinus", (dlvhex.PREDICATE,dlvhex.PREDICATE), 1)
dlvhex.addAtom("testSetMinusLearn", (dlvhex.PREDICATE,dlvhex.PREDICATE), 1)
dlvhex.addAtom("testNonmon", (dlvhex.PREDICATE,), 1)
dlvhex.addAtom("testNonmon2", (dlvhex.PREDICATE,), 1)
prop = dlvhex.ExtSourceProperties()
prop.setProvidesPartialAnswer(True)
dlvhex.addAtom("isEmpty", (dlvhex.PREDICATE, ), 0, prop)
dlvhex.addAtom("testIsEmpty", (dlvhex.PREDICATE, ), 0, prop)
prop = dlvhex.ExtSourceProperties()
prop.setProvidesPartialAnswer(True)
dlvhex.addAtom("numberOfBalls", (dlvhex.PREDICATE, dlvhex.CONSTANT, dlvhex.CONSTANT), 0, prop)
dlvhex.addAtom("testNumberOfBalls", (dlvhex.PREDICATE, dlvhex.CONSTANT, dlvhex.CONSTANT), 0, prop)
prop = dlvhex.ExtSourceProperties()
prop.setProvidesPartialAnswer(True)
prop.addAntimonotonicInputPredicate(0)
dlvhex.addAtom("numberOfBallsSE", (dlvhex.PREDICATE, dlvhex.CONSTANT), 0, prop)
dlvhex.addAtom("testNumberOfBallsSE", (dlvhex.PREDICATE, dlvhex.CONSTANT), 0, prop)
prop = dlvhex.ExtSourceProperties()
prop.setProvidesPartialAnswer(True)
prop.addMonotonicInputPredicate(0)
dlvhex.addAtom("numberOfBallsGE", (dlvhex.PREDICATE, dlvhex.CONSTANT), 0, prop)
dlvhex.addAtom("testNumberOfBallsGE", (dlvhex.PREDICATE, dlvhex.CONSTANT), 0, prop)
prop = dlvhex.ExtSourceProperties()
prop.setProvidesPartialAnswer(True)
dlvhex.addAtom("partialTest", (dlvhex.PREDICATE, ), 0, prop)
# someSelected and variations
dlvhex.addAtom("someSelected", (dlvhex.PREDICATE,), 0)
dlvhex.addAtom("someSelectedLearning", (dlvhex.PREDICATE,), 0)
prop = dlvhex.ExtSourceProperties()
prop.setProvidesPartialAnswer(True)
dlvhex.addAtom("someSelectedPartial", (dlvhex.PREDICATE,), 0, prop)
dlvhex.addAtom("secondArgByFirstArg", (dlvhex.PREDICATE, dlvhex.CONSTANT), 1)
dlvhex.addAtom("secondArgByFirstArgMoreLearning", (dlvhex.PREDICATE, dlvhex.CONSTANT), 1)
#XFAIL (TODO) sumD0
#XFAIL getreq
#unused mapping
#unused getSizes
#unused getSizesRestr
#XFAIL getDiagnoses
prop = dlvhex.ExtSourceProperties()
prop.addFiniteOutputDomain(0)
dlvhex.addAtom("testConcat", (dlvhex.TUPLE,), 1, prop)
dlvhex.addAtom("functionCompose", (dlvhex.TUPLE,), 1)
dlvhex.addAtom("functionDecompose", (dlvhex.CONSTANT,dlvhex.CONSTANT), 1)
dlvhex.addAtom("functionDecompose1", (dlvhex.CONSTANT,), 2)
dlvhex.addAtom("functionDecompose2", (dlvhex.CONSTANT,), 3)
dlvhex.addAtom("functionDecompose3", (dlvhex.CONSTANT,), 4)
dlvhex.addAtom("getArity", (dlvhex.CONSTANT,), 1)
dlvhex.addAtom("isFunctionTerm", (dlvhex.CONSTANT,), 0)
dlvhex.addAtom("rdf", (dlvhex.CONSTANT,), 3)
dlvhex.addAtom("issue_2_num", (dlvhex.PREDICATE,), 1)
dlvhex.addAtom("testArity1OneTupleA", (), 1)
dlvhex.addAtom("testArity1OneTupleB", (), 1)
dlvhex.addAtom("testArity1TwoTuples", (), 1)
dlvhex.addAtom("testStoreParseable", (dlvhex.CONSTANT,), 1)
# vim:list:noet:
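# Hedged usage sketch (the rules below are illustrative, not part of this plugin):
# once hexlite loads this plugin, the atoms registered above can be referenced
# from HEX rules with the &name[inputs](outputs) syntax, for example
#   out(X) :- &testSetMinus[p,q](X).
#   len(L) :- str(S), &testStrlen[S](L).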
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to perform basic operations with the
Google Cloud Vision API.
Example Usage:
python detect.py text ./resources/wakeupcat.jpg
python detect.py labels ./resources/landmark.jpg
python detect.py web ./resources/landmark.jpg
python detect.py web-uri http://wheresgus.com/dog.JPG
python detect.py faces-uri gs://your-bucket/file.jpg
For more information, see the documentation at
https://cloud.google.com/vision/docs.
"""
import argparse
import io
from google.cloud import vision
def detect_faces(path):
"""Detects faces in an image."""
vision_client = vision.Client()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision_client.image(content=content)
faces = image.detect_faces()
print('Faces:')
for face in faces:
print('anger: {}'.format(face.emotions.anger))
print('joy: {}'.format(face.emotions.joy))
print('surprise: {}'.format(face.emotions.surprise))
vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
for bound in face.bounds.vertices])
print('face bounds: {}'.format(','.join(vertices)))
def detect_faces_uri(uri):
"""Detects faces in the file located in Google Cloud Storage or the web."""
vision_client = vision.Client()
image = vision_client.image(source_uri=uri)
faces = image.detect_faces()
print('Faces:')
for face in faces:
print('anger: {}'.format(face.emotions.anger))
print('joy: {}'.format(face.emotions.joy))
print('surprise: {}'.format(face.emotions.surprise))
vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
for bound in face.bounds.vertices])
print('face bounds: {}'.format(','.join(vertices)))
def detect_labels(path):
"""Detects labels in the file."""
vision_client = vision.Client()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision_client.image(content=content)
labels = image.detect_labels()
print('Labels:')
for label in labels:
print(label.description)
def detect_labels_uri(uri):
"""Detects labels in the file located in Google Cloud Storage or on the
Web."""
vision_client = vision.Client()
image = vision_client.image(source_uri=uri)
labels = image.detect_labels()
print('Labels:')
for label in labels:
print(label.description)
def detect_landmarks(path):
"""Detects landmarks in the file."""
vision_client = vision.Client()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision_client.image(content=content)
landmarks = image.detect_landmarks()
print('Landmarks:')
for landmark in landmarks:
print(landmark.description)
def detect_landmarks_uri(uri):
"""Detects landmarks in the file located in Google Cloud Storage or on the
Web."""
vision_client = vision.Client()
image = vision_client.image(source_uri=uri)
landmarks = image.detect_landmarks()
print('Landmarks:')
for landmark in landmarks:
print(landmark.description)
def detect_logos(path):
"""Detects logos in the file."""
vision_client = vision.Client()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision_client.image(content=content)
logos = image.detect_logos()
print('Logos:')
for logo in logos:
print(logo.description)
def detect_logos_uri(uri):
"""Detects logos in the file located in Google Cloud Storage or on the Web.
"""
vision_client = vision.Client()
image = vision_client.image(source_uri=uri)
logos = image.detect_logos()
print('Logos:')
for logo in logos:
print(logo.description)
def detect_safe_search(path):
"""Detects unsafe features in the file."""
vision_client = vision.Client()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision_client.image(content=content)
safe = image.detect_safe_search()
print('Safe search:')
print('adult: {}'.format(safe.adult))
print('medical: {}'.format(safe.medical))
print('spoofed: {}'.format(safe.spoof))
print('violence: {}'.format(safe.violence))
def detect_safe_search_uri(uri):
"""Detects unsafe features in the file located in Google Cloud Storage or
on the Web."""
vision_client = vision.Client()
image = vision_client.image(source_uri=uri)
safe = image.detect_safe_search()
print('adult: {}'.format(safe.adult))
print('medical: {}'.format(safe.medical))
print('spoofed: {}'.format(safe.spoof))
print('violence: {}'.format(safe.violence))
def detect_text(path):
"""Detects text in the file."""
vision_client = vision.Client()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision_client.image(content=content)
texts = image.detect_text()
print('Texts:')
for text in texts:
print('\n"{}"'.format(text.description))
vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
for bound in text.bounds.vertices])
print('bounds: {}'.format(','.join(vertices)))
def detect_text_uri(uri):
"""Detects text in the file located in Google Cloud Storage or on the Web.
"""
vision_client = vision.Client()
image = vision_client.image(source_uri=uri)
texts = image.detect_text()
print('Texts:')
for text in texts:
print('\n"{}"'.format(text.description))
vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
for bound in text.bounds.vertices])
print('bounds: {}'.format(','.join(vertices)))
def detect_properties(path):
"""Detects image properties in the file."""
vision_client = vision.Client()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision_client.image(content=content)
props = image.detect_properties()
print('Properties:')
for color in props.colors:
print('fraction: {}'.format(color.pixel_fraction))
print('\tr: {}'.format(color.color.red))
print('\tg: {}'.format(color.color.green))
print('\tb: {}'.format(color.color.blue))
print('\ta: {}'.format(color.color.alpha))
def detect_properties_uri(uri):
"""Detects image properties in the file located in Google Cloud Storage or
on the Web."""
vision_client = vision.Client()
image = vision_client.image(source_uri=uri)
props = image.detect_properties()
print('Properties:')
for color in props.colors:
print('frac: {}'.format(color.pixel_fraction))
print('\tr: {}'.format(color.color.red))
print('\tg: {}'.format(color.color.green))
print('\tb: {}'.format(color.color.blue))
print('\ta: {}'.format(color.color.alpha))
def detect_web(path):
"""Detects web annotations given an image."""
vision_client = vision.Client()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision_client.image(content=content)
notes = image.detect_web()
if notes.pages_with_matching_images:
print('\n{} Pages with matching images retrieved'.format(len(notes.pages_with_matching_images)))
for page in notes.pages_with_matching_images:
print('Score : {}'.format(page.score))
print('Url : {}'.format(page.url))
if notes.full_matching_images:
print ('\n{} Full Matches found: '.format(
len(notes.full_matching_images)))
for image in notes.full_matching_images:
print('Score: {}'.format(image.score))
print('Url : {}'.format(image.url))
if notes.partial_matching_images:
print ('\n{} Partial Matches found: '.format(
len(notes.partial_matching_images)))
for image in notes.partial_matching_images:
print('Score: {}'.format(image.score))
print('Url : {}'.format(image.url))
if notes.web_entities:
print ('\n{} Web entities found: '.format(len(notes.web_entities)))
for entity in notes.web_entities:
print('Score : {}'.format(entity.score))
print('Description: {}'.format(entity.description))
def detect_web_uri(uri):
"""Detects web annotations in the file located in Google Cloud Storage."""
vision_client = vision.Client()
image = vision_client.image(source_uri=uri)
notes = image.detect_web()
if notes.pages_with_matching_images:
print('\n{} Pages with matching images retrieved'.format(len(notes.pages_with_matching_images)))
for page in notes.pages_with_matching_images:
print('Score : {}'.format(page.score))
print('Url : {}'.format(page.url))
if notes.full_matching_images:
print ('\n{} Full Matches found: '.format(
len(notes.full_matching_images)))
for image in notes.full_matching_images:
print('Score: {}'.format(image.score))
print('Url : {}'.format(image.url))
if notes.partial_matching_images:
print ('\n{} Partial Matches found: '.format(
len(notes.partial_matching_images)))
for image in notes.partial_matching_images:
print('Score: {}'.format(image.score))
print('Url : {}'.format(image.url))
if notes.web_entities:
print ('\n{} Web entities found: '.format(len(notes.web_entities)))
for entity in notes.web_entities:
print('Score : {}'.format(entity.score))
print('Description: {}'.format(entity.description))
def detect_crop_hints(path):
"""Detects crop hints in an image."""
vision_client = vision.Client()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision_client.image(content=content)
hints = image.detect_crop_hints()
for n, hint in enumerate(hints):
print('\nCrop Hint: {}'.format(n))
vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
for bound in hint.bounds.vertices])
print('bounds: {}'.format(','.join(vertices)))
def detect_crop_hints_uri(uri):
"""Detects crop hints in the file located in Google Cloud Storage."""
vision_client = vision.Client()
image = vision_client.image(source_uri=uri)
hints = image.detect_crop_hints()
for n, hint in enumerate(hints):
print('\nCrop Hint: {}'.format(n))
vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
for bound in hint.bounds.vertices])
print('bounds: {}'.format(','.join(vertices)))
def detect_document(path):
"""Detects document features in an image."""
vision_client = vision.Client()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision_client.image(content=content)
document = image.detect_full_text()
for b, page in enumerate(document.pages):
page_text = ''
for bb, block in enumerate(page.blocks):
block_text = ''
for p, paragraph in enumerate(block.paragraphs):
para_text = ''
for w, word in enumerate(paragraph.words):
word_text = ''
for s, symbol in enumerate(word.symbols):
word_text = word_text + symbol.text
para_text = para_text + word_text
block_text = block_text + para_text
print('\n--\nContent Block: {}'.format(block_text))
print('Block Bounding Box:\n{}'.format(block.bounding_box))
page_text = page_text + block_text
print('Page Content:\n{}'.format(page_text))
print('Page Dimensions: w: {} h: {}'.format(page.width, page.height))
def detect_document_uri(uri):
"""Detects document features in the file located in Google Cloud
Storage."""
vision_client = vision.Client()
image = vision_client.image(source_uri=uri)
document = image.detect_full_text()
for b, page in enumerate(document.pages):
page_text = ''
for bb, block in enumerate(page.blocks):
block_text = ''
for p, paragraph in enumerate(block.paragraphs):
para_text = ''
for w, word in enumerate(paragraph.words):
word_text = ''
for s, symbol in enumerate(word.symbols):
word_text = word_text + symbol.text
para_text = para_text + word_text
block_text = block_text + para_text
print('\n--\nContent Block: {}'.format(block_text))
print('Block Bounding Box:\n{}'.format(block.bounding_box))
page_text = page_text + block_text
print('Page Content:\n{}'.format(page_text))
print('Page Dimensions: w: {} h: {}'.format(page.width, page.height))
def run_local(args):
if args.command == 'faces':
detect_faces(args.path)
elif args.command == 'labels':
detect_labels(args.path)
elif args.command == 'landmarks':
detect_landmarks(args.path)
elif args.command == 'text':
detect_text(args.path)
elif args.command == 'logos':
detect_logos(args.path)
elif args.command == 'safe-search':
detect_safe_search(args.path)
elif args.command == 'properties':
detect_properties(args.path)
elif args.command == 'web':
detect_web(args.path)
elif args.command == 'crophints':
detect_crop_hints(args.path)
elif args.command == 'document':
detect_document(args.path)
def run_uri(args):
if args.command == 'text-uri':
detect_text_uri(args.uri)
elif args.command == 'faces-uri':
detect_faces_uri(args.uri)
elif args.command == 'labels-uri':
detect_labels_uri(args.uri)
elif args.command == 'landmarks-uri':
detect_landmarks_uri(args.uri)
elif args.command == 'logos-uri':
detect_logos_uri(args.uri)
elif args.command == 'safe-search-uri':
detect_safe_search_uri(args.uri)
elif args.command == 'properties-uri':
detect_properties_uri(args.uri)
elif args.command == 'web-uri':
detect_web_uri(args.uri)
elif args.command == 'crophints-uri':
detect_crop_hints_uri(args.uri)
elif args.command == 'document-uri':
detect_document_uri(args.uri)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
detect_faces_parser = subparsers.add_parser(
'faces', help=detect_faces.__doc__)
detect_faces_parser.add_argument('path')
faces_file_parser = subparsers.add_parser(
'faces-uri', help=detect_faces_uri.__doc__)
faces_file_parser.add_argument('uri')
detect_labels_parser = subparsers.add_parser(
'labels', help=detect_labels.__doc__)
detect_labels_parser.add_argument('path')
labels_file_parser = subparsers.add_parser(
'labels-uri', help=detect_labels_uri.__doc__)
labels_file_parser.add_argument('uri')
detect_landmarks_parser = subparsers.add_parser(
'landmarks', help=detect_landmarks.__doc__)
detect_landmarks_parser.add_argument('path')
landmark_file_parser = subparsers.add_parser(
'landmarks-uri', help=detect_landmarks_uri.__doc__)
landmark_file_parser.add_argument('uri')
detect_text_parser = subparsers.add_parser(
'text', help=detect_text.__doc__)
detect_text_parser.add_argument('path')
text_file_parser = subparsers.add_parser(
'text-uri', help=detect_text_uri.__doc__)
text_file_parser.add_argument('uri')
detect_logos_parser = subparsers.add_parser(
'logos', help=detect_logos.__doc__)
detect_logos_parser.add_argument('path')
logos_file_parser = subparsers.add_parser(
'logos-uri', help=detect_logos_uri.__doc__)
logos_file_parser.add_argument('uri')
safe_search_parser = subparsers.add_parser(
'safe-search', help=detect_safe_search.__doc__)
safe_search_parser.add_argument('path')
safe_search_file_parser = subparsers.add_parser(
'safe-search-uri',
help=detect_safe_search_uri.__doc__)
safe_search_file_parser.add_argument('uri')
properties_parser = subparsers.add_parser(
'properties', help=detect_properties.__doc__)
properties_parser.add_argument('path')
properties_file_parser = subparsers.add_parser(
'properties-uri',
help=detect_properties_uri.__doc__)
properties_file_parser.add_argument('uri')
# 1.1 Vision features
web_parser = subparsers.add_parser(
'web', help=detect_web.__doc__)
web_parser.add_argument('path')
web_uri_parser = subparsers.add_parser(
'web-uri',
help=detect_web_uri.__doc__)
web_uri_parser.add_argument('uri')
crop_hints_parser = subparsers.add_parser(
'crophints', help=detect_crop_hints.__doc__)
crop_hints_parser.add_argument('path')
crop_hints_uri_parser = subparsers.add_parser(
'crophints-uri', help=detect_crop_hints_uri.__doc__)
crop_hints_uri_parser.add_argument('uri')
document_parser = subparsers.add_parser(
'document', help=detect_document.__doc__)
document_parser.add_argument('path')
document_uri_parser = subparsers.add_parser(
'document-uri', help=detect_document_uri.__doc__)
document_uri_parser.add_argument('uri')
args = parser.parse_args()
if ('uri' in args.command):
run_uri(args)
else:
run_local(args)
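# Note: vision.Client as used above is the legacy (pre-0.26) surface of the
# google-cloud-vision package. On current releases the rough equivalent (an
# assumption for orientation, not part of this sample) would look like:
#   client = vision.ImageAnnotatorClient()
#   response = client.label_detection(image=vision.Image(content=content))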
|
# --------------
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# code starts here
##loading the dataset
df = pd.read_csv(path)
## analyzing the first 5 rows
print(df.head())
##create the feature set
X = df.drop('list_price',axis=1)
##create the target set
y = df['list_price']
## splitting into train and test sets using train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=6, test_size=0.3)
# code ends here
# --------------
import matplotlib.pyplot as plt
# code starts here
##getting the list of columns in the train set
cols = X_train.columns
## initializing the subplots with 3 rows and 3 cols
fig,axes = plt.subplots(nrows=3,ncols=3,figsize=(15,10))
# ## looping through the rows of the subplot
for i,row in enumerate(axes):
##looping through the subplot one by one
for j,ax in enumerate(row):
##added to pick appropriate columns from train set
col = cols[i*3 + j]
## plotting the scatter plot for each of the feature
ax.scatter(X_train[col],y_train)
##setting x label
ax.set_xlabel(col)
##setting y label
ax.set_ylabel('list_price')
# code ends here
# --------------
# Code starts here
##find correlation between features
corr = X_train.corr()
print(corr)
## identify features having correlation higher than 0.75
high_corr_features = corr[(corr>0.75) | (corr <-0.75)]
##remove highly correlated features from train set
X_train.drop(columns=['val_star_rating','play_star_rating'], inplace=True)
##remove highly correlated features from test set
X_test.drop(columns=['val_star_rating','play_star_rating'], inplace=True)
# Code ends here
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
# Code starts here
##instantiate the Linear Regression Model
regressor = LinearRegression()
##fit the model
regressor.fit(X_train, y_train)
##make predictions from the model
y_pred = regressor.predict(X_test)
##calculate the mean squared error
# mse = mean_squared_error(y_pred,y_test)
mse = sum((y_pred-y_test)**2)/y_pred.shape[0]
print(mse)
##calculate the R squared error
r2 = r2_score(y_test, y_pred)
print(r2)
# Code ends here
# --------------
# Code starts here
##calculate the residual for the predicted values
residual = y_test - y_pred
##plot the histogram of the residual error
plt.hist(residual)
plt.xlabel('Residual Error')
plt.ylabel('Frequency')
plt.title('Frequency of residual error')
# Code ends here
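# The residual histogram above is only constructed; outside a notebook a call such
# as plt.show() (or plt.savefig('residuals.png'), filename assumed) is needed to
# actually render or persist the figure.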
|
import pandas as pd
import geopandas as gpd
from shapely.ops import unary_union
from sqr.core.shape import get_voronoi_series, find_neighbor_shapes
from sqr.core.network import get_communities
from sqr.core.config import years_hh, years_pers
def pre_partition_area(gdf, origin_geom):
'''
Makes a pre partition of the area shape into sub-areas
of shapes.
'''
has_pop_and_pers = gdf.minimum_pers.notnull() & gdf.minimum_hh.notnull()
sub_gdf = gdf[has_pop_and_pers].copy()
# get assignment and voronoi shape
voronoi_geo = get_voronoi_series(sub_gdf, origin_geom)
sub_gdf['assignment'] = get_communities(sub_gdf)
sub_gdf = gpd.GeoDataFrame(geometry=voronoi_geo, data=sub_gdf)
# get assignment information and geometry
gb_assign = sub_gdf.groupby('assignment')
pers = gb_assign\
.apply(lambda g: g[years_pers].dropna(axis=1,how='all').sum(0).min())\
.rename('count_pers')
hh = gb_assign\
.apply(lambda g: g[years_hh].dropna(axis=1,how='all').sum(0).min())\
.rename('count_hh')
cell = gb_assign.apply(lambda g: g.shape[0]).rename('count_cells')
info = pd.concat([pers,hh,cell], axis=1)
info['cells'] = gb_assign.apply(lambda g: list(g.index))
geoms = gb_assign.geometry.apply(lambda g: g.unary_union)
df_pre = gpd.GeoDataFrame(data=info, geometry=geoms)
return df_pre
def merge_insufficient(in_gdf):
'''
Merge partition with insufficient population
onto their nearby neighboring partition
shapes
'''
gdf = in_gdf.copy()
insuff = ((gdf.count_pers<100)|(gdf.count_hh<50)).to_dict()
overlap = find_neighbor_shapes(gdf)
overlap = overlap[overlap.idx1.apply(lambda i: insuff[i])]
overlap['other_insuff'] = overlap.idx2.apply(lambda i: insuff[i])
gb_idx = overlap.groupby('idx1')
neighbor_suff = (~gb_idx.other_insuff.min())
match_to_neighbor = neighbor_suff.sort_values().index
info_cols = ['count_pers', 'count_hh','count_cells','cells']
optimals = {}
geoms = gdf.geometry.to_dict()
for idx in match_to_neighbor:
opt = gb_idx\
.get_group(idx)\
.sort_values(['other_insuff','overlap_area'],ascending=[1,0])\
.iloc[0]\
.idx2
if opt in optimals:
opt = optimals[opt]
optimals[idx] = opt
geoms[opt] = unary_union([geoms[opt],geoms[idx]])
gdf.loc[opt,info_cols] += gdf.loc[idx,info_cols].values
df = gdf\
.drop(match_to_neighbor, axis=0)\
.drop('geometry', axis=1)
geos = gpd.GeoSeries(geoms, crs=gdf.crs)
out_gdf = gpd.GeoDataFrame(data=df, geometry=geos)
return out_gdf
def assign_cells_partition(part_gdf, cells_gdf):
'''
Merge a GeoDataFrame of partitions onto a
DataFrame containing cells and information.
'''
assign = {}
for idx in part_gdf.index:
geom = part_gdf.loc[idx].geometry
assign[idx] = set(cells_gdf[cells_gdf.intersects(geom)].index)
part_neighbors = find_neighbor_shapes(part_gdf, overlap=False)\
.values\
.tolist()
for i1,i2 in part_neighbors:
intersects = list(assign[i1] & assign[i2])
cells_gdf_sub = cells_gdf.loc[intersects]
intersect_1 = cells_gdf_sub.geometry.intersection(part_gdf.loc[i1].geometry).area
intersect_2 = cells_gdf_sub.geometry.intersection(part_gdf.loc[i2].geometry).area
i1_more = intersect_1>intersect_2
assign[i1] -= set(cells_gdf_sub[~i1_more].index)
assign[i2] -= set(cells_gdf_sub[i1_more].index)
errors = []
for (i1,i2) in part_neighbors:
if len(assign[i1] & assign[i2])>0:
errors+= [(i1,i2)]
if len(errors)>0:
raise ValueError('Ambiguous assignment: some cells belong to more than one partition')
series = [(idx,pd.Series(list(assign[idx]))) for idx in assign.keys()]
assignment = pd.concat(dict(series), axis=0)\
.reset_index()\
.drop('level_1',axis=1)\
.rename(columns = {'level_0':'assignment'})\
.set_index(0)
return assignment
|
import re
from decimal import Decimal
from django.conf import settings
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.utils import SpatialOperation, SpatialFunction
from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql_psycopg2.base import DatabaseOperations
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.functional import cached_property
from .models import GeometryColumns, SpatialRefSys
#### Classes used in constructing PostGIS spatial SQL ####
class PostGISOperator(SpatialOperation):
"For PostGIS operators (e.g. `&&`, `~`)."
def __init__(self, operator):
super(PostGISOperator, self).__init__(operator=operator)
class PostGISFunction(SpatialFunction):
"For PostGIS function calls (e.g., `ST_Contains(table, geom)`)."
def __init__(self, prefix, function, **kwargs):
super(PostGISFunction, self).__init__(prefix + function, **kwargs)
class PostGISFunctionParam(PostGISFunction):
"For PostGIS functions that take another parameter (e.g. DWithin, Relate)."
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)'
class PostGISDistance(PostGISFunction):
"For PostGIS distance operations."
dist_func = 'Distance'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s'
def __init__(self, prefix, operator):
super(PostGISDistance, self).__init__(prefix, self.dist_func,
operator=operator)
class PostGISSpheroidDistance(PostGISFunction):
"For PostGIS spherical distance operations (using the spheroid)."
dist_func = 'distance_spheroid'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s) %(operator)s %%s'
def __init__(self, prefix, operator):
# An extra parameter in `end_subst` is needed for the spheroid string.
super(PostGISSpheroidDistance, self).__init__(prefix, self.dist_func,
operator=operator)
class PostGISSphereDistance(PostGISDistance):
"For PostGIS spherical distance operations."
dist_func = 'distance_sphere'
class PostGISRelate(PostGISFunctionParam):
"For PostGIS Relate(<geom>, <pattern>) calls."
pattern_regex = re.compile(r'^[012TF\*]{9}$')
def __init__(self, prefix, pattern):
if not self.pattern_regex.match(pattern):
raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
super(PostGISRelate, self).__init__(prefix, 'Relate')
class PostGISOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.models.sql.compiler'
name = 'postgis'
postgis = True
geom_func_prefix = 'ST_'
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
valid_aggregates = {'Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union'}
Adapter = PostGISAdapter
Adaptor = Adapter # Backwards-compatibility alias.
def __init__(self, connection):
super(PostGISOperations, self).__init__(connection)
prefix = self.geom_func_prefix
# PostGIS-specific operators. The commented descriptions of these
# operators come from Section 7.6 of the PostGIS 1.4 documentation.
self.geometry_operators = {
# The "&<" operator returns true if A's bounding box overlaps or
# is to the left of B's bounding box.
'overlaps_left': PostGISOperator('&<'),
# The "&>" operator returns true if A's bounding box overlaps or
# is to the right of B's bounding box.
'overlaps_right': PostGISOperator('&>'),
# The "<<" operator returns true if A's bounding box is strictly
# to the left of B's bounding box.
'left': PostGISOperator('<<'),
# The ">>" operator returns true if A's bounding box is strictly
# to the right of B's bounding box.
'right': PostGISOperator('>>'),
# The "&<|" operator returns true if A's bounding box overlaps or
# is below B's bounding box.
'overlaps_below': PostGISOperator('&<|'),
# The "|&>" operator returns true if A's bounding box overlaps or
# is above B's bounding box.
'overlaps_above': PostGISOperator('|&>'),
# The "<<|" operator returns true if A's bounding box is strictly
# below B's bounding box.
'strictly_below': PostGISOperator('<<|'),
# The "|>>" operator returns true if A's bounding box is strictly
# above B's bounding box.
'strictly_above': PostGISOperator('|>>'),
# The "~=" operator is the "same as" operator. It tests actual
# geometric equality of two features. So if A and B are the same feature,
# vertex-by-vertex, the operator returns true.
'same_as': PostGISOperator('~='),
'exact': PostGISOperator('~='),
# The "@" operator returns true if A's bounding box is completely contained
# by B's bounding box.
'contained': PostGISOperator('@'),
# The "~" operator returns true if A's bounding box completely contains
# B's bounding box.
'bbcontains': PostGISOperator('~'),
# The "&&" operator returns true if A's bounding box overlaps
# B's bounding box.
'bboverlaps': PostGISOperator('&&'),
}
self.geometry_functions = {
'equals': PostGISFunction(prefix, 'Equals'),
'disjoint': PostGISFunction(prefix, 'Disjoint'),
'touches': PostGISFunction(prefix, 'Touches'),
'crosses': PostGISFunction(prefix, 'Crosses'),
'within': PostGISFunction(prefix, 'Within'),
'overlaps': PostGISFunction(prefix, 'Overlaps'),
'contains': PostGISFunction(prefix, 'Contains'),
'intersects': PostGISFunction(prefix, 'Intersects'),
'relate': (PostGISRelate, six.string_types),
'coveredby': PostGISFunction(prefix, 'CoveredBy'),
'covers': PostGISFunction(prefix, 'Covers'),
}
# Valid distance types and substitutions
dtypes = (Decimal, Distance, float) + six.integer_types
def get_dist_ops(operator):
"Returns operations for both regular and spherical distances."
return {'cartesian': PostGISDistance(prefix, operator),
'sphere': PostGISSphereDistance(prefix, operator),
'spheroid': PostGISSpheroidDistance(prefix, operator),
}
self.distance_functions = {
'distance_gt': (get_dist_ops('>'), dtypes),
'distance_gte': (get_dist_ops('>='), dtypes),
'distance_lt': (get_dist_ops('<'), dtypes),
'distance_lte': (get_dist_ops('<='), dtypes),
'dwithin': (PostGISFunctionParam(prefix, 'DWithin'), dtypes)
}
# Adding the distance functions to the geometries lookup.
self.geometry_functions.update(self.distance_functions)
# Only PostGIS versions 1.3.4+ have GeoJSON serialization support.
if self.spatial_version < (1, 3, 4):
GEOJSON = False
else:
GEOJSON = prefix + 'AsGeoJson'
# ST_ContainsProperly, ST_MakeLine, and ST_GeoHash were added in 1.4.
if self.spatial_version >= (1, 4, 0):
GEOHASH = 'ST_GeoHash'
BOUNDINGCIRCLE = 'ST_MinimumBoundingCircle'
self.geometry_functions['contains_properly'] = PostGISFunction(prefix, 'ContainsProperly')
else:
GEOHASH, BOUNDINGCIRCLE = False, False
# Geography type support added in 1.5.
if self.spatial_version >= (1, 5, 0):
self.geography = True
# Only a subset of the operators and functions are available
# for the geography type.
self.geography_functions = self.distance_functions.copy()
self.geography_functions.update({
'coveredby': self.geometry_functions['coveredby'],
'covers': self.geometry_functions['covers'],
'intersects': self.geometry_functions['intersects'],
})
self.geography_operators = {
'bboverlaps': PostGISOperator('&&'),
}
# Native geometry type support added in PostGIS 2.0.
if self.spatial_version >= (2, 0, 0):
self.geometry = True
# Creating a dictionary lookup of all GIS terms for PostGIS.
self.gis_terms = set(['isnull'])
self.gis_terms.update(self.geometry_operators)
self.gis_terms.update(self.geometry_functions)
self.area = prefix + 'Area'
self.bounding_circle = BOUNDINGCIRCLE
self.centroid = prefix + 'Centroid'
self.collect = prefix + 'Collect'
self.difference = prefix + 'Difference'
self.distance = prefix + 'Distance'
self.distance_sphere = prefix + 'distance_sphere'
self.distance_spheroid = prefix + 'distance_spheroid'
self.envelope = prefix + 'Envelope'
self.extent = prefix + 'Extent'
self.force_rhr = prefix + 'ForceRHR'
self.geohash = GEOHASH
self.geojson = GEOJSON
self.gml = prefix + 'AsGML'
self.intersection = prefix + 'Intersection'
self.kml = prefix + 'AsKML'
self.length = prefix + 'Length'
self.length_spheroid = prefix + 'length_spheroid'
self.makeline = prefix + 'MakeLine'
self.mem_size = prefix + 'mem_size'
self.num_geom = prefix + 'NumGeometries'
self.num_points = prefix + 'npoints'
self.perimeter = prefix + 'Perimeter'
self.point_on_surface = prefix + 'PointOnSurface'
self.polygonize = prefix + 'Polygonize'
self.reverse = prefix + 'Reverse'
self.scale = prefix + 'Scale'
self.snap_to_grid = prefix + 'SnapToGrid'
self.svg = prefix + 'AsSVG'
self.sym_difference = prefix + 'SymDifference'
self.transform = prefix + 'Transform'
self.translate = prefix + 'Translate'
self.union = prefix + 'Union'
self.unionagg = prefix + 'Union'
if self.spatial_version >= (2, 0, 0):
self.extent3d = prefix + '3DExtent'
self.length3d = prefix + '3DLength'
self.perimeter3d = prefix + '3DPerimeter'
else:
self.extent3d = prefix + 'Extent3D'
self.length3d = prefix + 'Length3D'
self.perimeter3d = prefix + 'Perimeter3D'
@cached_property
def spatial_version(self):
"""Determine the version of the PostGIS library."""
# Trying to get the PostGIS version because the function
# signatures will depend on the version used. The cost
# here is a database query to determine the version, which
# can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple
# comprising user-supplied values for the major, minor, and
# subminor revision of PostGIS.
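# For example, a settings.py entry such as POSTGIS_VERSION = (1, 5, 3) would
# skip the database query entirely (the exact value shown is illustrative only).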
if hasattr(settings, 'POSTGIS_VERSION'):
version = settings.POSTGIS_VERSION
else:
try:
vtup = self.postgis_version_tuple()
except DatabaseError:
raise ImproperlyConfigured(
'Cannot determine PostGIS version for database "%s". '
'GeoDjango requires at least PostGIS version 1.3. '
'Was the database created from a spatial database '
'template?' % self.connection.settings_dict['NAME']
)
version = vtup[1:]
return version
def check_aggregate_support(self, aggregate):
"""
Checks if the given aggregate name is supported (that is, if it's
in `self.valid_aggregates`).
"""
agg_name = aggregate.__class__.__name__
return agg_name in self.valid_aggregates
def convert_extent(self, box):
"""
Returns a 4-tuple extent for the `Extent` aggregate by converting
the bounding box text returned by PostGIS (`box` argument), for
example: "BOX(-90.0 30.0, -85.0 40.0)".
"""
ll, ur = box[4:-1].split(',')
xmin, ymin = map(float, ll.split())
xmax, ymax = map(float, ur.split())
return (xmin, ymin, xmax, ymax)
def convert_extent3d(self, box3d):
"""
Returns a 6-tuple extent for the `Extent3D` aggregate by converting
the 3d bounding-box text returned by PostGIS (`box3d` argument), for
example: "BOX3D(-90.0 30.0 1, -85.0 40.0 2)".
"""
ll, ur = box3d[6:-1].split(',')
xmin, ymin, zmin = map(float, ll.split())
xmax, ymax, zmax = map(float, ur.split())
return (xmin, ymin, zmin, xmax, ymax, zmax)
def convert_geom(self, hex, geo_field):
"""
Converts the geometry returned from PostGIS aggregates.
"""
if hex:
return Geometry(hex)
else:
return None
def geo_db_type(self, f):
"""
Return the database field type for the given geometry field.
Typically this is `None` because geometry columns are added via
the `AddGeometryColumn` stored procedure, unless the field
has been specified to be of geography type instead.
"""
if f.geography:
if not self.geography:
raise NotImplementedError('PostGIS 1.5 required for geography column support.')
if f.srid != 4326:
raise NotImplementedError('PostGIS 1.5 supports geography columns '
'only with an SRID of 4326.')
return 'geography(%s,%d)' % (f.geom_type, f.srid)
elif self.geometry:
# Postgis 2.0 supports type-based geometries.
# TODO: Support 'M' extension.
if f.dim == 3:
geom_type = f.geom_type + 'Z'
else:
geom_type = f.geom_type
return 'geometry(%s,%d)' % (geom_type, f.srid)
else:
return None
def get_distance(self, f, dist_val, lookup_type):
"""
Retrieve the distance parameters for the given geometry field,
distance lookup value, and the distance lookup type.
This is the most complex implementation of the spatial backends due to
what is supported on geodetic geometry columns vs. what's available on
projected geometry columns. In addition, it has to take into account
the geography column type newly introduced in PostGIS 1.5.
"""
# Getting the distance parameter and any options.
if len(dist_val) == 1:
value, option = dist_val[0], None
else:
value, option = dist_val
# Shorthand boolean flags.
geodetic = f.geodetic(self.connection)
geography = f.geography and self.geography
if isinstance(value, Distance):
if geography:
dist_param = value.m
elif geodetic:
if lookup_type == 'dwithin':
raise ValueError('Only numeric values of degree units are '
'allowed on geographic DWithin queries.')
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
# Assuming the distance is in the units of the field.
dist_param = value
if (not geography and geodetic and lookup_type != 'dwithin'
and option == 'spheroid'):
# using distance_spheroid requires the spheroid of the field as
# a parameter.
return [f._spheroid, dist_param]
else:
return [dist_param]
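    # Illustrative example (assuming a geography column and a Distance value):
    # get_distance(f, (D(km=5),), 'distance_lte') would return [5000.0], since
    # geography distances are expressed in meters.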
def get_geom_placeholder(self, f, value):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
ST_Transform() function call.
"""
if value is None or value.srid == f.srid:
placeholder = '%s'
else:
# Adding Transform() to the SQL placeholder.
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
if hasattr(value, 'expression'):
# If this is an F expression, then we don't really want
# a placeholder and instead substitute in the column
# of the expression.
placeholder = placeholder % self.get_expression_column(value)
return placeholder
def _get_postgis_func(self, func):
"""
Helper routine for calling PostGIS functions and returning their result.
"""
# Close out the connection. See #9437.
with self.connection.temporary_connection() as cursor:
cursor.execute('SELECT %s()' % func)
return cursor.fetchone()[0]
def postgis_geos_version(self):
"Returns the version of the GEOS library used with PostGIS."
return self._get_postgis_func('postgis_geos_version')
def postgis_lib_version(self):
"Returns the version number of the PostGIS library used with PostgreSQL."
return self._get_postgis_func('postgis_lib_version')
def postgis_proj_version(self):
"Returns the version of the PROJ.4 library used with PostGIS."
return self._get_postgis_func('postgis_proj_version')
def postgis_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_version')
def postgis_full_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_full_version')
def postgis_version_tuple(self):
"""
Returns the PostGIS version as a tuple (version string, major,
minor, subminor).
"""
# Getting the PostGIS version
version = self.postgis_lib_version()
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse PostGIS version string: %s' % version)
return (version, major, minor1, minor2)
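    # For example, a reported lib version of '2.1.3' yields ('2.1.3', 2, 1, 3).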
def proj_version_tuple(self):
"""
Return the version of PROJ.4 used by PostGIS as a tuple of the
major, minor, and subminor release numbers.
"""
proj_regex = re.compile(r'(\d+)\.(\d+)\.(\d+)')
proj_ver_str = self.postgis_proj_version()
m = proj_regex.search(proj_ver_str)
if m:
return tuple(map(int, [m.group(1), m.group(2), m.group(3)]))
else:
raise Exception('Could not determine PROJ.4 version from PostGIS.')
def num_params(self, lookup_type, num_param):
"""
Helper routine that returns a boolean indicating whether the number of
parameters is correct for the lookup type.
"""
def exactly_two(np):
return np == 2
def two_to_three(np):
return np >= 2 and np <= 3
if (lookup_type in self.distance_functions and
lookup_type != 'dwithin'):
return two_to_three(num_param)
else:
return exactly_two(num_param)
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
"""
        Constructs spatial SQL from the given lookup value tuple
        (alias, col, db_type), the lookup type string, lookup value, and
the geometry field.
"""
alias, col, db_type = lvalue
# Getting the quoted geometry column.
geo_col = '%s.%s' % (qn(alias), qn(col))
if lookup_type in self.geometry_operators:
if field.geography and not lookup_type in self.geography_operators:
raise ValueError('PostGIS geography does not support the '
'"%s" lookup.' % lookup_type)
# Handling a PostGIS operator.
op = self.geometry_operators[lookup_type]
return op.as_sql(geo_col, self.get_geom_placeholder(field, value))
elif lookup_type in self.geometry_functions:
if field.geography and not lookup_type in self.geography_functions:
raise ValueError('PostGIS geography type does not support the '
'"%s" lookup.' % lookup_type)
# See if a PostGIS geometry function matches the lookup type.
tmp = self.geometry_functions[lookup_type]
# Lookup types that are tuples take tuple arguments, e.g., 'relate' and
# distance lookups.
if isinstance(tmp, tuple):
# First element of tuple is the PostGISOperation instance, and the
# second element is either the type or a tuple of acceptable types
                # that may be passed in as further parameters for the lookup type.
op, arg_type = tmp
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, (tuple, list)):
raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
# Geometry is first element of lookup tuple.
geom = value[0]
# Number of valid tuple parameters depends on the lookup type.
nparams = len(value)
if not self.num_params(lookup_type, nparams):
raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
# For lookup type `relate`, the op instance is not yet created (has
# to be instantiated here to check the pattern parameter).
if lookup_type == 'relate':
op = op(self.geom_func_prefix, value[1])
elif lookup_type in self.distance_functions and lookup_type != 'dwithin':
if not field.geography and field.geodetic(self.connection):
# Geodetic distances are only available from Points to
# PointFields on PostGIS 1.4 and below.
if not self.connection.ops.geography:
if field.geom_type != 'POINT':
raise ValueError('PostGIS spherical operations are only valid on PointFields.')
if str(geom.geom_type) != 'Point':
raise ValueError('PostGIS geometry distance parameter is required to be of type Point.')
# Setting up the geodetic operation appropriately.
if nparams == 3 and value[2] == 'spheroid':
op = op['spheroid']
else:
op = op['sphere']
else:
op = op['cartesian']
else:
op = tmp
geom = value
# Calling the `as_sql` function on the operation instance.
return op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, ('' if value else 'NOT ')), []
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
def spatial_aggregate_sql(self, agg):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = agg.__class__.__name__
if not self.check_aggregate_support(agg):
            raise NotImplementedError('%s spatial aggregate is not implemented for this backend.' % agg_name)
agg_name = agg_name.lower()
if agg_name == 'union':
agg_name += 'agg'
sql_template = '%(function)s(%(field)s)'
sql_function = getattr(self, agg_name)
return sql_template, sql_function
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
return GeometryColumns
def spatial_ref_sys(self):
return SpatialRefSys
|
from typing import Any, Dict
import pytest
import requests
from schemathesis import models
from schemathesis.runner.checks import content_type_conformance, response_schema_conformance
from schemathesis.schemas import BaseSchema
def make_test_result(schema: BaseSchema, definition: Dict[str, Any]) -> models.TestResult:
endpoint = models.Endpoint("/path", "GET", definition=definition)
return models.TestResult(endpoint, schema)
def make_response(content=b"{}", content_type="application/json") -> requests.Response:
response = requests.Response()
response._content = content
response.status_code = 200
response.headers["Content-Type"] = content_type
return response
@pytest.fixture()
def response(request):
return make_response(content_type=request.param)
@pytest.fixture()
def results(request, swagger_20) -> models.TestResult:
return make_test_result(swagger_20, {"produces": request.param})
@pytest.mark.parametrize(
"response, results",
(
("application/json", []),
("application/json", ["application/json"]),
("application/json;charset=utf-8", ["application/json"]),
),
indirect=["response", "results"],
)
def test_content_type_conformance_valid(response, results):
assert content_type_conformance(response, results) is None
@pytest.mark.parametrize(
"response, results",
(("plain/text", ["application/json"]), ("plain/text;charset=utf-8", ["application/json"])),
indirect=["response", "results"],
)
def test_content_type_conformance_invalid(response, results):
message = (
f"^Received a response with '{response.headers['Content-Type']}' Content-Type, "
"but it is not declared in the schema.\n\nDefined content types: application/json$"
)
with pytest.raises(AssertionError, match=message):
content_type_conformance(response, results)
SUCCESS_SCHEMA = {"type": "object", "properties": {"success": {"type": "boolean"}}, "required": ["success"]}
@pytest.mark.parametrize(
"content, definition",
(
(b'{"success": true}', {}),
(b'{"success": true}', {"responses": {"200": {"description": "text"}}}),
(b'{"random": "text"}', {"responses": {"200": {"description": "text"}}}),
(b'{"success": true}', {"responses": {"200": {"description": "text", "schema": SUCCESS_SCHEMA}}}),
(b'{"success": true}', {"responses": {"default": {"description": "text", "schema": SUCCESS_SCHEMA}}}),
),
)
def test_response_schema_conformance(swagger_20, content, definition):
response = make_response(content)
results = make_test_result(swagger_20, definition)
assert response_schema_conformance(response, results) is None
@pytest.mark.parametrize(
"content, definition",
(
(b'{"random": "text"}', {"responses": {"200": {"description": "text", "schema": SUCCESS_SCHEMA}}}),
(b'{"random": "text"}', {"responses": {"default": {"description": "text", "schema": SUCCESS_SCHEMA}}}),
),
)
def test_response_schema_conformance_invalid(swagger_20, content, definition):
response = make_response(content)
results = make_test_result(swagger_20, definition)
with pytest.raises(AssertionError):
response_schema_conformance(response, results)
|
"""
Batch processor template: An application of the template method pattern to
batch processing.
I got myself into a tangle trying to factor out common code in a small batch
processing program. This is a toy version to experiment with.
The `batch_processor_template` function performs timing and error handling,
sourcing work items from an `Iterable` batch and delegating the actual
processing to a Processor with four methods: validate, get_data, process, and
write_result. We could make an ABC for Processors with several implementations
for doing different kinds of work.
Note that the batch `Iterable` and the Processor must be a matched pair,
producing and consuming compatible types. Maybe that's a sign they should be
combined. Otherwise, I'm only assuming that items have an `id` and a decent
`__repr__`.
"""
import dataclasses
import logging
import random
import sys
import traceback
import uuid
from collections import namedtuple
from typing import List
from contexttimer import Timer
def configure_logging(level=logging.INFO):
logger = logging.getLogger("example")
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(level)
return logger
logger = configure_logging()
def _generate_id():
return uuid.uuid4().hex[20:]
def _randomly_fail(p, ex, msg):
"""
Raise an exception with probability p.
"""
if random.uniform(0,1) < p:
raise ex(msg)
@dataclasses.dataclass
class ProcessingStats:
total_processing_time: float = 0.0
n_succeeded: int = 0
n_failed: int = 0
batch_id: str = None
items: List = dataclasses.field(default_factory=list)
WhatsitDetails = namedtuple("WhatsitDetails", "id")
WhatsitResult = namedtuple("WhatsitResult", "id status")
def whatsit_batch(n):
"""
Generate objects that describe work to be done.
"""
for _ in range(n):
yield WhatsitDetails(id=_generate_id())
class WhatsitProcessor:
"""
A processor for Whatsits that sometimes fails.
"""
def validate(self, whatsit_detail):
_randomly_fail(p=0.05, ex=ValueError, msg=f'Invalid whatsit: {whatsit_detail.id}!')
def get_data(self, whatsit_detail):
_randomly_fail(p=0.05, ex=OSError, msg=f'"{whatsit_detail.id}.whatsit" not found!')
return whatsit_detail
def process(self, whatsit):
_randomly_fail(p=0.05, ex=RuntimeError, msg=f"Processing {whatsit.id} borked!")
return WhatsitResult(whatsit.id, "ok")
def write_result(self, result):
_randomly_fail(p=0.05, ex=OSError, msg=f"Writing {result.id}.result failed!")
logger.info(f"Writing whatsit_{result.id}.result")
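# A minimal sketch of the Processor ABC suggested in the module docstring. It is
# illustrative only (not used by the code below); WhatsitProcessor above already
# satisfies this interface, and other work types could add further subclasses.
from abc import ABC, abstractmethod


class Processor(ABC):
    """Abstract template for the four steps driven by batch_processor_template."""

    @abstractmethod
    def validate(self, item):
        """Raise if the work item is malformed."""

    @abstractmethod
    def get_data(self, item):
        """Fetch the data that the work item refers to."""

    @abstractmethod
    def process(self, data):
        """Do the actual work and return a result object."""

    @abstractmethod
    def write_result(self, result):
        """Persist the result."""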
def batch_processor_template(processor, batch, on_error_continue=True):
"""
Process a batch of items while collecting stats and logging failures.
"""
stats = ProcessingStats()
try:
with Timer() as total_t:
for item in batch:
try:
logger.info(f"Processing {item}")
processor.validate(item)
data = processor.get_data(item)
result = processor.process(data)
processor.write_result(result)
stats.n_succeeded += 1
stats.items.append({
"id": item.id,
"status": "ok",
})
except (RuntimeError, OSError, ValueError) as e:
logger.error(f"Error processing %s %s\n%s",
item, e, traceback.format_exc(limit=None, chain=True))
stats.n_failed += 1
stats.items.append({
"id": item.id,
"status": f"{e.__class__.__name__}",
})
if not on_error_continue:
raise e
finally:
stats.total_processing_time = total_t.elapsed
logger.info(f"{stats!r}")
if __name__ == "__main__":
batch_processor_template(
processor=WhatsitProcessor(),
batch=whatsit_batch(20),
)
|
from typing import List, Set, Dict
from tqdm import tqdm
from src.data.objects.stack import Stack
from src.features.frame_features_builder import FrameFeaturesBuilder
from src.features.overall_features_builder import OverallFeaturesBuilder
from src.preprocess.seq_coder import SeqCoder
from src.preprocess.token import Token
from src.stack_builders.builders import UserStackBuilder
CodedStack = List[int]
CodedStackTokens = List[Token[int]]
def get_non_empty_users(user2stack: Dict[int, List[Token[int]]]) -> Set[int]:
return {user_id for user_id in user2stack if len(user2stack[user_id]) > 0}
def fixers_stacks_ctor(stacks: List[Stack], y: List[int], user_ids: Set[int],
user_stack_builder: UserStackBuilder, frame_features_builder: FrameFeaturesBuilder,
seq_coder: SeqCoder, overall_features_builder: OverallFeaturesBuilder):
    """
    Code each stack and its per-user stacks, keep only reports where more than one
    user has a non-empty stack and the true assignee is among them, and remap each
    label to its index within that report's user list.
    """
    filtered_stacks, assignees_stacks, overall_features, y_filtered = [], [], [], []
for stack, label in tqdm(list(zip(stacks, y))):
user2stack = user_stack_builder(stack, user_ids)
frame_features_builder.build(stack, user2stack)
stack_coded = seq_coder.transform(stack)
user2stack_coded = {user_id: seq_coder.transform(user2stack[user_id]) for user_id in user_ids}
non_empty_users = get_non_empty_users(user2stack_coded)
if len(non_empty_users) > 1 and label in non_empty_users:
non_empty_users_list = list(non_empty_users)
filtered_stacks.append(stack_coded)
assignees_stacks.append([user2stack_coded[user_id] for user_id in non_empty_users_list])
user2overall_features = overall_features_builder(stack, user_ids)
overall_features.append([user2overall_features[user_id] for user_id in non_empty_users_list])
y_filtered.append(non_empty_users_list.index(label))
return filtered_stacks, assignees_stacks, overall_features, y_filtered
|
from flask import Blueprint
abouts_bluesrprints = Blueprint('abouts', __name__, template_folder='templates', static_folder='static',
url_prefix='/about')
from . import views
|
class ScrollEventType(Enum, IComparable, IFormattable, IConvertible):
"""
Describes the behavior that caused a System.Windows.Controls.Primitives.ScrollBar.Scroll event for a System.Windows.Controls.Primitives.ScrollBar control.
enum ScrollEventType,values: EndScroll (0),First (1),LargeDecrement (2),LargeIncrement (3),Last (4),SmallDecrement (5),SmallIncrement (6),ThumbPosition (7),ThumbTrack (8)
"""
    def __eq__(self, *args):
        """ x.__eq__(y) <==> x==y """
        pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
EndScroll = None
First = None
LargeDecrement = None
LargeIncrement = None
Last = None
SmallDecrement = None
SmallIncrement = None
ThumbPosition = None
ThumbTrack = None
value__ = None
|
import argparse
import pandas as pd
def get_args():
desc = 'Fixes a GTF with no genes'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-gtf', '-g', dest='gtf',
help='gtf to fix')
args = parser.parse_args()
return args
# check what entries are missing in the gtf
def is_bad_gtf(gtffile):
missing_gene = False
missing_trans = False
    # count the header lines so they can be skipped
with open(gtffile, 'r') as infile:
for i, line in enumerate(infile):
if '##' not in line:
break
skiprows = [j for j in range(0, i)]
df = pd.read_csv(gtffile, sep='\t', usecols=[2], skiprows=skiprows)
categories = df.iloc[:,0].unique()
# print(categories)
# what are we missing?
if 'gene' not in categories:
missing_gene = True
if 'transcript' not in categories:
missing_trans = True
return (missing_gene, missing_trans)
# get value associated with keyword in the 9th column of gtf
def get_field_value(key, fields):
if key not in fields:
return None
else:
return fields.split(key+' "')[1].split()[0].replace('";','')
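# Illustrative examples (the attribute strings are made up):
#   get_field_value('gene_id', 'gene_id "G1"; transcript_id "T1";') -> 'G1'
#   get_field_value('exon_id', 'gene_id "G1";') -> None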
def construct_new_entry(prev_line, coords, entry_type):
# print('Constructing new {} entry'.format(entry_type))
# add gene or transcript type, coords, and len
prev_line[2] = entry_type
prev_line[3] = min(coords)
prev_line[4] = max(coords)
prev_line[7] = '.'
# change the fields to reflect what type we are now
new_fields = ''
fields = prev_line[-1]
gid = get_field_value('gene_id', fields)
new_fields += 'gene_id "{}";'.format(gid)
# if there's a gene name add it too
gname = get_field_value('gene_name', fields)
if gname:
new_fields += 'gene_name "{}";'.format(gname)
if entry_type == 'transcript':
tid = get_field_value('transcript_id', fields)
new_fields += ' transcript_id "{}";'.format(tid)
prev_line[-1] = new_fields
prev_line = format_to_write(prev_line)
return prev_line
def make_ofile_name(matfile, prefix=None):
fname = matfile.split('.gtf')[0]
if prefix:
fname += '_'
fname += prefix
fname += '_reformatted.gtf'
return fname
def format_to_write(line):
return ''.join('\t'.join([str(i) for i in line])+'\n')
def main():
args = get_args()
gtffile = args.gtf
(missing_gene, missing_transcript) = is_bad_gtf(gtffile)
print('Missing transcript : {}'.format(missing_transcript))
# if nothing is missing, you good!
if not missing_gene and not missing_transcript:
print('GTF has both gene and transcript entries. Nothing to add.')
return
# loop through this thing
infile = open(gtffile, 'r')
outfile = open(make_ofile_name(gtffile), 'w')
curr_gid = ''
curr_gid_coords = []
curr_tid = ''
curr_tid_coords = []
first_transcript = True
first_exon = True
gene_list = []
transcript_list = []
prev_line = ''
# relevant entries
entries = ['exon']
if missing_gene:
entries.append('transcript')
if missing_gene or missing_transcript:
for line in infile:
# skip the dumb header lines
if line.startswith('#'):
continue
line = line.strip().split('\t')
fields = line[-1]
gid = get_field_value('gene_id', fields)
tid = get_field_value('transcript_id', fields)
if line[2] in entries:
# set variables if first entry
if first_exon:
curr_gid = gid
curr_tid = tid
curr_gid_coords = [int(line[3]), int(line[4])]
curr_tid_coords = [int(line[3]), int(line[4])]
first_exon = False
prev_line = line
# found a new transcript
elif missing_transcript and tid != curr_tid:
# create transcript entry and dump to current gene list
new_entry = construct_new_entry(
prev_line, curr_tid_coords, 'transcript')
transcript_list = new_entry+''.join(transcript_list)
gene_list += transcript_list
transcript_list = ''
curr_tid_coords = []
if missing_gene and gid != curr_gid:
# create gene entry and write current gene list
new_entry = construct_new_entry(
prev_line, curr_gid_coords, 'gene')
gene_list = new_entry+''.join(gene_list)
gene_list += ''.join(transcript_list)
transcript_list = ''
curr_tid_coords = []
outfile.write(gene_list)
gene_list = ''
curr_gid_coords = []
# update loop vars
curr_gid = gid
curr_tid = tid
curr_gid_coords.append(int(line[3]))
curr_gid_coords.append(int(line[4]))
curr_tid_coords.append(int(line[3]))
curr_tid_coords.append(int(line[4]))
prev_line = line
# regardless, append to list of entries to write
transcript_list += format_to_write(line)
# if we've reached the end of the file
# create transcript entry and dump to current gene list
if missing_transcript:
new_entry = construct_new_entry(
prev_line, curr_tid_coords, 'transcript')
transcript_list = new_entry+''.join(transcript_list)
gene_list += transcript_list
transcript_list = ''
# create gene entry and write current gene list
if missing_gene:
new_entry = construct_new_entry(
prev_line, curr_gid_coords, 'gene')
gene_list = new_entry+''.join(gene_list)
outfile.write(gene_list)
gene_list = ''
infile.close()
outfile.close()
if __name__ == '__main__':
main()
|
import sys
import aicv
import aicv.altairviz as altviz
data_file_name = "C:/Users/deeya/Downloads/200413_nitrous_oxide_cycling_regimes_data_for_repositories.csv"
# mapping of the column names to desired name
column_names_mapping = {'d15N_N2Oa_mean': "d15N-N2Oa_mean (per mil vs. atm N2)",
'd15N_N2Ob_mean': "d15N-N2Ob_mean (per mil vs. atm. N2)",
'd18O_N2O_mean': "d18O-N2O_mean (per mil vs. VSMOW)",
'N2O_mean': "N2O_mean (nM)",
'd18O_NO3_mean': 'd18O-NO3 avg (per mil vs. VSMOW)',
'd15N_NO3_mean': 'd15N-NO3 avg (per mil vs. atm. N2)',
'd15N_NO2': 'd15N-NO2 (per mil vs. atm N2)',
'd18O_NO2': 'd18O-NO2 (per mil vs. VSMOW)',
'Nitrite': "Nitrite [uM]",
'Oxygen': "Seabird Oxygen [umol/L]",
'NO3_mean': 'NO3_mean (uM)',
'Depth': 'Target Depth [m]'}
raw_features = [
'd15N_N2Oa_mean', 'd15N_N2Ob_mean', 'd18O_N2O_mean',
'N2O_mean', 'd18O_NO3_mean', 'd15N_NO3_mean',
'd15N_NO2', 'd18O_NO2', 'Nitrite',
'Oxygen', 'NO3_mean', 'Depth']
df = aicv.core.DataFrame(data_file_name)
df.map_columns(column_names_mapping)
df.add_derived_column("inv_N2O_mean = 1/N2O_mean")
av_config = {
"total_width": 1200,
"total_height": 680,
"tsne_heightfrac": 0.4,
"tsne_widthfrac": 0.2,
"fontsize": 10,
"padding_guess": 45,
"legend_selection": ['clusters']
}
av = altviz.AltairViz(df, raw_features)
av.set_config(av_config)
av.build_chart("My Dashboard",
altviz.AltairVerticalStacking(
av,
altviz.AltairHorizontalStacking(
av,
altviz.AltairScatterplot(
"tsne_ax1",
"tsne_ax2")(av),
altviz.AltairVerticalStacking(
av,
altviz.AltairHistogram(
"Target Depth [m]",
interactive=True)(av),
altviz.AltairHistogram(
"Seabird Oxygen [umol/L]",
interactive=True)(av),
altviz.AltairHistogram(
"inv_N2O_mean",
interactive=True)(av))(),
altviz.AltairVerticalStacking(
av,
altviz.AltairHistogram(
"d15N-N2Oa_mean (per mil vs. atm N2)",
interactive=True)(av),
altviz.AltairHistogram(
"d15N-N2Ob_mean (per mil vs. atm. N2)",
interactive=True)(av),
altviz.AltairHistogram(
"d18O-N2O_mean (per mil vs. VSMOW)",
interactive=True)(av))(),
altviz.AltairVerticalStacking(
av,
altviz.AltairHistogram(
"NO3_mean (uM)",
interactive=True)(av),
altviz.AltairHistogram(
"d15N-NO3 avg (per mil vs. atm. N2)",
interactive=True)(av),
altviz.AltairHistogram(
"d18O-NO3 avg (per mil vs. VSMOW)",
interactive=True)(av))(),
altviz.AltairVerticalStacking(
av,
altviz.AltairHistogram(
"Nitrite [uM]",
interactive=True)(av),
altviz.AltairHistogram(
"d15N-NO2 (per mil vs. atm N2)",
interactive=True)(av),
altviz.AltairHistogram(
"d18O-NO2 (per mil vs. VSMOW)",
interactive=True)(av))()
, av.show_legend()
)() # Row #1
, altviz.AltairHorizontalStacking(
av,
altviz.AltairScatterplot(
"inv_N2O_mean",
"d15N-N2Oa_mean (per mil vs. atm N2)")(av),
altviz.AltairScatterplot(
"inv_N2O_mean",
"d15N-N2Ob_mean (per mil vs. atm. N2)")(av),
altviz.AltairScatterplot(
"inv_N2O_mean",
"d18O-N2O_mean (per mil vs. VSMOW)")(av),
altviz.AltairScatterplot(
"d15N-N2Oa_mean (per mil vs. atm N2)",
"d18O-N2O_mean (per mil vs. VSMOW)")(av))()
, altviz.AltairHorizontalStacking(
av,
altviz.AltairScatterplot(
"d15N-NO2 (per mil vs. atm N2)",
"Nitrite [uM]")(av),
altviz.AltairScatterplot(
"d15N-NO2 (per mil vs. atm N2)",
"d18O-NO2 (per mil vs. VSMOW)")(av),
altviz.AltairScatterplot(
"Seabird Oxygen [umol/L]",
"NO3_mean (uM)")(av),
altviz.AltairScatterplot(
"d15N-NO3 avg (per mil vs. atm. N2)",
"d18O-NO3 avg (per mil vs. VSMOW)")(av))()
)()
)
|
class f:
def __init__(self,i,s,c,d):
self.i=i;self.s=s;self.c=c;self.d=d
l=[];input=__import__('sys').stdin.readline
for i in range(int(input())):
s,c,d=map(int,input().split())
l.append(f(i,s,c,d))
l.sort(key=lambda x:(-x.s,x.c,x.d))
print(l[0].i+1)
|
import pandas as pd
import matplotlib.pyplot as plt
from threading import Thread
from excel_reader import get_data
from excel_reader import get_all_columns
# The two functions below duplicate most of their code because the menu library's
# FunctionItem does not let us pass the plot type through cleanly.
# A cleaner approach is to accept a single 'plot_type' argument and use it in
# df.plot(x=..., y=..., kind=plot_type); see the sketch after create_bar_pot() below.
def create_line_pot():
# Variables
get_all_columns_arr = get_all_columns()
# Print all columns of table
print(f"Available columns: {get_all_columns()}\n")
# Input variables for plot
f_column = input("Enter name of the first column: ")
s_column = input("Enter name of the second column: ")
try:
        # Proceed only if both column names exist in the table
        if f_column in get_all_columns_arr and s_column in get_all_columns_arr:
# df = select columns from excel data
df = pd.DataFrame(get_data(), columns=[f_column, s_column])
# Create plot, using f_column, s_column, figsize=(width, height) - figure size (inch)
# kind='...' - type of plot line|bar
df.plot(x=f_column, y=s_column, figsize=(8, 8), kind='line')
# Show plot
plt.show()
except ArithmeticError:
print("Invalid field name")
def create_bar_pot():
get_all_columns_arr = get_all_columns()
print(f"Available columns: {get_all_columns()}\n")
f_column = input("Enter name of the first column: ")
s_column = input("Enter name of the second column: ")
try:
        if f_column in get_all_columns_arr and s_column in get_all_columns_arr:
df = pd.DataFrame(get_data(), columns=[f_column, s_column])
df.plot(x=f_column, y=s_column, figsize=(8, 8), kind='bar')
plt.show()
except ArithmeticError:
print("Invalid field name")
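# A possible refactor of the two functions above, as suggested in the comment at the
# top of this module: a single function that takes the plot kind as its argument.
# This is a sketch only; it reuses get_data()/get_all_columns() from excel_reader.
def create_plot(plot_type):
    available_columns = get_all_columns()
    print(f"Available columns: {available_columns}\n")
    f_column = input("Enter name of the first column: ")
    s_column = input("Enter name of the second column: ")
    if f_column in available_columns and s_column in available_columns:
        df = pd.DataFrame(get_data(), columns=[f_column, s_column])
        # plot_type is passed straight to pandas, e.g. 'line' or 'bar'
        df.plot(x=f_column, y=s_column, figsize=(8, 8), kind=plot_type)
        plt.show()
    else:
        print("Invalid field name")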
|
from bitshares_pricefeed.sources.indodax import IndoDax
def test_indodax_fetch(checkers):
source = IndoDax(quotes=['BTC', 'BTS'], bases=['IDR', 'USD'])
feed = source.fetch()
checkers.check_feed(feed, ['BTC:IDR', 'BTS:IDR'])
|
# https://www.hackerrank.com/challenges/even-tree/problem?h_r=profile
from collections import deque
def reconstruct(tree):
visited = [False] * len(tree)
queue = deque([0])
reconstructed = [list() for i in range(len(tree))]
while len(queue) > 0:
current = queue.popleft()
visited[current] = True
for i in tree[current]:
if not visited[i]:
reconstructed[current].append(i)
queue.append(i)
return reconstructed
cuts = 0
def dfs(tree, index):
global cuts
subtrees = []
for i in tree[index]:
subtrees.append(dfs(tree, i))
for vertices in subtrees[:]:
if not vertices % 2:
cuts += 1
subtrees.remove(vertices)
return sum(subtrees) + 1
# Complete the evenForest function below.
def evenForest(tree):
tree = reconstruct(tree)
dfs(tree, 0)
return (cuts)
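# Minimal illustrative run (an assumption about the expected input: `tree` is a
# 0-indexed adjacency list; the sample tree from the problem statement needs 2 cuts).
if __name__ == '__main__':
    sample_edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    adjacency = [[] for _ in range(10)]
    for u, v in sample_edges:
        adjacency[u - 1].append(v - 1)
        adjacency[v - 1].append(u - 1)
    print(evenForest(adjacency))  # expected output: 2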
|
import logging
import pandas as pd
import pytest
import calc
import calc.geom # noqa
import calc.prod # noqa
import calc.well # noqa
from calc.sets import DataSet, ProdSet, WellGeometrySet, WellSet # noqa
from const import HoleDirection, IHSPath, ProdStatRange # noqa
from db.models import ProdHeader
from db.models import ProdStat as Model
from executors import BaseExecutor, GeomExecutor, ProdExecutor, WellExecutor
from tests.utils import MockAsyncDispatch, rand_str
logger = logging.getLogger(__name__)
@pytest.fixture
def model_df():
ids = [(rand_str(length=10), rand_str(length=20)) for i in range(1, 11)]
good = [{"api10": i, "name": v} for i, v in ids]
model_df = pd.DataFrame(good)
yield model_df
@pytest.fixture
def prod_dispatcher(prod_h):
prod_dispatcher = MockAsyncDispatch({"data": prod_h})
yield prod_dispatcher
class TestBaseExecutor:
@pytest.fixture
def bexec(self):
yield BaseExecutor(HoleDirection.H)
def test_executor_init(self):
bexec = BaseExecutor(HoleDirection.H)
assert bexec.metrics.empty is True
assert {*bexec.metrics.columns} == {
"seconds",
"executor",
"hole_direction",
"operation",
"name",
"count",
}
assert bexec.download_kwargs == {}
assert bexec.process_kwargs == {}
assert bexec.persist_kwargs == {}
def test_repr(self):
assert repr(BaseExecutor(HoleDirection.H)) == "BaseExecutor[H]"
def test_init_validate_hole_dir(self):
bexec = BaseExecutor("H")
assert bexec.hole_dir is HoleDirection.H
with pytest.raises(ValueError):
BaseExecutor("BAD")
def test_raise_execution_error(self, bexec, caplog):
caplog.set_level(10)
hd = bexec.hole_dir.value
ct = 10
name = bexec.__class__.__name__
with pytest.raises(ValueError):
bexec.raise_execution_error(
"test_operation", ct, ValueError("value error message"), {}
)
expected = f"({name}[{hd}]) error during test_operationing: {ct} records in batch -- ValueError: value error message" # noqa
assert expected in caplog.text
def test_add_metric(self):
bexec = BaseExecutor(HoleDirection.H)
assert bexec.metrics.empty is True
bexec.add_metric(
operation="test_operation", name="test_name", seconds=30, count=10
)
assert bexec.metrics.empty is False
expected = {
"executor": "base",
"operation": "test_operation",
"name": "test_name",
"seconds": 30,
"count": 10,
"hole_direction": HoleDirection.H,
}
actual = bexec.metrics.iloc[0].to_dict()
assert expected == actual
@pytest.mark.asyncio
async def test_persist(self, bexec, model_df, bind):
await bexec._persist("entity_name", Model, model_df, reset_index=False)
actual = len(await Model.pk.values)
expected = model_df.shape[0]
assert expected == actual
@pytest.mark.asyncio
@pytest.mark.parametrize("df", [pd.DataFrame(), None])
async def test_persist_empty_or_none(self, bexec, df, caplog, bind):
caplog.set_level(10)
await bexec._persist("entity_name", Model, df)
assert "nothing to persist" in caplog.text
assert bexec.metrics.empty # no metrics added
class TestProdExecutor:
@pytest.fixture
def pexec(self):
yield ProdExecutor(HoleDirection.H)
def test_init_default(self):
pexec = ProdExecutor(HoleDirection.H)
assert pexec.metrics.empty is True
assert pexec.model_kwargs["stats"] == {"batch_size": 1000}
def test_init_model_kwargs(self):
header_kwargs = {1: 1}
monthly_kwargs = {2: 2}
stats_kwargs = {3: 3}
pexec = ProdExecutor(
HoleDirection.H,
header_kwargs=header_kwargs,
monthly_kwargs=monthly_kwargs,
stats_kwargs=stats_kwargs,
)
assert pexec.model_kwargs["header"] == header_kwargs
assert pexec.model_kwargs["monthly"] == monthly_kwargs
assert pexec.model_kwargs["stats"] == {"batch_size": 1000, **stats_kwargs}
@pytest.mark.parametrize("hole_dir", HoleDirection.members())
@pytest.mark.asyncio
async def test_download(self, prod_dispatcher, hole_dir):
pexec = ProdExecutor(hole_dir)
prodset = await pexec.download(
entities=["a", "b", "c"], dispatch=prod_dispatcher
)
# check prodset was returned
assert isinstance(prodset, ProdSet)
# check metric was added
assert pexec.metrics.shape[0] == 1
@pytest.mark.asyncio
async def test_download_bad_holedir(self):
pexec = ProdExecutor(HoleDirection.H)
pexec.hole_dir = ProdStatRange.FIRST
with pytest.raises(ValueError):
await pexec.download(entities=["a", "b", "c"])
@pytest.mark.parametrize("hole_dir", HoleDirection.members())
@pytest.mark.cionly
@pytest.mark.asyncio
async def test_download_catch_network_error(self, prod_dispatcher, hole_dir):
pexec = ProdExecutor(hole_dir)
with pytest.raises(Exception):
await pexec.download(entities=["a", "b", "c"])
@pytest.mark.cionly
@pytest.mark.asyncio
async def test_process_and_persist_with_default_option_sets(self, prod_df_h, bind):
prodset_h = prod_df_h.prodstats.to_prodset()
pexec = ProdExecutor(HoleDirection.H)
ps = await pexec.process(prodset_h)
assert ps.header.shape[0] == prod_df_h.index.levels[0].shape[0]
assert ps.monthly.shape[0] == prod_df_h.shape[0]
assert ps.stats.shape[0] > 0
await pexec.persist(ps)
expected = ps.stats.shape[0]
actual = await Model.agg.count()
assert expected == actual
@pytest.mark.cionly
@pytest.mark.asyncio
async def test_process_and_persist_h_one_option_set(self, prod_df_h, bind):
prodset_h = prod_df_h.prodstats.to_prodset()
pexec = ProdExecutor(HoleDirection.H)
opts = calc.prodstat_option_matrix(
ProdStatRange.FIRST, months=[6], include_zeroes=False
)
ps = await pexec.process(prodset_h, prodstat_opts=opts, ratio_opts=opts)
await pexec.persist(ps)
expected = ps.stats.shape[0]
actual = await Model.agg.count()
assert expected == actual
assert ps.header.shape[0] == prod_df_h.reset_index().api14.unique().shape[0]
assert ps.monthly.shape[0] == prod_df_h.shape[0]
assert ps.stats.shape[0] > 0
@pytest.mark.cionly
@pytest.mark.asyncio
async def test_process_and_persist_v_one_option_set(self, prod_df_v, bind):
# select an entity12 from the available dataframe that is likely to have
# more than one, but not too many, associated wells (for test speed)
entity12 = (
prod_df_v.groupby("entity12").count().iloc[:, 0].sort_values().index[2]
)
prod_df_v = prod_df_v.loc[prod_df_v.entity12 == entity12].copy(deep=True)
prodset_v = prod_df_v.prodstats.to_prodset()
pexec = ProdExecutor(HoleDirection.V)
opts = calc.prodstat_option_matrix(
ProdStatRange.FIRST, months=[6], include_zeroes=False
)
ps = await pexec.process(prodset_v, prodstat_opts=opts, ratio_opts=opts)
await pexec.persist(ps)
expected = ps.stats.shape[0]
actual = await Model.agg.count()
assert expected == actual
assert ps.header.shape[0] == prod_df_v.reset_index().api14.unique().shape[0]
assert ps.monthly.shape[0] == prod_df_v.shape[0]
assert ps.stats.shape[0] > 0
# @pytest.mark.cionly
# @pytest.mark.asyncio
# async def test_process_and_persist_v_with_default_option_sets(
# self, prod_df_v, bind
# ):
# prodset_v = prod_df_v.prodstats.to_prodset()
# pexec = ProdExecutor(HoleDirection.V)
# ps = await pexec.process(prodset_v)
# await pexec.persist(ps)
# expected = ps.stats.shape[0]
# actual = await Model.agg.count()
# assert expected == actual
# assert ps.header.shape[0] == prod_df_v.index.levels[0].shape[0]
# assert ps.monthly.shape[0] == prod_df_v.groupby(level=[0, 1]).first().shape[0]
# assert ps.stats.shape[0] > 0
@pytest.mark.asyncio
async def test_process_and_persist_h_tiny_batch(self, prod_df_h, bind):
api14s = (
prod_df_h.groupby("api14")
.count()
.iloc[:, 0]
.sort_values()
.index[:2]
.values.tolist()
)
prod_df_h = prod_df_h.loc[prod_df_h.api14.isin(api14s)]
prodset_h = prod_df_h.prodstats.to_prodset()
pexec = ProdExecutor(HoleDirection.H)
opts = calc.prodstat_option_matrix(
ProdStatRange.FIRST, months=[6], include_zeroes=False
)
ps = await pexec.process(prodset_h, prodstat_opts=opts, ratio_opts=opts)
await pexec.persist(ps)
expected = ps.stats.shape[0]
actual = await Model.agg.count()
assert expected == actual
assert ps.header.shape[0] == prod_df_h.reset_index().api14.unique().shape[0]
assert ps.monthly.shape[0] == prod_df_h.groupby(level=[0, 1]).first().shape[0]
assert ps.stats.shape[0] > 0
@pytest.mark.asyncio
async def test_process_and_persist_v_tiny_batch(self, prod_df_v, bind):
        # select a couple of api14s from the available dataframe to keep the
        # batch tiny (for test speed)
api14s = (
prod_df_v.groupby("api14")
.count()
.iloc[:, 0]
.sort_values()
.index[:2]
.values.tolist()
)
prod_df_v = prod_df_v.loc[prod_df_v.api14.isin(api14s)]
prodset_v = prod_df_v.prodstats.to_prodset()
pexec = ProdExecutor(HoleDirection.V)
opts = calc.prodstat_option_matrix(
ProdStatRange.FIRST, months=[6], include_zeroes=False
)
ps = await pexec.process(prodset_v, prodstat_opts=opts, ratio_opts=opts)
await pexec.persist(ps)
expected = ps.stats.shape[0]
actual = await Model.agg.count()
assert expected == actual
assert ps.header.shape[0] == prod_df_v.reset_index().api14.unique().shape[0]
assert ps.monthly.shape[0] == prod_df_v.shape[0]
assert ps.stats.shape[0] > 0
@pytest.mark.asyncio
async def test_arun_h_tiny_batch(self, prod_h, bind):
prod_h = prod_h[:5]
api14s = [x["api14"] for x in prod_h]
dispatch = MockAsyncDispatch({"data": prod_h})
opts = calc.prodstat_option_matrix(
ProdStatRange.FIRST, months=[6], include_zeroes=False
)
pexec = ProdExecutor(
HoleDirection.H,
download_kwargs={"dispatch": dispatch},
process_kwargs={"prodstat_opts": opts, "ratio_opts": opts},
)
ct, ps = await pexec.arun(api14s=api14s)
actual = await ProdHeader.agg.count()
assert len(api14s) == actual
@pytest.mark.asyncio
async def test_arun_v_tiny_batch(self, prod_v, bind):
prod_v = prod_v[:5]
api14s = [x["api14"] for x in prod_v]
dispatch = MockAsyncDispatch({"data": prod_v})
opts = calc.prodstat_option_matrix(
ProdStatRange.FIRST, months=[6], include_zeroes=False
)
pexec = ProdExecutor(
HoleDirection.V,
download_kwargs={"dispatch": dispatch},
process_kwargs={"prodstat_opts": opts, "ratio_opts": opts},
)
ct, ps = await pexec.arun(api14s=api14s)
actual = await ProdHeader.agg.count()
assert len(api14s) == actual
# def test_run_h_tiny_batch(self, prod_h, bind):
# # FIXME: causing asyncpg.exceptions._base.InterfaceError: cannot perform operation: another operation is in progress # noqa
# prod_h = prod_h[:5]
# api14s = [x["api14"] for x in prod_h]
# dispatch = MockAsyncDispatch({"data": prod_h})
# opts = calc.prodstat_option_matrix(
# ProdStatRange.FIRST, months=[6], include_zeroes=False
# )
# pexec = ProdExecutor(
# HoleDirection.H,
# download_kwargs={"dispatch": dispatch},
# process_kwargs={"prodstat_opts": opts, "ratio_opts": opts},
# )
# ct, ps = pexec.run(api14s=api14s, persist=False)
# print(ct)
# print(ps)
class TestGeomExecutor:
@pytest.fixture
def gexec(self):
yield GeomExecutor(HoleDirection.H)
def test_init_default(self):
gexec = GeomExecutor(HoleDirection.H)
assert gexec.metrics.empty is True
def test_init_model_kwargs(self):
locations_kwargs = {1: 1}
surveys_kwargs = {2: 2}
points_kwargs = {3: 3}
gexec = GeomExecutor(
HoleDirection.H,
locations_kwargs=locations_kwargs,
surveys_kwargs=surveys_kwargs,
points_kwargs=points_kwargs,
)
assert gexec.model_kwargs["locations"] == locations_kwargs
assert gexec.model_kwargs["surveys"] == surveys_kwargs
assert gexec.model_kwargs["points"] == {"batch_size": 1000, **points_kwargs}
@pytest.mark.parametrize("hole_dir", HoleDirection.members())
@pytest.mark.asyncio
async def test_download(self, wells_h_dispatcher, hole_dir):
gexec = GeomExecutor(hole_dir)
geomset = await gexec.download(
api14s=["a", "b", "c"], dispatch=wells_h_dispatcher
)
assert isinstance(geomset, WellGeometrySet)
assert gexec.metrics.shape[0] == 1
@pytest.mark.asyncio
async def test_download_bad_holedir(self):
gexec = GeomExecutor(HoleDirection.H)
gexec.hole_dir = ProdStatRange.FIRST
with pytest.raises(ValueError):
await gexec.download(zaza=["a", "b", "c"])
@pytest.mark.parametrize("hole_dir", HoleDirection.members())
@pytest.mark.asyncio
async def test_download_catch_network_error(self, hole_dir):
gexec = GeomExecutor(hole_dir)
with pytest.raises(Exception):
await gexec.download(zaza=["a", "b", "c"])
@pytest.mark.cionly
@pytest.mark.asyncio
async def test_process_and_persist_h_full(self, geomset_h, bind):
gexec = GeomExecutor(HoleDirection.H)
dataset: WellGeometrySet = await gexec.process(geomset_h)
await gexec.persist(dataset)
@pytest.mark.asyncio
async def test_process_and_persist_h_small_batch(self, geoms_h, bind):
geoms = geoms_h[:3]
geomset = pd.DataFrame.shapes.from_records(geoms, create_index=True)
gexec = GeomExecutor(HoleDirection.H)
dataset: WellGeometrySet = await gexec.process(geomset)
await gexec.persist(dataset)
@pytest.mark.cionly
@pytest.mark.asyncio
async def test_process_and_persist_v_full(self, geomset_v, bind):
gexec = GeomExecutor(HoleDirection.V)
dataset: WellGeometrySet = await gexec.process(geomset_v)
await gexec.persist(dataset)
@pytest.mark.asyncio
async def test_process_and_persist_v_small_batch(self, geoms_v, bind):
geoms = geoms_v[:3]
geomset = pd.DataFrame.shapes.from_records(geoms, create_index=True)
gexec = GeomExecutor(HoleDirection.V)
dataset: WellGeometrySet = await gexec.process(geomset)
await gexec.persist(dataset)
class TestWellExecutor:
@pytest.fixture
def exh(self, wells_h, fracs_h, geoms_h, prod_headers_h):
ihs_dispatch = MockAsyncDispatch({"data": wells_h})
fracfocus_dispatch = MockAsyncDispatch({"data": fracs_h})
geoms_dispatch = MockAsyncDispatch({"data": geoms_h})
prod_headers_dispatch = MockAsyncDispatch({"data": prod_headers_h})
exh = WellExecutor(
HoleDirection.H,
download_kwargs={
"dispatch": {"dispatch": ihs_dispatch},
"ihs_kwargs": {"dispatch": ihs_dispatch},
"fracfocus_kwargs": {"dispatch": fracfocus_dispatch},
},
process_kwargs={
"geoms_dispatch": geoms_dispatch,
"prod_headers_dispatch": prod_headers_dispatch,
},
)
yield exh
@pytest.fixture
def exv(self, wells_v, fracs_v, geoms_v, prod_headers_v):
ihs_dispatch = MockAsyncDispatch({"data": wells_v})
fracfocus_dispatch = MockAsyncDispatch({"data": fracs_v})
geoms_dispatch = MockAsyncDispatch({"data": geoms_v})
# geoms = await pd.DataFrame.shapes.from_ihs(
# IHSPath.well_h_geoms,
# api14s=api14h,
# # dispatch=geoms_dispatch,
# )
# await IHSClient.get_wells(
# IHSPath.well_h_geoms,
# api14s=api14h
# # , dispatch=geoms_dispatch
# )
# geoms_h
prod_headers_dispatch = MockAsyncDispatch({"data": prod_headers_v})
exv = WellExecutor(
HoleDirection.V,
download_kwargs={
"dispatch": {"dispatch": ihs_dispatch},
"ihs_kwargs": {"dispatch": ihs_dispatch},
"fracfocus_kwargs": {"dispatch": fracfocus_dispatch},
},
process_kwargs={
"geoms_dispatch": geoms_dispatch,
"prod_headers_dispatch": prod_headers_dispatch,
},
)
yield exv
def test_init_default(self):
ex = WellExecutor(HoleDirection.H)
assert ex.metrics.empty is True
def test_init_model_kwargs(self):
wells_kwargs = {1: 1}
depths_kwargs = {2: 2}
fracs_kwargs = {3: 3}
ips_kwargs = {4: 4}
stats_kwargs = {5: 5}
links_kwargs = {6: 6}
ex = WellExecutor(
HoleDirection.H,
wells_kwargs=wells_kwargs,
depths_kwargs=depths_kwargs,
fracs_kwargs=fracs_kwargs,
ips_kwargs=ips_kwargs,
stats_kwargs=stats_kwargs,
links_kwargs=links_kwargs,
)
assert ex.model_kwargs["wells"] == wells_kwargs
assert ex.model_kwargs["depths"] == depths_kwargs
assert ex.model_kwargs["fracs"] == fracs_kwargs
assert ex.model_kwargs["ips"] == ips_kwargs
assert ex.model_kwargs["stats"] == stats_kwargs
assert ex.model_kwargs["links"] == links_kwargs
@pytest.mark.parametrize("hole_dir", HoleDirection.members())
@pytest.mark.asyncio
async def test_download(self, hole_dir, wells_h, wells_v, fracs_h, fracs_v):
wells = wells_h if hole_dir == HoleDirection.H else wells_v
fracs = fracs_h if hole_dir == HoleDirection.H else fracs_v
ihs_dispatch = MockAsyncDispatch({"data": wells})
fracfocus_dispatch = MockAsyncDispatch({"data": fracs})
ex = WellExecutor(hole_dir)
wellset = await ex.download(
api14s=["a", "b", "c"],
ihs_kwargs={"dispatch": ihs_dispatch},
fracfocus_kwargs={"dispatch": fracfocus_dispatch},
)
assert isinstance(wellset, WellSet)
assert ex.metrics.shape[0] == 1
@pytest.mark.asyncio
async def test_download_bad_holedir(self):
ex = WellExecutor(HoleDirection.H)
ex.hole_dir = ProdStatRange.FIRST
with pytest.raises(ValueError):
await ex.download(zaza=["a", "b", "c"])
@pytest.mark.parametrize("hole_dir", HoleDirection.members())
@pytest.mark.asyncio
async def test_download_catch_network_error(self, hole_dir):
ex = WellExecutor(hole_dir)
with pytest.raises(Exception):
await ex.download(zaza=["a", "b", "c"])
@pytest.mark.cionly
@pytest.mark.asyncio
async def test_process_and_persist_h_full(self, exh, wellset_h, bind):
dataset: WellSet = await exh.process(wellset_h)
await exh.persist(dataset)
# @pytest.mark.asyncio
# async def test_process_and_persist_h_small_batch(self, geoms_h, bind):
# geoms = geoms_h[:3]
# wellset = pd.DataFrame.shapes.from_records(geoms, create_index=True)
# ex = WellExecutor(HoleDirection.H)
# dataset: WellSet = await ex.process(wellset)
# await ex.persist(dataset)
@pytest.mark.cionly
@pytest.mark.asyncio
async def test_process_and_persist_v_full(self, exv, wellset_v, bind):
dataset: WellSet = await exv.process(wellset_v)
await exv.persist(dataset)
# @pytest.mark.asyncio
# async def test_process_and_persist_v_small_batch(self, geoms_v, bind):
# geoms = geoms_v[:3]
# wellset = pd.DataFrame.shapes.from_records(geoms, create_index=True)
# ex = WellExecutor(HoleDirection.V)
# dataset: WellSet = await ex.process(wellset)
# await ex.persist(dataset)
if __name__ == "__main__":
import util
import loggers
from db import db
util.aio.async_to_sync(db.startup())
loggers.config(level=10)
# dir(prod_dispatcher)
# ihs_prod = load_json("tests/fixtures/ihs_prod.json")
# prod_df_h = ProductionWellSet(wells=ihs_prod).df().copy(deep=True)
# prodset = prod_df_h.prodstats.preprocess_header_and_monthly_records()
# prod_dispatcher = next(prod_dispatcher.__wrapped__(ihs_prod))
# x = len(calc.PRODSTAT_DEFAULT_OPTIONS)
# y = len(calc.PRODSTAT_DEFAULT_RATIO_OPTIONS)
# ((x * 4) + (y * (4 * 3))) * 124
# 54099
|
from likepy.starlark import StarLarkParser
from likepy.asthelper import dump_tree
import ast
code = '''
def controlflow():
x = 0
y = 'abc'
z = "ab""cdef"
a = ()
b = ...
pass
'''
def test_simple_parse_1():
p = StarLarkParser(code=code)
m = p.parse()
print(m)
print(dump_tree(m))
print(dump_tree(ast.parse(code))) |
from typing import List, Optional
import pytest
from httpx import AsyncClient
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from ops2deb.exceptions import Ops2debUpdaterError
from ops2deb.logger import enable_debug
from ops2deb.updater import GenericUpdateStrategy, GithubUpdateStrategy
enable_debug(True)
@pytest.fixture
def app_factory():
def _app_response(request: Request):
return Response(status_code=200)
def _app_factory(versions: List[str]):
app = Starlette(debug=True)
for version in versions:
app.add_route(
f"/releases/{version}/some-app.tar.gz", _app_response, ["HEAD", "GET"]
)
return app
return _app_factory
@pytest.fixture
def github_app_factory():
def _github_app_factory(latest_release: str, versions: Optional[List[str]] = None):
versions = versions or []
app = Starlette(debug=True)
@app.route("/owner/name/releases/{version}/some-app.tar.gz")
def github_asset(request: Request):
version = request.path_params["version"]
status = 200 if version in versions or version == latest_release else 404
return Response(status_code=status)
@app.route("/repos/owner/name/releases/latest")
def github_release_api(request: Request):
return JSONResponse({"tag_name": latest_release})
return app
return _github_app_factory
@pytest.mark.parametrize(
"versions,expected_result",
[
(["1.0.0", "1.1.0"], "1.1.0"),
(["1.0.0", "1.1.3"], "1.1.3"),
(["1.0.0", "1.0.1", "1.1.0"], "1.1.0"),
(["1.0.0", "1.1.1", "2.0.0"], "1.1.1"),
(["1.0.0", "2.0.0"], "2.0.0"),
(["1.0.0", "2.0.3"], "2.0.3"),
(["1.0.0", "1.1.0", "2.0.0"], "1.1.0"),
(["1.0.0", "1.0.1", "1.0.2", "1.1.0", "1.1.1"], "1.1.1"),
],
)
async def test_generic_update_strategy_should_find_expected_blueprint_release(
blueprint_factory, app_factory, versions, expected_result
):
blueprint = blueprint_factory(
fetch={
"url": "http://test/releases/{{version}}/some-app.tar.gz",
"sha256": "deadbeef",
}
)
app = app_factory(versions)
async with AsyncClient(app=app) as client:
update_strategy = GenericUpdateStrategy(client)
assert await update_strategy(blueprint) == expected_result
@pytest.mark.parametrize(
"fetch_url,tag_name",
[
("https://github.com/owner/name/releases/{{version}}/some-app.tar.gz", "2.3.0"),
("https://github.com/owner/name/releases/v{{version}}/some-app.tar.gz", "v2.3.0"),
],
)
async def test_github_update_strategy_should_find_expected_blueprint_release(
blueprint_factory, github_app_factory, fetch_url, tag_name
):
app = github_app_factory(tag_name)
blueprint = blueprint_factory(fetch={"url": fetch_url, "sha256": "deadbeef"})
async with AsyncClient(app=app) as client:
update_strategy = GithubUpdateStrategy(client)
assert await update_strategy(blueprint) == "2.3.0"
async def test_github_update_strategy_should_not_return_an_older_version_than_current_one(
blueprint_factory, github_app_factory
):
app = github_app_factory("0.1.0", versions=["1.0.0"])
url = "https://github.com/owner/name/releases/{{version}}/some-app.tar.gz"
blueprint = blueprint_factory(fetch={"url": url, "sha256": "deadbeef"})
async with AsyncClient(app=app) as client:
update_strategy = GithubUpdateStrategy(client)
assert await update_strategy(blueprint) == "1.0.0"
async def test_github_update_strategy_should_fail_gracefully_when_asset_not_found(
blueprint_factory, github_app_factory
):
app = github_app_factory("someapp-v2.3.0")
url = "https://github.com/owner/name/releases/someapp-v{{version}}/some-app.tar.gz"
blueprint = blueprint_factory(fetch={"url": url, "sha256": "deadbeef"})
async with AsyncClient(app=app) as client:
with pytest.raises(Ops2debUpdaterError) as e:
await GithubUpdateStrategy(client)(blueprint)
assert "Failed to determine latest release URL" in str(e)
|
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class SegmentTree(object):
@abstractmethod
def build(self, iterable):
pass
@abstractmethod
def get(self, idx):
pass
@abstractmethod
def update(self, idx, val):
pass
@abstractmethod
def query(self, low, high):
pass
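# A minimal concrete subclass, included as an illustrative sketch only (it is not
# part of the original interface): a sum segment tree over a flat array, assuming
# 0-based indices and an inclusive [low, high] query range.
class SumSegmentTree(SegmentTree):
    def build(self, iterable):
        data = list(iterable)
        self._n = len(data)
        self._tree = [0] * (2 * self._n)
        # Leaves occupy the second half of the flat array.
        for i, val in enumerate(data):
            self._tree[self._n + i] = val
        # Each internal node holds the sum of its two children.
        for i in range(self._n - 1, 0, -1):
            self._tree[i] = self._tree[2 * i] + self._tree[2 * i + 1]
        return self

    def get(self, idx):
        return self._tree[self._n + idx]

    def update(self, idx, val):
        pos = self._n + idx
        self._tree[pos] = val
        pos //= 2
        while pos >= 1:
            self._tree[pos] = self._tree[2 * pos] + self._tree[2 * pos + 1]
            pos //= 2

    def query(self, low, high):
        # Sum over the inclusive index range [low, high].
        result = 0
        lo, hi = low + self._n, high + self._n + 1
        while lo < hi:
            if lo & 1:
                result += self._tree[lo]
                lo += 1
            if hi & 1:
                hi -= 1
                result += self._tree[hi]
            lo //= 2
            hi //= 2
        return result


# Example: SumSegmentTree().build([1, 2, 3, 4]).query(1, 2) == 5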
|
import io
import pytest
import random
@pytest.fixture
def stream():
class MockFileStream(io.BytesIO):
def __init__(self, fd, *args, **kwargs):
self._fd = fd
super(MockFileStream, self).__init__(*args, **kwargs)
def fileno(self):
return self._fd
@property
def data(self):
return self.getvalue()
@data.setter
def data(self, data):
self.truncate()
self.write(data)
self.seek(0)
return MockFileStream(random.randrange(4, 100))
istream = stream
ostream = stream
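# Illustrative usage in a test module (a sketch, not part of these fixtures):
#
#   def test_roundtrip(stream):
#       stream.data = b"hello"
#       assert stream.read() == b"hello"
#       assert isinstance(stream.fileno(), int)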
|
import vdb.stalker as v_stalker
import vtrace.tests as vt_tests
breakpoints = {
'windows': 'ntdll.NtTerminateProcess',
'linux': 'libc.exit',
'freebsd': 'libc.exit',
}
class VdbStalkerTest(vt_tests.VtraceProcessTest):
def test_vdb_stalker(self):
plat = self.trace.getMeta('Platform')
symname = breakpoints.get(plat)
entry = self.trace.parseExpression(symname)
v_stalker.addStalkerEntry(self.trace, entry)
self.runUntilExit()
self.assertTrue(len(v_stalker.getStalkerHits(self.trace)) >= 2)
|
#!/usr/bin/env python3
import argparse
import re
from urllib.parse import unquote
from pynvim import attach
# --------------------------------------------------------------
# functions
# --------------------------------------------------------------
kind_dict = {
    1: 'File', 2: 'Module', 3: 'Namespace', 4: 'Package', 5: 'Class',
    6: 'Method', 7: 'Property', 8: 'Field', 9: 'Constructor', 10: 'Enum',
    11: 'Interface', 12: 'Function', 13: 'Variable', 14: 'Constant',
    15: 'String', 16: 'Number', 17: 'Boolean', 18: 'Array', 19: 'Object',
    20: 'Key', 21: 'Null', 22: 'EnumMember', 23: 'Struct', 24: 'Event',
    25: 'Operator', 26: 'TypeParameter',
}
def get_kind(val):
    return kind_dict.get(val, 'Unknown')
def get_exclude_re_patterns(symbol_excludes):
    re_patterns = []
    for pattern in symbol_excludes:
        # Translate a glob-style exclude pattern into a regex: escape dots,
        # turn '**' into '.*' (via a temporary '|' marker so the single-'*'
        # substitution below does not touch it), and turn a remaining '*'
        # into "any run of characters except '/'".
        re_pattern = re.sub(r'\.', r'\.', pattern)
        re_pattern = re.sub(r'\*\*', r'.|', re_pattern)
        re_pattern = re.sub(r'\*', r'[^/]*', re_pattern)
        re_pattern = re.sub(r'\|', r'*', re_pattern)
        re_patterns.append(re_pattern)
    return re_patterns
def file_is_excluded(filename, exclude_re_patterns):
for pattern in exclude_re_patterns:
if re.match(pattern, filename):
return True
return False
# --------------------------------------------------------------
# execution
# --------------------------------------------------------------
parser = argparse.ArgumentParser(
description='connect to running Nvim to get CocAction("getWorkspaceSymbols", query)')
parser.add_argument('socket', help="returned by Nvim's v:servername")
parser.add_argument('bufnr', help="Nvim buffer where query should be done")
parser.add_argument(
'query', help="query to pass to CocAction('getWorkspaceSymbols')")
parser.add_argument('ansi_typedef', help="ansi code for highlight Typedef")
parser.add_argument('ansi_comment', help="ansi code for highlight Comment")
parser.add_argument('ansi_ignore', help="ansi code for highlight Ignore")
parser.add_argument('symbol_excludes', help="Coc config symbol excludes list")
parser.add_argument(
'--kind', nargs=1, help='only search for a specific "kind" (class, function, etc)')
args = parser.parse_args()
nvim = attach('socket', path=args.socket)
items = nvim.call('CocAction', 'getWorkspaceSymbols', args.query,
int(args.bufnr))
if items is None or len(items) == 0:
exit(0)
symbol_excludes = eval(args.symbol_excludes)
exclude_re_patterns = get_exclude_re_patterns(symbol_excludes)
ignored_colon = args.ansi_ignore.replace('STRING', ':')
for item in items:
lnum = item['location']['range']['start']['line'] + 1
col = item['location']['range']['start']['character']
filename = unquote(item['location']['uri'].replace('file://', ''))
kind = get_kind(item['kind'])
# filters
if args.kind is not None and args.kind[0].lower() != kind.lower():
continue
if file_is_excluded(filename, exclude_re_patterns):
continue
name_with_ansi = item['name']
kind_with_ansi = args.ansi_typedef.replace('STRING', '[' + kind + ']')
filename_with_ansi = args.ansi_comment.replace('STRING', filename)
lnum_col_with_ansi = args.ansi_ignore.replace('STRING',
':' + str(lnum) + ':' + str(col))
print("{0} {1}{2}{3}{4}".format(
name_with_ansi, kind_with_ansi, ignored_colon, filename_with_ansi,
lnum_col_with_ansi))
|
import numpy as np
from computational_thermodynamics import equilibrium
# Physical parameters
T = 360 #K
P = 2 * 0.101325 #MPa
# The feed composition consists of A, B, C, D, and I (an inert). In this example,
# all components are present in equal amounts in the feed
z = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
#Load the equilibria module
Equi = equilibrium.Equilibria()
#Run calculation
(lagrange, nt, x) = Equi.optimize_equilibrium(T,P,z)
# lagrange (numpy.ndarray) - Lagrange multipliers (see pdf document)
# nt (float) - Total number of moles at equilibrium
# x (numpy.ndarray) - Mole fractions of all 15 components
#Display results
print("Lagrange multipliers:")
print(lagrange)
print("\n\nnt = " + str(nt) + " moles\n\n")
print("Mole fractions\n")
compounds = ["A","B","C","D","I","A2","AB","AC","AD","B2","BC","BD","C2","CD","D2"]
print ("{:<10} {:<15}".format("Compound", "Mole Fraction"))
print("---------------------------------------")
for i in range(0,np.size(x)):
print ("{:<10} {:<15}".format(compounds[i], x[i]))
print ("{:<10} {:<15}".format("Sum", np.sum(x))) |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: github_release
short_description: Interact with GitHub Releases
description:
- Fetch metadata about GitHub Releases
version_added: 2.2
options:
token:
description:
- GitHub Personal Access Token for authenticating. Mutually exclusive with C(password).
user:
description:
- The GitHub account that owns the repository
required: true
password:
description:
- The GitHub account password for the user. Mutually exclusive with C(token).
version_added: "2.4"
repo:
description:
- Repository name
required: true
action:
description:
- Action to perform
required: true
choices: [ 'latest_release', 'create_release' ]
tag:
description:
- Tag name when creating a release. Required when using action is set to C(create_release).
version_added: 2.4
target:
description:
- Target of release when creating a release
version_added: 2.4
name:
description:
- Name of release when creating a release
version_added: 2.4
body:
description:
- Description of the release when creating a release
version_added: 2.4
draft:
description:
- Sets if the release is a draft or not. (boolean)
type: 'bool'
default: 'no'
version_added: 2.4
prerelease:
description:
- Sets if the release is a prerelease or not. (boolean)
type: bool
default: 'no'
version_added: 2.4
author:
- "Adrian Moisey (@adrianmoisey)"
requirements:
- "github3.py >= 1.0.0a3"
'''
EXAMPLES = '''
- name: Get latest release of a public repository
github_release:
user: ansible
repo: ansible
action: latest_release
- name: Get latest release of testuser/testrepo
github_release:
token: tokenabc1234567890
user: testuser
repo: testrepo
action: latest_release
- name: Get latest release of test repo using username and password. Ansible 2.4.
github_release:
user: testuser
password: secret123
repo: testrepo
action: latest_release
- name: Create a new release
github_release:
token: tokenabc1234567890
user: testuser
repo: testrepo
action: create_release
tag: test
target: master
name: My Release
body: Some description
'''
RETURN = '''
create_release:
description:
- Version of the created release
- "For Ansible version 2.5 and later, if specified release version already exists, then State is unchanged"
- "For Ansible versions prior to 2.5, if specified release version already exists, then State is skipped"
type: str
returned: success
sample: 1.1.0
latest_release:
description: Version of the latest release
type: str
returned: success
sample: 1.1.0
'''
import traceback
GITHUB_IMP_ERR = None
try:
import github3
HAS_GITHUB_API = True
except ImportError:
GITHUB_IMP_ERR = traceback.format_exc()
HAS_GITHUB_API = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
repo=dict(required=True),
user=dict(required=True),
password=dict(no_log=True),
token=dict(no_log=True),
action=dict(
required=True, choices=['latest_release', 'create_release']),
tag=dict(type='str'),
target=dict(type='str'),
name=dict(type='str'),
body=dict(type='str'),
draft=dict(type='bool', default=False),
prerelease=dict(type='bool', default=False),
),
supports_check_mode=True,
mutually_exclusive=(('password', 'token'),),
required_if=[('action', 'create_release', ['tag']),
('action', 'create_release', ['password', 'token'], True)],
)
if not HAS_GITHUB_API:
module.fail_json(msg=missing_required_lib('github3.py >= 1.0.0a3'),
exception=GITHUB_IMP_ERR)
repo = module.params['repo']
user = module.params['user']
password = module.params['password']
login_token = module.params['token']
action = module.params['action']
tag = module.params.get('tag')
target = module.params.get('target')
name = module.params.get('name')
body = module.params.get('body')
draft = module.params.get('draft')
prerelease = module.params.get('prerelease')
# login to github
try:
if password:
gh_obj = github3.login(user, password=password)
elif login_token:
gh_obj = github3.login(token=login_token)
else:
gh_obj = github3.GitHub()
# test if we're actually logged in
if password or login_token:
gh_obj.me()
except github3.exceptions.AuthenticationFailed as e:
module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e),
details="Please check username and password or token "
"for repository %s" % repo)
repository = gh_obj.repository(user, repo)
if not repository:
module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo))
if action == 'latest_release':
release = repository.latest_release()
if release:
module.exit_json(tag=release.tag_name)
else:
module.exit_json(tag=None)
if action == 'create_release':
release_exists = repository.release_from_tag(tag)
if release_exists:
module.exit_json(changed=False, msg="Release for tag %s already exists." % tag)
release = repository.create_release(
tag, target, name, body, draft, prerelease)
if release:
module.exit_json(changed=True, tag=release.tag_name)
else:
module.exit_json(changed=False, tag=None)
if __name__ == '__main__':
main()
|
###
# Copyright 2020 Hewlett Packard Enterprise, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# -*- coding: utf-8 -*-
"""Containers used for REST requests and responses."""
import sys
import json
from collections import (OrderedDict)
from six import text_type, string_types, StringIO, BytesIO
from six.moves import http_client
class JSONEncoder(json.JSONEncoder):
"""JSON Encoder class"""
def default(self, obj):
"""Set defaults in JSON encoder class
:param obj: object to be encoded into JSON.
:type obj: RestResponse
:returns: A JSON :class:`OrderedDict`
"""
if isinstance(obj, RestResponse):
jsondict = OrderedDict()
jsondict['Status'] = obj.status
jsondict['Headers'] = obj.getheaders()
if obj.read:
jsondict['Content'] = obj.dict
return jsondict
return json.JSONEncoder.default(self, obj)
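# Usage sketch (illustrative, not part of the original module): serializing a
# RestResponse instance with this encoder yields the Status/Headers/Content dict.
#
#   json.dumps(rest_response, cls=JSONEncoder)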
class JSONDecoder(json.JSONDecoder):
"""Custom JSONDecoder that understands our types"""
def decode(self, json_string):
"""Decode JSON string
:param json_string: The JSON string to be decoded into usable data.
:type json_string: str
:returns: returns a parsed dict
"""
parsed_dict = super(JSONDecoder, self).decode(json_string)
return parsed_dict
class _FakeSocket(BytesIO):
"""slick way to parse a http response.
http://pythonwise.blogspot.com/2010/02/parse-http-response.html"""
def makefile(self, *args, **kwargs):
"""Return self object"""
return self
class RisObject(dict):
"""Converts a JSON/Rest dict into a object so you can use .property notation
:param d: dictionary to be converted
:type d: dict
"""
def __init__(self, d):
"""Initialize RisObject
"""
super(RisObject, self).__init__()
self.update(**dict((k, self.parse(value)) for k, value in list(d.items())))
def __getattr__(self, k):
try:
return self[k]
except KeyError:
raise AttributeError(
"type object '%s' has no attribute '%s'" %
(self.__class__.__name__, k)
)
@classmethod
def parse(cls, value):
"""Parse for RIS value
:param cls: class referenced from class method
:type cls: RisObject
:param value: value to be parsed
:type value: data type
:returns: returns parsed value
"""
if isinstance(value, dict):
return cls(value)
elif isinstance(value, list):
return [cls.parse(i) for i in value]
return value
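# Illustration of the dot-notation access RisObject provides (hypothetical data,
# kept as a comment so importing the module is unaffected):
#
#   obj = RisObject.parse({'Status': {'Health': 'OK'}})
#   obj.Status.Health  # -> 'OK'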
class RestRequest(object):
"""Holder for Request information
:param path: The URI path.
:type path: str
:param method: method to be implemented
:type method: str
:param data: body payload for the rest call
:type data: dict
"""
def __init__(self, path, method='GET', data='', url=None):
self._path = path
self._body = data
self._method = method
self.url = url
@property
def path(self):
"""The path the request is made against."""
return self._path
@property
def method(self):
"""The method to implement."""
return self._method
@property
def body(self):
"""The body to pass along with the request, if any."""
return self._body
def __str__(self):
"""Format string"""
body = '' if not self._body else self._body
try:
return "{} {}\n\n{}".format(self.method, self.path, body)
except:
return "{} {}\n\n{}".format(self.method, self.path, '')
class RestResponse(object):
"""Returned by Rest requests
:param rest_request: Holder for request information
:type rest_request: :class:`RestRequest` object
:param http_response: Response from HTTP
:type http_response: :class:`HTTPResponse` object
"""
def __init__(self, rest_request, http_response):
self._read = None
self._status = None
self._headers = None
self._session_key = None
self._session_location = None
self._rest_request = rest_request
self._http_response = http_response
self._read = self._http_response.data if http_response is not None else None
self._ori = self._read
@property
def read(self):
"""The response body, attempted to be translated into json, else is a string."""
if self._read and not isinstance(self._read, text_type):
self._read = self._read.decode("utf-8", "ignore")
return self._read
@read.setter
def read(self, read):
"""Property for setting _read
:param read: The data to set to read.
:type read: str
"""
if read is not None:
if isinstance(read, dict):
read = json.dumps(read, indent=4)
self._read = read
def getheaders(self):
"""Get all headers included in the response."""
return dict(self._http_response.headers) if self._http_response\
is not None else self._headers
def getheader(self, name):
"""Case-insensitive search for an individual header
:param name: The header name to retrieve.
:type name: str
:returns: returns a header from HTTP response or None if not found.
"""
def search_dict(search_key, dct):
for key, val in dct.items():
if key.lower() == search_key.lower():
return val
return None
if self._http_response:
return search_dict(name, self._http_response.headers)
return search_dict(name, self._headers)
def loaddict(self, newdict):
"""Property for setting JSON data. Used during initialization.
:param newdict: The string data to set as JSON data.
:type newdict: str
"""
self._read = json.dumps(newdict, indent=4)
@property
def dict(self):
"""The response body data as an dict"""
try:
return json.loads(self.read)
except ValueError as exp:
if self.path != '/smbios':
sys.stderr.write("An invalid response body was returned: %s" % exp)
return None
@property
def obj(self):
"""The response body data as an object"""
return RisObject.parse(self.dict)
@property
def ori(self):
"""The original response body data"""
return self._ori
@property
def status(self):
"""The status code of the request."""
if self._status:
return self._status
return self._http_response.status if self._http_response is not None else self._status
@property
def session_key(self):
"""The saved session key for the connection."""
if self._session_key:
return self._session_key
self._session_key = self.getheader('x-auth-token')
return self._session_key
@property
def session_location(self):
"""The saved session location, used for logging out."""
if self._session_location:
return self._session_location
self._session_location = self.getheader('location')
return self._session_location
@property
def request(self):
"""The saved http request the response was generated by."""
return self._rest_request
@property
def path(self):
"""The path the request was made against."""
return self.request.path
def __str__(self):
"""Class string formatter"""
headerstr = ''
for kiy, val in self.getheaders().items():
headerstr += '%s %s\n' % (kiy, val)
return "%(status)s\n%(headerstr)s\n\n%(body)s" % \
{'status': self.status, 'headerstr': headerstr, 'body': self.read}
class RisRestResponse(RestResponse):
"""Returned by Rest requests from CHIF
:param rest_request: Holder for request information
:type rest_request: :class:`RestRequest` object
:param resp_text: text from response to be buffered and read
:type resp_text: str
"""
def __init__(self, rest_request, resp_txt):
"""Initialization of RisRestResponse"""
if not isinstance(resp_txt, string_types):
resp_txt = "".join(map(chr, resp_txt))
self._respfh = StringIO(resp_txt)
self._socket = _FakeSocket(bytearray(list(map(ord, self._respfh.read()))))
response = http_client.HTTPResponse(self._socket)
response.begin()
response.data = response.read()
response.headers = {ki[0]:ki[1] for ki in response.getheaders()}
super(RisRestResponse, self).__init__(rest_request, response)
class StaticRestResponse(RestResponse):
"""A RestResponse object used when data is being cached."""
def __init__(self, **kwargs):
restreq = None
if 'restreq' in kwargs:
restreq = kwargs['restreq']
super(StaticRestResponse, self).__init__(restreq, None)
if 'Status' in kwargs:
self._status = kwargs['Status']
if 'Headers' in kwargs:
self._headers = kwargs['Headers']
if 'session_key' in kwargs:
self._session_key = kwargs['session_key']
if 'session_location' in kwargs:
self._session_location = kwargs['session_location']
if 'Content' in kwargs:
content = kwargs['Content']
if isinstance(content, string_types):
self._read = content
else:
self._read = json.dumps(content)
else:
self._read = ''
def getheaders(self):
"""Function for accessing the headers"""
returnlist = {}
if isinstance(self._headers, dict):
returnlist = self._headers
elif isinstance(self._headers, (list, tuple)):
returnlist = {ki[0]:ki[1] for ki in self._headers}
else:
            for item in self._headers:
                # take the first key/value pair of each header item (works on Python 2 and 3)
                key, value = next(iter(item.items()))
                returnlist[key] = value
return returnlist
|
import cv2
from skimage.feature import hog
# Read the sample image and convert it to grayscale, since skimage's hog
# expects a single-channel image unless a channel/multichannel option is given.
img = cv2.imread(r'images\test-3-classes\3\944.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ft, im = hog(gray, visualize=True)
cv2.imshow('a', im / 255)
cv2.waitKey(0)
cv2.destroyAllWindows() |
from ..feature_types import raw_feature
import LAMP
@raw_feature(
name="lamp.screen_state",
dependencies=["lamp.screen_state"]
)
def screen_state(resolution=None, limit=2147483647, cache=True, **kwargs):
"""
    Get all screen state data bounded by the time interval and optionally subsample the data.
    :param resolution (int): The subsampling resolution (TODO).
    :param limit (int): The maximum number of screen state events to query for (defaults to INT_MAX).
    :return timestamp (int): The UTC timestamp for the screen state event.
:return TODO
"""
data = LAMP.SensorEvent.all_by_participant(kwargs['id'],
origin="lamp.screen_state",
_from=kwargs['start'],
to=kwargs['end'],
_limit=limit)['data']
return [{'timestamp': x['timestamp'], **x['data']} for x in data]
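# Usage sketch (participant id and timestamps are made up; the decorated function
# expects id/start/end keyword arguments and configured LAMP credentials):
#
#   events = screen_state(id="U1234567890", start=1600000000000, end=1600086400000, limit=1000)
#   events[0]  # -> {'timestamp': ..., ...}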
|
from flask import Flask, request
app = Flask(__name__)
animais = [
{
'nome':'A',
'tipo':'Cachorro',
'medidas':{
'altura':0.5,
'peso':20
}
},
{
'nome':'B',
'tipo':'Gato',
'medidas':{
'altura':0.3,
'peso':10
}
},
{
'nome':'C',
'tipo':'Leopardo',
'medidas':{
'altura':1,
'peso':70
}
}
]
@app.route('/')
def olaMundo():
return 'Ola mundo!'
@app.route('/animal')
def listar():
    return {'animal': animais}
@app.route('/animal/<nome>')
def pegar(nome):
    for animal in animais:
        if animal['nome'] == nome:
            return animal
    return 'Animal não encontrado'
# Assumes the new animal is posted as a JSON body (see the request sketch below).
@app.route('/animal', methods=['POST'])
def adicionar():
    animal = request.get_json()
    animais.append(animal)
    return {'animal': animais}
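# Rough sketch of exercising the API once the server is running (default
# host/port assumed from app.run()):
#
#   curl http://127.0.0.1:5000/animal
#   curl http://127.0.0.1:5000/animal/A
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"nome": "D", "tipo": "Papagaio", "medidas": {"altura": 0.2, "peso": 1.0}}' \
#        http://127.0.0.1:5000/animal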
if __name__ == '__main__':
app.run() |
import math
from maze import *
###########################################################
# Compute the distance between a ray, defined by a point p and
# an angle alpha, and an horizontal segment [a,b]
# distance is math.inf if no intersection
#
# @param px, py coordinates of point p
# @param alpha angle of ray from x axis
# @param ax, ay coordinates of point a
# @param bx x coordinate of point b (here, by=ay)
#
# @return [distance, tx, ty] where t is the intersection point
# None if no intersection
#
###########################################################
def Distance2HSegment(px, py, alpha, ax, ay, bx, by):
#### DEBUG ####
#print(f"Distance2HSegment {px},{py},{alpha},{ax},{ay},{bx},{ay}")
# Normalise alpha in ]-pi,pi]
alpha = math.fmod(alpha, 2*math.pi)
# here, alpha is in ]-2pi, 2pi[
if alpha < 0:
alpha = alpha + 2*math.pi
# here, alpha is in [0, 2pi[
if alpha > math.pi:
alpha = alpha - 2*math.pi
# finished, alpha is now in ]-pi, pi]
# set default results
distance = math.inf
tx = None
ty = None
# if b is to the left of a, swap a and b so ax < bx
if bx < ax:
cx = ax
ax = bx
bx = cx
if alpha == 0:
# intersection only if py == ay
if py == ay:
            # intersection only if px < bx
if px < bx:
if px > ax:
# p is on [a,b]
distance = 0
tx = px
ty = py
else:
# p on the line, to the left of a
# intersection is a
                    distance = ax - px
tx = ax
ty = ay
else:
pass
else:
pass
    elif alpha == math.pi:
# intersection only if py == ay
if py == ay:
            # intersection only if px > ax
if px > ax:
if px < bx:
# p is on [a,b]
distance = 0
tx = px
ty = py
else:
# p on the line, to the right of b
# intersection is b
                    distance = px - bx
tx = bx
ty = ay
else:
pass
else:
pass
elif (py > ay) and (alpha > 0):
# p is above and facing up
pass
elif (py < ay) and (alpha < 0):
# p is below and facing down
pass
elif (px < ax) and ((alpha > math.pi/2) or (alpha < -math.pi/2)):
# p is to the left and facing left
pass
elif (px > bx) and ((alpha < math.pi/2) and (alpha > -math.pi/2)):
# p is to the right and facing right
pass
else: # general case
# o point on (a,b) with same x coordinate as p
# The triangle (p,o,t) is square in o,
# so op=distance * cos(beta) and ot=distance * sin(beta)
# with beta = pi/2-alpha
beta = math.pi/2-alpha
distance = abs((ay-py)/math.cos(beta))
tx = px + distance*math.sin(beta)
if (ax<tx) and (tx<bx):
ty = ay
else:
distance = math.inf
tx = None
#### DEBUG ####
#print(f" ==> {distance},{tx},{ty}")
return [distance, tx, ty]
###########################################################
# Compute the distance between a ray, defined by a point p and
# an angle alpha, and a vertical segment [a,b]
# distance is math.inf if no intersection
#
# @param px, py coordinates of point p
# @param alpha angle of ray from x axis
# @param ax, ay coordinates of point a
# @param by y coordinate of point b (here, bx=ax)
#
# @return [distance, tx, ty] where t is the intersection point
# None if no intersection
#
###########################################################
def Distance2VSegment(px, py, alpha, ax, ay, bx, by):
#### DEBUG ####
#print(f"Distance2VSegment {px},{py},{alpha},{ax},{ay},{ax},{by}")
# Normalise alpha in ]-pi,pi]
alpha = math.fmod(alpha, 2*math.pi)
# here, alpha is in ]-2pi, 2pi[
if alpha < 0:
alpha = alpha + 2*math.pi
# here, alpha is in [0, 2pi[
if alpha > math.pi:
alpha = alpha - 2*math.pi
# finished, alpha is now in ]-pi, pi]
# set result to None
distance = math.inf
tx = None
ty = None
# if b is under a, swap a and b so ay < by
if by < ay:
cy = ay
ay = by
by = cy
if alpha == math.pi/2:
#### DEBUG
#print(" Case 1 : alpha=90")
# intersection only if px == ax
if px == ax:
            # intersection only if py < by
if py < by:
if py > ay:
#### DEBUG
#print("p is on [a,b]")
# p is on [a,b]
distance = 0
tx = px
ty = py
else:
# p on the line, under a
# intersection is a
#### DEBUG
#print("p is on the line, under a")
                    distance = ay - py
tx = ax
ty = ay
else:
pass
else:
pass
elif alpha == -math.pi/2:
#### DEBUG
#print(" Case 2: alpha=-90")
# intersection only if px == ax
if px == ax:
            # intersection only if py > ay
if py > ay:
if py < by:
# p is on [a,b]
distance = 0
tx = px
ty = py
else:
# p on the line, over b
# intersection is b
                    distance = py - by
tx = ax
ty = by
else:
pass
else:
pass
elif (py > by) and (alpha > 0):
# p is above and facing up
#### DEBUG
#print(" Case 3 : p above segment")
pass
elif (py < ay) and (alpha < 0):
# p is below and facing down
#### DEBUG
#print(" Case 4 : p below segment")
pass
elif (px < ax) and ((alpha > math.pi/2) or (alpha < -math.pi/2)):
# p is to the left and facing left
#### DEBUG
#print(" Case 5 : p to the left")
pass
elif (px > ax) and ((alpha < math.pi/2) and (alpha > -math.pi/2)):
# p is to the right and facing right
#### DEBUG
#print(" Case 6 : p to the right")
pass
else: # general case
# o point on (a,b) with same y coordinate as p
# The triangle (p,o,t) is square in o,
# so op=distance * cos(alpha) and ot=distance * sin(alpha)
distance = abs((ax-px)/math.cos(alpha))
ty = py + distance*math.sin(alpha)
#### DEBUG
#print(f" Case 7 : good case, ty={ty}")
if (ay<ty) and (ty<by):
#### DEBUG
#print(" Case 7a : t on segment")
tx = ax
else:
#### DEBUG
#print(" Case 7b : t not on segment")
distance = math.inf
ty = None
#### DEBUG ####
#print(f" ==> {distance},{tx},{ty}")
return [distance, tx, ty]

###########################################################
#
###########################################################
class Robot:
simu_time_step = 0.1 # time step in seconds
wheels_distance = 100 # distance between wheels, in mm
#######################################################
def __init__(self, maze):
self.maze = maze
self.current_pos_x = 0
self.current_pos_y = 0
self.current_heading = 0
self.StartPosition()
#######################################################
#
#######################################################
def StartPosition(self):
if self.maze.generated == False or self.maze.start_H:
# 1st cell output on the right
self.current_heading = 0
self.current_pos_x = Robot.wheels_distance/2 + Maze.wall_width
self.current_pos_y = (Maze.cell_width - Maze.wall_width)/2
else:
# 1st cell output on the top
self.current_heading = math.pi/2
self.current_pos_x = (Maze.cell_width - Maze.wall_width)/2
self.current_pos_y = Robot.wheels_distance/2 + Maze.wall_width
#######################################################
# RunStep
# compute new position from speed and curvature
# speed is set for external wheel. It has main impact for high curvature values.
# curvature is > 0 when turning right (clockwise).
# curvature = 0 for straight forward, +/-math.inf for on place turn
#######################################################
def RunStep(self, speed, curvature):
distance = speed*self.simu_time_step
if curvature == 0:
            dx = distance * math.cos(self.current_heading)
            dy = distance * math.sin(self.current_heading)
self.current_pos_x += dx
self.current_pos_y += dy
        elif curvature == math.inf: # clockwise on place rotation, change only heading
            # radius of curve circle for external wheel
            radius_ext = Robot.wheels_distance
            # rotation angle
            angle = distance / radius_ext
            # new heading
            self.current_heading = self.current_heading - angle
        elif curvature == -math.inf: # counterclockwise on place rotation, change only heading
            # radius of curve circle for external wheel
            radius_ext = Robot.wheels_distance
            # rotation angle
            angle = distance / radius_ext
            # new heading
            self.current_heading = self.current_heading + angle
else: # general case, change heading and center position
            # radius of curve circle for center of robot
            radius = 1/curvature
            # radius of curve circle for external wheel
            radius_ext = radius + Robot.wheels_distance
# unit vector pointed on heading
u_x = math.cos(self.current_heading)
u_y = math.sin(self.current_heading)
# unit vector pointing to center of curve circle, orthogonal to u
if curvature > 0:
# rotate to left, otrhogonal is -pi/2 rotation
uorth_x = u_y
uorth_y = -u_x
else:
# rotate to right, otrhogonal is pi/2 rotation
uorth_x = -u_y
uorth_y = u_x
# center of circle
o_x = self.current_pos_x + radius * uorth_x
            o_y = self.current_pos_y + radius * uorth_y
# position of external wheel
R_x = o_x - radius_ext*u_x
R_y = o_y - radius_ext*u_y
# rotation angle
angle = distance / radius_ext
            if curvature > 0:
                # new heading
                self.current_heading = self.current_heading - angle
                # new unit vector
                u_x = math.cos(self.current_heading)
                u_y = math.sin(self.current_heading)
                # new orthogonal unit vector
                uorth_x = u_y
                uorth_y = -u_x
            else:
                # new heading
                self.current_heading = self.current_heading + angle
                # new unit vector
                u_x = math.cos(self.current_heading)
                u_y = math.sin(self.current_heading)
                # new orthogonal unit vector
                uorth_x = -u_y
                uorth_y = u_x
# new center position
self.current_pos_x = o_x - radius*uorth_x
self.current_pos_y = o_y - radius*uorth_y
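        # Worked example (numbers are illustrative, not taken from the project):
        # with simu_time_step=0.1 s, speed=100 mm/s and curvature=0, one call to
        # RunStep advances the robot by 10 mm along its current heading; with
        # curvature=math.inf the position is unchanged and the heading decreases
        # by distance/radius_ext radians (a clockwise on-place rotation).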
#######################################################
# WallDistance_unit_H
# return distance to a H wall in maze
#
#######################################################
def WallDistance_unit_H(self, angle, i, j):
# coordinates of extremities of wall
ax = i * Maze.cell_width
ay = j * Maze.cell_width
bx = ax + Maze.cell_width
by = ay
# distance to top segment
sax = ax
say = ay + Maze.wall_width/2
sbx = bx
sby = by + Maze.wall_width/2
[dmin, tx, ty] = Distance2HSegment(self.current_pos_x, self.current_pos_y, angle, sax, say, sbx, sby)
# distance to bottom segment
sax = ax
say = ay - Maze.wall_width/2
sbx = bx
sby = by - Maze.wall_width/2
[d, ttx, tty] = Distance2HSegment(self.current_pos_x, self.current_pos_y, angle, sax, say, sbx, sby)
if d < dmin:
dmin = d
tx = ttx
ty = tty
# distance to left segment
sax = ax
say = ay + Maze.wall_width/2
sbx = ax
sby = ay - Maze.wall_width/2
[d, ttx, tty] = Distance2VSegment(self.current_pos_x, self.current_pos_y, angle, sax, say, sbx, sby)
if d < dmin:
dmin = d
tx = ttx
ty = tty
# distance to right segment
sax = bx
say = by + Maze.wall_width/2
sbx = bx
sby = by - Maze.wall_width/2
[d, ttx, tty] = Distance2VSegment(self.current_pos_x, self.current_pos_y, angle, sax, say, sbx, sby)
if d < dmin:
dmin = d
tx = ttx
ty = tty
return [dmin, tx, ty]
#######################################################
# WallDistance_unit_V
    # return distance to a V wall in maze
#
#######################################################
def WallDistance_unit_V(self, angle, i, j):
# coordinates of extremities of wall
ax = i * Maze.cell_width
ay = j * Maze.cell_width
bx = ax
by = ay + Maze.cell_width
# distance to top segment
sax = bx - Maze.wall_width/2
say = by
sbx = bx + Maze.wall_width/2
sby = by
[dmin, tx, ty] = Distance2HSegment(self.current_pos_x, self.current_pos_y, angle, sax, say, sbx, sby)
# distance to bottom segment
sax = ax - Maze.wall_width/2
say = ay
sbx = ax + Maze.wall_width/2
sby = ay
[d, ttx, tty] = Distance2HSegment(self.current_pos_x, self.current_pos_y, angle, sax, say, sbx, sby)
if d < dmin:
dmin = d
tx = ttx
ty = tty
# distance to left segment
sax = ax - Maze.wall_width/2
say = ay
sbx = bx - Maze.wall_width/2
sby = by
[d, ttx, tty] = Distance2VSegment(self.current_pos_x, self.current_pos_y, angle, sax, say, sbx, sby)
if d < dmin:
dmin = d
tx = ttx
ty = tty
# distance to right segment
sax = ax + Maze.wall_width/2
say = ay
sbx = bx + Maze.wall_width/2
sby = by
[d, ttx, tty] = Distance2VSegment(self.current_pos_x, self.current_pos_y, angle, sax, say, sbx, sby)
if d < dmin:
dmin = d
tx = ttx
ty = tty
return [dmin, tx, ty]
#######################################################
# WallDistance
# return distance to nearest wall in maze
#
#######################################################
def WallDistance(self, sensor_angle):
dmin = math.inf
txmin = math.inf
tymin = math.inf
imin = math.inf
jmin = math.inf
angle = self.current_heading + sensor_angle
for i in range(0, self.maze.cellnb_x):
for j in range(0, self.maze.cellnb_y):
# distance to horizontal walls
if self.maze.HWalls[self.maze.ij2n(i,j)] == True:
[d, tx, ty] = self.WallDistance_unit_H(angle, i, j)
if d < dmin:
dmin = d
imin = i
jmin = j
txmin = tx
tymin = ty
# distance to vertical walls
if self.maze.VWalls[self.maze.ij2n(i,j)] == True:
[d, tx, ty] = self.WallDistance_unit_V(angle, i, j)
if d < dmin:
dmin = d
imin = i
jmin = j
txmin = tx
tymin = ty
# distance to horizontal walls on last line
for i in range(0, self.maze.cellnb_x):
j = self.maze.cellnb_y
[d, tx, ty] = self.WallDistance_unit_H(angle, i, j)
if d < dmin:
dmin = d
imin = i
jmin = j
txmin = tx
tymin = ty
# distance to vertical walls on last column
for j in range(0, self.maze.cellnb_y):
i = self.maze.cellnb_x
[d, tx, ty] = self.WallDistance_unit_V(angle, i, j)
if d < dmin:
dmin = d
imin = i
jmin = j
txmin = tx
tymin = ty
return [dmin, txmin, tymin, imin, jmin]
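# Minimal self-check of the ray/segment helpers, run only when this module is
# executed directly (the values below were chosen by hand, not taken from the project):
if __name__ == '__main__':
    # Ray from the origin pointing straight up should hit the horizontal
    # segment y=5, -1<=x<=1 at (0, 5), i.e. at distance 5.
    print(Distance2HSegment(0, 0, math.pi/2, -1, 5, 1, 5))
    # Ray from the origin pointing along +x should hit the vertical
    # segment x=5, -1<=y<=1 at (5, 0), i.e. at distance 5.
    print(Distance2VSegment(0, 0, 0, 5, -1, 5, 1))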
|
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_katex
import numpy as np
import plotly.express as px
from scipy import stats
from app import app
layout = html.Div([
dash_katex.DashKatex(
expression=r'''
f_X(x) = \frac{1}{\sqrt{2\pi\sigma^2}}
\exp\left(-\frac{(x - \mu)^2}{2\sigma^2}\right)
''',
displayMode=True
),
dcc.Graph(id='normal_graph'),
dash_katex.DashKatex(expression=r'\mu'),
dcc.Slider(
id='normal_mean',
value=0,
min=-5,
max=5,
marks={x: str(x) for x in range(-5, 6)},
step=0.01,
tooltip={'placement': 'top'}
),
dash_katex.DashKatex(expression=r'\sigma^2'),
dcc.Slider(
id='normal_variance',
value=1,
min=0.01,
max=10,
marks={x: str(x) for x in range(11)},
step=0.01,
tooltip={'placement': 'top'}
)
])
@app.callback(
Output('normal_graph', 'figure'),
[Input('normal_mean', 'value'),
Input('normal_variance', 'value')]
)
def plot(mean, variance):
std = np.sqrt(variance)
x = np.linspace(-10, 10, 1000)
y = stats.norm.pdf(x, mean, std)
range_x = [-10, 10]
range_y = [-0.2, max(1.2, max(y) + 0.2)]
figure = px.line(x=x, y=y, range_x=range_x, range_y=range_y)
return figure
|
from http import server
import os
from plumbum import local, ProcessExecutionError
import sys
from webbrowser import open_new_tab
from .utils import M
test_runner = local['pytest']
linter = local['pylint']
type_checker = local['mypy']
@M.command()
def devserver(port=8080):
os.chdir(os.path.dirname(os.path.dirname(__file__))+'/web')
open_new_tab("http://localhost:{}/".format(port))
server.test(HandlerClass=server.SimpleHTTPRequestHandler, ServerClass=server.HTTPServer, port=port)
@M.command()
def run(tests=None):
with local.env(PYTHONPATH='./src:./'):
if tests is None:
tests='tests'
args = (tests,)
else:
tests = 'tests/brython_jinja2/test_'+tests+'.py'
args = (tests,'--fulltrace', '--pdb')
try:
test_runner.run(args, stdout=sys.stdout, stderr=sys.stderr)
except:
pass
@M.command()
def lint(report=False):
with local.env(PYTHONPATH='./src:./tests/brython/'):
try:
if not report:
linter("--reports=n", "brython_jinja2", stdout=sys.stdout, stderr=sys.stderr)
else:
linter("brython_jinja2", stdout=sys.stdout, stderr=sys.stderr)
except ProcessExecutionError:
exit(1)
@M.command()
def check():
with local.env(MYPYPATH='../tests/brython/:../'):
with local.cwd('./src'):
try:
                type_checker("--ignore-missing-imports", "-p", "brython_jinja2", stdout=sys.stdout, stderr=sys.stderr)
except ProcessExecutionError:
exit(1)
|
# Copyright (c) 2017 Christoph Landgraf. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import contextlib
import functools
import itertools
import math
import signal
import sys
import traceback
import cui
import cui.term.curses
from cui import buffers
from cui.term import Frame
from cui.keymap import WithKeymap
from cui.util import deep_get, deep_put, forward
from cui.colors import ColorCore, ColorException
from cui.meta import Singleton, combine_meta_classes
from cui.io_selector import IOSelector
__all__ = ['init_func', 'Core']
# =================================== API ======================================
# Package Lifecycle
def core_api(_globals, fn_name, keychords=None):
wrapped_fn = functools.wraps(getattr(Core, fn_name))(
(lambda *args, **kwargs: getattr(Core(), fn_name)(*args, **kwargs)))
if keychords:
Core.set_keychord(keychords, wrapped_fn)
_globals[fn_name] = wrapped_fn
return wrapped_fn
@contextlib.contextmanager
def core_api_ns(_globals):
def _core_api(*args, **kwargs):
core_api(_globals, *args, **kwargs)
yield _core_api
@contextlib.contextmanager
def context():
yield Core()
def init_func(fn):
"""
Decorator that marks a function as an initialization function.
Functions decorated with init_func will be run after the first
terminal has been initialized, and the init-file has been
read. The function has_run may be used to determine if a function
has been run.
"""
fn.__has_run__ = False
@functools.wraps(fn)
def wrapper_fn(*args, **kwargs):
if fn.__has_run__:
cui.message('Warning: executing init_func %s more than once.' % fn)
result = fn(*args, **kwargs)
fn.__has_run__ = True
return result
Core.__init_functions__.append(wrapper_fn)
return wrapper_fn
def post_init_func(fn):
"""
Decorator that marks a function as a post-initialization function.
Functions decorated with post_init_func will be executed after all init_funcs
have been executed. The function has_run may be used to determine if a function
has been run.
"""
fn.__has_run__ = False
@functools.wraps(fn)
def wrapper_fn(*args, **kwargs):
if fn.__has_run__:
cui.message('Warning: executing post_init_func %s more than once.' % fn)
result = fn(*args, **kwargs)
fn.__has_run__ = True
return result
Core.__post_init_functions__.append(wrapper_fn)
return wrapper_fn
def update_func(fn):
Core.__update_functions__.append(fn)
return fn
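# Usage sketch for the lifecycle decorators (function name and message are
# illustrative only, not part of cui itself):
#
#   @init_func
#   def init_my_package():
#       cui.message('my-package initialized')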
# ==============================================================================
class Logger(object):
def __init__(self):
self.messages = []
def log(self, msg):
if (len(self.messages) > 1000):
self.messages.pop(0)
self.messages.append(msg)
def clear(self):
self.messages = []
def echo_area_default():
c = Core()
return (c.last_message,
'%s/%s' % (c._frame._wm.window_set_index + 1, c._frame._wm.window_set_count))
class MiniBuffer(buffers.InputBuffer):
def __init__(self, core):
super(MiniBuffer, self).__init__()
self._core = core
@property
def _buffer(self):
return self._core.mini_buffer_state['buffer']
@_buffer.setter
def _buffer(self, value):
self._core.mini_buffer_state['buffer'] = value
@property
def _cursor(self):
return self._core.mini_buffer_state['cursor']
@_cursor.setter
def _cursor(self, value):
self._core.mini_buffer_state['cursor'] = value
@property
def prompt(self):
return self._core.mini_buffer_state.get('prompt', '')
def on_auto_complete(self):
if self._core.mini_buffer_state['complete_function'] is None:
return super(MiniBuffer, self).on_auto_complete()
return self._core.mini_buffer_state['complete_function'](self._buffer)
def on_send_current_buffer(self, b):
if self._core.mini_buffer_state:
self._core.mini_buffer_state.get('submit_function', lambda _: None)(b)
def get_input_lines(self):
for i, s in enumerate(self._core.mini_buffer_states):
yield self.get_buffer_line(s['prompt'],
s['buffer'],
s['cursor'],
show_cursor=(i == 0))
def get_echo_area(self, window):
left, right = self._core.echo_area
left = left.split('\n', 1)[0]
right = right.split('\n', 1)[0]
space = (window.columns - len(left) - len(right))
if space < 0:
left = left[:(space - 4)] + '... '
return [left, ' ' * max(0, space), right]
def get_lines(self, window):
yield from itertools.islice(self.get_input_lines(), window.rows - 1)
yield self.get_echo_area(window)
# Runloop Control
class RunloopControl(Exception):
def __init__(self):
super(RunloopControl, self).__init__()
class RunloopCancel(RunloopControl):
pass
class RunloopExit(RunloopControl):
pass
class RunloopResult(RunloopControl):
def __init__(self, result):
super(RunloopResult, self).__init__()
self.result = result
class RunloopState(object):
def __init__(self):
self.running = False
self.current_keychord = []
self.on_exit = []
self.mini_buffer_state = None
def runloop_cancel():
"""
Cancels the current runloop
"""
raise RunloopCancel()
def runloop_result(result):
raise RunloopResult(result)
def interactive(*args, **kwargs):
def _interactive(fn):
fn.__cui_interactive_args__ = args
fn.__cui_interactive_kwargs__ = kwargs
return fn
return _interactive
# TODO can core._interactive be put into runloop_state???
@contextlib.contextmanager
def _interactive_context(handle_cancel):
"""
This contextmanager ensures that RunloopCancel is always
caught by the top-level interactive command that is executed
"""
interactive_set = Core().set_interactive(True)
try:
if interactive_set or handle_cancel:
try:
yield
except RunloopCancel:
Core().message('Interactive cancelled.')
else:
yield
finally:
if interactive_set:
Core().set_interactive(False)
def run_interactive(fn, handle_cancel=False):
args = getattr(fn, '__cui_interactive_args__', [])
kwargs = getattr(fn, '__cui_interactive_kwargs__', {})
with _interactive_context(handle_cancel):
return fn(*[arg() for arg in args],
**{kwarg: kwargs[kwarg]() for kwarg in kwargs})
@forward(lambda self: self._frame,
['replace_buffer',
'new_window_set', 'has_window_set', 'delete_window_set', 'delete_window_set_by_name',
'next_window_set', 'previous_window_set',
'find_window', 'select_window', 'select_next_window', 'select_previous_window',
'select_left_window', 'select_right_window', 'select_top_window', 'select_bottom_window',
'delete_selected_window', 'delete_all_windows',
'split_window_below', 'split_window_right', 'selected_window'],
Frame)
class Core(WithKeymap,
ColorCore,
metaclass=combine_meta_classes(Singleton, WithKeymap.__class__)):
__init_functions__ = []
__post_init_functions__ = []
__update_functions__ = []
def __init__(self):
super(Core, self).__init__()
self._init_state()
self.logger = Logger()
self.io_selector = IOSelector(timeout=None, as_update_func=False)
self.buffers = []
self._mini_buffer = MiniBuffer(self)
self._exit_handlers = []
self._last_message = ""
self._frame = None
self._removed_update_funcs = []
self._runloops = []
self._running = False
self._interactive = False
atexit.register(self._at_exit)
def _init_state(self):
self._state = {}
self.def_variable(['tab-stop'], 4)
self.def_variable(['tree-tab'], 2)
self.def_variable(['echo-area'], echo_area_default)
from cui.buffers_std import LogBuffer
self.def_variable(['default-buffer-class'], LogBuffer)
def message(self, msg, show_log=True, log_message=None):
"""
Display a message in the echo area and log it.
:param msg: The message to be displayed
:param show_log: Set to False, to avoid appending the message to the log
:param log_message: Provide an alternative text for appending to the log
"""
self._last_message = msg
if log_message:
self.logger.log(log_message)
elif show_log:
self.logger.log(msg)
def exception(self):
"""
        Call to log the last thrown exception.
"""
exc_type, exc_value, exc_tb = sys.exc_info()
cui.message(traceback.format_exception_only(exc_type, exc_value)[-1],
log_message=traceback.format_exc())
def get_buffer(self, buffer_class, *args):
buffer_name = buffer_class.name(*args)
buffers = list(filter(lambda b: b.buffer_name() == buffer_name, # XXX python3
self.buffers))
if len(buffers) > 1:
raise Exception('Error: multiple buffers with same buffer_name')
elif len(buffers) == 0:
return None
else:
return buffers[0]
def get_buffers(self, buffer_class, predicate=None):
return list(filter(lambda b: (type(b) is buffer_class and
(predicate is None or predicate(b))),
self.buffers))
@property
def mini_buffer(self):
return self._mini_buffer
@property
def mini_buffer_state(self):
return self._runloops[0].mini_buffer_state
@property
def mini_buffer_states(self):
return filter(lambda s: s is not None,
map(lambda rl: rl.mini_buffer_state,
self._runloops))
def create_buffer(self, buffer_class, *args):
buffer_object = self.get_buffer(buffer_class, *args)
if buffer_object == None:
buffer_object = buffer_class(*args)
self.buffers.insert(0, buffer_object)
return buffer_object
def select_buffer(self, buffer_object):
if buffer_object:
self.selected_window().set_buffer(buffer_object)
def _find_next_buffer(self, buffer_object):
return self.buffers[(self.buffers.index(buffer_object) + 1) % len(self.buffers)]
def _find_previous_buffer(self, buffer_object):
return self.buffers[(self.buffers.index(buffer_object) - 1)]
def previous_buffer(self):
"""
Switch to the previous buffer in the selected window.
"""
selected_window = self.selected_window()
selected_window.set_buffer(self._find_previous_buffer(selected_window.buffer()))
def next_buffer(self):
"""
Switch to the next buffer in the selected window.
"""
selected_window = self.selected_window()
selected_window.set_buffer(self._find_next_buffer(selected_window.buffer()))
def kill_buffer_object(self, buffer_object):
self.replace_buffer(buffer_object, self._find_next_buffer(buffer_object))
self.buffers.remove(buffer_object)
if len(self.buffers) == 0: # Ensure we always have a buffer available
cui.switch_buffer(self.get_variable('default-buffer-class'))
def current_buffer(self, no_minibuffer=False):
"""
Return the buffer in the selected window.
:param no_minibuffer: Set to ``True`` if minibuffer should be excluded.
"""
return \
self._mini_buffer \
if self.mini_buffer_state and not no_minibuffer else \
self.selected_window().buffer()
def get_variable(self, path):
return deep_get(self._state, path, return_none=False)
def def_variable(self, path, value=None):
deep_put(self._state, path, value, create_path=True)
def set_variable(self, path, value=None):
deep_put(self._state, path, value, create_path=False)
def add_exit_handler(self, handler_fn):
self._exit_handlers.append(handler_fn)
def remove_exit_handler(self, handler_fn):
self._exit_handlers.remove(handler_fn)
def running(self):
return self._running
def _run_exit_handlers(self):
while len(self._exit_handlers):
try:
self._exit_handlers.pop()()
except:
self.logger.log(traceback.format_exc())
def _at_exit(self):
self._run_exit_handlers()
for log_item in self.logger.messages:
print(log_item)
def _init_packages(self):
for fn in Core.__init_functions__:
try:
fn()
except:
self.message('init-function %s failed:\n%s'
% (fn.__name__, traceback.format_exc()))
def _post_init_packages(self):
for fn in Core.__post_init_functions__:
try:
fn()
except:
self.message('post-init-function %s failed:\n%s'
% (fn.__name__, traceback.format_exc()))
def remove_update_func(self, fn):
self._removed_update_funcs.append(fn)
def is_update_func(self, fn):
return fn in self.__update_functions__
def _update_packages(self):
# Process removed update functions
while self._removed_update_funcs:
self.__update_functions__.remove(
self._removed_update_funcs.pop(0))
for fn in Core.__update_functions__:
try:
fn()
except:
self.message('update-function %s failed:\n%s'
% (fn.__name__, traceback.format_exc()))
def _update_ui(self):
# Reset on_pre_render-flag on all buffers
for buf in self.buffers:
buf.on_pre_render_called = False
self._frame.render()
@property
def last_message(self):
return self._last_message
@property
def echo_area(self):
return self.get_variable(['echo-area'])()
def bye(self):
"""
Exit cui.
"""
raise RunloopExit()
def input_delegate(self):
return self.current_buffer()
def takes_input(self):
return self.current_buffer().takes_input
def dispatch_input(self, keychord, is_input):
rl = self._runloops[0]
if keychord == 'C-g':
runloop_cancel()
else:
try:
rl.current_keychord.append(keychord)
fn = self.handle_input(rl.current_keychord)
if hasattr(fn, '__call__'):
# current_keychord was handled via keymap
run_interactive(fn, handle_cancel=True)
rl.current_keychord = []
elif is_input and len(rl.current_keychord) == 1:
# kc is direct input that was not handled and not beginning of sequence
self.current_buffer().insert_chars(keychord)
rl.current_keychord = []
elif not fn:
                    # current_keychord is not a prefix of any defined keychord
self.message('Unknown keychord: %s' % ' '.join(rl.current_keychord),
show_log=False)
rl.current_keychord = []
else:
self.message(' '.join(rl.current_keychord), show_log=False)
except RunloopResult:
raise
except RunloopCancel:
raise
except RunloopExit:
raise
except:
cui.exception()
rl.current_keychord = []
@property
def minibuffer_height(self):
return len(list(filter(lambda rl: rl.mini_buffer_state is not None,
self._runloops))) + 1
def activate_minibuffer(self, prompt, submit_fn, default='', complete_fn=None, exit_fn=None):
mini_buffer_id = ('minibuffer-%s'
% len(list(filter(lambda rl: rl.mini_buffer_state is not None,
self._runloops))))
_complete_fn = None if complete_fn is None else functools.partial(complete_fn, mini_buffer_id)
_exit_fn = None if exit_fn is None else functools.partial(exit_fn, mini_buffer_id)
self._runloops[0].mini_buffer_state = {
'id': mini_buffer_id,
'prompt': prompt,
'buffer': '',
'cursor': 0,
'submit_function': submit_fn,
'complete_function': _complete_fn,
}
if exit_fn:
self._runloops[0].on_exit.append(_exit_fn)
self.mini_buffer.reset_buffer(default)
def set_interactive(self, interactive):
has_set = not self._interactive and interactive
self._interactive = interactive
return has_set
def runloop_enter(self, pre_loop_fn=None):
self._runloops.insert(0, RunloopState())
self._runloops[0].running = True
if pre_loop_fn:
pre_loop_fn()
result = None
try:
while self._runloops[0].running:
self._update_ui()
self.io_selector.select()
except RunloopResult as e:
result = e.result
except RunloopCancel:
# In interactive mode top-level interactive handles cancel
if self._interactive:
raise
self.message('Cancelled.')
finally:
self.io_selector.invalidate()
while self._runloops[0].on_exit:
self._runloops[0].on_exit.pop(0)()
self._runloops.pop(0)
return result
def runloop_level(self):
return len(self._runloops)
def run(self):
self.buffers.append(self.get_variable(['default-buffer-class'])())
self._frame = cui.term.curses.Frame(self)
self._init_packages()
self._post_init_packages()
try:
self._running = True
while True:
self.runloop_enter()
except RunloopExit:
self.message('Exiting.')
finally:
self._running = False
self._run_exit_handlers()
|
from .inspect_runtime import get_paths_containing_string_in_locals, __eval_all_locators, \
get_all_paths_containing_string, get_all_categorized_paths_containing_string, get_all_paths_containing_string_in_nested_objects, ValueFinder
import pytest
def test_eval_all_locators(test_method_inside_locals_scope=None):
    # Create some local var, methods and references to them.
# Expected Result is they are found inside of locals()
# Methods Under test: get_paths_containing_string_in_locals, eval
# Setup
local_test_var = "teststring"
def testmethodname():
pass
testmethodref = testmethodname.__name__
# Search the local runtime for all instances of your string
locals_filtered_by_string = get_paths_containing_string_in_locals(local_test_var, locals_dict=locals())
locals_filtered_by_string.extend(get_paths_containing_string_in_locals(testmethodref, locals_dict=locals()))
# Test the results through a function that exists in the same namespace that the variables were gathered from.
# The method to accomplish this was make the inspect_runtime.eval_all_locators function return executable code
# that could be run within any other namespace.
# More details - the below exec assigns to a variable. This requires that a variable of the
# same name can NOT previously exist, else the exec will fail to overwrite the value in the locals() namespace.
# The exec function can write to the locals scope, but not the function scope (For some
# reason unknown to me at this time). The variable can only be referenced through locals()['mykey']. BUT,
    # if a variable with the exact same name already exists in the function scope, any attempts to create/modify a
    # var of the same name in the locals scope will fail silently. The value that was set in the function's
# scope gets priority and makes it immutable to the exec function. So for those reasons, return_exec_key is used
# to store a string of the same name as the variable that will later store the return value of the exec function.
return_exec_key = "return_exec_testname"
if test_method_inside_locals_scope:
return_exec_function = __eval_all_locators(locals_filtered_by_string, return_exec=False)
else:
return_exec_function = __eval_all_locators(locals_filtered_by_string, return_exec=True, return_exec_name=return_exec_key)
exec(return_exec_function)
assert len(locals()[return_exec_key]) > 0
assert len([x for x in locals()[return_exec_key] if local_test_var in str(x)]) > 0
assert len([x for x in locals()[return_exec_key] if testmethodref in str(x)]) > 0
assert len([x for x in locals()[return_exec_key] if callable(x)]) > 0
def test_get_valuefinders_rawlist():
assert str(get_all_paths_containing_string("module", locals(), [dict, tuple])).__contains__("module")
def test_get_valuefinders_categorized(populate_locals):
testclass = "newval"
assert len(get_all_categorized_paths_containing_string(testclass, locals(), [dict, tuple]).frames) > 0
assert len(get_all_categorized_paths_containing_string(TestClass.testval, locals(), [TestClass]).inspections) > 0
assert len(get_all_categorized_paths_containing_string(testclass, locals(), [TestClass]).locals) > 0
def locator_test_helper(object_ut, target_str, expected_fail=False):
result = []
result = get_all_paths_containing_string_in_nested_objects(object_ut, target_str, _result=result)
if not expected_fail:
assert len(result) > 0
for x in result:
try:
assert str(eval("object_ut" + x.locator)).__contains__(target_str)
except:
print(str(x.value) + " not found at " + str(x.locator) + " Was this value found in a different scope?")
def test_locators_are_returning_desired_values():
object_ut = TestClass()
locator_test_helper(object_ut, "TestClass")
# target string exists in the second level of attributes ie.. x.__class__.__func__
level2_target_str = "findme_in_second_level_of_attributes"
object_ut.possiblepaths_source_class_or_module = level2_target_str
locator_test_helper(object_ut, level2_target_str)
# target string does not exist
locator_test_helper(object_ut, "notoutthere", expected_fail=True)
class TestClass:
possiblepaths_source_class_or_module = None
testval = "testval"
@pytest.fixture
def populate_locals():
local_test_var = "teststring"
def testmethodname():
pass
testmethodref = testmethodname.__name__
|
#
# PySNMP MIB module TIMETRA-DOT3-OAM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TIMETRA-DOT3-OAM-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:09:59 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion")
dot3OamEntry, dot3OamLoopbackEntry, dot3OamPeerMacAddress = mibBuilder.importSymbols("DOT3-OAM-MIB", "dot3OamEntry", "dot3OamLoopbackEntry", "dot3OamPeerMacAddress")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
NotificationType, Counter32, iso, Bits, IpAddress, MibIdentifier, ModuleIdentity, Unsigned32, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Gauge32, Counter64, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Counter32", "iso", "Bits", "IpAddress", "MibIdentifier", "ModuleIdentity", "Unsigned32", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Gauge32", "Counter64", "Integer32")
TruthValue, TextualConvention, RowStatus, TimeStamp, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "RowStatus", "TimeStamp", "DisplayString")
tmnxSRConfs, tmnxSRNotifyPrefix, timetraSRMIBModules, tmnxSRObjs = mibBuilder.importSymbols("TIMETRA-GLOBAL-MIB", "tmnxSRConfs", "tmnxSRNotifyPrefix", "timetraSRMIBModules", "tmnxSRObjs")
timetraDOT3OAMMIBModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 6527, 1, 1, 3, 42))
timetraDOT3OAMMIBModule.setRevisions(('1908-07-01 00:00', '1908-01-01 00:00', '2006-08-01 00:00',))
if mibBuilder.loadTexts: timetraDOT3OAMMIBModule.setLastUpdated('0807010000Z')
if mibBuilder.loadTexts: timetraDOT3OAMMIBModule.setOrganization('Alcatel-Lucent')
tmnxDot3OamObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42))
tmnxDot3OamMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 42))
tmnxDot3OamEntryObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 1))
tmnxDot3OamLoopbackObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 2))
tmnxDot3OamNotifyPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 42))
tmnxDot3OamNotificationsPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 42, 42))
tmnxDot3OamNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 42, 42, 0))
tmnxDot3OamTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 1, 1), )
if mibBuilder.loadTexts: tmnxDot3OamTable.setStatus('current')
tmnxDot3OamEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 1, 1, 1), )
dot3OamEntry.registerAugmentions(("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamEntry"))
tmnxDot3OamEntry.setIndexNames(*dot3OamEntry.getIndexNames())
if mibBuilder.loadTexts: tmnxDot3OamEntry.setStatus('current')
tmnxDot3OamLastChanged = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 1, 1, 1, 1), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxDot3OamLastChanged.setStatus('current')
tmnxDot3OamInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 1, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 600)).clone(10)).setUnits('100s of milliseconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxDot3OamInterval.setStatus('current')
tmnxDot3OamMultiplier = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 1, 1, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(2, 5)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxDot3OamMultiplier.setStatus('current')
tmnxDot3OamTunneling = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 1, 1, 1, 4), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxDot3OamTunneling.setStatus('current')
tmnxDot3OamLooped = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 1, 1, 1, 5), TruthValue().clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxDot3OamLooped.setStatus('current')
tmnxDot3OamHoldTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 1, 1, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 50))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxDot3OamHoldTime.setStatus('current')
tmnxDot3OamLoopbackTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 2, 1), )
if mibBuilder.loadTexts: tmnxDot3OamLoopbackTable.setStatus('current')
tmnxDot3OamLoopbackEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 2, 1, 1), )
dot3OamLoopbackEntry.registerAugmentions(("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamLoopbackEntry"))
tmnxDot3OamLoopbackEntry.setIndexNames(*dot3OamLoopbackEntry.getIndexNames())
if mibBuilder.loadTexts: tmnxDot3OamLoopbackEntry.setStatus('current')
tmnxDot3OamLoopbackLastChanged = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 2, 1, 1, 1), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxDot3OamLoopbackLastChanged.setStatus('current')
tmnxDot3OamLoopbackLocalStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 42, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("noLoopback", 1), ("localLoopback", 2))).clone('noLoopback')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxDot3OamLoopbackLocalStatus.setStatus('current')
tmnxDot3OamPeerChanged = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 42, 42, 0, 1)).setObjects(("DOT3-OAM-MIB", "dot3OamPeerMacAddress"))
if mibBuilder.loadTexts: tmnxDot3OamPeerChanged.setStatus('current')
tmnxDot3OamLoopDetected = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 42, 42, 0, 2)).setObjects(("IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: tmnxDot3OamLoopDetected.setStatus('current')
tmnxDot3OamLoopCleared = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 42, 42, 0, 3)).setObjects(("IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: tmnxDot3OamLoopCleared.setStatus('current')
tmnxDot3OamMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 42, 1))
tmnxDot3OamMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 42, 2))
tmnxDot3OamMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 42, 1, 1)).setObjects(("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamGroup"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamLoopbackGroup"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamNotificationGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxDot3OamMIBCompliance = tmnxDot3OamMIBCompliance.setStatus('obsolete')
tmnxDot3OamMIBV6v0Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 42, 1, 2)).setObjects(("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamV6v0Group"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamLoopbackGroup"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamNotificationV6v0Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxDot3OamMIBV6v0Compliance = tmnxDot3OamMIBV6v0Compliance.setStatus('obsolete')
tmnxDot3OamMIBV6v1Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 42, 1, 3)).setObjects(("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamV6v0Group"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamV6v1Group"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamLoopbackGroup"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamNotificationV6v0Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxDot3OamMIBV6v1Compliance = tmnxDot3OamMIBV6v1Compliance.setStatus('current')
tmnxDot3OamGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 42, 2, 1)).setObjects(("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamLastChanged"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamInterval"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamMultiplier"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamTunneling"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxDot3OamGroup = tmnxDot3OamGroup.setStatus('obsolete')
tmnxDot3OamLoopbackGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 42, 2, 2)).setObjects(("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamLoopbackLastChanged"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamLoopbackLocalStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxDot3OamLoopbackGroup = tmnxDot3OamLoopbackGroup.setStatus('current')
tmnxDot3OamNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 42, 2, 3)).setObjects(("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamPeerChanged"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxDot3OamNotificationGroup = tmnxDot3OamNotificationGroup.setStatus('obsolete')
tmnxDot3OamNotificationV6v0Group = NotificationGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 42, 2, 4)).setObjects(("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamPeerChanged"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamLoopDetected"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamLoopCleared"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxDot3OamNotificationV6v0Group = tmnxDot3OamNotificationV6v0Group.setStatus('current')
tmnxDot3OamV6v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 42, 2, 5)).setObjects(("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamLastChanged"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamInterval"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamMultiplier"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamTunneling"), ("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamLooped"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxDot3OamV6v0Group = tmnxDot3OamV6v0Group.setStatus('current')
tmnxDot3OamV6v1Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 42, 2, 6)).setObjects(("TIMETRA-DOT3-OAM-MIB", "tmnxDot3OamHoldTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxDot3OamV6v1Group = tmnxDot3OamV6v1Group.setStatus('current')
mibBuilder.exportSymbols("TIMETRA-DOT3-OAM-MIB", PYSNMP_MODULE_ID=timetraDOT3OAMMIBModule, tmnxDot3OamLoopbackLocalStatus=tmnxDot3OamLoopbackLocalStatus, tmnxDot3OamPeerChanged=tmnxDot3OamPeerChanged, tmnxDot3OamMIBV6v0Compliance=tmnxDot3OamMIBV6v0Compliance, tmnxDot3OamTable=tmnxDot3OamTable, tmnxDot3OamMultiplier=tmnxDot3OamMultiplier, tmnxDot3OamNotificationGroup=tmnxDot3OamNotificationGroup, tmnxDot3OamLoopbackObjs=tmnxDot3OamLoopbackObjs, timetraDOT3OAMMIBModule=timetraDOT3OAMMIBModule, tmnxDot3OamLoopbackGroup=tmnxDot3OamLoopbackGroup, tmnxDot3OamInterval=tmnxDot3OamInterval, tmnxDot3OamV6v1Group=tmnxDot3OamV6v1Group, tmnxDot3OamLoopbackEntry=tmnxDot3OamLoopbackEntry, tmnxDot3OamEntry=tmnxDot3OamEntry, tmnxDot3OamLoopCleared=tmnxDot3OamLoopCleared, tmnxDot3OamMIBConformance=tmnxDot3OamMIBConformance, tmnxDot3OamNotificationsPrefix=tmnxDot3OamNotificationsPrefix, tmnxDot3OamLoopbackLastChanged=tmnxDot3OamLoopbackLastChanged, tmnxDot3OamLoopDetected=tmnxDot3OamLoopDetected, tmnxDot3OamNotifyPrefix=tmnxDot3OamNotifyPrefix, tmnxDot3OamMIBCompliance=tmnxDot3OamMIBCompliance, tmnxDot3OamNotificationV6v0Group=tmnxDot3OamNotificationV6v0Group, tmnxDot3OamLoopbackTable=tmnxDot3OamLoopbackTable, tmnxDot3OamGroup=tmnxDot3OamGroup, tmnxDot3OamEntryObjs=tmnxDot3OamEntryObjs, tmnxDot3OamMIBGroups=tmnxDot3OamMIBGroups, tmnxDot3OamTunneling=tmnxDot3OamTunneling, tmnxDot3OamObjs=tmnxDot3OamObjs, tmnxDot3OamHoldTime=tmnxDot3OamHoldTime, tmnxDot3OamV6v0Group=tmnxDot3OamV6v0Group, tmnxDot3OamLooped=tmnxDot3OamLooped, tmnxDot3OamNotifications=tmnxDot3OamNotifications, tmnxDot3OamMIBCompliances=tmnxDot3OamMIBCompliances, tmnxDot3OamLastChanged=tmnxDot3OamLastChanged, tmnxDot3OamMIBV6v1Compliance=tmnxDot3OamMIBV6v1Compliance)
|
from app import db
class Subcuenta(db.Model):
id_subcuenta = db.Column(db.INTEGER, primary_key=True)
nombre = db.Column(db.String(100), nullable=False, unique=True)
descripcion = db.Column(db.String(200))
id_cuenta = db.Column(db.INTEGER, db.ForeignKey('cuenta.id_cuenta', ondelete='set null'))
cuenta = db.relationship('Cuenta', backref='subcuenta', lazy=True)
def __repr__(self):
return f'Subcuenta {self.nombre}'
def save(self):
if not self.id_subcuenta:
db.session.add(self)
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
@staticmethod
def get_all():
return Subcuenta.query.all()
@staticmethod
def get_by_id(id):
return Subcuenta.query.get(id)
@staticmethod
def get_by_userName(nombre):
return Subcuenta.query.filter_by(nombre=nombre).first() |
import unittest
import pandas as pd
import numpy as np
from tests.common import AbstractTest
from time_blender.core import Generator, ConstantEvent
from time_blender.deterministic_events import ClockEvent, WaveEvent, WalkEvent
class TestClockEvent(AbstractTest):
def setUp(self):
super().setUp()
self.event = ClockEvent()
self.generator = Generator(start_date=self.start_date, end_date=self.end_date)
def test_execute(self):
data = self.generator.generate(self.event)
values = data.values
# values must be increasing
for i in range(1, len(values)):
            self.assertEqual(values[i], values[i - 1] + 1)
class TestWaveEvent(AbstractTest):
def setUp(self):
super().setUp()
self.event = WaveEvent(30, 100)
self.generator = Generator(start_date=self.start_date, end_date=self.end_date)
def test_execute(self):
data = self.generator.generate(self.event)
values = data.values
print(np.mean(values))
self.assertClose(np.mean(values), 0.0, abs_tol=2.0) # centers on zero
self.assertGreater(len([v for v in values if v > 90]), 0) # goes up
        self.assertGreater(len([v for v in values if v < -90]), 0)  # goes down
class TestConstantEvent(AbstractTest):
def setUp(self):
super().setUp()
self.event = ConstantEvent(30)
self.generator = Generator(start_date=self.start_date, end_date=self.end_date)
def test_execute(self):
data = self.generator.generate(self.event)
values = data.values
for v in values:
            self.assertEqual(v, 30)
class TestWalkEvent(AbstractTest):
def setUp(self):
super().setUp()
self.event = WalkEvent(10, initial_pos=0)
self.generator = Generator(start_date=self.start_date, end_date=self.end_date)
def test_execute(self):
data = self.generator.generate(self.event)
values = data.values
print(values) |
import os
import base36
from threading import get_ident
from typing import Dict, Text
from .util import exists_process
class Subscriber:
"""
Subscribers are shared between publishers and client-side subscriptions.
They are used to manage the lifetime of the associated fifo as well as
for sending data to and from the publishing and subscribed processes.
"""
def __init__(self, hub, name, thread_id=None, pid=None):
self.hub = hub
self.name = name
self.thread_id = thread_id or get_ident()
self.pid = pid or os.getpid()
def to_dict(self):
"""
Return a dict version of the subscriber, which is used when sending
through a channel.
"""
return {
'name': self.name,
'thread_id': self.thread_id,
'pid': self.pid
}
@classmethod
def from_dict(cls, hub: 'Hub', data: Dict) -> 'Subscriber':
"""
        Create and return a Subscriber from a dict, which is used when
        instantiating a subscriber after receiving it as a dict through a channel.
"""
return cls(hub, **data)
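    # Round-trip sketch (hypothetical hub object): Subscriber.from_dict(hub, sub.to_dict())
    # rebuilds an equivalent subscriber on the receiving side of a channel.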
@property
def channel(self) -> 'Channel':
"""
        Get the channel created for this subscriber.
"""
return self.hub[self.name]
@property
def is_host_process_active(self) -> bool:
"""
Does the Python process which generated this subscriber still exist
as well as the fifo file?
"""
return (
exists_process(self.pid)
and os.path.exists(self.channel.filepath)
)
@property
def b36_thread_id(self) -> Text:
"""
Base36-encoded string version of the thread id
"""
return base36.dumps(self.thread_id) |
import shutil
import subprocess
import os
import tempfile
import test
from urllib import request
from time import sleep
PROJECT_HOME_DIR = os.path.join(
os.path.dirname(os.path.realpath(test.__file__)), os.pardir, os.pardir
)
TUTORIAL_DIR = os.path.abspath(
os.path.join(PROJECT_HOME_DIR, "src")
)
TEST_FEATURE_GROUP_LIST = os.path.abspath(
os.path.join(PROJECT_HOME_DIR, "src/tutorial/ExampleFeatureList.yaml")
)
TEST_FEATURE_GROUP_DIR = os.path.abspath(
os.path.join(PROJECT_HOME_DIR, "src/tutorial/example_feature_library")
)
def test_build():
tempdir = None
try:
tempdir = tempfile.mkdtemp()
output_dir = os.path.join(tempdir, "output")
subprocess.run(
args=f"blizz build {TEST_FEATURE_GROUP_LIST} {TEST_FEATURE_GROUP_DIR} {output_dir}",
shell=True,
cwd=PROJECT_HOME_DIR,
)
files = os.listdir(output_dir)
assert "StudentFeatureGroup" in files
finally:
if tempdir:
shutil.rmtree(tempdir, ignore_errors=True)
def test_bootstrap():
subprocess.run("blizz bootstrap", shell=True)
def test_docs():
    test_port = 9999
    p = None  # make sure p exists for the finally block even if Popen raises
    try:
p = subprocess.Popen(
args=f"blizz docs {TEST_FEATURE_GROUP_DIR} --serve --port {test_port}",
shell=True,
cwd=PROJECT_HOME_DIR,
stdout=subprocess.PIPE
)
sleep(10)
with request.urlopen(f"http://localhost:{test_port}", timeout=20) as req:
assert req.status == 200
finally:
if p:
p.terminate()
|
"""
Test to see if each form is working correctly
"""
from django.test import SimpleTestCase, TestCase
from ..forms import UserRegisterForm, UserLoginForm
class TestForms(TestCase):
# test for valid register page inputs
def test_register_form_valid(self):
form = UserRegisterForm(data={
'username': 'DudeMan123',
'email': '[email protected]',
'password1': 't3stp4ss',
'password2': 't3stp4ss'
})
self.assertTrue(form.is_valid())
# test for invalid register page inputs
def test_register_form_invalid(self):
form = UserRegisterForm(data={
'username': '/',
'email': '/',
'password1': '|',
'password2': '/'
})
self.assertFalse(form.is_valid())
# test for empty register page inputs
def test_register_form_empty(self):
form = UserRegisterForm(data={})
self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 4)
# test for valid login page inputs
def test_login_form_valid(self):
form = UserLoginForm(data={
'user_name': 'testymctestface',
'pass_word': 't3st1ng!'
})
self.assertTrue(form.is_valid())
# test for invalid login page inputs
def test_login_form_invalid(self):
form = UserLoginForm(data={
'user_name': '/',
'pass_word': 'poidfugposiudfgpoisudfpgoiusdpfiogupsdfuposudfgposiudfpgoiuspfiogupsdfugpsdgfupdgfupsodgfupoisudfgpgspdo'
})
self.assertFalse(form.is_valid())
# test for empty login page inputs
def test_login_form_empty(self):
form = UserLoginForm(data={})
self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 2)
|
"""merge fix_process_user_login and database_permission
Revision ID: ba3a6442af2b
Revises: 50db531bbf54, 70ad8e4607cd
Create Date: 2016-08-04 12:10:11.869905
"""
# revision identifiers, used by Alembic.
revision = 'ba3a6442af2b'
down_revision = ('50db531bbf54', '70ad8e4607cd')
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
|
#!/usr/bin/python
# from secret import flag
from Crypto.Cipher import AES
import hashlib
import time
import base64
key = hashlib.md5(str(1592566200).encode('utf-8'))
print(key.digest())
# padding = 16 - len(flag) % 16
aes = AES.new(str(key.hexdigest()).encode('utf-8'), AES.MODE_ECB)
print ("\n\n\n\n")
enc = "dd8e9250e270e1008cd5fba4c6dd93581d9e3b9822b5e0764e3cf126e75e6f69a7418a8ba0d0f66f3535e0573884a6468ba13399eca1ecc787413db51b00920e8ba13399eca1ecc787413db51b00920e"
# enc = base64.b64decode(enc)
# iv = enc[:16]
print(aes.decrypt(bytes.fromhex(enc)))  # the ciphertext is hex-encoded, so decode it to raw bytes first
# outData = aes.encrypt(str(flag) + padding* str(padding))
# print(outData.encode('base64'))
# a80182989ba919adbe9b |
import os
import uuid
from common.dbinstance import DbInstance
def insert_report(report, ipv4_addr):
# prepare result
result = {
'status': 400,
'data': None
}
# check if user has permission to create report
permitted = False
with DbInstance().get_instance().cursor() as cursor:
cursor.execute("""
SELECT
a.timestamp_created_report < DATE_SUB(CURRENT_TIMESTAMP, INTERVAL 15 SECOND) AS permission
FROM anons AS a
WHERE a.ipv4_addr = INET_ATON(%s)
""", (ipv4_addr,))
permitted = cursor.fetchone()
permitted = True if permitted is None else permitted['permission'] == 1
# create report if permitted
if permitted:
# insert/update ipv4_addr row
rows_anon = cursor.execute("""
INSERT INTO anons (ipv4_addr, timestamp_created_report) VALUES (INET_ATON(%s), CURRENT_TIMESTAMP)
ON DUPLICATE KEY UPDATE timestamp_created_report=CURRENT_TIMESTAMP
""", (ipv4_addr,))
# insert report
rows_thread = cursor.execute("""
INSERT INTO reports (post_id, data_reason, ipv4_addr)
VALUES (%s, %s, INET_ATON(%s))
""", (report['post_id'], report['reason'], ipv4_addr,))
id_inserted = cursor.lastrowid
# commit if ok
if rows_anon >= 1 and rows_thread == 1:
cursor.connection.commit()
result['status'] = 201
result['data'] = {
'id': id_inserted
}
else:
result['status'] = 429
result['data'] = {
'message': 'too many requests'
}
return result
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class DnsDelegationZone(object):
"""Implementation of the 'DnsDelegationZone' model.
IP Range for range of vip address addition.
Attributes:
dns_zone_name (string): Specifies the dns zone name.
dns_zone_vips (list of string): Specifies list of vips part of dns
delegation zone.
"""
# Create a mapping from Model property names to API property names
_names = {
"dns_zone_name": 'dnsZoneName',
"dns_zone_vips": 'dnsZoneVips'
}
def __init__(self,
dns_zone_name=None,
dns_zone_vips=None):
"""Constructor for the DnsDelegationZone class"""
# Initialize members of the class
self.dns_zone_name = dns_zone_name
self.dns_zone_vips = dns_zone_vips
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
dns_zone_name = dictionary.get('dnsZoneName', None)
dns_zone_vips = dictionary.get('dnsZoneVips', None)
# Return an object of this model
return cls(dns_zone_name,
dns_zone_vips)
|
import os
# for windows
if os.name == 'nt':
os.system('cls') # For Windows
print ('we are on Windows')
# for mac and linux(here, os.name is 'posix')
else:
os.system('clear') # For Linux/OS X
print ('we are on Linux') |
"""
341. Flatten Nested List Iterator
https://leetcode.com/problems/flatten-nested-list-iterator/
Time complexity: O()
Space complexity: O()
Solution:
"""
from typing import List
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
class NestedInteger:
def isInteger(self) -> bool:
"""
@return True if this NestedInteger holds a single integer, rather than a nested list.
"""
def getInteger(self) -> int:
"""
@return the single integer that this NestedInteger holds, if it holds a single integer
Return None if this NestedInteger holds a nested list
"""
def getList(self) -> [NestedInteger]:
"""
@return the nested list that this NestedInteger holds, if it holds a nested list
Return None if this NestedInteger holds a single integer
"""
class NestedIterator:
def __init__(self, nestedList: [NestedInteger]):
self.stack = [[nestedList, 0]]
def next(self) -> int:
self.hasNext()
nL, i = self.stack[-1]
self.stack[-1][1] += 1
return nL[i].getInteger()
def hasNext(self) -> bool:
while self.stack:
nL, i = self.stack[-1]
if i == len(nL):
                self.stack.pop()
else:
if nL[i].isInteger():
return True
self.stack[-1][1] += 1
self.stack.append([nL[i].getList(), 0])
return False
# Your NestedIterator object will be instantiated and called as such:
# i, v = NestedIterator(nestedList), []
# while i.hasNext(): v.append(i.next()) |
import pandas as pd
import numpy as np
print(pd.Series([11, 20, 30, 20, 30, 30, 20]))  # Creates a Series (one column) from the data in this list
print()
print(pd.Series([10, 20, 30, 33], index=["a", "b", "c", "d"]))  # Creates a Series from the list; the indices become the alphabetic labels.
print()
print(pd.Series(np.random.randn(3), ["a", "b", "c"]))  # Creates three random values with numpy and uses alphabetic indices
print()
S = pd.Series({0: 10, 1: 20.3, 2: 30})  # Creates a Series from a dictionary
print(S)
print(S[0])  # Gets only the row whose index is 0
print(S[0:2])  # Gets the rows from index 0 up to (but not including) 2
print()
print(pd.Series([0, 10, 20], name="Num"))  # Creates a named Series
print()
S = pd.Series([10, 20, 30], name="Num")
print(S.to_numpy())  # Converts the Series to a numpy array
print()
S1 = pd.Series([33, 1, 22])
print(S + S1)  # Any element-wise mathematical operation can be applied
print()
|
import logging
from collections import deque
from .model import ForwardSearchModel
class BreadthFirstSearch:
""" Apply Breadth-First search to a FSTRIPS problem """
def __init__(self, model: ForwardSearchModel, max_expansions=-1):
self.model = model
self.max_expansions = max_expansions
def run(self):
return self.search(self.model.init())
def search(self, s0):
# create obj to track state space
space = SearchSpace()
iteration = 0
num_goals_found = 0
open_ = deque() # fifo-queue storing the nodes which are next to explore
closed = set()
open_.append(make_root_node(s0))
node_id = 0
while open_:
iteration += 1
# logging.debug("brfs: Iteration {}, #unexplored={}".format(iteration, len(open_)))
node = open_.popleft()
is_goal = self.model.is_goal(node.state)
space.expand(node)
# we manage the closed list here to allow the parents update
if node.state in closed:
continue
closed.add(node.state)
node_id += 1
# exploring the node or if it is a goal node extracting the plan
if is_goal:
num_goals_found += 1
logging.info("Goal found after {} expansions. Number of goal states found: {}".format(
node_id, num_goals_found))
if 0 <= self.max_expansions <= node_id:
logging.info("Max. expansions reached. # expanded: {}, # goals: {}".format(node_id, num_goals_found))
return space
for operator, successor_state in self.model.successors(node.state):
open_.append(make_child_node(node, operator, successor_state))
logging.info("Search space exhausted. # expanded: {}, # goals: {}".format(node_id, num_goals_found))
space.complete = True
return space
class SearchNode:
def __init__(self, state, parent, action):
self.state = state
self.parent = parent
self.action = action
class SearchSpace:
""" A representation of a search space / transition system corresponding to some planning problem """
def __init__(self):
self.nodes = set()
self.last_node_id = 0
self.complete = False # Whether the state space contains all states reachable from the initial state
def expand(self, node: SearchNode):
self.nodes.add(node)
def make_root_node(state):
""" Construct the initial root node without parent nor action """
return SearchNode(state, None, None)
def make_child_node(parent_node, action, state):
""" Construct an child search node """
return SearchNode(state, parent_node, action)
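# A minimal sketch (not part of the original module) of how a plan could be recovered
# from a goal SearchNode by following parent links back to the root:
#   def extract_plan(node):
#       plan = []
#       while node.parent is not None:
#           plan.append(node.action)
#           node = node.parent
#       return list(reversed(plan))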
|
# Generated by Django 3.2 on 2022-01-01 12:40
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('group', '0009_auto_20220101_1239'),
]
operations = [
migrations.AlterModelOptions(
name='generatednumber',
options={'ordering': ('number',)},
),
migrations.AlterModelOptions(
name='group',
options={'ordering': ('name',)},
),
migrations.AlterField(
model_name='invitationcode',
name='code',
field=models.CharField(default=uuid.UUID('d8cbaa3f-8a69-4950-868d-35e71cf8e430'), max_length=255),
),
]
|
'''
Searching - Exercise 1
Using the codonAMINO dictonary from tgac.py, the program
reads and translates the sequence in the input file rna_seq.fasta
(limited to the first reading frame).
'''
F = open('rna_seq.fasta')
Out = open('protein_seq.fasta','w')
from tgac import codonAMINO
seq = ''
for line in F:
# This collects the header
if line[0] == '>':
header = line.split()
geneID = header[0]
Out.write(geneID + '_protein\n')
# This collects the sequence
else:
seq = seq + line.strip()
prot = ''
# range(i, j, k) generates a list of numbers
# ranging from i to j-1 in steps of k
# We want to range from the first to the last
# position of the sequence.
for i in range(0,len(seq),3):
    if seq[i:i+3] in codonAMINO:
prot = prot + codonAMINO[seq[i:i+3]]
# If codonAMINO does not have the key, we
# add a '*'
else:
prot = prot + '*'
Out.write(prot + '\n')
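# Close the input and output files so the written protein sequence is flushed to disk
F.close()
Out.close()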
|
#!/bin/python3
# -*- coding:utf-8 -*-
import data.basedata as basedata,data.peopleinfolist as pl
import httpdata.urlinfo as urlinfo, httpdata.httpheader as httpheader
import threading, multiprocessing, requests, datetime, time, random
from PIL import Image
import re as are
import cnn.c_cnn as cnn
import numpy as np
import tensorflow as tf
class appointment(object):
def __init__(self, url, header, plist, ptime):
cpu = multiprocessing.cpu_count()
self.appointment_url = url['appointment']
self.result_url = url['result']
self.pic_url = url['picture']
self.form_url = url['form']
self.header = header['http_header']
self.pic_header = header['pic_header']
self.thrsnum = int(cpu + 1)
self.procnum = int(cpu / 2 + 1)
self.plist = self.formatlist(plist, ptime)
self.rlist = self.formatrlist(plist)
self.model = cnn.captcha_cnn()
self.model.load_weights('./cnn/kears_cnn/captcha_cnn_best.h5')
self.char_set = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def formatlist(self, plist, ptime):
        '''
        Format the personal-info list; the result looks like:
        [
            {
                'shopid': '15',
                'areaid': '3',
                'phone': '18654714550',
                'realname': '罗璐腾',
                'shenfenzheng': '130105200010126014',
                'shuliang':'5',
                'picktime': '2020-02-20',
                'pickdate': '11:00-13:00',
                'yzm':''
            }
        ]
        As of 2020-02-22 this data is no longer complete; a 'token' field also has to be added.
        '''
pl = []
temppl = []
        # Get tomorrow's appointment date
        date = datetime.date.today() + datetime.timedelta(days=1)
        appodate = date.strftime('%Y-%m-%d')
        # Fill in all fields except the appointment time slot
for item in plist:
appo_info = {
'realname':item['name'],
'phone':item['phone'],
'shenfenzheng':item['id'],
'area':item['areaid'],
'shop':item['shopid'],
'pickdate':appodate,
'shuliang':'5',
'pid':'1',
'yzm':'',
'token':''
}
temppl.append(appo_info)
        # Fill in the time-slot field so that every person can try every slot of the day
for item_time in ptime:
for item_info in temppl:
item_info['picktime'] = item_time
pl.append(item_info.copy())
return pl
def makeappo(self):
        '''
        Create a process pool and try to make the appointments with multiple processes.
        '''
p = multiprocessing.Pool(self.procnum)
num = int(len(self.plist) / self.procnum + 1)
        # Split the data list according to the number of processes; each chunk has num items, the last one possibly fewer
tempList = self.avglist(self.plist, num)
for item in tempList:
p.apply_async(self.threadpost, args=(item, self.appointment_url, ))
p.close()
p.join()
def avglist(self, tlist, n):
        '''
        Split the list into chunks of size n
        '''
for i in range(0,len(tlist),n):
yield tlist[i : i + n]
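        # e.g. list(self.avglist([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]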
def threadpost(self, plist, url):
        '''
        Create worker threads and split the data list according to the number of threads
        '''
threads = []
num = int(len(plist) / self.thrsnum) + 1
tempList = self.avglist(plist, num)
for item in tempList:
thread = threading.Thread(target=self.postinfo,args=(url,item,))
threads.append(thread)
thread.start()
for th in threads:
th.join()
def postinfo(self, url, info):
        '''
        Send POST requests to the server to complete the appointment
        '''
head = {
'header' : self.header,
'pic_header' : self.pic_header
}
for item in info:
try:
                    # Get header info (used to fetch the captcha and submit the request); cookies are refreshed each time
                    header, token = self._getheaderandtoken(head)
                    # Fetch the captcha image
                    picpath = self._getpicture(header['pic_header'], item['phone'])
                    # Recognize the captcha and fill in the form data
                    item['yzm'] = str(self._getvcode(picpath))
item['token'] = token
re = requests.post(url,data=item.copy(),headers=header['pic_header'])
print(re.json())
except ConnectionResetError:
time.sleep(0.01)
continue
else:
continue
def getresult(self):
        '''
        Query the appointment results and save them
        '''
with open('./result/result.txt' ,'w') as f:
for item in self.rlist:
re = requests.post(self.result_url,data=item,headers=self.header)
f.write(str(re.json())+'\n')
f.close()
def formatrlist(self, plist):
        '''
        Format the result-query list
        '''
pl = []
for item in plist:
appo_info = {
'realname':item['name'],
'shenfenzheng':item['id'],
'pid':'1',
'result':''
}
pl.append(appo_info)
return pl
def _getcookiesandtoken(self):
        '''
        Get cookies and token.
        The cookies are used afterwards to fetch the captcha.
        '''
try:
re = requests.get(url=self.form_url)
while not (re.status_code == 200):
try:
re = requests.get(url=self.form_url)
pattern = are.compile(r"var token = \S+';")
token = pattern.search(re.text)
token = token.group(0).split("'")
except :
print(EnvironmentError)
finally:
return requests.utils.dict_from_cookiejar(re.cookies), token
def _getheaderandtoken(self, header):
        '''
        Complete the header info
        '''
cookies, token = self._getcookiesandtoken()
for key in cookies:
header['header']['cookies'] = key + '=' + cookies[key]
header['pic_header']['cookies'] = key + '=' + cookies[key]
return header, token
def _getpicture(self, header, fn):
        '''
        Request a captcha refresh and save the captcha image
        '''
try:
re = requests.get(self.pic_url, headers=header)
while not (re.status_code == 200):
try:
re = requests.get(self.pic_url, headers=header)
except:
print(EnvironmentError)
finally:
picfile = r'./data/pictemp/' + fn + r'.png'
with open(picfile, 'wb') as f:
f.write(re.content)
return picfile
def _getvcode(self, picpath):
        '''
        Preprocess the captcha image and recognize the captcha.
        The CNN model is only lightly trained (999 base samples).
        '''
data_x = np.zeros((1, 25, 80, 3))
x = tf.io.read_file(picpath)
x = tf.image.decode_png(x, channels=3)
x = tf.image.convert_image_dtype(x, tf.float64)
x /= 255.
x = tf.reshape(x, (25, 80, 3))
data_x[0, :] = x
prediction_value = self.model.predict(data_x)
res = self.vec2text(np.argmax(prediction_value, axis=2))
return res
def vec2text(self, vec):
text = []
for item in vec:
index = item[0]
text.append(self.char_set[index])
return ''.join(text)
def apporun():
url = {
'appointment' : urlinfo.get_appointmenturl(),
'result' : urlinfo.get_searchurl(),
'picture' : urlinfo.get_picurl(),
'form' : urlinfo.get_formurl()
}
httpheaders = {
'http_header' : httpheader.conn_header(),
'pic_header' : httpheader.pic_header()
}
    # Randomly create personal info for 50 people (ID number, phone number, name)
p = pl.peopleinfolist(50)
plist = p.getpl()
ptime = basedata.picktime
appo = appointment(url, httpheaders, plist, ptime)
    # Define the time at which to stop trying to book
date = str(datetime.date.today().strftime('%Y-%m-%d')) + ' 19:45:30'
temptime = time.strptime(date, '%Y-%m-%d %H:%M:%S')
keytime = time.mktime(temptime)
    # Save the generated identities to a timestamp-named file so the results can be queried later
nowtime = time.time()
fileadd = r'./data/infolist/' + str(nowtime) + r'_plist.txt'
with open(fileadd ,'w') as f:
for item in plist:
f.write(str(item)+'\n')
f.close()
while True:
appo.makeappo()
if keytime < time.time():
break
if __name__ == '__main__':
apporun()
#appo.getresult()
|
#!/usr/bin/python3.6
import threading
import multiprocessing
from functools import reduce
from time import sleep
from time import strftime
import mysql.connector
import json
from os.path import abspath, join, dirname
import random
import sys
import time
import itertools
import psycopg2
user_input = None
def func():
global user_input
user_input = input(":\t")
class TaurSeedGenerator:
def __init__(self, args):
self._is_psql = True
if self.main(args) == -1:
exit()
self.init_database_connection()
__SGBD = ['mysql', 'psql'] # ['mysql', 'sqlite', 'postgres']
    # initialise the default configuration
u_config = {
"dbms": "mysql",
"db": None,
"user": None,
"password": None,
"host": "127.0.0.1",
"port": 3306,
"ignore": [],
"len_row": 20,
"equal": {},
"choice": {},
"combine": {},
"border": {
"_def": [0, 10000000]
},
"unit": {
"_def": 1
}
}
    # initialise the local variables
_db_cursor = _db_connector = None
queue = _finalseed = []
def appendExec(self, target=None, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if target:
idx = 0
while idx < len(self.queue):
if (not self.queue[idx]) or (not self.queue[idx].is_alive()):
if self.queue[idx]:
self.queue[idx].terminate()
process = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
self.queue[idx] = process
self.queue[idx].start()
break
idx = idx + 1
if idx == len(self.queue):
sleep(2)
self.appendExec(target, args, kwargs)
def waitAllFinish(self):
alldone = 0
while alldone < len(self.queue):
sleep(5)
alldone = 0
for process in self.queue:
if (not process) or process.exitcode or (not process.is_alive()):
alldone = alldone + 1
@staticmethod
def full_path(filename):
return abspath(join(dirname(__file__), filename))
def files(self):
return {
'first:homme': self.full_path('homme.taur'),
'first:femme': self.full_path('femme.taur'),
'last': self.full_path('prenom.taur'),
'word': self.full_path('words.taur'),
}
def get_name(self, val=0, typ=None):
selected = filename = None
if typ is not None:
typ = typ.lower()
if str(typ).__contains__('pseudo') \
or str(typ).__contains__('ickname') \
or str(typ).__contains__('prenom'):
val = 1
elif str(typ).__contains__('name') or str(typ).__contains__('nom'):
val = random.randint(2, 3)
        if val == 0:  # a dictionary word is being looked up
            # selected = random.randint(0, 455000)
            selected = random.randint(0, 53)
            filename = self.files()['word']
        elif val == 1:  # a first name is being looked up
            # selected = random.randint(0, 88000)
            selected = random.randint(0, 53)
            filename = self.files()['last']
        elif val == 2:  # a man's name is being looked up
            # selected = random.randint(0, 12000)
            selected = random.randint(0, 53)
            filename = self.files()['first:homme']
        elif val == 3:  # a woman's name is being looked up
            # selected = random.randint(0, 42000)
            selected = random.randint(0, 53)
            filename = self.files()['first:femme']
with open(filename) as name_file:
c = 0
namer = ["", ""]
for line in name_file:
c = c + 1
namer.append(line.strip().split()[0])
if c > selected:
if val == 0:
if (c > 5) and not str(typ).__contains__('titre') and not str(typ).__contains__('title'):
return reduce(lambda x, y: str(x) + " " + str(y), namer[-7:])
return reduce(lambda x, y: str(x) + " " + str(y), namer[-2:])
else:
return namer[-1]
return "taur string" # Return empty string if file is empty
@staticmethod
def get_doc():
res = "*" * 10 + "Taur seed generator" + "*" * 10
res = res + "\n sgbd\tpour specifier le gestionaire de base de donnee. NB: si ommit 'mysql' sera utiliser"
res = res + "\n -u\t\tpour specifier le nom de l'utilisateur de la base de donnee. ce parametre est requis"
res = \
res + "\n -h\t\tpour specifier l'address hote de la base de donnee. NB:si ommit 'localhost' sera utiliser"
res = res + "\n -p\t\tpour specifier le mot de passe de l'utilisateur de la base de donnee."
res = res + "\n -db\tpour specifier la base de donnee a utiliser. ce parametre est requis"
res = res + "\n -l\t\tpour specifier la limite de donnee a inserer. sit omit la limit sera de 20"
res = res + "\n -i\t\tpour specifier la liste des tables a ignore pendant l'insertion."
res = res + "\n\t\tsi ce parametre est ommit, toute les tables soront modifier."
res = res + "\n\t\tNB: on souhaite souvant ignorer les tables detier pour les frameworks"
res = res + "\n\nexample:\n\tpython3 t_g_seed.py ? "
res = res + "\n\tpython3 t_g_seed.py -conf ~/config.json"
res = res + "\n\tpython3 t_g_seed.py -o -conf ./config.json"
res = \
res + "\n\nexample configuration:" + \
'\n{\
\n\t"dbms": "mysql",\
\n\t"db": test,\
\n\t"user": test,\
\n\t"password": test,\
\n\t"host": "127.0.0.1",\
\n\t"port": 3306,\
\n\t"ignore": ["SequelizeMeta"],\
\n\t"len_row": 50,\
\n\t"equal": { \
\n\t "your_colone": 0\
\n\t "your_colone": "test"\
\n\t},\
\n\t"choice": { \
\n\t "your_colone": ["val1", "val2", "val3"]\
\n\t "your_colone": [1, 5, 3]\
\n\t},\
\n\t"combine": { \
\n\t "your_colone":{ \
\n\t "val":[1,2,3,5,5,6,7,8,9]\
\n\t "join":"-"\
\n\t }\
\n\t "your_colone":{ \
\n\t "val":[[1,2],[3],[5],[5,6,7],8,9]\
\n\t "join":[]\
\n\t }\
\n\t},\
\n\t"border": {\
\n\t "_def": [0, 10000000]\
\n\t "your_colone": [5000, 10000000]\
\n\t},\
\n\t"unit": {\
\n\t "_def": 1\
\n\t "your_colone": 500\
\n\t}\
\n} '
res = res + "\n\nNB: tout autre parametre sera ignorer\n"
return res + "*" * 39
def main(self, args):
sleep(1)
print(args)
if args.__len__() <= 1:
return self.loadConfig()
if args.__contains__('?'):
print(self.get_doc())
return -1
if args.__contains__('-conf'):
try:
idx = args.index('-conf')
if len(args) > idx + 1:
return self.loadConfig(args[idx + 1] or None)
else:
print("Erreur: parametre de commande incorrecte")
except Exception as e:
print(e)
print("Erreur: fichier de configuration incorrecte ou introuvavble")
return 0
@staticmethod
def get_arg_value(idx, args):
arg = str(args[idx]).split('"')
if arg.__len__() == 3:
arg = arg[1]
else:
arg = arg[0].split("'")
if arg.__len__() == 3:
arg = arg[1]
else:
arg = arg[0]
return arg
def special_reduce(self, stri):
if type(self.u_config['combine'][stri]['join']) is str:
return lambda _x, y: str(_x) + self.u_config['combine'][stri]['join'] + str(y)
if type(self.u_config['combine'][stri]['join']) is list:
return lambda _x, y: (_x if type(_x) is list else [_x]) + self.u_config['combine'][stri]['join'] + (
y if type(y) is list else [y])
return lambda _x, y: _x + self.u_config['combine'][stri]['join'] + y
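        # Sketch with a hypothetical config entry: if combine["code"] = {"val": [1, 2, 3], "join": "-"},
        # then reduce(self.special_reduce("code"), (1, 3)) evaluates to "1-3".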
@staticmethod
def generate(val, ln):
        if val == 0:  # generate a number
            return random.randint(0, 10 ** ln)
        elif val == 1:  # generate a date
            return strftime('%Y-%m-%d %H:%M:%S')
        elif val == 3:  # generate a character string
            pass
def get_config(self, strin, who=1):
if self.u_config['equal'].__contains__(strin):
return self.u_config['equal'][strin]
if self.u_config['choice'].__contains__(strin):
return random.choice(self.u_config['choice'][strin])
if self.u_config['combine'].__contains__(strin):
nb = random.randint(2, len(self.u_config['combine'][strin]['val']))
a = list(itertools.combinations(self.u_config['combine'][strin]["val"], nb))
return reduce(self.special_reduce(strin), random.choice(a))
if who == 0:
return self.get_name(0, strin)
if self.u_config['unit'].__contains__(strin):
unit_key = self.u_config['unit'][strin]
else:
unit_key = self.u_config['unit']['_def']
if self.u_config['border'].__contains__(strin):
a = round(self.u_config['border'][strin][0] / unit_key)
b = round(self.u_config['border'][strin][1] / unit_key)
return unit_key * random.randint(a, b)
else:
a = round(self.u_config['border']['_def'][0] / unit_key)
b = round(self.u_config['border']['_def'][1] / unit_key)
return unit_key * random.randint(a, b)
def addseed(self, tb, args, fin):
finalseed = []
idx = len(finalseed)
finalseed.append([tb, [], [], None])
print("add seed table", tb)
for pos in range(self.u_config['len_row']):
finalseed[idx][1].append({})
for arg in args:
if arg[3] == "PRI":
typ = arg[1][:3]
if finalseed[idx][3] is None:
                        finalseed[idx][3] = arg[0]  # assign this primary key to the table
if pos == 0:
if typ == 'var':
finalseed[idx][1][pos][arg[0]] = self.get_name(0, arg[0])
elif typ == 'dat':
finalseed[idx][1][pos][arg[0]] = strftime('%Y-%m-%d %H:%M:%S')
else:
finalseed[idx][1][pos][arg[0]] = 1
else:
if typ == 'dat':
finalseed[idx][1][pos][arg[0]] = strftime('%Y-%m-%d %H:%M:%S')
else:
old = finalseed[idx][1][pos - 1][arg[0]]
finalseed[idx][1][pos][arg[0]] = old + str(1) if type(old) is str else old + 1
elif arg[3] == "MUL":
                    # foreign key detected
                    if not finalseed[idx][2].__contains__(arg[0]):
                        finalseed[idx][2].append(
                            arg[0])  # add this foreign key to the table if it is not already there
finalseed[idx][1][pos][arg[0]] = random.randint(1, self.u_config['len_row'])
elif arg[3] == "UNI":
typ = arg[1][:3]
if pos == 0:
if typ == 'var':
finalseed[idx][1][pos][arg[0]] = self.get_config(arg[0], 0)
elif typ == 'dat':
finalseed[idx][1][pos][arg[0]] = strftime('%Y-%m-%d %H:%M:%S')
else:
finalseed[idx][1][pos][arg[0]] = 1
else:
if typ == 'dat':
finalseed[idx][1][pos][arg[0]] = strftime('%Y-%m-%d %H:%M:%S')
else:
old = finalseed[idx][1][pos - 1][arg[0]]
finalseed[idx][1][pos][arg[0]] = old + 1 if type(old) is int else old + str(1)
else:
typ = arg[1][:3]
if typ == 'var':
finalseed[idx][1][pos][arg[0]] = self.get_config(arg[0], 0)
elif (typ == 'boo') | (typ == 'BOO'):
finalseed[idx][1][pos][arg[0]] = random.choice([True, False])
elif typ == 'dat':
finalseed[idx][1][pos][arg[0]] = strftime('%Y-%m-%d %H:%M:%S')
else:
finalseed[idx][1][pos][arg[0]] = self.get_config(arg[0])
fin.append(finalseed[0])
def init_database_connection(self):
try:
if self.u_config['dbms'] == 'mysql':
self._db_connector = mysql.connector.connect(
host=self.u_config['host'],
port=self.u_config['port'],
database=self.u_config['db'],
user=self.u_config['user'],
passwd=self.u_config['password']
)
self._is_psql = False
else:
self._db_connector = psycopg2.connect(
host=self.u_config['host'],
port=self.u_config['port'],
database=self.u_config['db'],
user=self.u_config['user'],
password=self.u_config['password']
)
except Exception as e:
sleep(1)
print(e)
print("Erreur Taur n'arrive pas a se connecter avec les parametres fourni.")
print("\t{\n\t\tsgbd :\t\t'" + str(self.u_config['dbms']) + "'\n\t\t", end=' ')
print("user :\t\t'" + str(self.u_config['user']) + "'\n\t\t", end=' ')
print("password :\t'" + str(self.u_config['password']) + "'\n\t\t", end=' ')
print("host :\t\t'" + str(self.u_config['host']) + "'\n\t\t", end=' ')
print("database :\t'" + str(self.u_config['db']) + "'\n\t}\n\n", end=' ')
exit()
print("connection au sgbd '" + str(self.u_config['dbms']) + "' reussi")
self._db_cursor = self._db_connector.cursor()
print('*' * 8 ** 3, end='\n\n')
if self._is_psql:
self._db_cursor.execute("SELECT \
table_schema || '.' || table_name \
FROM \
information_schema.tables \
WHERE \
table_type = 'BASE TABLE' \
AND \
table_schema NOT IN ('pg_catalog', 'information_schema');")
else:
self._db_cursor.execute("SHOW TABLES")
table_list = []
for x in self._db_cursor:
print(x[0], end=', ')
table_list.append(x[0])
self._db_cursor.close()
print("\nList des tables trouver:\n\t", end=' ')
print(table_list)
if type(self.u_config['ignore']) is str:
if table_list.__contains__(self.u_config['ignore']):
table_list.remove(self.u_config['ignore'])
elif type(self.u_config['ignore']) is list:
for ignore in self.u_config['ignore']:
if table_list.__contains__(ignore):
table_list.remove(ignore)
sleep(2)
self._db_cursor = self._db_connector.cursor()
_finaltable = []
        ''' _finaltable holds, for each table, the table name and the list of its columns.
        It is represented as follows:
        [('table0', [('colonne1', 'type', 'isMul')])]   # 'MUL' marks foreign-key columns
        '''
print("\n\nListe final des tables a modifier", table_list)
print("preparation de l'insertion")
manager = multiprocessing.Manager()
finalseed = manager.list()
for table in table_list:
assert isinstance(table, str)
# self._db_cursor.execute("SHOW CREATE TABLE " + el)
if self._is_psql:
self._db_cursor.execute("SELECT * \
FROM information_schema.columns \
WHERE table_schema = " + table)
else:
self._db_cursor.execute("SHOW COLUMNS FROM " + table)
_finaltable.append((table, []))
flen = len(_finaltable)
for nxt in self._db_cursor:
_finaltable[flen - 1][1].append(list(nxt))
self.appendExec(self.addseed, (table, _finaltable[flen - 1][1], finalseed))
self.waitAllFinish()
self._finalseed = finalseed
print()
print("seed a inserer")
print(self._finalseed)
print('verification de cle secondaire')
remplacement = True
idx = 0
        count = 0  # used to detect recursive (circular) foreign-key indexing
fln = len(self._finalseed)
fln2 = fln ** 3
precedent_primarys_key = []
while (remplacement | (idx < fln)) & (count < fln2):
remplacement = False
precedent_primarys_key.append((self._finalseed[idx][0], self._finalseed[idx][3]))
ln = len(self._finalseed[idx][2])
if ln != 0:
for foreign_id in range(ln):
if not self.string_contain_tuple_in_array(self._finalseed[idx][2][foreign_id],
precedent_primarys_key):
_el = self._finalseed[idx]
self._finalseed.remove(_el)
self._finalseed.append(_el)
remplacement = True
break
idx = idx + 1
if remplacement:
print("# on reinitialise les compteur: table: " + str(self._finalseed[fln - 1][0]))
print(list(map(lambda _x: _x[0], self._finalseed)))
count = count + 1
precedent_primarys_key = []
idx = 0
if count >= fln2:
print("\n\n**********************\n\tErreur: indexsation recurssive")
print("\tverifier les cles secondaire\n*******************\n")
exit()
print("\n\ncommencer l'insertion?\n")
print("vous avez 30 secondes pour repondre y ou o pour oui et tout autres lettre pour nom")
res = self.get_input(30)
if (res == 'y') | (res == 'Y') | (res == 'O') | (res == 'o'):
for table in self._finalseed:
into = reduce(lambda _x, _y: str(_x) + ", " + str(_y), table[1][0].keys())
valu = list(map(lambda _s: tuple(_s.values()), table[1]))
print(into)
print(valu)
sql = "INSERT INTO " + str(table[0]) + " (" + into + ") VALUES (" + reduce(lambda _x, y: _x + "%s,",
into.split(', '), "")[
:-1] + ")"
self._db_cursor.executemany(sql, valu)
self._db_connector.commit()
else:
if res is None:
print("delai depasser")
print('\n' * 2, "bye!", end='\n')
exit()
@staticmethod
def get_input(timeout=10):
global user_input
user_input = None
th = threading.Thread(target=func)
th.start()
count = 0
while count < timeout:
if not th.is_alive():
break
count = count + 1
time.sleep(1)
th._delete()
return user_input
@staticmethod
def string_contain_tuple_in_array(string, arr_tuple):
string = string.lower()
for tupl in arr_tuple:
_table_name = tupl[0][:-1].lower()
if tupl[0][-1] == 's'.lower():
_table_name = tupl[0][:-1].lower()
_primary_key = tupl[1].lower()
lns = len(string)
lnt = len(_table_name)
lnp = len(_primary_key)
if string.__contains__(_table_name) and string.__contains__(_primary_key) and (
lnp + lnt <= lns < lnp + lnt + 2) and (string.index(_table_name) == 0) and (
string.index(_primary_key) == lns - lnp):
return True
return False
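        # e.g. with arr_tuple = [("users", "id")], the column name "userid" starts with "user"
        # and ends with "id", so this returns True; a column named "username" returns False.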
def loadConfig(self, file_path='./config.seed.json'):
try:
if file_path[-5:] != '.json':
raise Exception('veuillez le fichier de configuration doit etre un fichier extenstion json')
json_file = open(str(file_path))
_config = json.load(json_file)
if _config.__contains__('dbms'):
self.u_config['dbms'] = _config['dbms']
if _config.__contains__('user'):
self.u_config['user'] = _config['user']
else:
raise Exception('utilisateur nom defini')
if _config.__contains__('password'):
self.u_config['password'] = _config['password']
if _config.__contains__('process_number'):
for proc in range(_config['process_number']):
self.queue.append(None)
else:
for proc in range(2):
self.queue.append(None)
if _config.__contains__('host'):
self.u_config['host'] = _config['host']
if _config.__contains__('port'):
self.u_config['port'] = _config['port']
elif self.u_config['dbms'] == 'psql':
self.u_config['port'] = 5432
if _config.__contains__('db'):
self.u_config['db'] = _config['db']
else:
raise Exception('la base de donnee n\'est pas specifier')
if _config.__contains__('len_row'):
self.u_config['len_row'] = _config['len_row']
else:
print('le nombre de colonne n\'a pas ete specifier, 50 sera utiliser par default')
if _config.__contains__('ignore'):
self.u_config['ignore'] = _config['ignore']
if _config.__contains__('equal'):
self.u_config['equal'] = _config['equal']
if _config.__contains__('choice'):
self.u_config['choice'] = _config['choice']
if _config.__contains__('combine'):
for key in _config['combine']:
if _config['combine'][key].__contains__('val') and (type(_config['combine'][key]['val']) is list):
self.u_config['combine'][key] = _config['combine'][key]
if not _config['combine'][key].__contains__('join'):
self.u_config['combine'][key]['join'] = " "
if _config.__contains__('border'):
self.u_config['border'] = _config['border']
if not _config['border'].__contains__('_def'):
self.u_config['unit']['_def'] = 1
if _config.__contains__('unit'):
self.u_config['unit'] = _config['unit']
if not _config['unit'].__contains__('_def'):
self.u_config['unit']['_def'] = 1
return 0
except Exception as e:
print(e)
return -1
if __name__ == "__main__":
TaurSeedGenerator(sys.argv)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for node_config_api."""
import json
from absl.testing import absltest
from multitest_transport.api import api_test_util
from multitest_transport.api import node_config_api
from multitest_transport.models import ndb_models
class NodeConfigApiTest(api_test_util.TestCase):
def setUp(self):
super(NodeConfigApiTest, self).setUp(node_config_api.NodeConfigApi)
def _CreateNodeConfig(self, data):
node_config = ndb_models.NodeConfig(
id=ndb_models.NODE_CONFIG_ID,
env_vars=[
ndb_models.NameValuePair(**obj)
for obj in data['env_vars']
],
test_resource_default_download_urls=[
ndb_models.NameValuePair(**obj)
for obj in data['test_resource_default_download_urls']
])
return node_config
def testGet(self):
data = {
'env_vars': [
{'name': 'FOO', 'value': 'foo'},
{'name': 'BAR', 'value': 'bar'},
],
'test_resource_default_download_urls': [
{'name': 'abc', 'value': 'file://abc'},
{'name': 'def', 'value': 'file://def'},
],
}
node_config = self._CreateNodeConfig(data)
node_config.put()
res = self.app.get('/_ah/api/mtt/v1/node_config')
msg = json.loads(res.body)
self.assertEqual(data, msg)
def testUpdate(self):
data = {
'env_vars': [
{'name': 'FOO', 'value': 'foo'},
{'name': 'BAR', 'value': 'bar'},
],
'test_resource_default_download_urls': [
{'name': 'abc', 'value': 'file://abc'},
{'name': 'def', 'value': 'file://def'},
],
}
self.app.put_json('/_ah/api/mtt/v1/node_config', data)
self.assertEqual(self._CreateNodeConfig(data), ndb_models.GetNodeConfig())
if __name__ == '__main__':
absltest.main()
|
#!/usr/bin/python
#coding=utf-8
import os
import requests
import time
import re
from datetime import datetime
import urllib2
import json
import mimetypes
import smtplib
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
# configuration for pgyer
USER_KEY = "f605b7c7826690f796078e3dd23a60d5"
API_KEY = "8bdd05df986d598f01456914e51fc889"
PGYER_UPLOAD_URL = "https://www.pgyer.com/apiv1/app/upload"
repo_path = 'C:/Users/Administrator/.jenkins/workspace/Demo/app'
repo_url = 'https://github.com/r17171709/iite_test'
ipa_path = "C:/Users/Administrator/.jenkins/workspace/Demo/app/build/outputs/apk/app-release.apk"
update_description = "版本更新测试"
def parseUploadResult(jsonResult):
print 'post response: %s' % jsonResult
resultCode = jsonResult['code']
send_Email(jsonResult)
if resultCode != 0:
print "Upload Fail!"
raise Exception("Reason: %s" % jsonResult['message'])
print "Upload Success"
appKey = jsonResult['data']['appKey']
appDownloadPageURL = "https://www.pgyer.com/%s" % appKey
print "appDownloadPage: %s" % appDownloadPageURL
return appDownloadPageURL
def uploadIpaToPgyer(ipaPath, updateDescription):
print "Begin to upload ipa to Pgyer: %s" % ipaPath
headers = {'enctype': 'multipart/form-data'}
payload = {
'uKey': USER_KEY,
'_api_key': API_KEY,
        'publishRange': '2',  # publish immediately
        'isPublishToPublic': '2',  # do not publish to the pgyer public plaza
        'updateDescription': updateDescription  # release notes for this build
}
try_times = 0
while try_times < 5:
try:
print "uploading ... %s" % datetime.now()
ipa_file = {'file': open(ipaPath, 'rb')}
r = requests.post(PGYER_UPLOAD_URL,
headers = headers,
files = ipa_file,
data = payload
)
assert r.status_code == requests.codes.ok
result = r.json()
appDownloadPageURL = parseUploadResult(result)
return appDownloadPageURL
except requests.exceptions.ConnectionError:
print "requests.exceptions.ConnectionError occured!"
time.sleep(60)
print "try again ... %s" % datetime.now()
try_times += 1
except Exception as e:
print "Exception occured: %s" % str(e)
time.sleep(60)
print "try again ... %s" % datetime.now()
try_times += 1
if try_times >= 5:
raise Exception("Failed to upload ipa to Pgyer, retried 5 times.")
def parseQRCodeImageUrl(appDownloadPageURL):
try_times = 0
while try_times < 3:
try:
response = requests.get(appDownloadPageURL)
regex = '<img src=\"(.*?)\" style='
m = re.search(regex, response.content)
assert m is not None
appQRCodeURL = m.group(1)
print "appQRCodeURL: %s" % appQRCodeURL
return appQRCodeURL
except AssertionError:
try_times += 1
time.sleep(60)
print "Can not locate QRCode image. retry ... %s: %s" % (try_times, datetime.now())
if try_times >= 3:
raise Exception("Failed to locate QRCode image in download page, retried 3 times.")
def saveQRCodeImage(appDownloadPageURL, output_folder):
appQRCodeURL = parseQRCodeImageUrl(appDownloadPageURL)
response = requests.get(appQRCodeURL)
qr_image_file_path = os.path.join(output_folder, 'QRCode.png')
if response.status_code == 200:
with open(qr_image_file_path, 'wb') as f:
f.write(response.content)
print 'Save QRCode image to file: %s' % qr_image_file_path
def main():
appDownloadPageURL = uploadIpaToPgyer(ipa_path, update_description)
try:
output_folder = os.path.dirname(ipa_path)
saveQRCodeImage(appDownloadPageURL, output_folder)
except Exception as e:
print "Exception occured: %s" % str(e)
# Get the most recent git commit info
def getCommitInfo():
    # Option 1: use a python library; requires the current branch to exist on the remote
    # repo = Gittle(repo_path, origin_uri=repo_url)
    # commitInfo = repo.commit_info(start=0, end=1)
    # lastCommitInfo = commitInfo[0]
    # Option 2: cd into the repo directory and run `git log -1` to print the commit info
os.chdir(repo_path);
lastCommitInfo = run_cmd('git log -1')
return lastCommitInfo
# Send the notification email
def send_Email(json_result):
print '*******start to send mail****'
appName = json_result['data']['appName']
appKey = json_result['data']['appKey']
appVersion = json_result['data']['appVersion']
appBuildVersion = json_result['data']['appBuildVersion']
appShortcutUrl = json_result['data']['appShortcutUrl']
    # Email recipients
mail_receiver = ['[email protected]']
    # Configure host, user and pwd according to your mail provider
mail_host = 'smtp.139.com'
mail_port = 465
mail_user = '[email protected]'
mail_pwd = 'xxx'
mail_to = ','.join(mail_receiver)
msg = MIMEMultipart()
environsString = '<p><h3>本次打包相关信息</h3><p>'
# environsString += '<p>ipa 包下载地址 : ' + 'wudizhi' + '<p>'
environsString += '<p>蒲公英安装地址 : ' + 'http://www.pgyer.com/' + str(appShortcutUrl) + '<p><p><p><p>'
# environsString += '<li><a href="itms-services://?action=download-manifest&url=https://ssl.pgyer.com/app/plist/' + str(appKey) + '"></a>点击直接安装</li>'
environsString += '<p><h3>本次git提交相关信息</h3><p>'
    # Get the last git commit info
    lastCommitInfo = getCommitInfo()
    # # Committer
    # committer = lastCommitInfo['committer']['raw']
    # # Commit message
# description = lastCommitInfo['description']
environsString += '<p>' + '<font color="red">' + lastCommitInfo + '</font>' + '<p>'
# environsString += '<p>Description:' + '<font color="red">' + description + '</font>' + '<p>'
message = environsString
body = MIMEText(message, _subtype='html', _charset='utf-8')
msg["Accept-Language"]="zh-CN"
msg["Accept-Charset"]="ISO-8859-1,utf-8"
msg.attach(body)
msg['To'] = mail_to
msg['from'] = '[email protected]'
msg['subject'] = 'Android APP 最新打包文件'
try:
s = smtplib.SMTP()
        # Enable debug mode so the SMTP session prints its diagnostic output
s.set_debuglevel(1)
s.connect(mail_host)
        s.starttls() # establish an encrypted TLS connection
s.login(mail_user, mail_pwd)
s.sendmail(mail_user, mail_receiver, msg.as_string())
s.close()
print '*******mail send ok****'
except Exception, e:
print e
def run_cmd(cmd):
try:
import subprocess
except ImportError:
_, result_f, error_f = os.popen3(cmd)
else:
process = subprocess.Popen(cmd, shell = True,
stdout = subprocess.PIPE, stderr = subprocess.PIPE)
result_f, error_f = process.stdout, process.stderr
errors = error_f.read()
if errors: pass
result_str = result_f.read().strip()
if result_f : result_f.close()
if error_f : error_f.close()
return result_str
if __name__ == '__main__':
main()
|
from unittest import TestCase
from pbx_gs_python_utils.utils.Dev import Dev
from pbx_gs_python_utils.utils.Files import Files
from docker_build.src.API_Docker import API_Docker
class test_API_Docker(TestCase):
    def setUp(self):
        self.api = API_Docker()  # system under test, used by every test below
        self.result = None
def tearDown(self):
if self.result is not None:
Dev.pprint(self.result)
def test_ctor(self):
assert type(self.api).__name__ == 'API_Docker'
assert Files.folder_exists(self.api.images_path)
def test_build(self):
assert 'Successfully tagged alpine-python:latest' in self.api.build('alpine-python').get('console')
result = self.api.run('alpine-python', ['python3', '--version'])
assert result.get('ok') is True
assert result.get('console') == ['Python 3.7.3']
def test_exec(self):
assert 'Usage:\tdocker [OPTIONS] COMMAND' in self.api.docker_exec().get('error')
def test_run(self):
assert 'Hello from Docker!' in self.api.run('hello-world').get('console')
def test_run__with_params(self):
assert 'bin' in self.api.run('ubuntu', 'ls').get('console')
|
# Copyright 2019 The TensorHub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Load packages
from tensorflow import keras
import numpy as np
def create_vocabulary(corpus, type_embedding="word", num_words=10000):
"""Create a sequence tokenizer and generate vocabulary. Supports both 'word' and 'char' sequences.
    Arguments:
        corpus {list} -- A list of strings from which word-index or char-index mapping is created.
        type_embedding {str} -- Either "word" or "char"; selects word-level or character-level tokenization.
        num_words {int} -- Maximum number of words to keep, based on word frequency. \
            Only the most common (num_words-1) tokens will be kept. Not necessary when doing character embedding.
Returns:
TokenizerObject -- Tokenizer object to fit sequences.
Dict -- Vocabulary dictionary.
"""
# Custom tokenizer
if type_embedding.lower() == "word":
# Word embeddings
tokenizer = keras.preprocessing.text.Tokenizer(num_words=num_words, oov_token="<UNK>")
else:
# Character embeddings
tokenizer = keras.preprocessing.text.Tokenizer(char_level=True, oov_token="<UNK>")
# Fit tokenizer on the corpus
tokenizer.fit_on_texts(corpus)
# Generate vocabulary
vocab = tokenizer.word_index
return tokenizer, vocab
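# Minimal usage sketch (hypothetical corpus and file path; assumes a GloVe-style
# whitespace-separated embedding file on disk):
#   tokenizer, vocab = create_vocabulary(["hello world", "hello there"], type_embedding="word")
#   embedding_matrix = load_embedding("glove.6B.300d.txt", vocab, embedding_dim=300)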
def load_embedding(filepath, token_index_mapping, embedding_dim=300):
"""Create an embedding matrix from the given pre-trained vector.
Arguments:
filepath {str} -- Path to load pre-trained embeddings (ex: glove).
embedding_dim {int} -- Dimension of the pre-trained embedding.
token_index_mapping {dict} -- A dictionary containing token-index mapping from the whole corpus.
Returns:
Matrix -- A numpy matrix containing embeddings for each token in the token-index mapping.
"""
# Placeholder for embedding
embedding_index = dict()
# Access file to load pre-trained embedding
with open(filepath, mode="r") as fp:
for line in fp:
            values = line.split()
            token = values[0:-embedding_dim]
            coefs = np.asarray(values[-embedding_dim:], dtype="float32")
            embedding_index[token[0]] = coefs
    # Create a weight matrix for every token in the corpus
    # (+1 row because Keras tokenizer indices start at 1)
    embedding_matrix = np.zeros((len(token_index_mapping) + 1, embedding_dim))
    # Fill the matrix using the token-index mapping
    for token, i in token_index_mapping.items():
        embedding_vector = embedding_index.get(token)
        # Update embedding
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
return embedding_matrix |
from .base_enum import BaseEnum
class SearchAutoMapper(BaseEnum):
BOTH = True
AI = False
NO_AI = None
|
from pathlib import Path
from typing import Iterable, List, Tuple
Sample_Input = """[({(<(())[]>[[{[]{<()<>>
[(()[<>])]({[<{<<[]>>(
{([(<{}[<>[]}>{[]{[(<()>
(((({<>}<{<{<>}{[]{[]{}
[[<[([]))<([[{}[[()]]]
[{[{({}]{}}([{[{{{}}([]
{<[[]]>}<{[{[{[]{()[[[]
[<(<(<(<{}))><([]([]()
<{([([[(<>()){}]>(<<{{
<{([{{}}[<[[[<>{}]]]>[]]
"""
def parse_input(input: str) -> List[str]:
    return input.strip().split("\n")
def find_errors(lines: Iterable[str]) -> int:
illegal_chars = {")": 3, "]": 57, "}": 1197, ">": 25137}
_, _, errors = parse_lines(lines)
return sum([illegal_chars[e] for e in errors])
def complete_incomplete(lines: Iterable[str]) -> int:
closing_chars = {"(": ")", "[": "]", "{": "}", "<": ">"}
score_chars = {")": 1, "]": 2, "}": 3, ">": 4}
_, incomplete, _ = parse_lines(lines)
scores = []
for line in incomplete:
score = 0
for symbol in line[::-1]:
score *= 5
score += score_chars[closing_chars[symbol]]
scores.append(score)
scores.sort()
return scores[len(scores) // 2]
def parse_lines(lines: Iterable[str]) -> Tuple[List[str], List[str], List[str]]:
errors = []
incomplete = []
complete = []
for line in lines:
status, value = checker(line)
if status == "complete":
complete.append(line)
elif status == "open":
incomplete.append(value)
else:
errors.append(value)
return complete, incomplete, errors
def checker(line: str) -> Tuple[str, str]:
open_chars = {")": "(", "]": "[", "}": "{", ">": "<"}
stack = []
for l in line:
if l in "([{<":
stack.append(l)
else:
if len(stack) == 0:
return ("error", l)
last_char = stack.pop()
if open_chars[l] != last_char:
return ("error", l)
if len(stack) == 0:
return ("complete", "")
else:
return ("open", stack)
if __name__ == "__main__":
input_data = (Path.cwd() / "2021" / "data" / f"{Path(__file__).stem}_input.txt").read_text()
lines = parse_input(input_data)
print(f"Error Score is: {find_errors(lines)}")
print(f"Incomplete Score is: {complete_incomplete(lines)}")
|
from torch import nn
import torch
import math
class EncoderLayer(nn.Module):
def __init__(self, d_model, d_ff, num_heads, dropout):
super().__init__()
self.d_model = d_model
self.d_ff = d_ff
self.num_heads = num_heads
self.dropout = dropout
self.layer_norm_1 = LayerNormalization(self.d_model)
self.multihead_attention = MultiheadAttention(self.d_model, self.num_heads, self.dropout)
self.drop_out_1 = nn.Dropout(self.dropout)
self.layer_norm_2 = LayerNormalization(self.d_model)
self.feed_forward = FeedFowardLayer(self.d_model, self.d_ff, self.dropout)
self.drop_out_2 = nn.Dropout(self.dropout)
def forward(self, x, e_mask):
x_1 = self.layer_norm_1(x) # (B, L, d_model)
x = x + self.drop_out_1(
self.multihead_attention(x_1, x_1, x_1, mask=e_mask)
) # (B, L, d_model)
x_2 = self.layer_norm_2(x) # (B, L, d_model)
x = x + self.drop_out_2(self.feed_forward(x_2)) # (B, L, d_model)
return x # (B, L, d_model)
class MultiheadAttention(nn.Module):
def __init__(self, d_model, num_heads, dropout):
super().__init__()
self.inf = 1e9
self.d_model = d_model
self.num_heads = num_heads
self.d_k = d_model // num_heads
# W^Q, W^K, W^V in the paper
self.w_q = nn.Linear(d_model, d_model)
self.w_k = nn.Linear(d_model, d_model)
self.w_v = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.attn_softmax = nn.Softmax(dim=-1)
# Final output linear transformation
self.w_0 = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
input_shape = q.shape
# Linear calculation + split into num_heads
q = self.w_q(q).view(input_shape[0], -1, self.num_heads, self.d_k) # (B, L, H, d_k)
k = self.w_k(k).view(input_shape[0], -1, self.num_heads, self.d_k) # (B, L, H, d_k)
v = self.w_v(v).view(input_shape[0], -1, self.num_heads, self.d_k) # (B, L, H, d_k)
# For convenience, convert all tensors in size (B, H, L, d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
# Conduct self-attention
attn_values = self.self_attention(q, k, v, mask=mask) # (B, H, L, d_k)
concat_output = attn_values.transpose(1, 2)\
.contiguous().view(input_shape[0], -1, self.d_model) # (B, L, d_model)
return self.w_0(concat_output)
def self_attention(self, q, k, v, mask=None):
# Calculate attention scores with scaled dot-product attention
attn_scores = torch.matmul(q, k.transpose(-2, -1)) # (B, H, L, L)
attn_scores = attn_scores / math.sqrt(self.d_k)
# If there is a mask, make masked spots -INF
if mask is not None:
mask = mask.unsqueeze(1) # (B, 1, L) => (B, 1, 1, L) or (B, L, L) => (B, 1, L, L)
attn_scores = attn_scores.masked_fill_(mask == 0, -1 * self.inf)
# Softmax and multiplying K to calculate attention value
attn_distribs = self.attn_softmax(attn_scores)
attn_distribs = self.dropout(attn_distribs)
attn_values = torch.matmul(attn_distribs, v) # (B, H, L, d_k)
return attn_values
class FeedFowardLayer(nn.Module):
def __init__(self, d_model, d_ff, dropout):
super().__init__()
self.d_model = d_model
self.d_ff = d_ff
self.dropout = dropout
self.linear_1 = nn.Linear(self.d_model, self.d_ff, bias=True)
self.relu = nn.ReLU()
self.linear_2 = nn.Linear(self.d_ff, self.d_model, bias=True)
self.dropout = nn.Dropout(self.dropout)
def forward(self, x):
x = self.relu(self.linear_1(x)) # (B, L, d_ff)
x = self.dropout(x)
x = self.linear_2(x) # (B, L, d_model)
return x
class LayerNormalization(nn.Module):
def __init__(self, d_model, eps=1e-6):
super().__init__()
self.d_model = d_model
self.eps = eps
self.layer = nn.LayerNorm([self.d_model], elementwise_affine=True, eps=self.eps)
def forward(self, x):
x = self.layer(x)
return x
class PositionalEncoder(nn.Module):
def __init__(self, max_len, p_dim, device):
super().__init__()
self.device = device
self.max_len = max_len
self.p_dim = p_dim
# Make initial positional encoding matrix with 0
pe_matrix= torch.zeros(self.max_len, self.p_dim) # (L, d_model)
# Calculating position encoding values
for pos in range(self.max_len):
for i in range(self.p_dim):
                if i % 2 == 0:
                    pe_matrix[pos, i] = math.sin(pos / (10000 ** (i / self.p_dim)))
                else:
                    pe_matrix[pos, i] = math.cos(pos / (10000 ** ((i - 1) / self.p_dim)))
pe_matrix = pe_matrix.unsqueeze(0) # (1, L, p_dim)
self.positional_encoding = pe_matrix.to(self.device).requires_grad_(False)
def forward(self, x, cal='add'):
assert cal == 'add' or cal == 'concat', "Please specify the calculation method, either 'add' or 'concat'."
if cal == 'add':
x = x * math.sqrt(self.p_dim) # (B, L, d_model)
x = x + self.positional_encoding # (B, L, d_model)
elif cal == 'concat':
x = torch.cat((x, self.positional_encoding.repeat(x.shape[0],1,1)), dim=-1) # (B, T, d_model+p_dim)
return x
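# A minimal smoke test for the modules above (hypothetical sizes, not part of the
# original training setup): run one EncoderLayer forward pass on random input.
if __name__ == "__main__":
    layer = EncoderLayer(d_model=16, d_ff=32, num_heads=4, dropout=0.1)
    x = torch.randn(2, 5, 16)                       # (B, L, d_model)
    e_mask = torch.ones(2, 1, 5, dtype=torch.bool)  # (B, 1, L): nothing is padded
    out = layer(x, e_mask)
    print(out.shape)                                # torch.Size([2, 5, 16])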
|
# Generated by Django 3.1 on 2020-08-13 15:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='main',
name='facebook',
field=models.TextField(default='-'),
),
migrations.AddField(
model_name='main',
name='twitter',
field=models.TextField(default='-'),
),
migrations.AddField(
model_name='main',
name='youtube',
field=models.TextField(default='-'),
),
]
|
"""Test json's Result.
"""
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
__all__ = ['Test']
from ..Constant import *
from ..Capture import Capturer
from typing import Dict,List,Tuple
import json
class TreeTypeError(TypeError): pass
class FileRouteError(TypeError): pass
class AttrsTooMuch(Exception): pass
class Test():
def __init__(self, t_tree: TREE_TYPE=None, json_file: str=None):
        if t_tree is not None and json_file is None:
            if isinstance(t_tree, Dict):
                self.tree = t_tree
            else: raise TreeTypeError('The tree structure under test has the wrong type.')
        elif t_tree is None and json_file is not None:
            if isinstance(json_file, str):
                with open(json_file, 'r', encoding='utf-8') as file:
                    self.tree = json.load(file)  # Read directly from the file.
            else: raise FileRouteError('Invalid json file path.')
        else: raise AttrsTooMuch('Too many arguments passed; only one of them is needed.')
def get_data(self, _id: str, initials: str=None,
standard_flag: str=STANDARD_FLAG):
"""获取当前节点下的DATA域。
注意:本函数不处理异常,因此会抛出异常。注意处理异常。
"""
return Capturer.get_current_node(self.tree,_id,
initials=initials,standard_flag=standard_flag).get(DATA)
def get_head(self, _id: ID, initials: str=None, standard_flag: str=STANDARD_FLAG) ->str:
"""获取当前节点下的标题。
注意:本方法会抛出异常,请自行处理。
"""
return Capturer.get_current_node(self.tree,_id,
initials=initials,standard_flag=standard_flag).get(HEAD)
def get_create_time(self) ->str:
"""获取json文件创建的时间。"""
return self.tree.get(TIME)
def get_file_name(self) ->str:
"""获取文件名"""
return self.tree.get(TITLE)
def get_node(self, _id: ID, initials: str=None,
standard_flag: str=STANDARD_FLAG) ->TREE_TYPE:
"""获取当前的节点
注意:本函数会抛异常。
"""
return Capturer.get_current_node(self.tree,_id,
initials=initials,standard_flag=standard_flag)
def get_brother_node_id(self, _id: ID, initials: str=None,
standard_flag: str=STANDARD_FLAG) ->List[ID]:
"""获取当前标题编号下的所有孩子节点的标题编号。
注意:本函数会抛异常。
"""
return [x for x in Capturer.get_current_node(self.tree,_id,
initials=initials,standard_flag=standard_flag).keys()
if x not in [TIME,TITLE,TEXT,HEAD,DATA,TABLE,PRIOR,TEMP]]
def get_head_and_data(self, _id: ID, initials: str=None,
standard_flag: str=STANDARD_FLAG) ->Tuple[str,str]:
"""同时获取标题和内容。
注意:本函数会抛出异常。
"""
now_node = Capturer.get_current_node(self.tree,_id,
initials=initials,standard_flag=standard_flag)
return now_node.get(HEAD), now_node.get(DATA)
def get_table(self, _id: ID, initials: str=None, standard_flag: str=STANDARD_FLAG):
"""获取当前节点下的表格。
注意:本函数会抛异常。
"""
return Capturer.get_current_node(self.tree,_id,
initials=initials,standard_flag=standard_flag).get(TABLE)
def get_temp(self, _id: ID, initials: str=None, standard_flag: str=STANDARD_FLAG):
"""获取当前节点下的TEMP域。
注意:本方法会抛异常。
"""
return Capturer.get_current_node(self.tree,_id,
initials=initials,standard_flag=standard_flag).get(TEMP) |
import json
from django.core.urlresolvers import reverse
from django.contrib.gis.geos import MultiPolygon, Polygon
from django.test import TestCase
from mock import Mock, patch
from geo.management.commands.load_geos_from import Command as LoadGeos
from geo.management.commands.set_tract_csa_cbsa import Command as SetTractCBSA
from geo.models import Geo
from geo.utils import check_bounds
from censusdata.models import Census2010Sex
class ViewTest(TestCase):
fixtures = ['many_tracts', 'test_counties']
@patch('geo.views.SearchQuerySet')
def test_search_name(self, SQS):
SQS = SQS.return_value.models.return_value.load_all.return_value
result = Mock()
result.object.geoid = '11111'
result.object.geo_type = 1
result.object.name = 'MSA 1'
result.object.centlat = 45
result.object.centlon = 52
result.object.year = 2013
SQS.filter.return_value.filter.return_value = [result]
resp = self.client.get(reverse('geo:search'), {'q': 'Chicago', 'year': '2013'})
self.assertTrue('Chicago' in str(SQS.filter.call_args))
self.assertTrue('content' in str(SQS.filter.call_args))
self.assertFalse('text_auto' in str(SQS.filter.call_args))
resp = json.loads(resp.content)
self.assertEqual(1, len(resp['geos']))
geo = resp['geos'][0]
self.assertEqual('11111', geo['geoid'])
self.assertEqual('MSA 1', geo['name'])
self.assertEqual(1, geo['geo_type'])
self.assertEqual(45, geo['centlat'])
self.assertEqual(52, geo['centlon'])
@patch('geo.views.SearchQuerySet')
def test_search_autocomplete(self, SQS):
SQS = SQS.return_value.models.return_value.load_all.return_value
SQS.filter.return_value.filter.return_value = [Mock()]
self.client.get(reverse('geo:search'), {'q': 'Chicago', 'auto': '1', 'year': '2013'})
self.assertTrue('Chicago' in str(SQS.filter.call_args))
self.assertFalse('content' in str(SQS.filter.call_args))
self.assertTrue('text_auto' in str(SQS.filter.call_args))
self.assertTrue('year' in str(SQS.filter.return_value.filter.call_args))
class UtilsTest(TestCase):
def test_check_bounds(self):
self.assertIsNone(check_bounds('100', '100', '100', ''))
self.assertIsNone(check_bounds('-100', '100', '200', 'asdf'))
expected_bounds = (float('10.0'), float('40.1234'), float('20.20'), float('-10.123456'))
actual_bounds = check_bounds('10.0', '-10.123456', '20.20', '40.1234')
self.assertEqual(expected_bounds, actual_bounds)
class SetTractCBSATest(TestCase):
def setUp(self):
generic_geo = {
'minlat': -1, 'maxlat': 1, 'minlon': -1, 'maxlon': 1, 'centlat': 0,
'centlon': 0, 'name': 'Generic Geo', 'geom': MultiPolygon(
Polygon(((0, 0), (0, 2), (-1, 2), (0, 0))),
Polygon(((-4, -2), (-6, -1), (-2, -2), (-4, -2))))
}
self.county1 = Geo.objects.create(
geoid='11222', geo_type=Geo.COUNTY_TYPE, state='11', county='222',
csa='987', year='2012', **generic_geo)
self.county2 = Geo.objects.create(
geoid='11223', geo_type=Geo.COUNTY_TYPE, state='11', county='223',
cbsa='88776', year='2012', **generic_geo)
self.metro = Geo.objects.create(
geoid='88776', geo_type=Geo.METRO_TYPE, cbsa='88776', year='2012',
**generic_geo)
self.tract1 = Geo.objects.create(
geoid='1122233333', geo_type=Geo.TRACT_TYPE, state='11', year='2012',
county='222', tract='33333', **generic_geo)
self.tract2 = Geo.objects.create(
geoid='1122333333', geo_type=Geo.TRACT_TYPE, state='11', year='2012',
county='223', tract='33333', **generic_geo)
def tearDown(self):
self.county1.delete()
self.county2.delete()
self.tract1.delete()
self.tract2.delete()
self.metro.delete()
def test_set_fields(self):
SetTractCBSA().handle()
tract1 = Geo.objects.filter(geoid='1122233333').get()
tract2 = Geo.objects.filter(geoid='1122333333').get()
self.assertEqual('987', tract1.csa)
self.assertEqual(None, tract1.cbsa)
self.assertEqual(None, tract2.csa)
self.assertEqual('88776', tract2.cbsa)
class LoadGeosFromTest(TestCase):
def test_census_tract(self):
year = "2013"
row = ('1122233333', 'Tract 33333', '11', '222', '33333', '-45',
'45', Polygon(((0, 0), (0, 2), (-1, 2), (0, 0))))
field_names = ('GEOID', 'NAME', 'STATEFP', 'COUNTYFP', 'TRACTCE',
'INTPTLAT', 'INTPTLON')
command = LoadGeos()
geo = command.process_row(year, row, field_names)
self.assertEqual('20131122233333', geo['geoid'])
self.assertEqual(Geo.TRACT_TYPE, geo['geo_type'])
self.assertEqual('Tract 33333', geo['name'])
self.assertEqual('11', geo['state'])
self.assertEqual('222', geo['county'])
self.assertEqual('33333', geo['tract'])
self.assertEqual(None, geo['csa'])
self.assertEqual(None, geo['cbsa'])
self.assertEqual((-1, 0), (geo['minlon'], geo['maxlon']))
self.assertEqual((0, 2), (geo['minlat'], geo['maxlat']))
self.assertEqual(-45, geo['centlat'])
self.assertEqual(45, geo['centlon'])
self.assertEqual("2013", geo['year'])
def test_county(self):
year = "2010"
poly1 = Polygon(((0, 0), (0, 2), (-1, 2), (0, 0)))
poly2 = Polygon(((-4, -2), (-6, -1), (-2, -2), (-4, -2)))
row = ('11222', 'Some County', '11', '222', '-45', '45',
MultiPolygon(poly1, poly2))
field_names = ('GEOID', 'NAME', 'STATEFP', 'COUNTYFP', 'INTPTLAT',
'INTPTLON')
command = LoadGeos()
geo = command.process_row(year, row, field_names)
self.assertEqual('201011222', geo['geoid'])
self.assertEqual(Geo.COUNTY_TYPE, geo['geo_type'])
self.assertEqual('Some County', geo['name'])
self.assertEqual('11', geo['state'])
self.assertEqual('222', geo['county'])
self.assertEqual(None, geo['tract'])
self.assertEqual(None, geo['csa'])
self.assertEqual(None, geo['cbsa'])
self.assertEqual((-6, 0), (geo['minlon'], geo['maxlon']))
self.assertEqual((-2, 2), (geo['minlat'], geo['maxlat']))
self.assertEqual(-45, geo['centlat'])
self.assertEqual(45, geo['centlon'])
self.assertEqual("2010", geo['year'])
def test_metro(self):
year = "2010"
row = ('12345', 'Big City', '090', '12345', 'M1', '-45', '45',
Polygon(((0, 0), (0, 2), (-1, 2), (0, 0))))
field_names = ('GEOID', 'NAME', 'CSAFP', 'CBSAFP', 'LSAD', 'INTPTLAT',
'INTPTLON')
command = LoadGeos()
geo = command.process_row(year, row, field_names)
self.assertEqual('201012345', geo['geoid'])
self.assertEqual(Geo.METRO_TYPE, geo['geo_type'])
self.assertEqual('Big City', geo['name'])
self.assertEqual(None, geo['state'])
self.assertEqual(None, geo['county'])
self.assertEqual(None, geo['tract'])
self.assertEqual('090', geo['csa'])
self.assertEqual('12345', geo['cbsa'])
self.assertEqual("2010", geo['year'])
def test_micro(self):
year = '1900'
row = ('12345', 'Small Town', '', '12345', 'M2', '-45', '45',
Polygon(((0, 0), (0, 2), (-1, 2), (0, 0))))
field_names = ('GEOID', 'NAME', 'CSAFP', 'CBSAFP', 'LSAD', 'INTPTLAT',
'INTPTLON')
command = LoadGeos()
geo = command.process_row(year, row, field_names)
self.assertEqual('190012345', geo['geoid'])
self.assertEqual(Geo.MICRO_TYPE, geo['geo_type'])
self.assertEqual('Small Town', geo['name'])
self.assertEqual(None, geo['state'])
self.assertEqual(None, geo['county'])
self.assertEqual(None, geo['tract'])
self.assertEqual(None, geo['csa'])
self.assertEqual('12345', geo['cbsa'])
self.assertEqual('1900', geo['year'])
def test_replacing(self):
command = LoadGeos()
old_geo = {
'geoid': '1111111111', 'geo_type': Geo.TRACT_TYPE,
'name': 'Geo in 1990', 'year': '1990', 'state': '11', 'county': '111',
'tract': '11111', 'minlat': -1, 'maxlat': 1, 'minlon': -1,
'maxlon': 1, 'centlat': 0, 'centlon': 0,
'geom': MultiPolygon(
Polygon(((0, 0), (0, 2), (-1, 2), (0, 0))),
Polygon(((-4, -2), (-6, -1), (-2, -2), (-4, -2))))
}
command.save_batch([old_geo])
# Geo save worked
self.assertEqual(1, Geo.objects.filter(geoid='1111111111').count())
census = Census2010Sex(total_pop=100, male=45, female=55)
census.geoid_id = '1111111111'
census.save()
# Census data worked
self.assertEqual(1, Census2010Sex.objects.all().count())
new_geo = old_geo.copy()
new_geo['name'] = 'Geo in 2000'
command.save_batch([new_geo])
# check that both models still exist
query = Geo.objects.filter(geoid='1111111111')
self.assertEqual(1, query.count())
self.assertEqual('Geo in 2000', query.get().name)
self.assertEqual(1, Census2010Sex.objects.all().count())
Geo.objects.all().delete()
Census2010Sex.objects.all().delete()
|
from django.db import models
from accounts.models import User
# Create your models here.
class ContentAnalysis(models.Model):
author = models.ForeignKey(User, on_delete = models.CASCADE)
text_to_check = models.TextField(max_length = 15000, blank = True, null = True)
keyword = models.CharField(max_length = 100, blank = True, null = True)
page_title = models.TextField(blank = True, null = True)
meta_description = models.TextField(max_length = 280, blank = True, null = True)
publish_date = models.DateTimeField(auto_now_add = True)
def __str__(self):
return self.keyword
class Meta:
verbose_name = 'Content analysis'
verbose_name_plural = 'Content analysis'
|
import ______ as np
from ______ import default_rng
rng = default_rng(______) #seed to obtain same value for the distribution
time = np.arange(_,_,__)
time_ens=np.reshape(np.repeat(time,1000),[__,___])
noise = rng.normal(___,___, size=___)
time_ens = ______
time_mean = ______
print(np.shape(time))
print(np.shape(time_ens))
print(np.shape(time_mean))
|
def all_strings_ascending(str):
for start in range(0, len(str)):
for end in range(start+1, len(str)+1):
print(str[start:end])
def is_palindrome(str):
start = 0
end = len(str)-1
while start < len(str)//2:
if str[start] != str[end]:
return False
end -= 1
start += 1
return True
def is_palindrome_rec(str):
if len(str) == 0 or len(str) == 1:
return True
if str[0] != str[-1]:
return False
else:
        return is_palindrome_rec(str[1:-1])
# len from len to 1
# start from 0 to len//2
# end from
def all_strings_descending(str):
for length in range(len(str), 0, -1):
        for start in range(0, len(str)):
end = start+length
if end > len(str):
break
if is_palindrome(str[start:end]):
print("string", str)
print("biggest palindrome:", str[start:end])
return
print("ascending")
all_strings_ascending("abaa")
print("descending")
all_strings_descending("abaa")
|
import logging
import os
import signal
import sys
import time
from TwitterAPI import TwitterAPI, TwitterRequestError
from sentry_sdk import capture_exception
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from lionbot.data import Guild
from lionbot.errors import DiscordError
from lionbot.utils import send_discord_request
logging.basicConfig(level=logging.INFO)
engine = create_engine(os.environ.get('DATABASE_URL'))
Session = sessionmaker(bind=engine)
session = Session()
def send_tweet_message(tweet):
for guild in session.query(Guild).all():
if guild.twitter_stream_id is None:
continue
if not guild.twitter_replies and 'in_reply_to_user_id' in tweet and tweet['in_reply_to_user_id'] != '213161945':
continue
stream = guild.twitter_stream
url = f"https://twitter.com/Northernlion/status/{tweet['id']}"
content = f"<@&{stream.role_id}>\n{url}"
json_body = {
"content": content,
"allowed_mentions": {
"parse": ["roles"]
}
}
channel_id = stream.channel_id
try:
send_discord_request('post', f"channels/{channel_id}/messages", json_body)
except DiscordError as e:
capture_exception(e)
api = TwitterAPI(
os.environ.get("TWITTER_API_KEY"),
os.environ.get("TWITTER_API_SECRET"),
auth_type='oAuth2',
api_version='2'
)
stream = None
def run_stream():
    global stream
    stream = api.request('tweets/search/stream', params={'tweet.fields': 'id,text,in_reply_to_user_id'})
logging.info("Twitter stream started")
for msg in stream:
if 'data' in msg:
logging.info(f"Received tweet: {msg}")
send_tweet_message(msg['data'])
def start_stream(wait=30):
    global stream
    logging.info("Twitter stream starting...")
try:
run_stream()
except TwitterRequestError as e:
if e.status_code == 429:
stream = None
logging.info(f'Waiting {wait} seconds to retry...')
time.sleep(wait)
start_stream(wait=wait+30)
def signal_handler(signal, frame):
logging.info("Shutting down twitter stream...")
if stream is not None:
stream.close()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
start_stream() |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from torch import nn
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class MetricNet(nn.Module):
def __init__(self, encoder: nn.Module, classifier: nn.Module = None):
super(MetricNet, self).__init__()
self.encoder = encoder
self.classifier = classifier
def encoder_state_dict(self):
return self.encoder.state_dict()
def load_encoder_state_dict(self, checkpoint):
self.encoder.load_state_dict(checkpoint)
def forward(self, x, y):
x = self.encoder(x)
logits = self.classifier(x, y) if self.classifier is not None else None
return x, logits
def all_params(self):
params = [self.encoder.parameters()]
if self.classifier is not None:
params.append(self.classifier.parameters())
return params
|
import argparse
import pathlib
import sys
project_dir = str(pathlib.Path(__file__).parent.parent.resolve())
sys.path.insert(0, project_dir)
from citizens.app import CitizensRestApi
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Citizens REST API')
parser.add_argument('--socket')
parser.add_argument('--host', default='0.0.0.0')
    parser.add_argument('--port', type=int, default=8080)
args = parser.parse_args()
if args.socket:
CitizensRestApi().run(unix_socket_path=args.socket)
else:
CitizensRestApi().run(host=args.host, port=args.port)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import gzip
import numpy as np
if sys.version_info[0] < 3:
    import cPickle
else:
    # On Python 3 the pickle module replaces cPickle.
    import pickle as cPickle
""" Class for mnist data to handle data loading and arranging """
class mnist:
def __init__(self, path='data/mnist.pkl.gz', threshold=0.1):
with gzip.open(path, 'rb') as f:
            if sys.version_info[0] < 3:
                train_set, val_set, test_set = cPickle.load(f)
            else:
                # mnist.pkl.gz is a Python 2 pickle; latin1 decoding is needed on Python 3.
                train_set, val_set, test_set = cPickle.load(f, encoding='latin1')
self.x_train = train_set[0]
self.y_train = self.encode_onehot(train_set[1])
        if len(val_set[0]) != 0:
self.x_val = val_set[0]
self.y_val = self.encode_onehot(val_set[1])
self.n_val = self.x_val.shape[0]
self.x_test, self.y_test = test_set[0], self.encode_onehot(test_set[1])
self.n_train, self.n_test = self.x_train.shape[0], self.x_test.shape[0]
self.drop_dimensions(threshold)
self.x_dim, self.num_classes = self.x_train.shape[1], self.y_train.shape[1]
def drop_dimensions(self, threshold=0.1):
stds = np.std(self.x_train, axis=0)
good_dims = np.where(stds > threshold)[0]
self.x_train = self.x_train[:, good_dims]
if hasattr(self, 'x_val'):
self.x_val = self.x_val[:, good_dims]
self.x_test = self.x_test[:, good_dims]
def encode_onehot(self, labels):
d = np.max(labels) + 1
return np.eye(d)[labels]
|
# !/usr/bin/python
# -*- coding:utf-8 -*-
# author "chen"
import os
import math
import csv
import numpy as np
import configparser as ConfigParser
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib.colors as colors
def read_config():
config = ConfigParser.RawConfigParser()
config.read('config.cfg', encoding = 'utf-8')
config_dict = {}
config_dict['DATA_File'] = config.get('section', 'DATA_File')
config_dict['k'] = config.getint('section', 'k')
config_dict['beta'] = config.getfloat('section', 'beta')
config_dict['ann'] = config.get('section', 'ann')
config_dict['metric'] = config.get('section', 'metric')
config_dict['n_clusters'] = config.getint('section', 'n_clusters')
return config_dict
# Read the data set: given the file name, read it and return the (normalized) data and the labels.
# Each row of the data set has the format: X, Y, Z, ..., label
def get_data(filename):
data = []
label = []
with open(filename, 'r') as file_obj:
csv_reader = csv.reader(file_obj)
for row in csv_reader:
point = []
for d in row[:-1]:
point.append(float(d))
data.append(point)
# if row[-1] == 0, int(row[-1]) will fail
label.append(int(float(row[-1])))
X = np.array(data)
min_max_scaler = preprocessing.MinMaxScaler()
X_minMax = min_max_scaler.fit_transform(X)
return X_minMax, np.array(label, np.int8)
# return np.array(data, np.float32), np.array(label, np.int8)
# Compute the (squared) Euclidean distance between two tuples a and b.
def dist(a, b):
d = len(a)
buffer = 0.0
for i in range(d):
tmp = a[i] - b[i]
buffer += tmp * tmp
return buffer
def maxminnorm(array):
'''
normalization function: aim to normalize the rho and delta
'''
x_max, x_min = np.max(array, 0), np.min(array, 0)
array = (array - x_min)/(x_max - x_min)
return array
def id_diagram(c_original_center, cc_mode_set, cc_set, rho_original, delta):
'''
    c_original_center : the selected K center points (picked centers)
    cc_mode_set : the first level cc mode set (data -- find_cc -- cc set);
                  at the second level, the highest-density point of each CC (the input points of the next level)
    cc_set : cc point set; at the second level, the set of all points contained in the CCs
'''
rho_original = maxminnorm(rho_original)
delta_original = [0. for i in range(len(rho_original))]
for i, d in zip(cc_mode_set, delta):
delta_original[i] = d
plt.figure(figsize=[6.40,5.60])
point_set = [i for i in range(len(rho_original))]
point_set = list(set(point_set).difference(cc_set))
cc_set = list(set(cc_set).difference(set(cc_mode_set)))
cc_mode_set = list(set(cc_mode_set).difference(set(c_original_center)))
mode_X = []
mode_Y = []
for m in cc_mode_set:
mode_X.append(rho_original[m])
mode_Y.append(delta_original[m])
plt.scatter(mode_X, mode_Y, marker='.', s=200, c='blue', label='centers of other DSs')
center_X = []
center_Y = []
for c in c_original_center:
center_X.append(rho_original[c])
center_Y.append(delta_original[c])
plt.scatter(center_X, center_Y, marker='*', s=200, label='centers of top-K DSs', c='red')
plt.xlabel('Density:1/$r_k(x_i)$',fontsize=15)
plt.ylabel('Geometric Weight:$w_i$',fontsize=15)
ax=plt.gca()
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1], ncol=2, loc='lower center', bbox_to_anchor=(0.5,1), fontsize=12, frameon=False)
plt.show()
def plot_quickdsc(data, c_original_center, cc_mode_set, cc_set):
plt.figure(figsize=[6.40,5.60])
point_set = [i for i in range(len(data))]
point_set = list(set(point_set).difference(cc_set))
cc_set = list(set(cc_set).difference(set(cc_mode_set)))
cc_mode_set = list(set(cc_mode_set).difference(set(c_original_center)))
point_X = []
point_Y = []
for p in point_set:
point_X.append(data[p][0])
point_Y.append(data[p][1])
plt.scatter(point_X, point_Y, marker='x', c='grey', s=50, label='non-DS samples')
cc_X = []
cc_Y = []
for c in cc_set:
cc_X.append(data[c][0])
cc_Y.append(data[c][1])
plt.scatter(cc_X,cc_Y, marker='.', c='black', s=100, label='non-center samples in DSs')
mode_X = []
mode_Y = []
for m in cc_mode_set:
mode_X.append(data[m][0])
mode_Y.append(data[m][1])
plt.scatter(mode_X, mode_Y, marker='.', s=160, c='blue', label='centers of other DSs')
center_X = []
center_Y = []
for c in c_original_center:
center_X.append(data[c][0])
center_Y.append(data[c][1])
plt.scatter(center_X, center_Y, marker='*', s=200, label='centers of top-K DSs', c='red')
    ax = plt.gca()
    handles, labels = ax.get_legend_handles_labels()
    # reverse the order
    ax.legend(handles[::-1], labels[::-1], ncol=2, loc='lower center', bbox_to_anchor=(0.5,1), fontsize=12, frameon=False)
plt.show()
def show_cluster(data, label_pred, center_id):
plt.figure(figsize=[6.40,5.60])
X = []
Y = []
for point in data:
X.append(point[0])
Y.append(point[1])
plt.scatter(x=X, y=Y, c=label_pred, s=8)
center_X = []
center_Y = []
for i in center_id:
center_X.append(data[i][0])
center_Y.append(data[i][1])
plt.scatter(x=center_X, y=center_Y, marker='*',c='red',s=150)
plt.show()
|
# Genie
from genie.ops.base import Base
class RoutePolicy(Base):
exclude = [] |
import math
n = int(input())
max_speed = 0
res = 0
pre = list(map(int, input().split()))
for i in range(1, n):
cur = list(map(int, input().split()))
dis = math.sqrt((cur[1] - pre[1]) ** 2 + (cur[2] - pre[2]) ** 2 + (cur[3] - pre[3]) ** 2)
t = cur[0] - pre[0]
speed = dis / t
if max_speed < speed:
max_speed = speed
res = i
pre = cur
print(res)
"""
3
2 -9 2 3
4 9 9 8
9 2 3 4
"""
|
# == Copyright: 2017, CCX Technologies
from .__version__ import __version__
from adbus.service import Service
import adbus.server
import adbus.client
from . import exceptions
|
import pandas as pd
import streamlit as st
import numpy
st.set_page_config(page_title="NFL Projections 2021",layout="wide")
st.header("Week 17 Projections: Updated 1/2/2022.")
st.text("Final Projections are on Sunday morning")
st.text("Spread Win/Loss: (58-50).537")
st.text("Over/Under Win Loss: (49-64).434")
df = pd.read_excel("UpdatedResultsNFL.xlsx")
st.dataframe(df) |
# -*- coding: utf-8 -*-
from flask import current_app
from flask_login import UserMixin as LoginUserMixin
class UserMixin(LoginUserMixin):
""" This class adds methods to the User model class required by Flask-Login and Flask-User."""
def is_active(self):
if hasattr(self, 'active'):
return self.active
else:
return self.is_enabled
def set_active(self, active):
if hasattr(self, 'active'):
self.active = active
else:
self.is_enabled = active
def has_role(self, *specified_role_names):
""" Return True if the user has one of the specified roles. Return False otherwise.
            has_role() accepts one or more role name parameters
has_role(role_name1, role_name2, role_name3).
For example:
                has_role('a', 'b')
Translates to:
User has role 'a' OR role 'b'
"""
# Allow developers to attach the Roles to the User or the UserProfile object
if hasattr(self, 'roles'):
roles = self.roles
else:
if hasattr(self, 'user_profile') and hasattr(self.user_profile, 'roles'):
roles = self.user_profile.roles
else:
roles = None
if not roles:
return False
# Translates a list of role objects to a list of role_names
user_role_names = [role.name for role in roles]
# Return True if one of the role_names matches
for role_name in specified_role_names:
if role_name in user_role_names:
return True
# Return False if none of the role_names matches
return False
def has_roles(self, *requirements):
""" Return True if the user has all of the specified roles. Return False otherwise.
has_roles() accepts a list of requirements:
                has_roles(requirement1, requirement2, requirement3).
Each requirement is either a role_name, or a tuple_of_role_names.
role_name example: 'manager'
tuple_of_role_names: ('funny', 'witty', 'hilarious')
A role_name-requirement is accepted when the user has this role.
A tuple_of_role_names-requirement is accepted when the user has ONE of these roles.
has_roles() returns true if ALL of the requirements have been accepted.
For example:
                has_roles('a', ('b', 'c'), 'd')
Translates to:
User has role 'a' AND (role 'b' OR role 'c') AND role 'd'"""
# Allow developers to attach the Roles to the User or the UserProfile object
if hasattr(self, 'roles'):
roles = self.roles
else:
if hasattr(self, 'user_profile') and hasattr(self.user_profile, 'roles'):
roles = self.user_profile.roles
else:
roles = None
if not roles:
return False
# Translates a list of role objects to a list of role_names
user_role_names = [role.name for role in roles]
        # has_roles() accepts a list of requirements
for requirement in requirements:
if isinstance(requirement, (list, tuple)):
# this is a tuple_of_role_names requirement
tuple_of_role_names = requirement
authorized = False
for role_name in tuple_of_role_names:
if role_name in user_role_names:
# tuple_of_role_names requirement was met: break out of loop
authorized = True
break
if not authorized:
return False # tuple_of_role_names requirement failed: return False
else:
# this is a role_name requirement
role_name = requirement
# the user must have this role
if role_name not in user_role_names:
return False # role_name requirement failed: return False
# All requirements have been met: return True
return True
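    # Usage sketch (hypothetical role names, not from this codebase):
    #   user.has_role('admin', 'editor')               -> True if the user has 'admin' OR 'editor'
    #   user.has_roles('admin', ('editor', 'writer'))  -> True if the user has 'admin' AND ('editor' OR 'writer')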
# Flask-Login is capable of remembering the current user ID in the browser's session.
# This function enables the user ID to be encrypted as a token.
# See https://flask-login.readthedocs.org/en/latest/#remember-me
def get_auth_token(self):
token_manager = current_app.user_manager.token_manager
user_id = int(self.get_id())
token = token_manager.encrypt_id(user_id)
# print('get_auth_token: user_id=', user_id, 'token=', token)
return token
def has_confirmed_email(self):
db_adapter = current_app.user_manager.db_adapter
# Handle multiple emails per user: Find at least one confirmed email
if db_adapter.UserEmailClass:
has_confirmed_email = False
user_emails = db_adapter.find_all_objects(db_adapter.UserEmailClass, user_id=self.id)
for user_email in user_emails:
if user_email.confirmed_at:
has_confirmed_email = True
break
# Handle single email per user
else:
has_confirmed_email = True if self.confirmed_at else False
return has_confirmed_email
|
from django.apps import AppConfig
class DesignsAuthConfig(AppConfig):
name = 'designs_auth'
|
import datetime
from gremlin_python.process.anonymous_traversal import traversal
from gremlin_python.process.graph_traversal import __, both, bothE, out, path
from gremlin_python.process.traversal import T
from cartographer.gizmo import Gizmo
from cartographer.gizmo.models import Vertex
from cartographer.gizmo.models.resource import Resource
from cartographer.gizmo.models.exceptions import VertexDoesNotExistException, MultipleVerticesFoundException
LABEL = 'state'
class State(Vertex):
"""Represents a State, including State Revisions in the graph database.
"""
label = LABEL
class vertices(Vertex.vertices):
label = LABEL
@classmethod
def create(cls, state_id: str, resource_count: int, serial: int, created_at: str, terraform_version: str):
"""Create a new State Vertex
created_at : the timestamp from Terraform Cloud for when the State was created. *Not* when it was created in Terradactyl.
"""
last_updated = str(datetime.datetime.utcnow().timestamp())
v = Gizmo().g.addV(State.label) \
.property('state_id', state_id) \
.property('resource_count', resource_count) \
.property('serial', serial) \
.property('terraform_version', terraform_version) \
.property('created_at', created_at).next()
return State(
_id=v.id,
state_id=state_id,
resource_count=resource_count,
serial=serial,
terraform_version=terraform_version,
created_at=created_at)
@classmethod
def update_or_create(cls, state_id: str, resource_count: int, serial: int, terraform_version: str, created_at: str):
try:
s = State.vertices.get(state_id=state_id)
s.resource_count = resource_count
s.save()
except VertexDoesNotExistException:
s = State.vertices.create(
state_id=state_id,
resource_count=resource_count,
serial=serial,
terraform_version=terraform_version,
created_at=created_at
)
return s
@classmethod
def get(cls, **kwargs):
"""Fetch a vertex where the given kwargs are has() filters that are dynamically concatenated to build
out the Gremlin query. E.g. where kwargs equals : {'name', 'bar', 'age', '12'} the query becomes
Gizmo().g.V().has('name', 'bar').has('age', '12')
Returns a new State.
"""
base_query = Gizmo().g.V().hasLabel(State.label)
for k, v in kwargs.items():
base_query = base_query.has(k, v)
if not State.vertices.exists(**kwargs):
raise VertexDoesNotExistException
else:
element_map = base_query.elementMap().next()
            # TODO: raise MultipleVerticesFoundException when more than one vertex is returned
return State(
_id=element_map[T.id],
state_id=element_map['state_id'],
created_at=element_map['created_at'],
terraform_version=element_map['terraform_version'],
serial=element_map['serial'],
resource_count=element_map['resource_count']
)
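        # Usage sketch (hypothetical values): the kwargs become chained has() filters, e.g.
        #   State.vertices.get(state_id='sv-123', serial=4)
        # builds Gizmo().g.V().hasLabel(State.label).has('state_id', 'sv-123').has('serial', 4)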
def __init__(self, _id: int, state_id: str, created_at: str, serial: int, resource_count: int, terraform_version: str):
self._id = _id
self.state_id = state_id
self.created_at = created_at
self.terraform_version = terraform_version
self.serial = serial
self.resource_count = resource_count
@property
def v(self):
return Gizmo().g.V().hasLabel(State.label).has('state_id', self.state_id).next()
@property
def created_at_dt(self):
return datetime.datetime.fromtimestamp(int(self.created_at))
def succeeded(self, target):
"""Creates an edge from the current state to the previous one.
TODO : Handle if current?
"""
Gizmo().g.V(self.v).has('state_id', self.state_id).as_('v') \
.V(target.v).has('state_id', target.state_id).as_('t') \
.coalesce(
__.inE('succeeded').where(__.outV().as_('v')),
__.addE('succeeded').from_('v')
).next()
def contains(self, target_resource):
"""Creates a edge from the current state to the target resource.
"""
Gizmo().g.V(self.v).has('state_id', self.state_id).as_('v') \
.V(target_resource.v).has('name', target_resource.name).has('resource_type', target_resource.resource_type).has('state_id', target_resource.state_id).as_('t') \
.coalesce(
__.inE('contains').where(__.outV().as_('v')),
__.addE('contains').from_('v')
).next()
def save(self):
self.last_updated = str(datetime.datetime.utcnow().timestamp())
Gizmo().g.V(self.v).has('state_id', self.state_id) \
.property('terraform_version', self.terraform_version) \
.property('serial', self.serial) \
.property('resource_count', self.resource_count) \
.property('last_updated', self.last_updated).next()
|
if __name__ == '__main__':
import os
import sys
import multiprocessing as mp
mp.freeze_support()
os.environ['QT_API'] = 'pyside2'
from qtpy import QtWidgets
from granite.app import GraniteApp
app = QtWidgets.QApplication(sys.argv)
app.setStyle('fusion')
ex = GraniteApp()
ex.show()
app.exec_() |
from flask_babel import lazy_gettext as _l
class ElsterProcessNotSuccessful(Exception):
"""Exception raised in case of an unsuccessful process in the ERiC binaries
"""
def __init__(self, message=None):
self.message = message
super().__init__()
def __str__(self):
return self.message
class ElsterGlobalError(ElsterProcessNotSuccessful):
"""Exception raised in case of an unsuccessful process in the ERiC binaries due to any of the global error codes.
"""
pass
class ElsterGlobalValidationError(ElsterGlobalError):
"""Exception raised in case of any global validation error detected by ERiC binaries
"""
    # Override the initialiser to add special properties. The ERiC response needs to be written to a file at a higher level.
def __init__(self, message=None, eric_response=None, validation_problems=None):
self.eric_response = eric_response
self.validation_problems = validation_problems
super().__init__(message)
class ElsterGlobalInitialisationError(ElsterGlobalError):
"""Exception raised in case of any error during initialisation
"""
pass
class ElsterTransferError(ElsterProcessNotSuccessful):
"""Exception raised in case of an unsuccessful process in the ERiC binaries due to an error with the transfer
"""
def __init__(self, message=None, eric_response=None, server_response=None):
self.eric_response = eric_response
self.server_response = server_response
if message is None:
message = ''
super().__init__(message)
class ElsterCryptError(ElsterProcessNotSuccessful):
"""Exception raised in case of an unsuccessful process in the ERiC binaries due to an error with the crypting
"""
pass
class ElsterIOError(ElsterProcessNotSuccessful):
"""Exception raised in case of an unsuccessful process in the ERiC binaries due to an error with IO processes
"""
pass
class ElsterPrintError(ElsterProcessNotSuccessful):
"""Exception raised in case of an unsuccessful process in the ERiC binaries due to an error with the print process
"""
pass
class ElsterNullReturnedError(ElsterGlobalError):
"""Exception raised in case None was returned by the ERiC binaries. This indicates that a null pointer was returned.
"""
pass
class ElsterAlreadyRequestedError(ElsterTransferError):
"""Exception raised in case an unlock_code for one idnr is requested multiple times.
"""
pass
class ElsterRequestIdUnkownError(ElsterTransferError):
"""Exception raised in case for an IdNr no unlock code request can be found and therefore the unlock_code
activation was unsuccessful.
"""
pass
class ElsterRequestAlreadyRevoked(ElsterTransferError):
"""Exception raised in case for an request with a specific request_code already has been revoked.
"""
pass
class ElsterInvalidBufaNumberError(ElsterProcessNotSuccessful):
"""Exception raised in case Erica found the combination of tax office and tax number (the BuFa number)
to be invalid
"""
def __init__(self):
self.message = _l('form.lotse.input_invalid.InvalidTaxNumber')
class ElsterResponseUnexpectedStructure(ElsterProcessNotSuccessful):
"""Exception raised in case an IdNr no unlock code request can be found and therefore the unlock_code
activation was unsuccessful.
"""
pass
class ElsterUnknownError(ElsterProcessNotSuccessful):
"""Exception raised in case of an unsuccessful process in the ERiC binaries.
The error code of the binary does not map to any of the other errors.
"""
pass
class GeneralEricaError(Exception):
"""Exception raised when an error occurred in Erica that is not an
expected ElsterProcessNotSuccessfulError"""
def __init__(self, message=None):
self.message = message
super().__init__()
def __str__(self):
return str(self.message)
class EricaIsMissingFieldError(GeneralEricaError):
"""Exception raised when an error occurred in Erica because a required field was not set"""
def __init__(self):
self.message = _l('form.lotse.input_invalid.MissingFieldsInputValidationError')
|
import glob, os, pexpect, pytest, sys, time
from forge.tests.common import mktree, defuzz, match
DIR = os.path.dirname(__file__)
SPECS = [os.path.relpath(n, DIR) for n in glob.glob(os.path.join(DIR, "*/*.spec"))] + \
[os.path.relpath(n, DIR) for n in glob.glob(os.path.join(DIR, "*.spec"))]
TEST_ID = ("test_id_%s" % time.time()).replace(".", "_")
@pytest.mark.parametrize("spec", SPECS)
def test(spec):
print
test_spec = os.path.join(DIR, spec)
test_dir = os.path.dirname(test_spec)
if not os.path.samefile(DIR, test_dir):
tree = {
"forge.yaml": """
# Global forge configuration
# Normally you would not want to check this into git, but this is here
# for testing purposes.
docker-repo: registry.hub.docker.com/forgeorg
user: forgetest
password: >
Zm9yZ2V0ZXN0
"""
}
for path, dirs, files in os.walk(test_dir):
for name in files:
key = os.path.join(os.path.relpath(path, test_dir), name)
if key.startswith("./"):
key = key[2:]
with open(os.path.join(path, name), "r") as fd:
tree[key] = fd.read()
else:
tree = {}
root = mktree(tree, TEST_ID=TEST_ID)
print "TEST_ID: %s" % TEST_ID
print "TEST_BASE: %s" % root
with open(test_spec) as fd:
ops = fd.read()
runner = Runner(root, ops)
runner.run()
MULTILINE = "MULTILINE"
DEFAULT = "DEFAULT"
class Runner(object):
multiline = ('MATCH', 'FILE')
def __init__(self, base, spec):
self.base = base
self.cwd = base
self.timeout = 30
self.spec = spec
self.child = None
def run(self):
mode = DEFAULT
for line in self.spec.splitlines():
if mode == DEFAULT:
if not line.strip(): continue
for stmt in line.split(";"):
if mode == MULTILINE:
raise Exception("multiline op must be last in line")
parts = stmt.split(None, 1)
op = parts.pop(0)
arg = parts.pop(0) if parts else None
if op in self.multiline:
mode = MULTILINE
body = ""
continue
else:
self.dispatch(op, arg)
elif mode == MULTILINE:
if line.rstrip() == "END":
mode = DEFAULT
self.dispatch(op, arg, body)
else:
body += line + "\n"
if mode == MULTILINE:
raise Exception("unterminated multiline op")
self.wait()
def dispatch(self, op, arg, body=None):
attr = getattr(self, "do_%s" % op, None)
if attr is None:
assert False, "unrecognized op: %s" % op
elif op in self.multiline:
attr(arg, body)
else:
attr(arg)
def wait(self):
if self.child is not None:
self.child.expect(pexpect.EOF, timeout=self.timeout)
assert self.child.wait() == 0
def do_RUN(self, arg):
self.wait()
arg = arg.replace("TEST_ID", TEST_ID).replace("TEST_BASE", self.base)
print "RUN", arg
self.child = pexpect.spawn(arg, cwd=self.cwd)
self.child.logfile = sys.stdout
def do_CWD(self, arg):
self.cwd = os.path.join(self.base, arg)
def do_TIMEOUT(self, arg):
self.timeout = float(arg)
def do_OUT(self, arg):
self.child.expect_exact(arg.strip(), timeout=self.timeout)
def do_NOT(self, arg):
self.child.expect(pexpect.EOF, timeout=self.timeout)
assert arg not in self.child.before
def do_TYPE(self, arg):
if arg.strip().lower() == "<enter>":
self.child.sendline()
elif arg.strip().lower() == "<esc>":
self.child.send("\x1B")
else:
self.child.sendline(arg)
def do_EOF(self, arg):
self.child.sendeof()
def do_ERR(self, arg):
self.child.expect(pexpect.EOF, timeout=self.timeout)
assert self.child.wait() != 0
self.child = None
def do_MATCH(self, _, pattern):
pattern = unicode(pattern).strip()
self.child.expect(pexpect.EOF, timeout=self.timeout)
output = self.child.before.strip()
defuzzed = defuzz(output.replace(TEST_ID, "TEST_ID").replace(self.base, "TEST_BASE"))
if not match(defuzzed, pattern.strip()):
print "OUTPUT:"
print output
print "DEFUZZED OUTPUT:"
print defuzzed
print "PATTERN:"
print pattern
assert False
def do_FILE(self, name, body):
self.wait()
path = os.path.join(self.cwd, name)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
with open(path, "write") as fd:
fd.write(body.replace("TEST_ID", TEST_ID))
|
# Copyright 2021 The KServe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import os
import numpy as np
from paddle import inference
import kserve
class PaddleModel(kserve.KFModel):
def __init__(self, name: str, model_dir: str):
super().__init__(name)
self.name = name
self.model_dir = model_dir
self.ready = False
self.predictor = None
self.input_tensor = None
self.output_tensor = None
def load(self) -> bool:
def get_model_files(ext: str) -> str:
file_list = []
for filename in os.listdir(model_path):
if filename.endswith(ext):
file_list.append(filename)
if len(file_list) == 0:
raise Exception("Missing {} model file".format(ext))
if len(file_list) > 1:
raise Exception("More than one {} model file".format(ext))
return os.path.join(model_path, file_list[0])
model_path = kserve.Storage.download(self.model_dir)
config = inference.Config(get_model_files('.pdmodel'), get_model_files('.pdiparams'))
# TODO: add GPU support
config.disable_gpu()
self.predictor = inference.create_predictor(config)
# TODO: add support for multiple input_names/output_names
input_names = self.predictor.get_input_names()
self.input_tensor = self.predictor.get_input_handle(input_names[0])
output_names = self.predictor.get_output_names()
self.output_tensor = self.predictor.get_output_handle(output_names[0])
self.ready = True
return self.ready
def predict(self, request: Dict) -> Dict:
instances = request["instances"]
try:
inputs = np.array(instances, dtype='float32')
except Exception as e:
raise Exception("Failed to initialize NumPy array from inputs:%s, %s"
% (e, instances)) from e
try:
self.input_tensor.copy_from_cpu(inputs)
self.predictor.run()
return {"predictions": self.output_tensor.copy_to_cpu().tolist()}
except Exception as e:
raise Exception("Failed to predict %s" % e) from e
|
from sklearn import svm
posDic = {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
'e': 5,
'g': 6,
'h': 7,
'i': 8,
'j': 9,
'k': 10,
'm': 11,
'n': 12,
'nd': 13,
'nh': 14,
'ni': 15,
'nl': 16,
'ns': 17,
'nt': 18,
'nz': 19,
'o': 20,
'p': 21,
'q': 22,
'r': 23,
'u': 24,
'v': 25,
'wp': 26,
'ws': 27,
'x': 28,
}
best_svc = None
def findBestFit(X, y):
global best_svc
param_grid = {'kernel': ['linear', 'rbf',
'poly', 'sigmoid'], 'C': [1e-1, 1, 10]}
score = 0
kernel = ''
C = 0
for k in param_grid['kernel']:
for c in param_grid['C']:
svc = svm.SVC(kernel=k, C=c)
svc.fit(X, y)
s = svc.score(X, y)
if s > score:
score = s
kernel = k
C = c
print(score, kernel, C)
best_svc = svm.SVC(kernel=kernel, C=C)
best_svc.fit(X, y)
# --------------------------------------------------------------------
# 8 slots; 0 means None
['池塘', '上', '的', '小', '桥'], ['n', 'nd', 'u', 'a', 'n'],
[12, 13, 24, 1, 12, 0, 0, 0]
{'标签': 1}
['房间', '里', '的', '床'], ['n', 'nd', 'u', 'n']
[12, 13, 24, 12, 0, 0, 0, 0]
{'标签': 2}
# --------------------------------------------------------------------
# --------------------------------------------------------------------
{
1: [(0, 1, '有', 3, 4), (3, 4, '在', 0, 1)],
2: [(0, 1, '有', 3), (3, '在', 0, 1)]
}
# --------------------------------------------------------------------
# X = [[12, 13, 24, 1, 12, 0, 0, 0], [12, 13, 24, 12, 0, 0, 0, 0]]
# y = [1, 2]
# findBestFit(X, y)
# print(best_svc.predict(X))
# --------------------------------------------------------------------
modes = [
['n', 'nd', 'u', 'a', 'n'],
['n', 'nd', 'u', 'n'],
]
def segment(asub, alist, wordlist):
asubStr = '&'.join(asub)
alistStr = '&'.join(alist)
count = alistStr.count(asubStr)
indices = []
res = []
startIndex = 0
for i in range(count):
index = alistStr.find(asubStr, startIndex)
listIndex = len(alistStr[:index].split('&')) - 1
indices.append(listIndex)
        startIndex = index + len(asubStr)
for ii in indices:
res.append(wordlist[ii: ii + len(asub)])
print(res)
asub = ['n', 'nd', 'u', 'a', 'n']
alist = ['p', 'r', 'v', 'n', 'nd', 'u', 'a', 'n', 'wp', 'd', 'v', 'i', 'u', 'v', 'u', 'r', 'a', 'n', 'wp', 'r', 'v', 'u', 'd', 'a', 'wp', 'd', 'a', 'wp']
wordlist = ['每当', '我', '走过', '池塘', '上', '的', '小', '桥', ',', '都', '会', '不由自主', '地', '看', '着', '那些', '小', '鱼', ',', '它们', '玩', '得', '多么', '开心', '、', '多么', '快乐', '。']
segment(asub, alist, wordlist)
|
"""
Utilities to load IMU Data
"""
import numpy as np
__all__ = ["IMUData"]
class IMUData:
def __init__(self, **kwargs):
for kw, v in kwargs.items():
setattr(self, kw, v)
@staticmethod
def open(path: str) -> "IMUData":
fields = [
"lat",
"lon",
"alt",
"roll",
"pitch",
"yaw",
"vn",
"ve",
"vf",
"vl",
"vu",
"ax",
"ay",
"ay",
"af",
"al",
"au",
"wx",
"wy",
"wz",
"wf",
"wl",
"wu",
"pos_accuracy",
"vel_accuracy",
"navstat",
"numsats",
"posmode",
"velmode",
"orimode",
]
with open(path, "rt") as f:
vector = f.readline()
vector = np.array(list(map(float, vector.strip().split(" "))))
return IMUData(**dict(zip(fields, vector)))
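# Usage sketch (hypothetical KITTI-style OXTS file path):
#   imu = IMUData.open("oxts/data/0000000000.txt")
#   print(imu.lat, imu.lon, imu.yaw)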
|
import unittest
from eosfactory.eosf import *
verbosity([Verbosity.INFO, Verbosity.OUT, Verbosity.TRACE, Verbosity.DEBUG])
class Test(unittest.TestCase):
def run(self, result=None):
super().run(result)
@classmethod
def setUpClass(cls):
SCENARIO('''
Create a contract from template, then build and deploy it.
''')
reset()
create_wallet()
create_master_account("master")
COMMENT('''
Create test accounts:
''')
create_account("app", master)
def setUp(self):
pass
def test_01(self):
COMMENT('''
Create, build and deploy the contracts:
''')
create_account("right", master)
right_contract = Contract(right, "/root/ore-protocol/contracts/ore.rights_registry")
right_contract.build()
right_contract.deploy()
def test_02(self):
COMMENT('''
Register rights:
''')
right.push_action(
"upsertright",
{
"owner": app,
"right_name": "apimarket.manager.licenseApi",
"urls": [{
"base_right": "",
"url": " ore://manager.apim/action/licenseapi",
"method": "post",
"matches_params": [{
"name": "sla",
"value": "default"
}],
"token_life_span": 100,
"is_default": 1
}],
"issuer_whitelist": ["app.apim"]
},
permission=(app, Permission.ACTIVE))
with self.assertRaises(Error): #You are not the issuer of the existing contract. Update canceled.
right.push_action(
"upsertright",
{
"owner": app,
"right_name": "apimarket.manager.licenseApi",
"urls": [{
"base_right": "",
"url": " ore://manager.apim/action/licenseapi",
"method": "post",
"matches_params": [{
"name": "sla",
"value": "default"
}],
"token_life_span": 100,
"is_default": 1
}],
"issuer_whitelist": ["app.apim"]
},
permission=(app, Permission.ACTIVE))
right.push_action(
"upsertright",
{
"owner": app,
"right_name": "apimarket.manager.licenseApi2",
"urls": [{
"base_right": "",
"url": " ore://manager.apim/action/licenseapi",
"method": "post",
"matches_params": [{
"name": "sla",
"value": "default"
}],
"token_life_span": 100,
"is_default": 1
}],
"issuer_whitelist": ["app.apim"]
},
permission=(app, Permission.ACTIVE))
def tearDown(self):
pass
@classmethod
def tearDownClass(cls):
stop()
if __name__ == "__main__":
unittest.main()
|
import sys
from setuptools import find_packages
from setuptools import setup
assert sys.version_info[0] == 3 and sys.version_info[1] >= 6, "Global Notification System (Hive Blockchain) requires Python 3.6 or newer"
setup(
name='hive_gns',
version='0.1.0',
description='Global notification system for dApps on the Hive Blockchain.',
long_description=open('README.md', 'r', encoding='UTF-8').read(),
packages=find_packages(exclude=['scripts']),
install_requires=[
'requests',
'psycopg2',
'fastapi',
'uvicorn'
],
entry_points = {
'console_scripts': [
'hive_gns = hive_gns.run_hive_gns:run'
]
}
)
|
# -*- coding: utf-8 -*-
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER,MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.lib.packet import packet,ethernet,ether_types
from ryu.ofproto import ofproto_v1_3
from ryu.topology import event
from ryu.topology.api import get_switch,get_link,get_host
import zookeeper_server
import networkx as nx
import matplotlib.pyplot as plt
class Mutlti_Area_Contr(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self,*args,**kwargs):
super(Mutlti_Area_Contr,self).__init__(*args,**kwargs)
# self.mac_to_port = {}
self.topology_api_app = self
self.network = nx.Graph()
self.paths = {}
#switch features
@set_ev_cls(ofp_event.EventOFPSwitchFeatures,CONFIG_DISPATCHER)
def switch_features_hanlder(self,ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath,0,match,actions)
#add flow-table
def add_flow(self,datapath, priority, match, actions, buffer_id = None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority,match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match,instructions=inst)
datapath.send_msg(mod)
#packet_in packet
@set_ev_cls(ofp_event.EventOFPPacketIn,MAIN_DISPATCHER)
def packet_in_handler(self,ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
# dpid = datapath.id
pkt = packet.Packet(msg.data)
eth_pkt = pkt.get_protocol(ethernet.ethernet)
src = eth_pkt.src
dst = eth_pkt.dst
out_port = self.get_out_port(datapath,src,dst,in_port)
actions = [parser.OFPActionOutput(out_port)]
if out_port != ofproto.OFPP_FLOOD:
match = parser.OFPMatch(in_port = in_port,eth_dst = dst)
self.add_flow(datapath,1,match,actions)
out = parser.OFPPacketOut(datapath=datapath,buffer_id=msg.buffer_id,
in_port=in_port,actions=actions,data=msg.data)
datapath.send_msg(out)
# msg = ev.msg
# datapath = msg.datapath
# ofproto = datapath.ofproto
# parser = datapath.ofproto_parser
# in_port = msg.match['in_port']
#
# dpid = datapath.id
# #dpid:{mac:port}
# self.mac_to_port.setdefault(dpid,{})
#
# pkt = packet.Packet(msg.data)
# eth = pkt.get_protocols(ethernet.ethernet)[0]
# #lldp packet
# lldp_packet = ether_types.ETH_TYPE_LLDP
#
# dst = eth.dst #destination mac
# src = eth.src #srouce mac
#
# self.mac_to_port[dpid][src] = in_port
#
# if dst in self.mac_to_port[dpid]:
# out_port = self.mac_to_port[dpid][dst]
# else:
# out_port = ofproto.OFPP_FLOOD
#
# actions = [parser.OFPActionOutput(out_port)]
#
# if out_port != ofproto.OFPP_FLOOD:
# match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)
#
# if msg.buffer_id != ofproto.OFPCML_NO_BUFFER:
# self.add_flow(datapath,1,match,actions,msg.buffer_id)
# return
# else:
# self.add_flow(datapath,1,match,actions)
#
# data = None
# if msg.buffer_id == ofproto.OFPCML_NO_BUFFER:
# data = msg.data
#
# out = parser.OFPPacketOut(datapath=datapath,buffer_id=msg.buffer_id,
# in_port=in_port,actions=actions,data=data)
# datapath.send_msg(out)
#get all switch info and all links
# @set_ev_cls([event.EventSwitchEnter,event.EventSwitchLeave,
# event.EventPortAdd,event.EventPortDelete,
# event.EventPortModify,event.EventLinkAdd,event.EventLinkDelete],
# [CONFIG_DISPATCHER,MAIN_DISPATCHER])
@set_ev_cls([event.EventSwitchEnter],[CONFIG_DISPATCHER,MAIN_DISPATCHER])
def get_topology(self,ev):
switch_list = get_switch(self.topology_api_app,None)
switches = [switch.dp.id for switch in switch_list]
self.network.add_nodes_from(switches)
link_list = get_link(self.topology_api_app,None)
links = [(link.src.dpid,link.dst.dpid,{"attr_dict":{'port':link.src.port_no}}) for link in link_list]
self.network.add_edges_from(links)
#dst to src links
links = [(link.dst.dpid,link.src.dpid,{'attr_dict':{'port':link.dst.port_no}})
for link in link_list]
self.network.add_edges_from(links)
# print(dir(ev.link.dst.port_no))
# #get switch(dpid)
# switch_list = get_switch(self.topology_api_app,None)
# switches = [switch.dp.id for switch in switch_list] #switches
# # switch = ev.switch.dp.id #switch dpid
# # print(ev.switch.dp.id)
# self.network.add_nodes_from(switches)
#
# #get src links
# link_list = get_link(self.topology_api_app,None)
# # srclinks = [(link.src.dpid,link.dst.dpid,{'attr_dict':{'port':link.src.port_no,'srcmac':link.src.hw_addr}})
# # for link in link_list]
# srclinks = [(link.src.dpid,link.dst.dpid,{'attr_dict':{'port':link.src.port_no}})
# for link in link_list]
# # aa = [link.dst.hw_addr for link in link_list]
# # print('srclinks:',srclinks)
# self.network.add_edges_from(srclinks)
#
# #reverse links
# dstlinks = [(link.dst.dpid,link.src.dpid,{'attr_dict':{'port':link.dst.port_no}})
# for link in link_list]
# # print('dst_links:',dstlinks)
# self.network.add_edges_from(dstlinks)
#
# #get host
# hosts_list = get_host(self.topology_api_app)
# hosts = [(host.port.dpid,host.port.port_no,{'attr_dict':{'ip':host.ipv4,'mac':host.mac}})
# for host in hosts_list]
#
# if type(ev) == event.EventSwitchLeave:
# print('swleave:',ev)
# if type(ev) == event.EventLinkDelete:
# print('linkdel:',ev)
# if type(ev) == event.EventLinkAdd:
# print('linkadd:',ev.link.dst.port_no)
# get all switches,links,hosts
# add_switch_inf_to_ZkServer(switches,srclinks,hosts)
    def get_out_port(self, datapath, src, dst, in_port):
        """Return the port to forward out of, based on a cached shortest path."""
        dpid = datapath.id
        # learn the host: attach its MAC to the switch/port it was first seen on
        if src not in self.network:
            self.network.add_node(src)
            self.network.add_edge(dpid, src, attr_dict={'port': in_port})
            self.network.add_edge(src, dpid)
            self.paths.setdefault(src, {})
        if dst in self.network:
            # compute the shortest path once and cache it per (src, dst) pair
            if dst not in self.paths[src]:
                path = nx.shortest_path(self.network, src, dst)
                self.paths[src][dst] = path
            path = self.paths[src][dst]
            next_hop = path[path.index(dpid) + 1]
            out_port = self.network[dpid][next_hop]['attr_dict']['port']
        else:
            # destination not learned yet: flood
            out_port = datapath.ofproto.OFPP_FLOOD
        return out_port
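    # Illustrative note (not part of the original file): self.network mixes switch
    # dpids and host MAC addresses as nodes, so a cached path typically looks like
    #   ['00:00:00:00:00:01', 1, 2, 3, '00:00:00:00:00:03']
    # get_out_port() locates the current dpid in that list and reads the 'port'
    # attribute of the edge towards the next hop to pick the output port.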
# add switch information to zk_server
def add_switch_inf_to_ZkServer(switches,srclinks,hosts=None):
# switches = '/' + str(switches)
# linkes = str(srclinks)
    # get the zk node and check whether it is empty
zk = zookeeper_server.Zookeeper_Server('127.0.0.1','4181')
# print('switch:',switches) #('switch:', [1, 2, 3])
# print('srclinks:',srclinks) #('srclinks:', [(2, 3, {'attr_dict': {'port': 3, 'srcmac': '72:f4:db:b7:8f:21'}}),
# (2, 1, {'attr_dict': {'port': 2, 'srcmac': '22:56:29:00:71:a0'}}),
# (3, 2, {'attr_dict': {'port': 2, 'srcmac': '36:fa:6e:fe:7f:20'}}),
# (1, 2, {'attr_dict': {'port': 2, 'srcmac': 'f6:3d:a0:bf:ed:ef'}})])
# print('hosts:',hosts) #('hosts:', [(2, 1, {'attr_dict': {'ip': [], 'mac': '96:e9:18:fc:8c:14'}}),
# (3, 1, {'attr_dict': {'ip': [], 'mac': '0a:c0:04:59:2f:b1'}}),
# (1, 1, {'attr_dict': {'ip': [], 'mac': '16:31:3d:97:c5:75'}})])
    # check the node and its values; if a node, value, or host is missing, add it
if zk.jude_node_exists('/controller'):
for switcha in switches:
#add links between switch and switch
for links in srclinks:
if zk.jude_node_exists('/controller' + '/' + str(switcha)):
get_node = zk.get_zk_node('/controller' + '/' + str(switcha))[0]
if switcha == links[0]:
node_value = {}
for node in get_node:
if zk.jude_node_exists('/controller' + '/' + str(switcha) + '/' + node):
get_value = zk.get_zk_node('/controller' + '/' + str(switcha) + '/' + node)[1][0]
node_value[str(node)] = eval(get_value)
if links not in node_value.values():
zk.create_zk_node('/controller' + '/' +str(switcha) + '/' + 'link',links)
else:
if switcha == links[0]:
zk.create_zk_node('/controller' + '/' + str(switcha) + '/' + 'link', links)
#add links between switch and host
for host in hosts:
if zk.jude_node_exists('/controller' + '/' + str(switcha)):
get_node = zk.get_zk_node('/controller' + '/' + str(switcha))[0]
if switcha == host[0]:
host_node_value = {}
for node in get_node:
if zk.jude_node_exists('/controller' + '/' + str(switcha) + '/' + node):
get_value = zk.get_zk_node('/controller' + '/' + str(switcha) + '/' + node)[1][0]
host_node_value[str(node)] = eval(get_value)
if host not in host_node_value.values():
zk.create_zk_node('/controller' + '/' + str(switcha) + '/' + 'host', host)
else:
if switcha == host[0]:
zk.create_zk_node('/controller' + '/' + str(switcha) + '/' + 'host', host)
|
import boto3
def remove_endpoints(region_name):
client = boto3.client("sagemaker", region_name=region_name)
endpoint_list = client.list_endpoints()
for endpoint in endpoint_list["Endpoints"]:
endpoint_name = endpoint["EndpointName"]
endpoint_status = client.describe_endpoint(EndpointName=endpoint_name)["EndpointStatus"]
if endpoint_status == "InService":
client.delete_endpoint(EndpointName=endpoint_name)
print(f"Deleted endpoint: {endpoint_name}")
def remove_notebooks(region_name):
client = boto3.client("sagemaker", region_name=region_name)
notebook_list = client.list_notebook_instances()
for notebook in notebook_list["NotebookInstances"]:
notebook_name = notebook["NotebookInstanceName"]
notebook_status = notebook["NotebookInstanceStatus"]
if notebook_status == "InService":
client.stop_notebook_instance(NotebookInstanceName=notebook_name)
print(f"Stop Notebook: {notebook_name}")
if notebook_status == "Stopped":
client.delete_notebook_instance(NotebookInstanceName=notebook_name)
print(f"Deleted Notebook: {notebook_name}")
def remove_models(region_name):
client = boto3.client("sagemaker", region_name=region_name)
models_list = client.list_models()
for model in models_list['Models']:
model_name = model['ModelName']
client.delete_model(ModelName=model_name)
print(f"Deleted model: {model_name}")
def remove_endpoints_configuration(region_name):
client = boto3.client("sagemaker", region_name=region_name)
endpoint_configs_list = client.list_endpoint_configs()['EndpointConfigs']
    for endpoint_config in endpoint_configs_list:
        endpoint_config_name = endpoint_config['EndpointConfigName']
        client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
        print(f"Deleted Endpoint Config: {endpoint_config_name}")
|
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views as auth_views
from django.urls import reverse_lazy
from .views import SignUp
app_name = "accounts"
urlpatterns = [
path("login/", auth_views.LoginView.as_view(), name="login"),
path("logout/", auth_views.LogoutView.as_view(), name="logout"),
path(
"password_change/",
auth_views.PasswordChangeView.as_view(
success_url=reverse_lazy("accounts:password_change_done")
),
name="password_change",
),
path(
"password_change_done/",
auth_views.PasswordChangeDoneView.as_view(),
name="password_change_done",
),
path(
"password_reset/",
auth_views.PasswordResetView.as_view(
success_url=reverse_lazy("accounts:password_reset_done")
),
name="password_reset",
),
path(
"password_reset_done/",
auth_views.PasswordResetDoneView.as_view(),
name="password_reset_done",
),
    path(
        # PasswordResetConfirmView needs the uidb64 and token from the reset e-mail link
        "password_reset_confirm/<uidb64>/<token>/",
        auth_views.PasswordResetConfirmView.as_view(
            success_url=reverse_lazy("accounts:password_reset_complete")
        ),
        name="password_reset_confirm",
    ),
path(
"password_reset_complete/",
auth_views.PasswordResetCompleteView.as_view(),
name="password_reset_complete",
),
path("signup/", SignUp.as_view(), name="signup"),
# path('profile/', UpdateProfile.as_view(), name='profile'),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BlogCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('category', models.CharField(unique=True, max_length=70)),
('type', models.CharField(default=b'label-default', max_length=20, null=True, blank=True, choices=[(b'label-default', b'Default'), (b'label-primary', b'Primary'), (b'label-success', b'Success'), (b'label-info', b'Info'), (b'label-warning', b'Warning'), (b'label-danger', b'Danger')])),
('parent', models.ForeignKey(related_name='categoryParent', blank=True, to='blog.BlogCategory', null=True)),
],
options={
'abstract': False,
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BlogTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('tag', models.CharField(unique=True, max_length=70)),
],
options={
'ordering': ['tag'],
'abstract': False,
'verbose_name': 'Tag',
'verbose_name_plural': 'Tags',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='blogpost',
name='category',
field=models.ForeignKey(blank=True, to='blog.BlogCategory', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='blogpost',
name='tags',
field=models.ManyToManyField(to='blog.BlogTag', null=True, blank=True),
preserve_default=True,
),
]
|
''' 2. Write a program that reads a student's three grades, then computes and shows the
arithmetic mean and the message given in the table below. For students who end up in
the exam, compute and show the grade they must obtain to pass, considering that the
required mean is 6.0.
Weighted Average    Result
0--------3          Failed
3--------7          Exam
7--------10         Approved '''
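# A minimal sketch of a solution (not part of the original file; the prompts, variable
# names, and the exam rule "final mean = (class mean + exam grade) / 2" are assumptions):
n1 = float(input("First grade: "))
n2 = float(input("Second grade: "))
n3 = float(input("Third grade: "))
mean = (n1 + n2 + n3) / 3
print(f"Mean: {mean:.1f}")
if mean < 3:
    print("Failed")
elif mean < 7:
    print("Exam")
    needed = 2 * 6.0 - mean  # grade required so that (mean + exam grade) / 2 >= 6.0
    print(f"Grade needed on the exam to pass: {needed:.1f}")
else:
    print("Approved")
|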
#!/usr/bin/env python
import setuptools
with open('README.rst', 'r') as o:
long_description = o.read()
setuptools.setup(
name = 'hyrule',
version = None,
install_requires = [
'hy @ git+https://github.com/hylang/hy@master#egg=hy-1.0'],
extras_require = dict(
docs = [
'Sphinx == 3.5.4',
'sphinxcontrib-hydomain @ git+https://github.com/hylang/sphinxcontrib-hydomain@master#egg=sphinxcontrib-hydomain-1.0']),
packages = setuptools.find_packages(),
author = "Paul Tagliamonte",
author_email = "[email protected]",
description = 'A utility library for the Hy programming language',
long_description = long_description,
license = 'Expat',
url = "http://hylang.org/",
platforms = ['any'],
classifiers = [
"Development Status :: 4 - Beta",
"License :: DFSG approved",
"License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
"Operating System :: OS Independent",
"Programming Language :: Lisp",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10"])
|
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, ElementNotInteractableException, NoSuchElementException
import itertools
import json
import pandas as pd
from collections import OrderedDict
from bs4 import BeautifulSoup as bs
import random
import time
import datetime
from dateutil.parser import parse
import re
import os
import smtplib
from email.message import EmailMessage
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('flightscrape.log')
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
FILENAME = 'trips.json'
class Scraper:
@staticmethod
def _add_date_range(input_dict):
"""
Accesses the departure date and return date from a dictionary.
Considers an allowance of +- 1 day for both departure and return
dates, then computes all possible valid combinations of date pairs.
:param input_dict: dictionary containing trip info
:returns: a list of tuples each containing two datetime objects
(departure and return dates)
"""
dep_date = parse(input_dict['Dep_date'], dayfirst=True).date()
ret_date = parse(input_dict['Ret_date'], dayfirst=True).date()
day = datetime.timedelta(days=1)
dep_datelist = [dep_date - day, dep_date, dep_date + day]
ret_datelist = [ret_date - day, ret_date, ret_date + day]
datelist = []
for pair in itertools.product(dep_datelist, ret_datelist):
if pair[0] < pair[1]:
datelist.append(pair)
return datelist
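    # Worked example (illustrative, not from the original file): for Dep_date
    # "10/05/2021" and Ret_date "11/05/2021" the +/- 1 day allowance yields the
    # 3 x 3 = 9 candidate pairs, of which the 6 with departure strictly before
    # return are kept, e.g. (2021-05-09, 2021-05-10) ... (2021-05-11, 2021-05-12).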
options = Options()
options.headless = True
with open(FILENAME) as f:
user_dict = json.load(f)
arbi_val = next(iter(user_dict.values()))[0]
columns = ['Name'] + [k for k in arbi_val.keys()] + ['Date_pairs']
rows = []
for name, trips in user_dict.items():
for trip in trips:
# dunder func method is used because we cannot call a static
# method inside class body (descriptor binding)
date_pairs = _add_date_range.__func__(trip)
row = [name] + list(trip.values()) + [date_pairs]
rows.append(row)
data = pd.DataFrame(rows, columns=columns)
def __init__(self):
self.data = Scraper.data
self.driver = webdriver.Firefox(options=Scraper.options)
logger.debug('Firefox webdriver initiated')
def __str__(self):
return type(self).__name__
def scrape_site(self):
"""
Main function used to scrape the website according to the list
of URLs. Collects the information and converts it to a pandas
DataFrame to be saved to a csv file.
"""
frames = []
for i in range(len(self.data)):
urls = self.create_urls(i)
logger.debug(f'url batch {i+1} created. Commencing scrape...')
for j, url in enumerate(urls, 1):
time.sleep(random.randint(1, 3))
logger.debug(f'scraping url no. {j} from batch {i+1}')
soup = self.scrape_page(url)
results = self.parse_page(soup)
logger.debug(f'parsed page for url no. {j} from batch {i+1}')
results['Target Price(RM)'] = self.data.iloc[i]['Target_price']
results['Email'] = self.data.iloc[i]['Email_address']
df = pd.DataFrame(results)
df.insert(0, column='Name', value=self.data.iloc[i, 0])
df.insert(3, column='Departing Date',
value=self.data.iloc[i]['Date_pairs'][j - 1][0])
df.insert(7, column='Returning Date',
value=self.data.iloc[i]['Date_pairs'][j - 1][1])
frames.append(df)
logger.debug(f'done processing url no. {j} from batch {i+1}')
logger.debug('finished scraping, closing webdriver...')
self.driver.quit()
combined = pd.concat(frames, ignore_index=True)
combined.to_csv(f'{str(self)}.csv')
def scrape_page(self, url):
"""
Sends a GET request to the URL being scraped and waits for the page
to load completely before dumping the page source into a beautiful
soup object.
:param url: full url of the page to be scraped
:returns: page source converted to a beautiful soup object
"""
timeout = 50
self.driver.get(url)
try:
WebDriverWait(self.driver, timeout).until(self.elem_present)
self.close_popup()
except TimeoutException:
            logger.warning(f'Timed out waiting for {url} to load')
if self.sort_cheap is not None:
try:
cheapest_tab = self.driver.find_element(By.XPATH,
self.sort_cheap)
cheapest_tab.click()
time.sleep(2)
except NoSuchElementException:
pass
soup = bs(self.driver.page_source, 'lxml')
return soup
def parse_page(self, soup):
"""
Parses the beautiful soup object to collect all the required
information.
:param soup: beautiful soup object to be parsed
:returns: an OrderedDict with flight parameters as keys and lists of
relevant information as values
"""
all_flights = soup.select(self.all_flights_tag)[:4]
data = OrderedDict()
for flight in all_flights:
if isinstance(self.airline_tag, str):
airlines = flight.select(self.airline_tag)
airlines = [x.text for x in airlines]
else:
airlines = flight.select(self.airline_tag[0])
airlines = [x.get(self.airline_tag[1]) for x in airlines]
dep_airline, ret_airline = airlines[0], airlines[1]
data.setdefault('Departing Airline', []).append(dep_airline)
data.setdefault('Returning Airline', []).append(ret_airline)
times = flight.select(self.times_tag)
times = [x.text.split('+')[0].strip() for x in times]
dep_time_1, dep_time_2 = times[0], times[1]
ret_time_1, ret_time_2 = times[2], times[3]
durations = flight.select(self.duration_tag)
durations = [x.text.strip() for x in durations]
dep_duration, ret_duration = durations[0], durations[1]
data.setdefault('Departing Time(Takeoff)', []).append(dep_time_1)
data.setdefault('Departing Time(Arrival)', []).append(dep_time_2)
data.setdefault('Dep. Flight Duration', []).append(dep_duration)
data.setdefault('Returning Time(Takeoff)', []).append(ret_time_1)
data.setdefault('Returning Time(Arrival)', []).append(ret_time_2)
data.setdefault('Ret. Flight Duration', []).append(ret_duration)
airports = flight.select(self.airports_tag)
airports = [x.text for x in airports]
airport_1, airport_2 = airports[0], airports[len(airports) // 2]
data.setdefault('Source Airport', []).append(airport_1)
data.setdefault('Destination Airport', []).append(airport_2)
prices = soup.select(self.price_tag)[:4]
prices = [re.search(r'\d+', x.text.strip()).group()
for x in prices]
data['Price(RM)'] = prices
return data
class Skyscanner(Scraper):
def __init__(self):
"""
Initialize the css selectors and extra details used to find the
required information as instance attributes
"""
super().__init__()
self.elem_present = EC.presence_of_element_located((By.CSS_SELECTOR,
'div.day-list-progress'
+ '[style="width: 100%; display: none;"]'))
self.sort_cheap = '//td[@class="tab"][@data-tab="price"]'
self.all_flights_tag = 'div.ItineraryContent__container-1Sb_S'
self.airline_tag = ('img.AirlineLogo__big-logo-image-3V2-Z', 'title')
self.times_tag = 'span.LegInfo__times-Qn_ji'
self.duration_tag = 'span.LegInfo__duration-2VgVw'
self.airports_tag = 'span.LegInfo__tooltipTarget-njlsT'
self.price_tag = 'a.CTASection__price-2bc7h.price'
self.dialogbox_tag = 'button.bpk-modal__close-button-2a-Xb '
def create_urls(self, row_num):
"""
Creates urls for scraping the skyscanner website using data from
the 'data' DataFrame object associated with the Scraper superclass.
:param row_num: Current row number of dataframe associated with
the Scraper superclass
:returns: a list of urls to scrape
"""
row = self.data.iloc[row_num]
urls = []
for pair in row['Date_pairs']:
url = ('https://www.skyscanner.com.my/transport/flights/'
+ row['Origin'].lower() + '/'
+ row['Destination'].lower() + '/'
+ str(pair[0]) + '/'
+ str(pair[1]) + '/'
+ '?adults=1&children=0&adultsv2=1&childrenv2=&infants=0&cabinclass=economy&rtn=1'
+ '&preferdirects=false&outboundaltsenabled=false&inboundaltsenabled=false&ref=home#results')
urls.append(url)
        return urls
def close_popup(self):
"""
check for the presence of pop-up windows and closes them
"""
try:
close = self.driver.find_element(By.CSS_SELECTOR,
self.dialogbox_tag)
close.click()
except NoSuchElementException:
pass
class Kayak(Scraper):
def __init__(self):
"""
Initialize the css selectors and extra details used to find the
required information as instance attributes
"""
super().__init__()
elem = 'div.Common-Results-ProgressBar.theme-dark.Hidden'
self.elem_present = EC.presence_of_element_located((By.CSS_SELECTOR,
elem))
self.sort_cheap = None
self.all_flights_tag = 'div.mainInfo'
self.airline_tag = 'div.section.times div.bottom'
self.times_tag = 'span.time-pair'
self.duration_tag = 'div.section.duration div.top'
self.airports_tag = 'div.section.duration div.bottom span:not(.sep)'
self.price_tag = 'div.multi-row.featured-provider span.price.option-text'
self.dialogbox_tag = 'button.Button-No-Standard-Style.close'
def create_urls(self, row_num):
"""
Creates urls for scraping the Kayak website using data from the
'data' DataFrame object associated with the Scraper superclass.
:param row_num: Current row number of dataframe associated with
the Scraper superclass
:returns: a list of urls to scrape
"""
row = self.data.iloc[row_num]
urls = []
for pair in row['Date_pairs']:
url = ('https://www.kayak.com.my/flights/'
+ f"{row['Origin']}-{row['Destination']}/"
+ f"{pair[0]}/{pair[1]}"
+ '?sort=price_a')
urls.append(url)
        return urls
def close_popup(self):
"""
check for the presence of pop-up windows and closes them
"""
popup_tag = 'input.Common-Widgets-Text-TextInput.driveByUserAddress'
try:
popup = self.driver.find_element(By.CSS_SELECTOR, popup_tag)
close = self.driver.find_elements(By.CSS_SELECTOR,
self.dialogbox_tag)
try:
close[-2].click()
except ElementNotInteractableException:
close[-1].click()
except NoSuchElementException:
pass
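
# Hedged usage sketch (assumption; the original module defines no driver code):
# each concrete scraper loads trips.json at class-definition time, so running a
# site is just instantiation followed by scrape_site().
if __name__ == '__main__':
    for scraper_cls in (Skyscanner, Kayak):
        scraper_cls().scrape_site()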
|
from logging_functions import *
import os
import numpy as np
from menpo.shape import PointCloud
from menpofit.clm import GradientDescentCLMFitter
import pickle
import math
import rspimage
jaw_line_inds = np.arange(0, 17)
nose_inds = np.arange(27, 36)
left_eye_inds = np.arange(36, 42)
right_eye_inds = np.arange(42, 48)
left_brow_inds = np.arange(17, 22)
right_brow_inds = np.arange(22, 27)
mouth_inds = np.arange(48, 68)
def sigmoid(x, rate, offset):
return 1 / (1 + math.exp(-rate * (x - offset)))
def calculate_evidence(patch_responses, rate=0.25, offset=20):
# from ECT: https://github.com/HongwenZhang/ECT-FaceAlignment
rspmapShape = patch_responses[0, 0, ...].shape
n_points = patch_responses.shape[0]
y_weight = [np.sum(patch_responses[i, 0, ...], axis=1) for i in range(n_points)]
x_weight = [np.sum(patch_responses[i, 0, ...], axis=0) for i in range(n_points)]
# y_weight /= y_weight.sum()
# x_weight /= x_weight.sum()
y_coordinate = range(0, rspmapShape[0])
x_coordinate = range(0, rspmapShape[1])
varList = [(np.abs(
np.average((y_coordinate - np.average(y_coordinate, weights=y_weight[i])) ** 2, weights=y_weight[i])),
np.abs(np.average((x_coordinate - np.average(x_coordinate, weights=x_weight[i])) ** 2,
weights=x_weight[i])))
for i in range(n_points)]
# patch_responses[patch_responses<0.001] = 0
prpList = [
(np.sum(patch_responses[i, 0, ...], axis=(-1, -2)), np.sum(patch_responses[i, 0, ...], axis=(-1, -2)))
for i in range(n_points)]
var = np.array(varList).flatten()
var[var == 0] = np.finfo(float).eps
var = np.sqrt(var)
var = 1 / var
weight = np.array(prpList).flatten()
weight *= var
# offset = np.average(weight) - 20
weight = [sigmoid(i, rate, offset) for i in weight]
weight = np.array(weight)
return weight
def get_patches_around_landmarks(heat_maps, menpo_shape, patch_size=(30,30), image_shape=256):
# from ECT: https://github.com/HongwenZhang/ECT-FaceAlignment
padH = int(image_shape / 2)
padW = int(image_shape / 2)
rps_zeros = np.zeros((1, 2 * image_shape, 2 * image_shape, menpo_shape.n_points))
rps_zeros[0, padH:padH + image_shape, padW:padW + image_shape, :] = heat_maps
rOffset = np.floor(patch_size[0] / 2).astype(int)
lOffset = patch_size[0] - rOffset
rspList = [rps_zeros[0, y - rOffset:y + lOffset, x - rOffset:x + lOffset, i] for i in range(menpo_shape.n_points)
for y in [np.around(menpo_shape.points[i][0] + 1 + padH).astype(int)]
for x in [np.around(menpo_shape.points[i][1] + 1 + padW).astype(int)]]
patches = np.array(rspList)[:, None, :, :]
return patches
def pdm_correct(init_shape, pdm_model, part_inds=None):
""" correct landmarks using pdm (point distribution model)"""
pdm_model.set_target(PointCloud(init_shape))
if part_inds is None:
return pdm_model.target.points
else:
return pdm_model.target.points[part_inds]
def weighted_pdm_transform(input_pdm_model, patches, shape, inirho=20):
# from ECT: https://github.com/HongwenZhang/ECT-FaceAlignment
weight = calculate_evidence(patches, rate=0.5, offset=10).reshape((1, -1))
pdm_model = input_pdm_model.copy()
# write project_weight
ini_rho2_inv_prior = np.hstack((np.zeros((4,)), inirho / pdm_model.model.eigenvalues))
J = np.rollaxis(pdm_model.d_dp(None), -1, 1)
J = J.reshape((-1, J.shape[-1]))
initial_shape_mean = shape.points.ravel() - pdm_model.model._mean
iniJe = - J.T.dot(initial_shape_mean * weight[0])
iniJWJ = J.T.dot(np.diag(weight[0]).dot(J))
inv_JJ = np.linalg.inv(iniJWJ + np.diag(ini_rho2_inv_prior))
initial_p = -inv_JJ.dot(iniJe)
# Update pdm
pdm_model._from_vector_inplace(initial_p)
return pdm_model.target.points
def w_pdm_correct(init_shape, patches, pdm_model, part_inds=None):
""" correct landmarks using weighted pdm"""
points = weighted_pdm_transform(input_pdm_model=pdm_model, patches=patches, shape=PointCloud(init_shape))
if (part_inds is not None and pdm_model.n_points < 68) or part_inds is None:
return points
else:
return points[part_inds]
def feature_based_pdm_corr(lms_init, models_dir, train_type='basic', patches=None):
""" correct landmarks using part-based pdm"""
jaw_line_inds = np.arange(0, 17)
nose_inds = np.arange(27, 36)
left_eye_inds = np.arange(36, 42)
right_eye_inds = np.arange(42, 48)
left_brow_inds = np.arange(17, 22)
right_brow_inds = np.arange(22, 27)
mouth_inds = np.arange(48, 68)
'''
selected number of PCs:
jaw:7
eye:3
brow:2
nose:5
mouth:7
'''
new_lms = np.zeros((68, 2))
parts = ['l_brow', 'r_brow', 'l_eye', 'r_eye', 'mouth', 'nose', 'jaw']
part_inds_opt = [left_brow_inds, right_brow_inds, left_eye_inds, right_eye_inds, mouth_inds, nose_inds,
jaw_line_inds]
pc_opt = [2, 2, 3, 3, 7, 5, 7]
for i, part in enumerate(parts):
part_inds = part_inds_opt[i]
pc = pc_opt[i]
temp_model = os.path.join(models_dir, train_type + '_' + part + '_' + str(pc))
filehandler = open(temp_model, "rb")
try:
pdm_temp = pickle.load(filehandler)
except UnicodeDecodeError:
pdm_temp = pickle.load(filehandler, fix_imports=True, encoding="latin1")
filehandler.close()
if patches is None:
part_lms_pdm = pdm_correct(lms_init[part_inds], pdm_temp)
else:
part_lms_pdm = w_pdm_correct(
init_shape=lms_init[part_inds], patches=patches, pdm_model=pdm_temp, part_inds=part_inds)
new_lms[part_inds] = part_lms_pdm
return new_lms
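# Illustrative call (the models_dir value is an assumption, not from this module):
#   corrected = feature_based_pdm_corr(lms_init=initial_68x2_landmarks,
#                                      models_dir='pdm_models', train_type='basic')
# Each part loads a pickled PDM named '<train_type>_<part>_<n_pcs>', e.g.
# 'basic_mouth_7', matching the pc_opt choices listed above.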
def clm_correct(clm_model_path, image, map, lms_init):
""" tune landmarks using clm (constrained local model)"""
filehandler = open(os.path.join(clm_model_path), "rb")
try:
part_model = pickle.load(filehandler)
except UnicodeDecodeError:
part_model = pickle.load(filehandler, fix_imports=True, encoding="latin1")
filehandler.close()
# from ECT: https://github.com/HongwenZhang/ECT-FaceAlignment
part_model.opt = dict()
part_model.opt['numIter'] = 5
part_model.opt['kernel_covariance'] = 10
part_model.opt['sigOffset'] = 25
part_model.opt['sigRate'] = 0.25
part_model.opt['pdm_rho'] = 20
part_model.opt['verbose'] = False
part_model.opt['rho2'] = 20
part_model.opt['ablation'] = (True, True)
part_model.opt['ratio1'] = 0.12
part_model.opt['ratio2'] = 0.08
part_model.opt['smooth'] = True
fitter = GradientDescentCLMFitter(part_model, n_shape=30)
image.rspmap_data = np.swapaxes(np.swapaxes(map, 1, 3), 2, 3)
fr = fitter.fit_from_shape(image=image, initial_shape=PointCloud(lms_init), gt_shape=PointCloud(lms_init))
w_pdm_clm = fr.final_shape.points
return w_pdm_clm
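# Illustrative call (assumption; 'map' is the stack of per-landmark heat maps produced
# upstream and clm_model_path points to a pickled menpofit CLM):
#   refined = clm_correct(clm_model_path='clm_models/basic_all', image=menpo_image,
#                         map=heat_maps, lms_init=initial_landmarks)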
|
# Mel
num_mels = 80
text_cleaners = ['english_cleaners']
# FastSpeech
vocab_size = 300
max_seq_len = 3000
encoder_dim = 256
encoder_n_layer = 4
encoder_head = 2
encoder_conv1d_filter_size = 1024
decoder_dim = 256
decoder_n_layer = 4
decoder_head = 2
decoder_conv1d_filter_size = 1024
fft_conv1d_kernel = (9, 1)
fft_conv1d_padding = (4, 0)
duration_predictor_filter_size = 256
duration_predictor_kernel_size = 3
dropout = 0.1
# Train
checkpoint_path = "./model_new"
logger_path = "./logger"
mel_ground_truth = "./mels"
alignment_path = "./alignments"
batch_size = 32
epochs = 2000
n_warm_up_step = 4000
learning_rate = 1e-3
weight_decay = 1e-6
grad_clip_thresh = 1.0
decay_step = [500000, 1000000, 2000000]
save_step = 3000
log_step = 5
clear_Time = 20
batch_expand_size = 32
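
# Hedged usage note (assumption): this file is normally imported as a plain module of
# hyperparameters, e.g.
#   import hparams as hp
#   encoder = Encoder(dim=hp.encoder_dim, n_layers=hp.encoder_n_layer, heads=hp.encoder_head)
# The module name 'hparams' and the Encoder signature are illustrative only.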
|