class _Optimizer(Ascender):
@pass_node
def parents_of(self, node):
children = node.C
assert len(children) == 1
child = children[0]
if isinstance(child, Node) and child.T in ("union", "join"):
return Node(child.T, [Node("parents_of", cc) for cc in child.C])
else:
return node
@pass_node
def children_of(self, node):
children = node.C
assert len(children) == 1
child = children[0]
if isinstance(child, Node) and child.T in ("union", "join"):
# parents (union(x,y,z)) = union(parents(x), parents(y), parents(z))
return Node(child.T, [Node("children_of", cc) for cc in child.C])
else:
return node
def meta_filter(self, children, meta):
assert len(children) == 2
query, meta_exp = children
return self.apply_meta_exp(query, meta_exp)
def apply_meta_exp(self, node, exp):
# propagate meta filter expression as close to "dataset" as possible
t = node.T
if t in ("join", "union"):
new_children = [self.apply_meta_exp(c, exp) for c in node.C]
return Node(t, new_children)
elif t == "minus":
assert len(node.C) == 2
left, right = node.C
return Node(t, [self.apply_meta_exp(left, exp), right])
elif t == "filter":
return Node("meta_filter", [node, exp])
elif t == "dataset":
assert len(node.C) == 2
ds, meta_exp = node.C
if meta_exp is None:
new_exp = exp
elif meta_exp.T == "and":
new_exp = Node(meta_exp.T, list(meta_exp.C) + [exp])
else:
new_exp = Node("meta_and", [meta_exp, exp])
return Node("dataset", [ds, new_exp])
else:
raise ValueError("Unknown node type in Optimizer.apply_meta_exp: %s" % (node,))
class _MetaExpOptimizer(Ascender):
def _flatten_bool(self, op, nodes):
#print("_flatten_bool: input:", nodes)
new_nodes = []
for c in nodes:
if c.T == op:
new_nodes += self._flatten_bool(op, c.C)
else:
new_nodes.append(c)
#print("_flatten_bool: output:", new_nodes)
return new_nodes
def meta_or(self, children, meta):
children = [x if x.T == "meta_and" else Node("meta_and", [x]) for x in self._flatten_bool("meta_or", children)]
out = Node("meta_or", children)
return out
def _generate_and_terms(self, path, rest):
if len(rest) == 0: yield path
else:
node = rest[0]
rest = rest[1:]
if node.T == "meta_or":
for c in node.C:
my_path = path + [c]
for p in self._generate_and_terms(my_path, rest):
yield p
else:
for p in self._generate_and_terms(path + [node], rest):
yield p
def meta_and(self, children, meta):
children = self._flatten_bool("meta_and", children)
or_present = False
for c in children:
if c.T == "meta_or":
or_present = True
break
if or_present:
paths = list(self._generate_and_terms([], children))
#print("paths:")
#for p in paths:
# print(p)
paths = [self._flatten_bool("meta_and", p) for p in paths]
#print("meta_and: children:", paths)
return Node("meta_or", [Node("meta_and", p) for p in paths])
else:
return Node("meta_and", children)
def _make_DNF(self, exp):
if exp is None: return None
if exp.T in CMP_OPS or exp.T == "in":
return self._make_DNF(Node("meta_and", [exp]))
elif exp.T == "meta_and":
return self._make_DNF(Node("meta_or", [exp]))
elif exp.T == "meta_or":
or_exp = []
assert exp.T == "meta_or"
for meta_and in exp.C:
and_exp = []
assert meta_and.T == "meta_and"
for c in meta_and.C:
assert c.T in CMP_OPS or c.T == "in"
and_exp.append((c.T, c.C[0], c.C[1]))
or_exp.append(and_exp)
return or_exp
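# Illustrative sketch (added for clarity): _make_DNF returns the expression as a list of
# OR-ed terms, each term being a list of (op, name, value) tuples that are AND-ed together.
# For example, after normalization an expression like
#     (x == 1 and y > 2) or 123 in event_list
# becomes
#     [[("==", "x", 1), (">", "y", 2)], [("in", "event_list", 123)]]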
def dataset(self, children, meta):
assert len(children) == 2
ds, exp = children
return Node("dataset", [ds, self._make_DNF(exp)])
class _SQLGenerator(Ascender):
def dataset(self, args, meta):
assert len(args) == 2
dataset_name, meta_exp = args
namespace, name = dataset_name.M
keep_meta = True
return Node("SQL", meta=DBDataset._list_files_sql(
namespace, name,
False, keep_meta, meta_exp,
"self", None))
class _Evaluator(Ascender):
def __init__(self, db, filters):
Ascender.__init__(self)
self.Filters = filters
self.DB = db
def parents_of(self, args, meta):
assert len(args) == 1
arg = args[0]
if False and arg.T == "dataset": # not implemented yet
return self.dataset(arg.C, arg.M, "parents")
else:
return arg.parents(with_metadata=True)
def children_of(self, args, meta):
assert len(args) == 1
arg = args[0]
#print("_Evaluator.children_of: arg:", arg)
if False and arg.T == "dataset": # not implemented yet
return self.dataset(arg.C, arg.M, "children")
else:
#print("children_of: calling children()...")
return arg.children(with_metadata=True)
def dataset(self, args, meta, provenance=None):
assert len(args) == 2
dataset_name, meta_exp = args
namespace, name = dataset_name.M
dataset = DBDataset.get(self.DB, namespace, name)
keep_meta = meta["keep_meta"]
files = dataset.list_files(condition=meta_exp,
relationship="self" if provenance is None else provenance,
with_metadata=keep_meta)
#print ("Evaluator.dataset: files:", files)
assert isinstance(files, DBFileSet)
return files
def union(self, args, meta):
return DBFileSet.union(self.DB, args)
def join(self, args, meta):
return DBFileSet.join(self.DB, args)
def minus(self, expressions, meta):
assert len(expressions) == 2
left, right = expressions
return left - right
def filter(self, args, meta):
name, params = meta
inputs = args
#print("Evaluator.filter: inputs:", inputs)
filter_function = self.Filters[name]
return DBFileSet(self.DB, filter_function(inputs, params))
def meta_filter(self, args, meta):
assert len(args) == 2
files, meta_exp = args
return DBFileSet(self.DB, (f for f in files if self.evaluate_meta_expression(f, meta_exp)))
def _eval_meta_bool(self, f, bool_op, parts):
assert len(parts) > 0
p0 = parts[0]
rest = parts[1:]
ok = self.evaluate_meta_expression(f, p0)
if bool_op == "and":
if len(rest) and ok:
ok = self._eval_meta_bool(f, bool_op, rest)
return ok
elif bool_op == "or":
if len(rest) and not ok:
ok = self._eval_meta_bool(f, bool_op, rest)
return ok
elif bool_op == "not":
assert len(rest) == 0
return not ok
else:
raise ValueError("Unrecognized boolean operation '%s'" % (op,))
BOOL_OPS = ("and", "or", "not")
def evaluate_meta_expression(self, f, meta_expression):
op, args = meta_expression.T, meta_expression.C
if op in self.BOOL_OPS:
return self._eval_meta_bool(f, op, args)
else:
#
name, value = args
attr_value = f.get_attribute(name, None)
if op == "<": return attr_value < value
elif op == ">": return attr_value > value
elif op == "<=": return attr_value <= value
elif op == ">=": return attr_value >= value
elif op in ("==",'='):
#print("evaluate_meta_expression:", repr(attr_value), repr(value))
return attr_value == value
elif op == "!=": return attr_value != value
elif op == "in": return value in attr_value # exception, e.g. 123 in event_list
else:
raise ValueError("Invalid comparison operator '%s' in %s" % (op, meta_expression))
def meta_exp_to_sql(self, meta_expression):
op, args = meta_expression.T, meta_expression.C
if op in self.BOOL_OPS:
if op in ('or','and'):
sql_op = op
return (' ' + sql_op + ' ').join([
'(' + self.meta_exp_to_sql(part) + ')' for part in args])
elif op == 'not':
return ' not (' + self.meta_exp_to_sql(args[0]) + ')'
else:
raise ValueError("Unrecognized boolean operation '%s'" % (op,))
else:
name, value = args
if op in ('<', '>', '<=', '>=', '==', '=', '!='):
sql_op = '=' if op == '==' else op
if isinstance(value, bool): colname = "bool_value"
elif isinstance(value, int): colname = "int_value"
elif isinstance(value, float): colname = "float_value"
elif isinstance(value, str): colname = "string_value"
else:
raise ValueError("Unrecognized value type %s for attribute %s" % (type(value), name))
return "attr.name='%s' and attr.%s %s '%s'" % (name, colname, sql_op, value)
elif op == 'in':
if isinstance(value, bool): colname = "bool_array"
elif isinstance(value, int): colname = "int_array"
elif isinstance(value, float): colname = "float_array"
elif isinstance(value, str): colname = "string_array"
else:
raise ValueError("Unrecognized value type %s for attribute %s" % (type(value), name))
return "attr.name='%s' and '%s' in attr.%s" % (name, value, colname)
else:
raise ValueError("Invalid comparison operator '%s' in %s" % (op, meta_expression))
class Query(object):
_Parser = Lark(MQL_Grammar, start="exp")
def __init__(self, source, default_namespace=None):
self.Source = source
self.DefaultNamespace = default_namespace
self.Parsed = self.Optimized = self.Assembled = None
def remove_comments(self, text):
out = []
for l in text.split("\n"):
l = l.split('#', 1)[0]
out.append(l)
return '\n'.join(out)
def parse(self):
if self.Parsed is None:
tree = self._Parser.parse(self.remove_comments(self.Source))
self.Parsed = _Converter().convert(tree, self.DefaultNamespace)
return self.Parsed
def assemble(self, db, default_namespace = None):
if self.Assembled is None:
parsed = self.parse()
print("Query.assemble(): parsed:", parsed.pretty())
self.Assembled = _Assembler(db, default_namespace).walk(parsed)
print("Query.assemble: self.Assembled:", self.Assembled.pretty())
return self.Assembled
def skip_assembly(self):
if self.Assembled is None:
self.Assembled = self.parse()
return self.Assembled
def optimize(self):
#print("Query.optimize: entry")
assert self.Assembled is not None
#print("optimize: assembled:", self.Assembled.pretty())
optimized = _Optimizer().walk(self.Assembled)
optimized = _MetaExpOptimizer().walk(optimized)
self.Optimized = optimized
#print("Query.optimize: optimized:", self.Optimized)
return self.Optimized
def generate_sql(self):
return _SQLGenerator().walk(self.optimize())
def _limit_results(self, gen, limit):
for x in gen:
if limit > 0:
yield x
else:
break
limit -= 1
def run(self, db, filters={}, limit=None, with_meta=True):
# TODO: take with_meta into account
self.assemble(db, self.DefaultNamespace)
#print("Query.run: assemled:", self.Assembled.pretty())
optimized = self.optimize()
#print("Query.run: optimied:", optimized.pretty())
#print("Query.run: with_meta=", with_meta)
_MetaFlagPusher().visit(optimized, with_meta)
#print("Query.run: flag_applied:\n%s" % (optimized.pretty(),))
out = _Evaluator(db, filters).walk(optimized)
if limit is None:
return out
else:
return self._limit_results(out, limit)
@property
def code(self):
return self.parse().to_json()
@staticmethod
def from_db(db, namespace, name):
return Query(DBNamedQuery.get(db, namespace,
from vork.tokenizer import *
from vork.ast import *
class Parser:
def __init__(self, tokenizer: Tokenizer):
self.t = tokenizer
self.t.next_token()
self.frame = []
###################################################################################################################
# Expression parsing
#
# See https://www.tutorialspoint.com/go/go_operators_precedence.htm for the table that I used as reference
###################################################################################################################
def _parse_literal(self, check_range=True):
# Integer literal
if self.t.is_token(IntToken):
val = self.t.token.value
self.t.next_token()
return ExprIntegerLiteral(val)
# Float literal
elif self.t.is_token(FloatToken):
val = self.t.token.value
self.t.next_token()
return ExprFloatLiteral(val)
# Identifier
elif self.t.is_token(IdentToken):
val = self.t.token.value
self.t.next_token()
return ExprIdentifierLiteral(val)
# Array literal
elif self.t.match_token('['):
exprs = []
while not self.t.match_token(']'):
exprs.append(self.parse_expr())
# TODO: I remember there were some array attributes, will need to look it up
return ExprArrayLiteral(exprs)
# Parens
elif self.t.match_token('('):
expr = self.parse_expr()
self.t.expect_token(')')
return expr
else:
assert False, f'Unexpected token {self.t.token}'
def _parse_postfix(self):
expr = self._parse_literal()
# Postfix operators
if self.t.is_token('++') or self.t.is_token('--'):
pass
else:
# Top level expressions
while True:
# Member access
if self.t.match_token('.'):
assert self.t.is_token(IdentToken), f"Expected name, got {self.t.token}"
expr = ExprMemberAccess(expr, self.t.token.value)
self.t.next_token()
# Function call
elif self.t.match_token('('):
args = []
if not self.t.is_token(')'):
args = [self.parse_expr()]
while self.t.match_token(','):
args.append(self.parse_expr())
self.t.expect_token(')')
expr = ExprCall(expr, args)
# Array access
elif self.t.match_token('['):
expr = ExprIndexAccess(expr, self.parse_expr())
self.t.expect_token(']')
# In expression
elif self.t.match_keyword('in'):
expr = ExprIn(expr, self.parse_expr())
# Nothing more, so we're probably done
else:
break
return expr
# TODO: deref (*), need to figure how to handle the ambiguity with multiplications
def _parse_unary(self):
# this can be done only one time
if self.t.is_token('-') or self.t.is_token('--') or self.t.is_token('++') or self.t.is_token('&'):
op = self.t.token.value
self.t.next_token()
expr = ExprUnary(op, self._parse_postfix())
# These can be done multiple times
elif self.t.is_token('!') or self.t.is_token('~') or self.t.is_token('*'):
op = self.t.token.value
self.t.next_token()
expr = ExprUnary(op, self._parse_unary())
# Implicit enum member access
elif self.t.match_token('.'):
assert self.t.is_token(IdentToken), f"Expected name, got {self.t.token}"
name = self.t.token.value
self.t.next_token()
return ExprImplicitEnum(name)
else:
self.t.push()
self.t.next_token()
# Check for ranged array literal
# TODO: for now we only allow for literals
# to be used in the ranged array, is
# that what we really want?
if self.t.is_token('..'):
self.t.pop()
expr_from = self._parse_literal()
self.t.expect_token('..')
expr_to = self._parse_literal()
return ExprRange(expr_from, expr_to)
self.t.pop()
expr = self._parse_postfix()
return expr
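# Example (added for illustration): the push/pop lookahead above lets a ranged array
# literal such as
#     0 .. 10
# be recognised before normal postfix parsing, yielding
#     ExprRange(ExprIntegerLiteral(0), ExprIntegerLiteral(10))
# Any other token sequence falls back to _parse_postfix().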
def _parse_multiplicative(self):
expr = self._parse_unary()
while self.t.is_token('*') or self.t.is_token('/') or self.t.is_token('%'):
op = self.t.token.value
self.t.next_token()
expr = ExprBinary(expr, op, self._parse_unary())
return expr
def _parse_additive(self):
expr = self._parse_multiplicative()
while self.t.is_token('+') or self.t.is_token('-'):
op = self.t.token.value
self.t.next_token()
expr = ExprBinary(expr, op, self._parse_multiplicative())
return expr
def _parse_shift(self):
expr = self._parse_additive()
while self.t.is_token('<<') or self.t.is_token('>>'):
op = self.t.token.value
self.t.next_token()
expr = ExprBinary(expr, op, self._parse_additive())
return expr
def _parse_relational(self):
expr = self._parse_shift()
while self.t.is_token('<') or self.t.is_token('>') or self.t.is_token('<=') or self.t.is_token('>='):
op = self.t.token.value
self.t.next_token()
expr = ExprBinary(expr, op, self._parse_shift())
return expr
def _parse_equality(self):
expr = self._parse_relational()
while self.t.is_token('==') or self.t.is_token('!='):
op = self.t.token.value
self.t.next_token()
expr = ExprBinary(expr, op, self._parse_relational())
return expr
def _parse_bitwise_and(self):
expr = self._parse_equality()
while self.t.match_token('&'):
expr = ExprBinary(expr, '&', self._parse_equality())
return expr
def _parse_bitwise_xor(self):
expr = self._parse_bitwise_and()
while self.t.match_token('^'):
expr = ExprBinary(expr, '^', self._parse_bitwise_and())
return expr
def _parse_bitwise_or(self):
expr = self._parse_bitwise_xor()
while self.t.match_token('|'):
expr = ExprBinary(expr, '|', self._parse_bitwise_xor())
return expr
def _parse_logical_and(self):
expr = self._parse_bitwise_or()
while self.t.match_token('&&'):
expr = ExprBinary(expr, '&&', self._parse_bitwise_or())
return expr
def _parse_logical_or(self):
expr = self._parse_logical_and()
while self.t.match_token('||'):
expr = ExprBinary(expr, '||', self._parse_logical_and())
return expr
def _parse_conditional(self):
# If expression
if self.t.match_keyword('if'):
condition = self.parse_expr()
block_true = self.parse_stmt_block()
assert self.t.match_keyword('else')
block_false = self.parse_stmt_block()
return ExprIf(condition, block_true, block_false)
else:
expr = self._parse_logical_or()
# Or expression
if self.t.match_keyword('or'):
block = self.parse_stmt_block()
return ExprOr(expr, block)
return expr
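# Examples (added for illustration, V-style syntax assumed):
#     x := if cond { 1 } else { 2 }     ->  ExprIf(condition, block_true, block_false)
#     y := might_fail() or { return }   ->  ExprOr(call_expr, block)
# An `if` used as an expression must carry an `else` branch (asserted above).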
def _parse_assignment(self):
expr = self._parse_conditional()
while self.t.is_token('=') or self.t.is_token('+=') or self.t.is_token('-=') or self.t.is_token('*=') or \
self.t.is_token('/=') or self.t.is_token('%=') or self.t.is_token('>>=') or self.t.is_token('<<=') or \
self.t.is_token('&=') or self.t.is_token('^=') or self.t.is_token('|='):
op = self.t.token.value
self.t.next_token()
if isinstance(expr, ExprBinary):
expr = ExprBinary(expr.left, expr.op, ExprBinary(expr.right, op, self._parse_conditional()))
else:
expr = ExprBinary(expr, op, self._parse_conditional())
return expr
def parse_expr(self):
return self._parse_assignment()
# def parse_mut_expr(self):
# mut = False
# if self.t.match_keyword('mut'):
# mut = True
# return self.parse_expr(), mut
###################################################################################################################
# Statement parsing
###################################################################################################################
def _parse_var_decl(self):
# Mutable optional
mut = False
if self.t.match_keyword('mut'):
mut = True
# Get the names
assert self.t.is_token(IdentToken), f"Expected name, got {self.t.token}"
names = [self.t.token.value]
self.t.next_token()
while self.t.match_token(','):
assert self.t.is_token(IdentToken), f"Expected name, got {self.t.token}"
names.append(self.t.token.value)
self.t.next_token()
# The :=
self.t.expect_token(':=')
# The assigned expression
expr = self.parse_expr()
return StmtVarDecl(mut, names, expr)
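# Example (added for illustration; `get_pair` is a made-up name): a declaration such as
#     mut a, b := get_pair()
# parses to
#     StmtVarDecl(True, ['a', 'b'], ExprCall(ExprIdentifierLiteral('get_pair'), []))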
def parse_stmt(self):
# Return statement
if self.t.match_keyword('return'):
exprs = []
# Return should always be before an end of block so that tells us we have no arguments
if not self.t.is_token('}'):
exprs.append(self.parse_expr())
while self.t.match_token(','):
exprs.append(self.parse_expr())
return StmtReturn(exprs)
# Assert statement
elif self.t.match_keyword('assert'):
return StmtAssert(self.parse_expr())
# Parse if
elif self.t.match_keyword('if'):
condition = self.parse_expr()
block_true = self.parse_stmt_block()
block_false = None
# Else part
if self.t.match_keyword('else'):
# We support `else if` without block before
if self.t.is_keyword('if'):
block_false = StmtBlock(self.frame[-1], [self.parse_stmt()])
# The block
else:
block_false = self.parse_stmt_block()
return StmtIf(condition, block_true, block_false)
# Block
if self.t.is_token('{'):
return self.parse_stmt_block()
# For statement
if self.t.match_keyword('for'):
# Check if a foreach
# will match (for name, name in test) and (for name in test)
self.t.push()
self.t.next_token()
if self.t.is_token(','):
self.t.pop()
assert self.t.is_token(IdentToken), f"Expected name, got {self.t.token}"
index = self.t.token.value
self.t.next_token()
self.t.expect_token(',')
assert self.t.is_token(IdentToken), f"Expected name, got {self.t.token}"
name = self.t.token.value
self.t.next_token()
self.t.expect_keyword('in')
expr = self.parse_expr()
block = self.parse_stmt_block()
return StmtForeach(index, name, expr, block)
elif self.t.is_keyword('in'):
self.t.pop()
assert self.t.is_token(IdentToken), f"Expected name, got {self.t.token}"
name = self.t.token.value
self.t.next_token()
self.t.expect_keyword('in')
expr = self.parse_expr()
block = self.parse_stmt_block()
return StmtForeach(None, name, expr, block)
self.t.pop()
# Check a forever loop
if self.t.is_token('{'):
block = self.parse_stmt_block()
return StmtFor(None, None, None, block)
# This is probably a normal c like loop
else:
val = None
cond = None
next = None
# TODO: support `for condition` loops
if not self.t.match_token(';'):
# TODO: variable declaration inside this argument
val = self.parse_expr()
self.t.expect_token(';')
if not self.t.match_token(';'):
cond = self.parse_expr()
self.t.expect_token(';')
if not self.t.is_token('{'):
next = self.parse_expr()
block = self.parse_stmt_block()
return StmtFor(val, cond, next, block)
# Unsafe block
if self.t.match_keyword('unsafe'):
return StmtUnsafe(self.parse_stmt_block())
# Defer block
if self.t.match_keyword('defer'):
return StmtDefer(self.parse_stmt_block())
# Variable declaration
if self.t.is_keyword('mut'):
return self._parse_var_decl()
# Might be variable declaration
if self.t.is_token(IdentToken):
self.t.push()
self.t.next_token()
# This verifies we got a variable declaration (a := ) or (a, b, c := )
if self.t.is_token(':=') or self.t.is_token(','):
self.t.pop()
return self._parse_var_decl()
else:
self.t.pop()
# Fallback on expression parsing
return StmtExpr(self.parse_expr())
def parse_stmt_block(self):
self.t.expect_token('{')
stmts = []
block = StmtBlock(self.frame[-1], stmts)
self.frame.append(block)
while not self.t.match_token('}'):
stmts.append(self.parse_stmt())
self.frame.pop()
return block
###################################################################################################################
# Declaration parsing
###################################################################################################################
def parse_type(self):
# Map
if self.t.match_keyword('map'):
self.t.expect_token('[')
key_type = self.parse_type()
self.t.expect_token(']')
value_type = self.parse_type()
return VMapType(key_type, value_type)
# Array
if self.t.match_token('['):
self.t.expect_token(']')
value_type = self.parse_type()
return VArrayType(value_type)
# Optional type
if self.t.match_token('?'):
return VOptionalType(self.parse_type())
# Pointer type
if self.t.match_token('&'):
return VPointerType(self.parse_type())
# Basic type
# TODO: support types from other modules
elif self.t.is_token(IdentToken):
t = VUnknownType(self.t.token.value)
self.t.next_token()
return t
else:
assert False, "Invalid type"
def _parse_func_param(self):
assert self.t.is_token(IdentToken), f"Expected name, got {self.t.token}"
name = self.t.token.value
self.t.next_token()
mut = False
if self.t.match_keyword('mut'):
mut = True
xtype = self.parse_type()
return FuncParam(mut, name, xtype)
def _parse_func(self, pub):
# Method (optional)
method = None
if self.t.match_token('('):
method = self._parse_func_param()
self.t.expect_token(')')
# Name
assert self.t.is_token(IdentToken), f"Expected name, got {self.t.token}"
name = self.t.token.value
self.t.next_token()
interop = False
if self.t.match_token('.'):
assert name == 'C'
assert self.t.is_token(IdentToken), f"Expected name, got {self.t.token}"
assert not pub, f'Interop functions can not be public!'
interop = True
name = self.t.token.value
self.t.next_token()
# Parameters
self.t.expect_token('(')
args = []
# Parse arguments if any
if not self.t.is_token(')'):
args.append(self._parse_func_param())
while self.t.match_token(','):
args.append(self._parse_func_param())
self.t.expect_token(')')
# the return value
ret_type = None
if not self.t.is_token('{') and not self.t.is_token(KeywordToken):
ret_type = self.parse_type()
func = FuncDecl(pub, interop, name, method, args, ret_type)
# The code
if not interop:
self.frame.append(func)
func.block = self.parse_stmt_block()
self.frame.pop()
else:
func.block = None
return func
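# Example (added for illustration; the `fn` keyword itself is presumably consumed by the
# caller before _parse_func is entered): a declaration like
#     (d Dog) speak(times int) string { ... }
# produces
#     FuncDecl(pub, False, 'speak', FuncParam(False, 'd', VUnknownType('Dog')),
#              [FuncParam(False, 'times', VUnknownType('int'))], VUnknownType('string'))
# while a name of the form C.printf(...) marks the declaration as interop and skips the body.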
def _parse_struct_element(self, access: StructMemberAccess):
assert self.t.is_token(IdentToken), f"Expected name, got {self.t.token}"
name = self.t.token.value
self.t.next_token()
xtype = self.parse_type()
return StructElement(access, name, xtype)
def _parse_struct(self, pub):
# Name
assert self.t.is_token(IdentToken), f"Expected name, got {self.t.token}"
name = self.t.token.value
self.t.next_token()
self.t.expect_token('{')
access = StructMemberAccess.PRIVATE
elements = []
while not self.t.match_token('}'):
# This
RBaux = (RB.item()+(meshes['m']<0).copy()*par['borrwedge'])/PI.item()
EVm = np.reshape(np.asarray(np.reshape(np.multiply(RBaux.flatten(order='F').T.copy(),mutil_c.flatten(order='F').copy()),(mpar['nm']*mpar['nk'],mpar['nh']),order='F').dot(np.transpose(P.copy()))),(mpar['nm'],mpar['nk'],mpar['nh']),order='F')
result_EGM_policyupdate = EGM_policyupdate(EVm,EVk,Qminus.item(),PIminus.item(),RBminus.item(),inc,meshes,grid,par,mpar)
c_a_star = result_EGM_policyupdate['c_a_star']
m_a_star = result_EGM_policyupdate['m_a_star']
k_a_star = result_EGM_policyupdate['k_a_star']
c_n_star = result_EGM_policyupdate['c_n_star']
m_n_star = result_EGM_policyupdate['m_n_star']
meshaux = meshes.copy()
meshaux['h'][:,:,-1] = 1000.
## Update Marginal Value of Bonds
mutil_c_n = mutil(c_n_star.copy())
mutil_c_a = mutil(c_a_star.copy())
mutil_c_aux = par['nu']*mutil_c_a + (1-par['nu'])*mutil_c_n
aux = invmutil(mutil_c_aux.copy().flatten(order='F'))-np.squeeze(np.asarray(ControlSS[np.array(range(NN))]))
aux = np.reshape(aux,(mpar['nm'],mpar['nk'],mpar['nh']),order='F')
aux = sf.dct(aux.copy(),norm='ortho',axis=0)
aux = sf.dct(aux.copy(),norm='ortho',axis=1)
aux = sf.dct(aux.copy(),norm='ortho',axis=2)
DC = np.asmatrix(aux.copy().flatten(order='F')).T
RHS[nx+mutil_cind] = DC[indexMUdct]
## Update Marginal Value of capital
EVk = np.reshape(Vk,(mpar['nm']*mpar['nk'],mpar['nh']),order='F').dot(P.copy().T)
Vpoints = np.concatenate(( [meshaux['m'].flatten(order='F')],[meshaux['k'].flatten(order='F')],[meshaux['h'].flatten(order='F')]),axis=0).T
# griddata does not support extrapolation for 3D
Vk_next = griddata(Vpoints,np.asarray(EVk).flatten(order='F').copy(),(m_n_star.copy().flatten(order='F'),meshaux['k'].copy().flatten(order='F'),meshaux['h'].copy().flatten(order='F')),method='linear')
Vk_next_bounds = griddata(Vpoints,np.asarray(EVk).flatten(order='F').copy(),(m_n_star.copy().flatten(order='F'),meshaux['k'].copy().flatten(order='F'),meshaux['h'].copy().flatten(order='F')),method='nearest')
Vk_next[np.isnan(Vk_next.copy())] = Vk_next_bounds[np.isnan(Vk_next.copy())].copy()
Vk_aux = par['nu']*(Rminus.item()+Qminus.item())*mutil_c_a + (1-par['nu'])*Rminus.item()*mutil_c_n +par['beta']*(1-par['nu'])*np.reshape(Vk_next,(mpar['nm'],mpar['nk'],mpar['nh']),order='F')
aux = invmutil(Vk_aux.copy().flatten(order='F')) - np.squeeze(np.asarray(ControlSS[np.array(range(NN))+NN]))
aux = np.reshape(aux.copy(),(mpar['nm'],mpar['nk'],mpar['nh']),order='F')
aux = sf.dct(aux.copy(),norm='ortho',axis=0)
aux = sf.dct(aux.copy(),norm='ortho',axis=1)
aux = sf.dct(aux.copy(),norm='ortho',axis=2)
DC = np.asmatrix(aux.copy().flatten(order='F')).T
RHS[nx+Vkind] = DC[indexVKdct]
## Differences for distributions
# find next smallest on-grid value for money choices
weight11 = np.empty((mpar['nm']*mpar['nk'],mpar['nh'],mpar['nh']))
weight12 = np.empty((mpar['nm']*mpar['nk'],mpar['nh'],mpar['nh']))
weight21 = np.empty((mpar['nm']*mpar['nk'],mpar['nh'],mpar['nh']))
weight22 = np.empty((mpar['nm']*mpar['nk'],mpar['nh'],mpar['nh']))
weightn1 = np.empty((mpar['nm']*mpar['nk'],mpar['nh'],mpar['nh']))
weightn2 = np.empty((mpar['nm']*mpar['nk'],mpar['nh'],mpar['nh']))
ra_genweight = GenWeight(m_a_star,grid['m'])
Dist_m_a = ra_genweight['weight'].copy()
idm_a = ra_genweight['index'].copy()
rn_genweight = GenWeight(m_n_star,grid['m'])
Dist_m_n = rn_genweight['weight'].copy()
idm_n = rn_genweight['index'].copy()
rk_genweight = GenWeight(k_a_star,grid['k'])
Dist_k = rk_genweight['weight'].copy()
idk_a = rk_genweight['index'].copy()
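# Note (added for clarity; GenWeight is defined elsewhere): for an off-grid policy value x
# it appears to return the index i of the next-smallest grid point and the linear weight
#     w = (x - grid[i]) / (grid[i+1] - grid[i]),
# so that probability mass is split as (1 - w) on grid[i] and w on grid[i+1]. This is how
# Dist_m_a / Dist_m_n / Dist_k enter the weight11..weight22 and weightn1/weightn2 terms below.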
idk_n = np.reshape(np.tile(np.outer(np.ones((mpar['nm'])),np.array(range(mpar['nk']))),(1,1,mpar['nh'])),(mpar['nm'],mpar['nk'],mpar['nh']),order = 'F')
# Transition matrix for adjustment case
idm_a = np.tile(np.asmatrix(idm_a.copy().flatten('F')).T,(1,mpar['nh']))
idk_a = np.tile(np.asmatrix(idk_a.copy().flatten('F')).T,(1,mpar['nh']))
idh = np.kron(np.array(range(mpar['nh'])),np.ones((1,mpar['nm']*mpar['nk']*mpar['nh'])))
idm_a = idm_a.copy().astype(int)
idk_a = idk_a.copy().astype(int)
idh = idh.copy().astype(int)
index11 = np.ravel_multi_index([idm_a.flatten(order='F'),idk_a.flatten(order='F'),idh.flatten(order='F')],
(mpar['nm'],mpar['nk'],mpar['nh']),order='F')
index12 = np.ravel_multi_index([idm_a.flatten(order='F'),idk_a.flatten(order='F')+1,idh.flatten(order='F')],
(mpar['nm'],mpar['nk'],mpar['nh']),order='F')
index21 = np.ravel_multi_index([idm_a.flatten(order='F')+1,idk_a.flatten(order='F'),idh.flatten(order='F')],
(mpar['nm'],mpar['nk'],mpar['nh']),order='F')
index22 = np.ravel_multi_index([idm_a.flatten(order='F')+1,idk_a.flatten(order='F')+1,idh.flatten(order='F')],
(mpar['nm'],mpar['nk'],mpar['nh']),order='F')
# for no-adjustment case
idm_n = np.tile(np.asmatrix(idm_n.copy().flatten('F')).T,(1,mpar['nh']))
idk_n = np.tile(np.asmatrix(idk_n.copy().flatten('F')).T,(1,mpar['nh']))
idm_n = idm_n.copy().astype(int)
idk_n = idk_n.copy().astype(int)
indexn1 = np.ravel_multi_index([idm_n.flatten(order='F'),idk_n.flatten(order='F'),idh.flatten(order='F')],
(mpar['nm'],mpar['nk'],mpar['nh']),order='F')
indexn2 = np.ravel_multi_index([idm_n.flatten(order='F')+1,idk_n.flatten(order='F'),idh.flatten(order='F')],
(mpar['nm'],mpar['nk'],mpar['nh']),order='F')
for hh in range(mpar['nh']):
# corresponding weights
weight11_aux = (1-Dist_m_a[:,:,hh].copy())*(1-Dist_k[:,:,hh].copy())
weight12_aux = (1-Dist_m_a[:,:,hh].copy())*(Dist_k[:,:,hh].copy())
weight21_aux = Dist_m_a[:,:,hh].copy()*(1-Dist_k[:,:,hh].copy())
weight22_aux = Dist_m_a[:,:,hh].copy()*(Dist_k[:,:,hh].copy())
weightn1_aux = (1-Dist_m_n[:,:,hh].copy())
weightn2_aux = (Dist_m_n[:,:,hh].copy())
# dimensions (m*k,h',h)
weight11[:,:,hh] = np.outer(weight11_aux.flatten(order='F').copy(),P[hh,:].copy())
weight12[:,:,hh] = np.outer(weight12_aux.flatten(order='F').copy(),P[hh,:].copy())
weight21[:,:,hh] = np.outer(weight21_aux.flatten(order='F').copy(),P[hh,:].copy())
weight22[:,:,hh] = np.outer(weight22_aux.flatten(order='F').copy(),P[hh,:].copy())
weightn1[:,:,hh] = np.outer(weightn1_aux.flatten(order='F').copy(),P[hh,:].copy())
weightn2[:,:,hh] = np.outer(weightn2_aux.flatten(order='F').copy(),P[hh,:].copy())
weight11= np.ndarray.transpose(weight11.copy(),(0,2,1))
weight12= np.ndarray.transpose(weight12.copy(),(0,2,1))
weight21= np.ndarray.transpose(weight21.copy(),(0,2,1))
weight22= np.ndarray.transpose(weight22.copy(),(0,2,1))
rowindex = np.tile(range(mpar['nm']*mpar['nk']*mpar['nh']),(1,4*mpar['nh']))
H_a = sp.coo_matrix((np.hstack((weight11.flatten(order='F'),weight21.flatten(order='F'),weight12.flatten(order='F'),weight22.flatten(order='F'))),
(np.squeeze(rowindex), np.hstack((np.squeeze(np.asarray(index11)),np.squeeze(np.asarray(index21)),np.squeeze(np.asarray(index12)),np.squeeze(np.asarray(index22)))) )),
shape=(mpar['nm']*mpar['nk']*mpar['nh'],mpar['nm']*mpar['nk']*mpar['nh']) )
weightn1= np.ndarray.transpose(weightn1.copy(),(0,2,1))
weightn2= np.ndarray.transpose(weightn2.copy(),(0,2,1))
rowindex = np.tile(range(mpar['nm']*mpar['nk']*mpar['nh']),(1,2*mpar['nh']))
H_n = sp.coo_matrix((np.hstack((weightn1.flatten(order='F'),weightn2.flatten(order='F'))),
(np.squeeze(rowindex), np.hstack((np.squeeze(np.asarray(indexn1)),np.squeeze(np.asarray(indexn2)))) )),
shape=(mpar['nm']*mpar['nk']*mpar['nh'],mpar['nm']*mpar['nk']*mpar['nh']) )
# Joint transition matrix and transitions
H = par['nu']*H_a.copy() +(1-par['nu'])*H_n.copy()
JD_new = JDminus.flatten(order='F').copy().dot(H.todense())
JD_new = np.reshape(np.asarray(JD_new.copy()),(mpar['nm'],mpar['nk'],mpar['nh']),order='F')
# Next period marginal histograms
# liquid assets
aux_m = np.sum(np.sum(JD_new.copy(),axis=1),axis=1)
RHS[marginal_mind] = np.asmatrix(aux_m[:-1].copy()).T
# illiquid asset
aux_k = np.sum(np.sum(JD_new.copy(),axis=0),axis=1)
RHS[marginal_kind] = np.asmatrix(aux_k[:-1].copy()).T
# human capital
aux_h = np.sum(np.sum(JD_new.copy(),axis=0),axis=0)
RHS[marginal_hind] = np.asmatrix(aux_h[:-2].copy()).T
## Third Set: Government Budget constraint
# Return on bonds (Taylor Rule)
RHS[RBind] = np.log(par['RB'])+par['rho_R']*np.log(RBminus/par['RB']) + np.log(PIminus/par['PI'])*((1.-par['rho_R'])*par['theta_pi'])+EPS_TAYLOR
# Inflation jumps to equilibrate real bond supply and demand
if par['tau'] < 1:
taxrevenue = (1-par['tau'])*Wminus*Nminus + (1-par['tau'])*Profitminus
RHS[nx+PIind] = par['rho_B']*np.log(Bminus/targets['B'])+par['rho_B']*np.log(RBminus/par['RB']) - (par['rho_B']+par['gamma_pi'])*np.log(PIminus/par['PI']) - par['gamma_T'] *np.log(Tminus/targets['T'])
LHS[nx+PIind] = np.log(B/targets['B'])
# Government expenditure
RHS[nx+Gind] = B - Bminus*RBminus/PIminus +Tminus
RHS[nx+Tind] = taxrevenue
# Resulting price of capital
RHS[nx+Qind] = (par['phi']*(K/Kminus-1)+1) - par['ABS']
else:
RHS[nx+PIind] = targets['B']
LHS[nx+PIind] = B
RHS[nx+Gind] = targets['G']
RHS[nx+Tind] = 0.
RHS[nx+Qind] = (par['phi']*(K/Kminus-1)+1) - par['ABS']
## Difference
Difference = (LHS-RHS)
return {'Difference':Difference, 'LHS':LHS, 'RHS':RHS, 'JD_new': JD_new, 'c_a_star':c_a_star, 'm_a_star':m_a_star,
'k_a_star':k_a_star,'c_n_star':c_n_star,'m_n_star':m_n_star,'P':P}
def EGM_policyupdate(EVm,EVk, Qminus, PIminus, RBminus, inc, meshes,grid,par,mpar):
## EGM step 1
EMU = par['beta']*np.reshape(EVm.copy(),(mpar['nm'],mpar['nk'],mpar['nh']), order = 'F')
c_new = 1./np.power(EMU,(1./par['xi']))
# Calculate assets consistent with choices being (m')
# Calculate initial money position from the budget constraint,
# that leads to the optimal consumption choice
m_star_n = (c_new.copy() + meshes['m'].copy()-inc['labor'].copy()-inc['rent'].copy())
m_star_n = m_star_n.copy()/(RBminus/PIminus+(m_star_n.copy()<0)*par['borrwedge']/PIminus)
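# Worked out (added for clarity): step 1 inverts the Euler equation
#     u'(c) = c^(-xi) = beta * E[V_m]   =>   c_new = (beta * E[V_m])^(-1/xi)
# and the budget constraint
#     c + m' = (R/pi) * m + labor + rent
# is then solved for today's money m consistent with choosing m' on the grid:
#     m_star_n = (c_new + m' - labor - rent) / (R/pi),
# with the borrowing wedge added to R/pi wherever m_star_n turns out negative.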
# Identify binding constraints
binding_constraints = meshes['m'].copy() < np.tile(m_star_n[0,:,:].copy(),(mpar['nm'],1,1))
# Consumption when drawing assets m' to zero: Eat all resources
Resource = inc['labor'].copy() + inc['rent'].copy() + inc['money'].copy()
m_star_n = np.reshape(m_star_n.copy(),(mpar['nm'],mpar['nk']*mpar['nh']),order='F')
c_n_aux = np.reshape(c_new.copy(),(mpar['nm'],mpar['nk']*mpar['nh']),order='F')
# Interpolate grid['m'] and c_n_aux defined on m_n_aux over grid['m']
# Check monotonicity of m_n_aux
if np.sum(np.abs(np.diff(np.sign(np.diff(m_star_n.copy(),axis=0)),axis=0)),axis=1).max() != 0.:
print(' Warning: non monotone future liquid asset choice encountered ')
c_update = np.zeros((mpar['nm'],mpar['nk']*mpar['nh']))
m_update = np.zeros((mpar['nm'],mpar['nk']*mpar['nh']))
for hh in range(mpar['nk']*mpar['nh']):
Savings = interp1d(np.squeeze(np.asarray(m_star_n[:,hh].copy())), grid['m'].copy(), fill_value='extrapolate')
m_update[:,hh] = Savings(grid['m'].copy())
Consumption = interp1d(np.squeeze(np.asarray(m_star_n[:,hh].copy())), np.squeeze(np.asarray(c_n_aux[:,hh].copy())), fill_value='extrapolate')
c_update[:,hh] = Consumption(grid['m'].copy())
c_n_star = np.reshape(c_update,(mpar['nm'],mpar['nk'],mpar['nh']),order = 'F')
m_n_star = np.reshape(m_update,(mpar['nm'],mpar['nk'],mpar['nh']),order = 'F')
c_n_star[binding_constraints] = np.squeeze(np.asarray(Resource[binding_constraints].copy() - grid['m'][0]))
m_n_star[binding_constraints] = grid['m'].copy().min()
m_n_star[m_n_star>grid['m'][-1]] = grid['m'][-1]
## EGM step 2: find Optimal Portfolio Combinations
term1 = par['beta']*np.reshape(EVk,(mpar['nm'],mpar['nk'],mpar['nh']),order = 'F')
E_return_diff = term1/Qminus - EMU
# Check quasi-monotonicity of E_return_diff
if np.sum(np.abs(np.diff(np.sign(E_return_diff),axis=0)),axis = 0).max() > 2.:
print(' Warning: multiple roots of portfolio choice encountered')
# Find an m_a for a given k' that solves the difference equation
m_a_aux = Fastroot(grid['m'],E_return_diff)
m_a_aux = np.maximum(m_a_aux.copy(),grid['m'][0])
m_a_aux = np.minimum(m_a_aux.copy(),grid['m'][-1])
m_a_aux = np.reshape(m_a_aux.copy(),(mpar['nk'],mpar['nh']),order = 'F')
## EGM step 3
# Constraints for money and capital are not binding
EMU = np.reshape(EMU.copy(),(mpar['nm'],mpar['nk']*mpar['nh']),order = 'F')
# Interpolation of psi-function at m*_n(m,k)
idx = np.digitize(m_a_aux, grid['m'])-1 # find indexes on grid next smallest to optimal policy
idx[m_a_aux<=grid['m'][0]] = 0 # if below minimum
idx[m_a_aux>=grid['m'][-1]] = mpar['nm']-2 #if above maximum
step = np.diff(grid['m'].copy()) # Stepsize on grid
s = (m_a_aux.copy() - grid['m'][idx])/step[idx] # Distance of optimal policy to next grid point
aux_index = np.array(range(0,(mpar['nk']*mpar['nh'])))*mpar['nm'] # aux for linear indexes
aux3 = EMU.flatten(order = 'F').copy()[idx.flatten(order='F').copy()+aux_index.flatten(order = 'F').copy()] # calculate linear indexes
# Interpolate EMU(m',k',s'*h',M',K') over m*_n(k'), m-dim is dropped
EMU_star = aux3 + s.flatten(order = 'F')*(EMU.flatten(order='F').copy()[idx.flatten(order = 'F').copy() + aux_index.flatten(order = 'F').copy()+1]-aux3) # linear interpolation
c_a_aux = 1/(EMU_star.copy()**(1/par['xi']))
cap_expenditure = np.squeeze(inc['capital'][0,:,:])
auxL = np.squeeze(inc['labor'][0,:,:])
# Resources that lead to capital choice k' = c + m*(k') + k' - w*h*N = value of todays cap and money holdings
Resource = c_a_aux.copy() + m_a_aux.flatten(order = 'F').copy() + cap_expenditure.flatten(order = 'F').copy() - auxL.flatten(order = 'F').copy()
c_a_aux = np.reshape(c_a_aux.copy(), (mpar['nk'], mpar['nh']),order = 'F')
Resource = np.reshape(Resource.copy(), (mpar['nk'], mpar['nh']),order = 'F')
# Money constraint is not binding, but capital constraint is binding
m_star_zero = np.squeeze(m_a_aux[0,:].copy()) # Money holdings that correspond to k'=0: m*(k=0)
# Use consumption at k'=0 from constrained problem, when m' is on grid
aux_c = np.reshape(c_new[:,0,:],(mpar['nm'], mpar['nh']),order = 'F')
aux_inc = np.reshape(inc['labor'][0,0,:],(1, mpar['nh']),order = 'F')
cons_list = []
res_list = []
mon_list = []
cap_list = []
for j in range(mpar['nh']):
# When choosing zero capital holdings, HHs might still want to choose money holdings smaller than m*(k'=0)
if m_star_zero[j]>grid['m'][0]:
# Calculate consumption policies, when HHs chooses money holdings lower than m*(k'=0) and capital holdings k'=0 and save them in cons_list
log_index = grid['m'].T.copy() < m_star_zero[j]
# aux_c is the consumption policy under no cap. adj.
c_k_cons = aux_c[log_index, j].copy()
cons_list.append( c_k_cons.copy() ) # Consumption at k'=0, m'<m_a*(0)
# Required resources: money choice + consumption - labor income, i.e. the resources that lead to k'=0 and m'<m*(k'=0)
res_list.append( grid['m'].T[log_index] + c_k_cons.copy() - aux_inc[0,j] )
mon_list.append( grid['m'].T[log_index])
cap_list.append( np.zeros((np.sum(log_index))))
# Merge lists
c_a_aux = np.reshape(c_a_aux.copy(),(mpar['nk'], mpar['nh']),order = 'F')
m_a_aux = np.reshape(m_a_aux.copy(),(mpar['nk'], mpar['nh']),order = 'F')
Resource = np.reshape(Resource.copy(),(mpar['nk'], mpar['nh']),order = 'F')
cons_list_1=[]
res_list_1=[]
mon_list_1=[]
cap_list_1=[]
for j in range(mpar['nh']):
cons_list_1.append( np.vstack((np.asmatrix(cons_list[j]).T, np.asmatrix(c_a_aux[:,j]).T)) )
res_list_1.append( np.vstack((np.asmatrix(res_list[j]).T, np.asmatrix(Resource[:,j]).T)) )
mon_list_1.append( np.vstack((np.asmatrix(mon_list[j]).T, np.asmatrix(m_a_aux[:,j]).T)) )
cap_list_1.append( np.vstack((np.asmatrix(cap_list[j].copy()).T, np.asmatrix(grid['k']).T)) )
## EGM step 4: Interpolate back to fixed grid
c_a_star = np.zeros((mpar['nm']*mpar['nk'], mpar['nh']),order = 'F')
m_a_star = np.zeros((mpar['nm']*mpar['nk'], mpar['nh']),order = 'F')
k_a_star = np.zeros((mpar['nm']*mpar['nk'], mpar['nh']),order = 'F')
Resource_grid = np.reshape(inc['capital']+inc['money']+inc['rent'],(mpar['nm']*mpar['nk'], mpar['nh']),order = 'F')
labor_inc_grid = np.reshape(inc['labor'],(mpar['nm']*mpar['nk'], mpar['nh']),order = 'F')
for j in range(mpar['nh']):
log_index=Resource_grid[:,j] < res_list[j][0]
# when at most one constraint binds:
# Check monotonicity of resources
if np.sum(np.abs(np.diff(np.sign(np.diff(res_list[j])))),axis = 0).max() != 0. :
print('warning(non monotone resource list encountered)')
cons = interp1d(np.squeeze(np.asarray(res_list_1[j].copy())), np.squeeze(np.asarray(cons_list_1[j].copy())),fill_value='extrapolate')
c_a_star[:,j] = cons(Resource_grid[:,j].copy())
mon = interp1d(np.squeeze(np.asarray(res_list_1[j].copy())), np.squeeze(np.asarray(mon_list_1[j].copy())),fill_value='extrapolate')
m_a_star[:,j] = mon(Resource_grid[:,j].copy())
cap = interp1d(np.squeeze(np.asarray(res_list_1[j].copy())), np.squeeze(np.asarray(cap_list_1[j].copy())),fill_value='extrapolate')
k_a_star[:,j] = cap(Resource_grid[:,j].copy())
# Lowest value of res_list corresponds to m_a'=0 and k_a'=0.
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Colony Framework
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Colony Framework.
#
# Hive Colony Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Colony Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Colony Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "<NAME> <<EMAIL>>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
from . import structures
class RootEntity(structures.EntityClass):
"""
The root entity class, this class represents
a typical base class for a model hierarchy.
"""
object_id = dict(
id = True,
type = "integer",
generated = True
)
""" The object id of the root entity """
status = dict(
type = "integer"
)
""" The status of the entity (1-enabled, 2-disabled) """
metadata = dict(
type = "metadata"
)
""" Simple metadata value that is going to be used
for storage of structured data (maps and lists) """
def __init__(self):
"""
Constructor of the class.
"""
self.object_id = None
self.status = 1
class RootEntityAbstract(structures.EntityClass):
"""
The root entity abstract class, this class represents
a typical base class for a model hierarchy.
This version is set as an abstract class so that no
representation of it is created in the data source.
"""
abstract = True
""" Abstract class flag, indicating that this class is not
meant to be stored in the data source """
object_id = dict(
id = True,
type = "integer",
generated = True
)
""" The object id of the root entity abstract """
status = dict(
type = "integer"
)
""" The status of the entity (1-enabled, 2-disabled) """
def __init__(self):
"""
Constructor of the class.
"""
self.object_id = None
self.status = 1
class Loggable(structures.EntityClass):
"""
The (interface) class that decorates an entity with
the "loggable" attribute for polymorphic retrieval.
"""
object_id = dict(
id = True,
type = "integer",
generated = True
)
""" The object id of the "loggable" """
log_id = dict(
type = "integer",
generated = True,
generator_type = "table",
generator_field_name = "logable_log_id"
)
""" The id of the log entry (primary identifier) """
log_number = dict(
type = "integer"
)
""" The log number of the taxable """
def __init__(self):
"""
Constructor of the class.
"""
RootEntity.__init__(self)
self.object_id = None
self.log_id = None
self.log_number = 1009
class Taxable(RootEntity):
"""
The (interface) class that decorates an entity with
the taxable attribute for polymorphic retrieval.
"""
tax_number = dict(
type = "integer"
)
""" The tax number of the taxable """
def __init__(self):
RootEntity.__init__(self)
class Person(RootEntity):
"""
The person entity class, represents the set of typical
attributes of a person.
"""
name = dict(
type = "text"
)
""" The name of the person """
age = dict(
type = "integer"
)
""" The age of the person """
weight = dict(
type = "decimal"
)
""" The weight of the person """
parent = dict(
type = "relation"
)
""" The parent for the current person """
children = dict(
type = "relation"
)
""" The children of the current person """
dogs = dict(
type = "relation"
)
""" The dogs "owned" by the person """
cars = dict(
type = "relation"
)
""" The cars "owned" by the person """
employees = dict(
type = "relation"
)
""" The employees associated with the person """
address = dict(
type = "relation"
)
""" The address associated with the person """
def __init__(self):
"""
Constructor of the class.
"""
RootEntity.__init__(self)
self.name = "Anonymous"
self.age = 18
@staticmethod
def _relation_parent():
return dict(
type = "to-one",
target = Person,
reverse = "children",
is_mapper = True
)
@staticmethod
def _relation_children():
return dict(
type = "to-many",
target = Person,
reverse = "parent"
)
@staticmethod
def _relation_dogs():
return dict(
type = "to-many",
target = Dog,
reverse = "owner"
)
@staticmethod
def _relation_cars():
return dict(
type = "to-many",
target = Car,
reverse = "owners"
)
@staticmethod
def _relation_employees():
return dict(
type = "to-many",
target = Employee,
reverse = "boss"
)
@staticmethod
def _relation_address():
return dict(
type = "to-one",
target = Address,
reverse = "person",
is_mapper = True
)
@classmethod
def _attr_double_age(cls, instance):
return cls._attr(instance, "age") * 2
class Employee(Person, Loggable, Taxable):
"""
The employee entity class, the set of attributes
contained in this class should be able to represent
an employee in a typical enterprise system.
"""
salary = dict(
type = "integer"
)
""" The salary of the employee """
boss = dict(
type = "relation"
)
""" The boss of the employee (only one is allowed) """
def __init__(self):
"""
Constructor of the class.
"""
Person.__init__(self)
Loggable.__init__(self)
Taxable.__init__(self)
self.salary = 200
@staticmethod
def _relation_boss():
return dict(
type = "to-one",
target = Person,
reverse = "employees",
is_mapper = True
)
class Breeder(Person):
"""
The specialized version of a person that takes care of a
professional/specialized dog or cat.
"""
license_number = dict(
type = "text"
)
""" The license number as a set of characters for the
breeder that is going to identify him professionally """
@staticmethod
def _relation_dogs():
return dict(
type = "to-many",
target = BreedDog,
reverse = "owner"
)
class Address(RootEntity):
"""
The address entity class, representing the typical
set of attributes for a postal address.
"""
street = dict(
type = "text"
)
""" The street of the address """
number = dict(
type = "integer"
)
""" The door number of the address """
country = dict(
type = "text"
)
""" The country of the address """
person = dict(
type = "relation"
)
""" The person associated with the address """
def __init__(self):
"""
Constructor for the class.
"""
RootEntity.__init__(self)
self.street = "N/A"
self.number = 0
self.country = "N/A"
@staticmethod
def _relation_person():
return dict(
type = "to-one",
target = Person,
reverse = "address"
)
class Dog(RootEntity):
"""
The dog entity class, representing the typical
attribute of a pet with the characteristics
of a dog.
"""
name = dict(
type = "text"
)
""" The name of the dog """
owner = dict(
type = "relation"
)
""" The owner of the dog """
enemy = dict(
type = "relation"
)
""" The enemy of the dog """
def __init__(self):
"""
Constructor for the class.
"""
RootEntity.__init__(self)
self.name = "Anonymous"
@staticmethod
def _relation_owner():
return dict(
type = "to-one",
target = Person,
reverse = "dogs",
is_mapper = True
)
@staticmethod
def _relation_enemy():
return dict(
type = "to-one",
target = Cat,
is_mapper = True
)
class BreedDog(Dog):
"""
The specialized dog class for dogs that are meant to be
adopted by proper breeders, should contain special attributes
like the digital tag.
"""
digital_tag = dict(
type = "text"
)
""" The digital tag of the dog, meant to identify
it in any intervention """
@staticmethod
def _relation_owner():
return dict(
type = "to-one",
target = Breeder,
reverse = "dogs",
is_mapper = True
)
class Cat(RootEntity):
"""
The cat entity class, representing the typical
attribute of a pet with the characteristics
of a cat.
"""
name = dict(
type = "text"
)
""" The name of the cat """
def __init__(self):
"""
Constructor of the class.
"""
RootEntity.__init__(self)
self.name = "Anonymous"
class Car(RootEntity):
"""
The car entity class, representing the car vehicle
| |
"LOAD_DATETIME",
"STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME",
},
"src_ldts": "LOAD_DATETIME"
},
"PIT_CUSTOMER_LG": {
"source_model": "HUB_CUSTOMER_TS",
"src_pk": "CUSTOMER_PK",
"as_of_dates_table": "AS_OF_DATE",
"satellites":
{
"SAT_CUSTOMER_DETAILS_TS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATETIME"}
},
"SAT_CUSTOMER_LOGIN_TS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATETIME"}
}
},
"stage_tables":
{
"STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
"STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME",
},
"src_ldts": "LOAD_DATETIME"
},
"PIT_CUSTOMER_HG": {
"source_model": "HUB_CUSTOMER",
"src_pk": "CUSTOMER_PK",
"as_of_dates_table": "AS_OF_DATE",
"satellites":
{
"SAT_CUSTOMER_DETAILS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATE"}
},
"SAT_CUSTOMER_LOGIN": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATE"}
}
},
"stage_tables":
{
"STG_CUSTOMER_DETAILS": "LOAD_DATE",
"STG_CUSTOMER_LOGIN": "LOAD_DATE",
},
"src_ldts": "LOAD_DATE"
}
}
context.stage_columns = {
"RAW_STAGE_DETAILS":
["CUSTOMER_ID",
"CUSTOMER_NAME",
"CUSTOMER_ADDRESS",
"CUSTOMER_DOB",
"LOAD_DATE",
"SOURCE"],
"RAW_STAGE_DETAILS_TS":
["CUSTOMER_ID",
"CUSTOMER_NAME",
"CUSTOMER_ADDRESS",
"CUSTOMER_DOB",
"LOAD_DATETIME",
"SOURCE"],
"RAW_STAGE_LOGIN":
["CUSTOMER_ID",
"LAST_LOGIN_DATE",
"DEVICE_USED",
"LOAD_DATE",
"SOURCE"],
"RAW_STAGE_LOGIN_TS":
["CUSTOMER_ID",
"LAST_LOGIN_DATE",
"DEVICE_USED",
"LOAD_DATETIME",
"SOURCE"]
}
context.seed_config = {
"RAW_STAGE_DETAILS": {
"+column_types": {
"CUSTOMER_ID": "STRING",
"CUSTOMER_NAME": "STRING",
"CUSTOMER_ADDRESS": "STRING",
"CUSTOMER_DOB": "DATE",
"LOAD_DATE": "DATE",
"SOURCE": "STRING"
}
},
"RAW_STAGE_DETAILS_TS": {
"+column_types": {
"CUSTOMER_ID": "STRING",
"CUSTOMER_NAME": "STRING",
"CUSTOMER_ADDRESS": "STRING",
"CUSTOMER_DOB": "DATE",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "STRING"
}
},
"RAW_STAGE_LOGIN": {
"+column_types": {
"CUSTOMER_ID": "STRING",
"LAST_LOGIN_DATE": "DATETIME",
"DEVICE_USED": "STRING",
"LOAD_DATE": "DATE",
"SOURCE": "STRING"
}
},
"RAW_STAGE_LOGIN_TS": {
"+column_types": {
"CUSTOMER_ID": "STRING",
"LAST_LOGIN_DATE": "DATETIME",
"DEVICE_USED": "STRING",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "STRING"
}
},
"HUB_CUSTOMER": {
"+column_types": {
"CUSTOMER_PK": "STRING",
"CUSTOMER_ID": "STRING",
"LOAD_DATE": "DATE",
"SOURCE": "STRING"
}
},
"HUB_CUSTOMER_TS": {
"+column_types": {
"CUSTOMER_PK": "STRING",
"CUSTOMER_ID": "STRING",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "STRING"
}
},
"SAT_CUSTOMER_DETAILS": {
"+column_types": {
"CUSTOMER_PK": "STRING",
"HASHDIFF": "STRING",
"CUSTOMER_NAME": "STRING",
"CUSTOMER_ADDRESS": "STRING",
"CUSTOMER_DOB": "DATE",
"EFFECTIVE_FROM": "DATE",
"LOAD_DATE": "DATE",
"SOURCE": "STRING"
}
},
"SAT_CUSTOMER_DETAILS_TS": {
"+column_types": {
"CUSTOMER_PK": "STRING",
"HASHDIFF": "STRING",
"CUSTOMER_NAME": "STRING",
"CUSTOMER_ADDRESS": "STRING",
"CUSTOMER_DOB": "DATE",
"EFFECTIVE_FROM": "DATETIME",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "STRING"
}
},
"SAT_CUSTOMER_LOGIN": {
"+column_types": {
"CUSTOMER_PK": "STRING",
"HASHDIFF": "STRING",
"DEVICE_USED": "STRING",
"LAST_LOGIN_DATE": "DATETIME",
"EFFECTIVE_FROM": "DATE",
"LOAD_DATE": "DATE",
"SOURCE": "STRING"
}
},
"SAT_CUSTOMER_LOGIN_TS": {
"+column_types": {
"CUSTOMER_PK": "STRING",
"HASHDIFF": "STRING",
"DEVICE_USED": "STRING",
"LAST_LOGIN_DATE": "DATETIME",
"EFFECTIVE_FROM": "DATETIME",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "STRING"
}
},
"AS_OF_DATE": {
"+column_types": {
"AS_OF_DATE": "DATETIME"
}
},
"PIT_CUSTOMER": {
"+column_types": {
"AS_OF_DATE": "DATETIME",
"CUSTOMER_PK": "STRING",
"SAT_CUSTOMER_DETAILS_PK": "STRING",
"SAT_CUSTOMER_DETAILS_LDTS": "DATETIME",
"SAT_CUSTOMER_LOGIN_PK": "STRING",
"SAT_CUSTOMER_LOGIN_LDTS": "DATETIME"
}
},
"PIT_CUSTOMER_TS": {
"+column_types": {
"AS_OF_DATE": "DATETIME",
"CUSTOMER_PK": "STRING",
"SAT_CUSTOMER_DETAILS_TS_PK": "STRING",
"SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME",
"SAT_CUSTOMER_LOGIN_TS_PK": "STRING",
"SAT_CUSTOMER_LOGIN_TS_LDTS": "DATETIME"
}
},
"PIT_CUSTOMER_LG": {
"+column_types": {
"AS_OF_DATE": "DATETIME",
"CUSTOMER_PK": "STRING",
"SAT_CUSTOMER_DETAILS_TS_PK": "STRING",
"SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME",
"SAT_CUSTOMER_LOGIN_TS_PK": "STRING",
"SAT_CUSTOMER_LOGIN_TS_LDTS": "DATETIME"
}
},
"PIT_CUSTOMER_HG": {
"+column_types": {
"AS_OF_DATE": "DATETIME",
"CUSTOMER_PK": "STRING",
"SAT_CUSTOMER_DETAILS_PK": "STRING",
"SAT_CUSTOMER_DETAILS_LDTS": "DATETIME",
"SAT_CUSTOMER_LOGIN_PK": "STRING",
"SAT_CUSTOMER_LOGIN_LDTS": "DATETIME"
}
}
}
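# Note (added for clarity): the PIT seed columns above follow the naming convention implied
# by the "satellites" metadata, i.e. each satellite SAT_X contributes a SAT_X_PK and a
# SAT_X_LDTS column to the PIT table, keyed on the hub's CUSTOMER_PK and the AS_OF_DATE.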
@fixture
def pit_sqlserver(context):
"""
Define the structures and metadata to perform PIT load
"""
context.vault_structure_type = "pit"
context.hashed_columns = {
"STG_CUSTOMER_DETAILS": {
"CUSTOMER_PK": "CUSTOMER_ID",
"HASHDIFF": {"is_hashdiff": True,
"columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
}
},
"STG_CUSTOMER_LOGIN": {
"CUSTOMER_PK": "CUSTOMER_ID",
"HASHDIFF": {"is_hashdiff": True,
"columns": ["DEVICE_USED", "LAST_LOGIN_DATE"]
}
},
"STG_CUSTOMER_PROFILE": {
"CUSTOMER_PK": "CUSTOMER_ID",
"HASHDIFF": {"is_hashdiff": True,
"columns": ["DASHBOARD_COLOUR", "DISPLAY_NAME"]
}
}
}
context.derived_columns = {
"STG_CUSTOMER_DETAILS": {
"EFFECTIVE_FROM": "LOAD_DATE"
},
"STG_CUSTOMER_LOGIN": {
"EFFECTIVE_FROM": "LOAD_DATE"
},
"STG_CUSTOMER_PROFILE": {
"EFFECTIVE_FROM": "LOAD_DATE"
}
}
context.vault_structure_columns = {
"HUB_CUSTOMER": {
"source_model": ["STG_CUSTOMER_DETAILS",
"STG_CUSTOMER_LOGIN",
"STG_CUSTOMER_PROFILE"],
"src_pk": "CUSTOMER_PK",
"src_nk": "CUSTOMER_ID",
"src_ldts": "LOAD_DATE",
"src_source": "SOURCE"
},
"SAT_CUSTOMER_DETAILS": {
"source_model": "STG_CUSTOMER_DETAILS",
"src_pk": "CUSTOMER_PK",
"src_hashdiff": "HASHDIFF",
"src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
"src_eff": "EFFECTIVE_FROM",
"src_ldts": "LOAD_DATE",
"src_source": "SOURCE"
},
"SAT_CUSTOMER_LOGIN": {
"source_model": "STG_CUSTOMER_LOGIN",
"src_pk": "CUSTOMER_PK",
"src_hashdiff": "HASHDIFF",
"src_payload": ["LAST_LOGIN_DATE", "DEVICE_USED"],
"src_eff": "EFFECTIVE_FROM",
"src_ldts": "LOAD_DATE",
"src_source": "SOURCE"
},
"SAT_CUSTOMER_PROFILE": {
"source_model": "STG_CUSTOMER_PROFILE",
"src_pk": "CUSTOMER_PK",
"src_hashdiff": "HASHDIFF",
"src_payload": ["DASHBOARD_COLOUR", "DISPLAY_NAME"],
"src_eff": "EFFECTIVE_FROM",
"src_ldts": "LOAD_DATE",
"src_source": "SOURCE"
},
"PIT_CUSTOMER": {
"source_model": "HUB_CUSTOMER",
"src_pk": "CUSTOMER_PK",
"as_of_dates_table": "AS_OF_DATE",
"satellites":
{
"SAT_CUSTOMER_DETAILS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATE"}
},
"SAT_CUSTOMER_LOGIN": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATE"}
},
"SAT_CUSTOMER_PROFILE": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATE"}
}
},
"stage_tables":
{
"STG_CUSTOMER_DETAILS": "LOAD_DATE",
"STG_CUSTOMER_LOGIN": "LOAD_DATE",
"STG_CUSTOMER_PROFILE": "LOAD_DATE"
},
"src_ldts": "LOAD_DATE"
}
}
context.stage_columns = {
"RAW_STAGE_DETAILS":
["CUSTOMER_ID",
"CUSTOMER_NAME",
"CUSTOMER_ADDRESS",
"CUSTOMER_DOB",
"LOAD_DATE",
"SOURCE"]
,
"RAW_STAGE_LOGIN":
["CUSTOMER_ID",
"LAST_LOGIN_DATE",
"DEVICE_USED",
"LOAD_DATE",
"SOURCE"]
,
"RAW_STAGE_PROFILE":
["CUSTOMER_ID",
"DASHBOARD_COLOUR",
"DISPLAY_NAME",
"LOAD_DATE",
"SOURCE"]
}
context.seed_config = {
"RAW_STAGE_DETAILS": {
"+column_types": {
"CUSTOMER_ID": "VARCHAR(5)",
"CUSTOMER_NAME": "VARCHAR(10)",
"CUSTOMER_ADDRESS": "VARCHAR(30)",
"CUSTOMER_DOB": "DATE",
"LOAD_DATE": "DATETIME",
"SOURCE": "VARCHAR(10)"
}
},
"RAW_STAGE_LOGIN": {
"+column_types": {
"CUSTOMER_ID": "VARCHAR(5)",
"LAST_LOGIN_DATE": "DATETIME",
"DEVICE_USED": "VARCHAR(10)",
"LOAD_DATE": "DATETIME",
"SOURCE": "VARCHAR(10)"
}
},
"RAW_STAGE_PROFILE": {
"+column_types": {
"CUSTOMER_ID": "VARCHAR(5)",
"DASHBOARD_COLOUR": "VARCHAR(10)",
"DISPLAY_NAME": "VARCHAR(10)",
"LOAD_DATE": "DATETIME",
"SOURCE": "VARCHAR(10)"
}
},
"HUB_CUSTOMER": {
"+column_types": {
"CUSTOMER_PK": "BINARY(16)",
"CUSTOMER_ID": "VARCHAR(5)",
"LOAD_DATE": "DATETIME",
"SOURCE": "VARCHAR(10)"
}
},
"SAT_CUSTOMER_DETAILS": {
"+column_types": {
"CUSTOMER_PK": "BINARY(16)",
"HASHDIFF": "BINARY(16)",
"CUSTOMER_NAME": "VARCHAR(10)",
"CUSTOMER_ADDRESS": "VARCHAR(30)",
"CUSTOMER_DOB": "DATE",
"EFFECTIVE_FROM": "DATETIME",
"LOAD_DATE": "DATETIME",
"SOURCE": "VARCHAR(10)"
}
},
"SAT_CUSTOMER_LOGIN": {
"+column_types": {
"CUSTOMER_PK": "BINARY(16)",
"HASHDIFF": "BINARY(16)",
"DEVICE_USED": "VARCHAR(10)",
"LAST_LOGIN_DATE": "DATETIME",
"EFFECTIVE_FROM": "DATETIME",
"LOAD_DATE": "DATETIME",
"SOURCE": "VARCHAR(10)"
}
},
"SAT_CUSTOMER_PROFILE": {
"+column_types": {
"CUSTOMER_PK": "BINARY(16)",
"HASHDIFF": "BINARY(16)",
"DASHBOARD_COLOUR": "VARCHAR(10)",
"DISPLAY_NAME": "VARCHAR(10)",
"EFFECTIVE_FROM": "DATETIME",
"LOAD_DATE": "DATETIME",
"SOURCE": "VARCHAR(10)"
}
},
"AS_OF_DATE": {
"+column_types": {
"AS_OF_DATE": "DATETIME"
}
},
"PIT_CUSTOMER": {
"+column_types": {
"AS_OF_DATE": "DATETIME",
"CUSTOMER_PK": "BINARY(16)",
"SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
"SAT_CUSTOMER_DETAILS_LDTS": "DATETIME",
"SAT_CUSTOMER_LOGIN_PK": "BINARY(16)",
"SAT_CUSTOMER_LOGIN_LDTS": "DATETIME",
"SAT_CUSTOMER_PROFILE_PK": "BINARY(16)",
"SAT_CUSTOMER_PROFILE_LDTS": "DATETIME"
}
}
}
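# Note: the "+column_types" blocks above follow dbt's seed configuration
# format and pin an explicit SQL type for every seeded column; the specific
# types shown are assumptions of this test setup, not requirements of dbtvault.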
@fixture
def pit_one_sat_sqlserver(context):
"""
Define the structures and metadata to perform PIT load
"""
context.vault_structure_type = "pit"
context.hashed_columns = {
"STG_CUSTOMER_DETAILS": {
"CUSTOMER_PK": "CUSTOMER_ID",
"HASHDIFF": {"is_hashdiff": True,
"columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
}
},
"STG_CUSTOMER_DETAILS_TS": {
"CUSTOMER_PK": "CUSTOMER_ID",
"HASHDIFF": {"is_hashdiff": True,
"columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
}
}
}
context.derived_columns = {
"STG_CUSTOMER_DETAILS": {
"EFFECTIVE_FROM": "LOAD_DATE"
},
"STG_CUSTOMER_DETAILS_TS": {
"EFFECTIVE_FROM": "LOAD_DATETIME"
}
}
context.vault_structure_columns = {
"HUB_CUSTOMER": {
"source_model": ["STG_CUSTOMER_DETAILS",
],
"src_pk": "CUSTOMER_PK",
"src_nk": "CUSTOMER_ID",
"src_ldts": "LOAD_DATE",
"src_source": "SOURCE"
},
"HUB_CUSTOMER_TS": {
"source_model": ["STG_CUSTOMER_DETAILS_TS",
],
"src_pk": "CUSTOMER_PK",
"src_nk": "CUSTOMER_ID",
"src_ldts": "LOAD_DATETIME",
"src_source": "SOURCE"
},
"SAT_CUSTOMER_DETAILS": {
"source_model": "STG_CUSTOMER_DETAILS",
"src_pk": "CUSTOMER_PK",
"src_hashdiff": "HASHDIFF",
"src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
"src_eff": "EFFECTIVE_FROM",
"src_ldts": "LOAD_DATE",
"src_source": "SOURCE"
},
"SAT_CUSTOMER_DETAILS_TS": {
"source_model": "STG_CUSTOMER_DETAILS_TS",
"src_pk": "CUSTOMER_PK",
"src_hashdiff": "HASHDIFF",
"src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
"src_eff": "EFFECTIVE_FROM",
"src_ldts": "LOAD_DATETIME",
"src_source": "SOURCE"
},
"PIT_CUSTOMER": {
"source_model": "HUB_CUSTOMER",
"src_pk": "CUSTOMER_PK",
"as_of_dates_table": "AS_OF_DATE",
"satellites":
{
"SAT_CUSTOMER_DETAILS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATE"}
}
},
"stage_tables":
{
"STG_CUSTOMER_DETAILS": "LOAD_DATE",
},
"src_ldts": "LOAD_DATE"
},
"PIT_CUSTOMER_TS": {
"source_model": "HUB_CUSTOMER_TS",
"src_pk": "CUSTOMER_PK",
"as_of_dates_table": "AS_OF_DATE",
"satellites":
{
"SAT_CUSTOMER_DETAILS_TS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATETIME"}
}
},
"stage_tables":
{
"STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
},
"src_ldts": "LOAD_DATETIME"
},
"PIT_CUSTOMER_LG": {
"source_model": "HUB_CUSTOMER_TS",
"src_pk": "CUSTOMER_PK",
"as_of_dates_table": "AS_OF_DATE",
"satellites":
{
"SAT_CUSTOMER_DETAILS_TS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATETIME"}
}
},
"stage_tables":
{
"STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
},
"src_ldts": "LOAD_DATETIME"
},
"PIT_CUSTOMER_HG": {
"source_model": "HUB_CUSTOMER",
"src_pk": "CUSTOMER_PK",
"as_of_dates_table": "AS_OF_DATE",
"satellites":
{
"SAT_CUSTOMER_DETAILS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATE"}
}
},
"stage_tables":
{
"STG_CUSTOMER_DETAILS": "LOAD_DATE",
},
"src_ldts": "LOAD_DATE"
}
}
context.stage_columns = {
"RAW_STAGE_DETAILS":
["CUSTOMER_ID",
"CUSTOMER_NAME",
"CUSTOMER_ADDRESS",
"CUSTOMER_DOB",
"LOAD_DATE",
"SOURCE"],
"RAW_STAGE_DETAILS_TS":
["CUSTOMER_ID",
"CUSTOMER_NAME",
"CUSTOMER_ADDRESS",
"CUSTOMER_DOB",
"LOAD_DATETIME",
"SOURCE"]
}
context.seed_config = {
"RAW_STAGE_DETAILS": {
"+column_types": {
"CUSTOMER_ID": "VARCHAR(50)",
"CUSTOMER_NAME": "VARCHAR(50)",
"CUSTOMER_ADDRESS": "VARCHAR(50)",
"CUSTOMER_DOB": "DATE",
"LOAD_DATE": "DATE",
"SOURCE": "VARCHAR(50)"
}
},
"RAW_STAGE_DETAILS_TS": {
"+column_types": {
"CUSTOMER_ID": "VARCHAR(50)",
"CUSTOMER_NAME": "VARCHAR(50)",
"CUSTOMER_ADDRESS": "VARCHAR(50)",
"CUSTOMER_DOB": "DATE",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "VARCHAR(50)"
}
},
"HUB_CUSTOMER": {
"+column_types": {
"CUSTOMER_PK": "BINARY(16)",
"CUSTOMER_ID": "VARCHAR(50)",
"LOAD_DATE": "DATE",
"SOURCE": "VARCHAR(50)"
}
},
"HUB_CUSTOMER_TS": {
"+column_types": {
"CUSTOMER_PK": "BINARY(16)",
"CUSTOMER_ID": "VARCHAR(50)",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "VARCHAR(50)"
}
},
"SAT_CUSTOMER_DETAILS": {
"+column_types": {
"CUSTOMER_PK": "BINARY(16)",
"HASHDIFF": "BINARY(16)",
"CUSTOMER_NAME": "VARCHAR(50)",
"CUSTOMER_ADDRESS": "VARCHAR(50)",
"CUSTOMER_DOB": "DATE",
"EFFECTIVE_FROM": "DATE",
"LOAD_DATE": "DATE",
"SOURCE": "VARCHAR(50)"
}
},
"SAT_CUSTOMER_DETAILS_TS": {
"+column_types": {
"CUSTOMER_PK": "BINARY(16)",
"HASHDIFF": "BINARY(16)",
"CUSTOMER_NAME": "VARCHAR(50)",
"CUSTOMER_ADDRESS": "VARCHAR(50)",
"CUSTOMER_DOB": "DATE",
"EFFECTIVE_FROM": "DATETIME",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "VARCHAR(50)"
}
},
"AS_OF_DATE": {
"+column_types": {
"AS_OF_DATE": "DATETIME"
}
},
"PIT_CUSTOMER": {
"+column_types": {
"AS_OF_DATE": "DATETIME",
"CUSTOMER_PK": "BINARY(16)",
"SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
"SAT_CUSTOMER_DETAILS_LDTS": "DATETIME"
}
},
"PIT_CUSTOMER_TS": {
"+column_types": {
"AS_OF_DATE": "DATETIME",
"CUSTOMER_PK": "BINARY(16)",
"SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)",
"SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME"
}
},
"PIT_CUSTOMER_LG": {
"+column_types": {
"AS_OF_DATE": "DATETIME",
"CUSTOMER_PK": "BINARY(16)",
"SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)",
"SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME"
}
},
"PIT_CUSTOMER_HG": {
"+column_types": {
"AS_OF_DATE": "DATETIME",
"CUSTOMER_PK": "BINARY(16)",
"SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
"SAT_CUSTOMER_DETAILS_LDTS": "DATETIME"
}
}
}
@fixture
def pit_two_sats_sqlserver(context):
"""
Define the structures and metadata to perform PIT load
"""
context.vault_structure_type = "pit"
context.hashed_columns = {
"STG_CUSTOMER_DETAILS": {
"CUSTOMER_PK": "CUSTOMER_ID",
"HASHDIFF": {"is_hashdiff": True,
"columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
}
},
"STG_CUSTOMER_DETAILS_TS": {
"CUSTOMER_PK": "CUSTOMER_ID",
"HASHDIFF": {"is_hashdiff": True,
"columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
}
},
"STG_CUSTOMER_LOGIN": {
"CUSTOMER_PK": "CUSTOMER_ID",
"HASHDIFF": {"is_hashdiff": True,
"columns": ["DEVICE_USED", "LAST_LOGIN_DATE"]
}
},
"STG_CUSTOMER_LOGIN_TS": {
"CUSTOMER_PK": "CUSTOMER_ID",
"HASHDIFF": {"is_hashdiff": True,
"columns": ["DEVICE_USED", "LAST_LOGIN_DATE"]
}
}
}
context.derived_columns = {
"STG_CUSTOMER_DETAILS": {
"EFFECTIVE_FROM": "LOAD_DATE"
},
"STG_CUSTOMER_DETAILS_TS": {
"EFFECTIVE_FROM": "LOAD_DATETIME"
# Source: deeptavker/pysph, file: pysph/tools/ipy_viewer.py
import json
import glob
from pysph.solver.utils import load, get_files, mkdir
from IPython.display import display, Image, clear_output, HTML
import ipywidgets as widgets
import numpy as np
import matplotlib as mpl
mpl.use('module://ipympl.backend_nbagg')
# Now the user does not have to use the IPython magic command
# '%matplotlib ipympl' in the notebook, this takes care of it.
# The matplotlib backend needs to be set before matplotlib.pyplot
# is imported and this ends up violating the PEP 8 style guide.
import matplotlib.pyplot as plt
class Viewer(object):
'''
Base class for viewers.
'''
def __init__(self, path, cache=True):
self.path = path
self.paths_list = get_files(path)
# Caching #
# Note : Caching is only used by get_frame and widget handlers.
if cache:
self.cache = {}
else:
self.cache = None
def get_frame(self, frame):
'''Return particle arrays for a given frame number with caching.
Parameters
----------
frame : int
Returns
-------
A dictionary.
Examples
--------
>>> sample = Viewer2D('/home/deep/pysph/trivial_inlet_outlet_output/')
>>> sample.get_frame(12)
{
'arrays': {
'fluid': <pysph.base.particle_array.ParticleArray at 0x7f3f7d144d60>,
'inlet': <pysph.base.particle_array.ParticleArray at 0x7f3f7d144b98>,
'outlet': <pysph.base.particle_array.ParticleArray at 0x7f3f7d144c30>
},
'solver_data': {'count': 240, 'dt': 0.01, 't': 2.399999999999993}
}
'''
if self.cache is not None:
if frame in self.cache:
temp_data = self.cache[frame]
else:
self.cache[frame] = temp_data = load(self.paths_list[frame])
else:
temp_data = load(self.paths_list[frame])
return temp_data
def show_log(self):
'''
Prints the contents of the log file.
'''
print("Printing log : \n\n")
path = self.path + "/*.log"
with open(glob.glob(path)[0], 'r') as logfile:
for lines in logfile:
print(lines)
def show_results(self):
'''
Display any png, jpg, jpeg, or bmp images found in the output directory.
'''
imgs = tuple()
for extension in ['png', 'jpg', 'jpeg', 'bmp']:
temppath = self.path + "*." + extension
for paths in glob.glob(temppath):
imgs += (Image(paths),)
if len(imgs) != 0:
display(*imgs)
else:
print("No results to show.")
def show_info(self):
'''
Print contents of the .info file present in the output directory,
keys present in results.npz, number of files and
information about particle arrays.
'''
# General Info #
path = self.path + "/*.info"
with open(glob.glob(path)[0], 'r') as infofile:
data = json.load(infofile)
print('Printing info : \n')
for key in data.keys():
if key == 'cpu_time':
print(key + " : " + str(data[key]) + " seconds")
else:
print(key + " : " + str(data[key]))
print('Number of files : {}'.format(len(self.paths_list)))
# Particle Info #
temp_data = load(self.paths_list[0])['arrays']
for key in temp_data:
print(" {} :".format(key))
print(" Number of particles : {}".format(
temp_data[key].get_number_of_particles())
)
print(" Output Property Arrays : {}".format(
temp_data[key].output_property_arrays)
)
# keys in results.npz
from numpy import load as npl
path = self.path + "*results*"
files = glob.glob(path)
if len(files) != 0:
data = npl(files[0])
print("\nKeys in results.npz :")
print(data.keys())
def show_all(self):
self.show_info()
self.show_results()
self.show_log()
def _cmap_helper(self, data, array_name, for_plot_vectors=False):
'''
Helper Function:
Takes in a numpy array and returns its minimum and maximum,
subject to the constraints provided by the user in the
legend_lower_lim and legend_upper_lim text boxes.
Also returns the input array normalized to the range [0, 1].
'''
pa_widgets = self._widgets.particles[array_name]
if for_plot_vectors is False:
ulim = pa_widgets.legend_upper_lim.value
llim = pa_widgets.legend_lower_lim.value
elif for_plot_vectors is True:
ulim = ''
llim = ''
if llim == '' and ulim == '':
pass
elif llim != '' and ulim == '':
for i in range(len(data)):
if data[i] < float(llim):
data[i] = float(llim)
elif llim == '' and ulim != '':
for i in range(len(data)):
if data[i] > float(ulim):
data[i] = float(ulim)
elif llim != '' and ulim != '':
for i in range(len(data)):
if data[i] > float(ulim):
data[i] = float(ulim)
elif data[i] < float(llim):
data[i] = float(llim)
actual_minm = np.min(data)
if llim != '' and actual_minm > float(llim):
actual_minm = float(llim)
actual_maxm = np.max(data)
if ulim != '' and actual_maxm < float(ulim):
actual_maxm = float(ulim)
if len(set(data)) == 1:
# This takes care of the case when all the values are the same.
# Use case is the initialization of some scalars (like density).
if ulim == '' and llim == '':
if actual_maxm != 0:
return actual_minm, actual_maxm, np.ones_like(data)
else:
return actual_minm, actual_maxm, np.zeros_like(data)
else:
data_norm = (data-actual_minm)/(actual_maxm-actual_minm)
return actual_minm, actual_maxm, data_norm
else:
data_norm = (data-actual_minm)/(actual_maxm-actual_minm)
return actual_minm, actual_maxm, data_norm
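# Illustrative behaviour (hypothetical values): with both legend limits blank
# (or for_plot_vectors=True), data = np.array([2., 4., 6.]) yields
# (2.0, 6.0, array([0., 0.5, 1.])); if legend_upper_lim were set to '5', the
# 6. is clipped first, giving roughly (2.0, 5.0, array([0., 0.667, 1.])).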
def _create_widgets(self):
if self.viewer_type == 'Viewer2D':
self._widgets = Viewer2DWidgets(
file_name=self.paths_list[0],
file_count=len(self.paths_list) - 1,
)
elif self.viewer_type == 'Viewer3D':
self._widgets = Viewer3DWidgets(
file_name=self.paths_list[0],
file_count=len(self.paths_list) - 1,
)
if 'general_properties' in self.config.keys():
gen_prop = self.config['general_properties']
for widget_name in gen_prop.keys():
try:
widget = getattr(
self._widgets,
widget_name
)
widget.value = gen_prop[widget_name]
except AttributeError:
continue
if 'cull_factor' in gen_prop.keys():
self.cull_factor = gen_prop['cull_factor']
if self.cull_factor > 0:
self._widgets.frame.step = self.cull_factor
self._widgets.play_button.step = self.cull_factor
else:
print('cull_factor must be a positive integer.')
self._widgets.frame.observe(self._frame_handler, 'value')
self._widgets.save_figure.on_submit(self._save_figure_handler)
self._widgets.delay_box.observe(self._delay_box_handler, 'value')
self._widgets.save_all_plots.observe(
self._save_all_plots_handler,
'value'
)
self._widgets.print_config.on_click(
self._print_present_config_dictionary
)
if self.viewer_type == 'Viewer2D':
self._widgets.show_solver_time.observe(
self._show_solver_time_handler,
'value'
)
# PLEASE NOTE:
# All widget handlers take in 'change' as an argument. This is usually
# a dictionary containing information about the widget and the change
# in state. However, these functions are also used outside of the use
# case of a user-triggered-event, and in these scenarios None should
# be passed as the argument. This is of particular significance
# because in some of these functions plt.figure.show() gets called
# only if the argument passed is not None.
for array_name in self._widgets.particles.keys():
pa_widgets = self._widgets.particles[array_name]
# Changing the properties as per the configuration dictionary.
if array_name in self.config.keys():
pa_config = self.config[array_name]
for widget_name in pa_config.keys():
try:
widget = getattr(
pa_widgets,
widget_name
)
widget.value = pa_config[widget_name]
except AttributeError:
continue
for widget_name in list(pa_widgets.__dict__.keys())[1:]:
widget = getattr(
pa_widgets,
widget_name
)
if (widget_name == 'legend_lower_lim' or
widget_name == 'legend_upper_lim'):
widget_handler = self._legend_lim_handler
else:
widget_handler = getattr(
self,
'_' + widget_name + '_handler'
)
widget.observe(widget_handler, 'value')
def _legend_lim_handler(self, change):
array_name = change['owner'].owner
pa_widgets = self._widgets.particles[array_name]
if pa_widgets.scalar.value != 'None':
temp_data = self.get_frame(
self._widgets.frame.value
)['arrays']
sct = self._scatters[array_name]
n = pa_widgets.masking_factor.value
stride, component = self._stride_and_component(
temp_data[array_name], pa_widgets
)
c = self._get_c(
pa_widgets,
temp_data[array_name],
component,
stride
)
colormap = getattr(
plt.cm,
pa_widgets.scalar_cmap.value
)
min_c, max_c, c_norm = self._cmap_helper(
c,
array_name
)
if self.viewer_type == 'Viewer2D':
sct.set_facecolors(colormap(c_norm[::n]))
self._legend_handler(None)
self.figure.show()
elif self.viewer_type == 'Viewer3D':
sct.color = colormap(c_norm[::n])
self._legend_handler(None)
def _delay_box_handler(self, change):
self._widgets.play_button.interval = change['new']*1000
def _save_all_plots_handler(self, change):
if self.viewer_type == 'Viewer3D':
import ipyvolume.pylab as p3
if change['new'] is True:
mkdir('all_plots')
self._widgets.frame.disabled = True
self._widgets.play_button.disabled = True
self._widgets.delay_box.disabled = True
self._widgets.save_figure.disabled = True
self._widgets.save_all_plots.disabled = True
self._widgets.print_config.disabled = True
if self.viewer_type == 'Viewer2D':
self._widgets.show_solver_time.disabled = True
for array_name in self._widgets.particles.keys():
pa_widgets = self._widgets.particles[array_name]
for widget_name in list(pa_widgets.__dict__.keys())[1:]:
widget = getattr(
pa_widgets,
widget_name
)
widget.disabled = True
file_count = len(self.paths_list) - 1
for i in np.arange(0, file_count + 1, self.cull_factor):
self._widgets.frame.value = i
self._frame_handler(None)
if self.viewer_type == 'Viewer2D':
self.figure.savefig(
'all_plots/frame_%s.png' % i,
dpi=300
)
elif self.viewer_type == 'Viewer3D':
p3.savefig(
'all_plots/frame_%s.png' % i,
width=600,
height=600,
fig=self.plot
)
print(
"Saved the plots in the folder 'all_plots'" +
" in the present working directory"
)
self._widgets.frame.disabled = False
self._widgets.play_button.disabled = False
self._widgets.delay_box.disabled = False
self._widgets.save_figure.disabled = False
self._widgets.save_all_plots.disabled = False
self._widgets.print_config.disabled = False
if self.viewer_type == 'Viewer2D':
self._widgets.show_solver_time.disabled = False
self._widgets.save_all_plots.value = False
for array_name in self._widgets.particles.keys():
pa_widgets = self._widgets.particles[array_name]
for widget_name in list(pa_widgets.__dict__.keys())[1:]:
widget = getattr(
pa_widgets,
widget_name
)
widget.disabled = False
def _print_present_config_dictionary(self, change):
_widgets = self._widgets
config = {'general_properties': {}}
gen_prop = config['general_properties']
gen_prop['frame'] = _widgets.frame.value
gen_prop['delay_box'] = _widgets.delay_box.value
gen_prop['cull_factor'] = _widgets.frame.step
if self.viewer_type == 'Viewer2D':
gen_prop[
'show_solver_time'
] = _widgets.show_solver_time.value
for array_name in self._widgets.particles.keys():
pa_widgets = self._widgets.particles[array_name]
config[array_name] = {}
pa_config = config[array_name]
for widget_name in list(pa_widgets.__dict__.keys())[1:]:
widget = getattr(
pa_widgets,
widget_name
)
pa_config[widget_name] = widget.value
print(config)
def _masking_factor_handler(self, change):
array_name = change['owner'].owner
pa_widgets = self._widgets.particles[array_name]
if pa_widgets.is_visible.value is True:
n = pa_widgets.masking_factor.value
if n > 0:
temp_data = self.get_frame(self._widgets.frame.value)['arrays']
stride, component = self._stride_and_component(
temp_data[array_name], pa_widgets
)
c = self._get_c(
pa_widgets,
temp_data[array_name],
component,
stride
)
colormap = getattr(
plt.cm,
pa_widgets.scalar_cmap.value
)
min_c, max_c, c_norm = self._cmap_helper(
c,
array_name
)
if self.viewer_type == 'Viewer2D':
self._scatters[array_name].remove()
del self._scatters[array_name]
self._scatters[array_name] = self._scatter_ax.scatter(
temp_data[array_name].x[component::stride][::n],
temp_data[array_name].y[component::stride][::n],
s=pa_widgets.scalar_size.value,
)
self._scatters[array_name].set_facecolors(
colormap(c_norm[::n])
)
self.figure.show()
elif self.viewer_type == 'Viewer3D':
import ipyvolume.pylab as p3
copy = self.plot.scatters.copy()
copy.remove(self._scatters[array_name])
del self._scatters[array_name]
if array_name in self._vectors.keys():
copy.remove(self._vectors[array_name])
del self._vectors[array_name]
# Source: TIBCOSoftware/fabrician-hadoop-enabler, file: src/main/resources/common/gridlib/scripts/hadoop_enabler_common.py
from com.datasynapse.fabric.admin.info import AllocationInfo
from com.datasynapse.fabric.util import GridlibUtils, ContainerUtils
from com.datasynapse.fabric.common import RuntimeContextVariable, ActivationInfo
from com.datasynapse.fabric.admin import AdminManager
from com.datasynapse.fabric.admin.info import GridlibInfo
from xml.dom import minidom
from jarray import array
from java.lang.management import ManagementFactory
from subprocess import Popen, PIPE, STDOUT, call
import inspect
import os
import errno
import signal
import shlex
import traceback
import datetime
balancerqueue_dir = "/.fabric/balancer-queue/"
reservedproperties = ["fs.default.name",
"hadoop.tmp.dir",
"topology.script.file.name",
"dfs.http.address",
"dfs.secondary.http.address",
"dfs.datanode.address",
"dfs.datanode.http.address",
"dfs.datanode.ipc.address",
"dfs.hosts.exclude",
"dfs.name.dir",
"dfs.data.dir",
"mapred.job.tracker",
"mapred.job.tracker.http.address",
"mapred.task.tracker.http.address"
]
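# The property names above are owned by the enabler's own configuration;
# doInitHadoopProps() below logs a warning and ignores any user-supplied
# values for them (see the isReserved check).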
try: proxy
except NameError:
globals()['proxy'] = inspect.currentframe().f_back.f_globals['proxy']
else: pass
logger = ContainerUtils.getLogger(proxy)
def getDynamicGridlibDependencies():
logger.info("[hadoop_enabler_common] Beginning getDynamicGridlibDependencies()")
hadoopVersion = getHadoopVersion()
logger.info("[hadoop_enabler_common] Hadoop Distribution version is [" + str(hadoopVersion) +"]")
defaultDomainGridlib = GridlibInfo()
defaultDomainGridlib.name = "default-domain-type"
logger.info("[hadoop_enabler_common] Adding Hadoop distribution dependency")
gridlib = GridlibInfo()
gridlib.name = "hadoop-distribution"
gridlib.version = str(hadoopVersion)
logger.info("[hadoop_enabler_common] Exiting getDynamicGridlibDependencies()")
return array([gridlib, defaultDomainGridlib], GridlibInfo)
def getHadoopVersion():
hadoopVersionVar = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_DISTRIBUTION_VERSION')
if hadoopVersionVar == None:
logger.warning("[hadoop_enabler_common] ${hadoop_enabler_DISTRIBUTION_VERSION} is not set. Defaulting to Hadoop Version 1.0.4")
hadoopVersion = "1.0.4"
else:
hadoopVersion = hadoopVersionVar.getValue()
return str(hadoopVersion)
def doInit_common(additionalVariables):
workdir = proxy.getContainer().getRuntimeContext().getVariable('CONTAINER_WORK_DIR').getValue()
hadoopVersion = getHadoopVersion()
distributionDir = "hadoop-" + str(hadoopVersion)
hadoop_home_dir = os.path.join(workdir, distributionDir)
additionalVariables.add(RuntimeContextVariable("hadoop_enabler_HADOOP_HOME_DIR", hadoop_home_dir, RuntimeContextVariable.STRING_TYPE))
additionalVariables.add(RuntimeContextVariable("hadoop_enabler_HADOOP_BIN_DIR", os.path.join(hadoop_home_dir, "bin"), RuntimeContextVariable.STRING_TYPE))
additionalVariables.add(RuntimeContextVariable("hadoop_enabler_HADOOP_SBIN_DIR", os.path.join(hadoop_home_dir, "sbin"), RuntimeContextVariable.STRING_TYPE))
additionalVariables.add(RuntimeContextVariable("hadoop_enabler_HADOOP_CONF_DIR", os.path.join(hadoop_home_dir, "conf"), RuntimeContextVariable.ENVIRONMENT_TYPE))
""" Augment the Enablers's Hadoop Configuration files with values from user-supplied Hadoop Configuration files."""
doInitHadoopProps("hadoop_enabler_USERPROPS_CORE", "hadoop_enabler_USERPROPS_CORE_FILE", additionalVariables)
doInitHadoopProps("hadoop_enabler_USERPROPS_HDFS", "hadoop_enabler_USERPROPS_HDFS_FILE", additionalVariables)
doInitHadoopProps("hadoop_enabler_USERPROPS_MAPRED", "hadoop_enabler_USERPROPS_MAPRED_FILE", additionalVariables)
""" Create Hadoop tmp directory if it does not already exist"""
tmpdir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_TMP_DIR').getValue()
try:
os.makedirs(tmpdir)
except OSError, exc:
if exc.errno == errno.EEXIST and os.path.isdir(tmpdir):
pass
else:
raise
def doInitHadoopProps(userProp_RCVname, userPropFile_RCVname, additionalVariables):
ContainerUtils.getLogger(proxy).info("[hadoop_enabler] Checking for user property file to augment [" + str(userProp_RCVname) + "].")
userPropsRCV = proxy.getContainer().getRuntimeContext().getVariable(userProp_RCVname)
userPropsPredefined = False
if (userPropsRCV == None):
userProps = ""
else:
userPropsPredefined = True
userProps = userPropsRCV.getValue()
ContainerUtils.getLogger(proxy).info("[hadoop_enabler] User properties variable ${" + userProp_RCVname +
"} is was imported or pre-defined on component. Starting value is [" + str(userProps) + "].")
userPropFile = proxy.getContainer().getRuntimeContext().getVariable(userPropFile_RCVname).getValue()
if (userPropFile != "") and os.path.isfile(userPropFile):
ContainerUtils.getLogger(proxy).info("[hadoop_enabler] User property file found [" + str(userPropFile) + "].")
xmldoc = minidom.parse(userPropFile)
propertylist = xmldoc.getElementsByTagName('property')
if propertylist == None or len(propertylist) == 0:
ContainerUtils.getLogger(proxy).warning("[hadoop_enabler] No property elements found in user property file.")
else:
for element in propertylist:
nameElements = element.getElementsByTagName("name")
name = getNodeText(nameElements[0])
isReserved = False
for reservedproperty in reservedproperties:
if reservedproperty.count(name) > 0:
isReserved = True
break
if isReserved:
ContainerUtils.getLogger(proxy).warning("[hadoop_enabler] The property [" + str(name) + "] is managed by the Hadoop Enabler. Will ignore user supplied value.")
else:
ContainerUtils.getLogger(proxy).info("[hadoop_enabler] Applying user property [" + str(element.toxml()) + "].")
userProps = userProps + element.toxml()
if userPropsPredefined:
proxy.getContainer().getRuntimeContext().getVariable(userProp_RCVname).setValue(userProps)
else:
additionalVariables.add(RuntimeContextVariable(userProp_RCVname, userProps, RuntimeContextVariable.STRING_TYPE,"User Supplied Hadoop properties" , False, RuntimeContextVariable.NO_INCREMENT))
"""
def getHadoopConfigParameter(file, propertyname):
from xml.dom import minidom
xmldoc = minidom.parse(file)
propertylist = xmldoc.getElementsByTagName('property')
value = []
for element in propertylist:
nameElements = element.getElementsByTagName("name")
name = getNodeText(nameElements[0])
ContainerUtils.getLogger(proxy).info("[hadoop_enabler] DEBUG name = [" + str(name) + "]")
if name == propertyname:
valueElements = element.getElementsByTagName("value")
valueCurNode = getNodeText(valueElements[0])
ContainerUtils.getLogger(proxy).info("[hadoop_enabler] DEBUG valueCurNode = [" + str(valueCurNode) + "]")
value.append(valueCurNode)
return ''.join(value)
"""
def getNodeText(node):
nodelist = node.childNodes
value = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
value.append(node.data)
return ''.join(value)
def doStart_common():
moveContentFiles()
createEnvironmentScript()
killOrphans()
changePermissions()
def moveContentFiles():
hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()
work_dir = proxy.getContainer().getRuntimeContext().getVariable('CONTAINER_WORK_DIR').getValue()
if ContainerUtils.isWindows():
pass
else:
ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Copying enabler content files to version-specific sub-directory.")
runCommand("cp -rv " + work_dir + "/hadoop-content/* " + hadoop_home_dir, shell=True)
# commandline = "cp -r " + work_dir + "/hadoop-content/* " + hadoop_home_dir
# runCommand(commandline)
def rcvTrue(rcv):
ContainerUtils.getLogger(proxy).finer("[hadoop_enabler_common] checking runtimecontext variable [" + str(rcv) + "]")
rcvvalue = proxy.getContainer().getRuntimeContext().getVariable(rcv).getValue()
ContainerUtils.getLogger(proxy).finest("[hadoop_enabler_common] value is [" + str(rcvvalue) + "].")
if (str(rcvvalue).lower() in ("yes", "y", "true", "t", "1")):
result = True
elif (str(rcvvalue).lower() in ("no", "n", "false", "f", "0")):
result = False
else:
raise Exception("[hadoop_enabler_common] Invalid value for boolean conversion: [" + str(rcvvalue) + "]")
ContainerUtils.getLogger(proxy).finer("[hadoop_enabler_common] Exiting Checking enabler flag. Result is [" + str(result) + "]")
return result
def getScript(script):
hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()
hadoop_sbin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_SBIN_DIR').getValue()
hadoopDaemonScript_bin = os.path.join(hadoop_bin_dir, script)
hadoopDaemonScript_sbin = os.path.join(hadoop_sbin_dir, script)
if os.path.isfile(hadoopDaemonScript_bin):
return hadoopDaemonScript_bin
elif os.path.isfile(hadoopDaemonScript_sbin):
return hadoopDaemonScript_sbin
else:
raise Exception("[hadoop_enabler_common] Unable to locate [" + script + "] in Hadoop distribution.")
def runCommand(commandline, stdin=None, stdout=None, expectedReturnCodes=None, suppressOutput=None, shell=None):
if (expectedReturnCodes == None): expectedReturnCodes = [0]
if (suppressOutput == None): suppressOutput = False
if (shell == None): shell = False
stderr = None
if (suppressOutput):
stdout=PIPE
stderr=PIPE
else:
ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Running command [" + commandline + "]")
if shell:
args = commandline
else:
args = shlex.split(commandline)
os.unsetenv("LD_LIBRARY_PATH")
os.unsetenv("LD_PRELOAD")
if stdin == None:
p = Popen(args, stdout=stdout, stdin=None, stderr=stderr, shell=shell)
output = p.communicate()
else:
p = Popen(args, stdout=stdout, stdin=PIPE, stderr=stderr, shell=shell)
output = p.communicate(input=stdin)
outputlist = [p.returncode]
for item in output:
outputlist.append(item)
if (outputlist[0] in expectedReturnCodes ):
if not (suppressOutput):
ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Command return code was [" + str(outputlist[0]) + "]")
printStdoutPipe(stdout, outputlist)
else:
ContainerUtils.getLogger(proxy).severe("[hadoop_enabler_common] Return code " + str(outputlist[0]) +
" was not in list of expected return codes" + str(expectedReturnCodes))
if (suppressOutput):
ContainerUtils.getLogger(proxy).severe("[hadoop_enabler_common] Command was [" + commandline + "]")
printStdoutPipe(stdout, outputlist)
ContainerUtils.getLogger(proxy).finer("[hadoop_enabler_common] exiting runCommand(). Returning outputlist:" + (str(outputlist)))
return outputlist
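# Illustrative usage (hypothetical command), mirroring the calls elsewhere in
# this module: capture output quietly and tolerate an expected non-zero code:
# rc, out, err = runCommand("df -k /tmp", stdout=PIPE, suppressOutput=True,
#                           expectedReturnCodes=[0, 1])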
def printStdoutPipe(stdout, outputlist):
if (stdout == PIPE):
ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Command STDOUT:")
print outputlist[1]
def killOrphans():
ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Killing any orphaned process on this engine remaining from a previous execution")
hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()
if ContainerUtils.isWindows():
raise Exception("[hadoop_enabler_common] Windows not yet implemented in by this enabler.")
else:
commandline = "ps x"
output = runCommand(commandline, expectedReturnCodes=[0, 255], stdout=PIPE, suppressOutput=True)
for line in output[1].splitlines():
if hadoop_home_dir in line:
if "java" in line:
ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Issuing Kill command for orphaned process [" + str(line) + "]")
pid = int(line.split(None, 1)[0])
os.kill(pid, signal.SIGKILL)
def deleteHDFSFile(file):
hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()
commandline = os.path.join(hadoop_bin_dir, "hadoop") + " fs -rm " + str(file)
output = runCommand(commandline, expectedReturnCodes=[0, 255])
if (output[0] == 0):
ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Successfully deleted [" + str(file) + "]")
elif (output[0] == 255):
ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_common] File already deleted [" + str(file) + "]. Continuing Processing")
else:
ContainerUtils.getLogger(proxy).severe("[hadoop_enabler_common] Unexpected return code [" + str(output[0]) + "] when attempting to delete.]")
def getStatistic_common(name):
memoryBean = ManagementFactory.getMemoryMXBean()
if name == "enabler_HEAP_MEMORY_USAGE":
bytes = memoryBean.getHeapMemoryUsage().getUsed()
return bytes / 1024 / 1024 # convert to MiB
elif name == "enabler_NON_HEAP_MEMORY_USAGE":
bytes = memoryBean.getNonHeapMemoryUsage().getUsed()
return bytes / 1024 / 1024 # convert to MiB
elif name == "enabler_DATANODE_DECOMMISION_REQUESTS":
hadoop_home_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_HOME_DIR').getValue()
if ContainerUtils.isWindows():
raise Exception("[hadoop_enabler_common] Windows not yet implemented in by this enabler.")
else:
commandline = "sh " + os.path.join(hadoop_home_dir, "bin", "hadoop") + " fs -count " + decommissionqueue_dir
output = runCommand(commandline, expectedReturnCodes=[0, 255], suppressOutput=True)
if (output[0] == 0):
stdout = str(output[1])
count = int(stdout.split()[1])
return int(count)
elif (output[0] == 255):
# Decommission request directory doesn't exist. Not expected to exist until some datanode posts the first request
return int(0)
else:
ContainerUtils.getLogger(proxy).warning("[hadoop_enabler_common] Unexpected return code [" + str(output[0]) +
"] while attempting to retrieve statistic enabler_DATANODE_DECOMMISION_REQUESTS statistic. Assuming 0.")
print output
return int(0)
elif name.startswith('enabler_DISK_'):
tmpdir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_TMP_DIR').getValue()
if name == "enabler_DISK_SPACE_FREE":
blocks = int(getStatistic_disk(tmpdir)[0])
return blocks / 1024 / 1024 # convert 1024-byte blocks to GiB
#return available
elif name == "enabler_DISK_SPACE_USED":
blocks = int(getStatistic_disk(tmpdir)[1])
return blocks / 1024 / 1024 # convert 1024-byte blocks to GiB
#return used
elif name == "enabler_DISK_SPACE_USED_PERCENT":
return getStatistic_disk(tmpdir)[2]
#return int(percent[:-1])
else:
raise Exception("[hadoop_enabler_common] Unsupported statistic type requested [" + str(name) + "]")
else:
raise Exception("[hadoop_enabler_common] Unsupported statistic type requested [" + str(name) + "]")
def getStatistic_disk(directory):
if ContainerUtils.isWindows():
raise Exception("[hadoop_enabler_common] Windows not yet implemented in by this enabler.")
return[0,0,0]
else:
df = Popen(["df", directory], stdout=PIPE)
output = df.communicate()[0]
device, size, used, available, percent, mountpoint = output.split("\n")[1].split()
return [available, used, int(percent[:-1])]
def getContainerRunningConditionPollPeriod():
pollperiod = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_ENABLER_RUNNING_POLLPERIOD').getValue()
ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] Will verify enabler is running every " + str(pollperiod) + " seconds.")
return float(pollperiod) * 1000
def getComponentRunningConditionErrorMessage():
return "hadoop heartbeat test unsuccessful"
def changePermissions():
hadoop_bin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_BIN_DIR').getValue()
hadoop_sbin_dir = proxy.getContainer().getRuntimeContext().getVariable('hadoop_enabler_HADOOP_SBIN_DIR').getValue()
java_home_dir = proxy.getContainer().getRuntimeContext().getVariable('GRIDLIB_JAVA_HOME').getValue()
if ContainerUtils.isWindows():
pass
else:
ContainerUtils.getLogger(proxy).info("[hadoop_enabler_common] | |
import random
import sys
from collections import Counter
import Grandmas_Game_Closet as Main
import pygame
from pygame.locals import *
import shelve
# Colors used
RED = (255, 0, 0)
GREEN = (0, 255, 0)
DARKGREEN = (20, 100, 20)
BLUE = (0, 0, 255)
PURPLE = (255, 0, 255)
YELLOW = (255, 255, 0)
GREY = (100, 100, 100)
WHITE = (255, 255, 255)
NAVY = (60, 60, 100)
DARKGREY = (30, 30, 30)
BLACK = (0, 0, 0)
TAN = (222, 184, 135)
ORANGE = (255, 128, 0)
TEAL = (15, 225, 210)
# classes
class Peg(object):
"""a single peg
attributes: color, location"""
def __init__(self, peg_color, loc_index=None):
"""given a color and a location, create a peg
location is a pygame location, not an index value"""
self.color = peg_color
self.location = loc_index
def __eq__(self, other):
"""compare two pegs together. if other is not a peg,
then assume inequality"""
if isinstance(other, Peg):
return self.color == other.color
else:
return False
def hue(self):
"""return the color of the peg"""
return self.color
class PegRow(object):
"""a row of pegs, either the answer or a player guess
attributes: pegs (the set of pegs in the row),
pegchoices (the available colors for a given peg)"""
def __init__(self, numpegs=5):
"""create a row of pegs the size of the row (default is 5"""
self.pegs = [None] * numpegs
self.pegchoices = {"red": RED, "blue": BLUE, "green": GREEN,
"purple": PURPLE, "yellow": YELLOW,
"tan": TAN, "white": WHITE, "black": BLACK,
"orange": ORANGE}
def __getitem__(self, c_index):
"""returned the peg located at a given index"""
return self.pegs[c_index]
class AnswerRow(PegRow):
"""the Answer version of a pegrow. Creates a randome selection
of pegs based of the available colors. no additional attributes"""
def __init__(self, numpegs=5):
pegcolors = ("red", "blue", "green", "purple", "yellow", "tan",
"white", "black", "orange")
super(AnswerRow, self).__init__(numpegs)
for p_index in range(len(self.pegs)):
blind_peg = random.choice(pegcolors)
self.pegs[p_index] = (Peg(self.pegchoices[blind_peg], None))
class GuessRow(PegRow):
"""the Guess version of a pegrow. has additional methods for
manipulation. Can be used to create a two player version of
the game. no additional attributes"""
def __init__(self, numpegs=5):
"""import initilizations from PegRow"""
super(GuessRow, self).__init__()
self.answer_key = [WHITE] * len(self.pegs)
def add_peg(self, colors, p_index, loc_index):
"""adds a peg to the guess row"""
self.pegs[p_index] = Peg(colors, loc_index)
def is_equal(self, other):
"""checks how many of a given color from a given PegRow
are in the GuessRow. returns a list of colors, not a
boolean so did not use __eq__"""
self.answer_key = [WHITE] * len(other.pegs)
cntr = 0
a_counter = {RED: 0, BLUE: 0, GREEN: 0, YELLOW: 0, PURPLE: 0,
ORANGE: 0, TAN: 0, WHITE: 0, BLACK: 0}
others = []
for pIndex in range(len(other.pegs)):
others.append(other.pegs[pIndex].hue())
b_counter = Counter(others)
"""populate for 1 to len(pegs) the number of correct colors"""
for pIndex in range(len(self.pegs)):
if type(self.pegs[pIndex]) != int: # catch init errors
if self.pegs[pIndex] in other.pegs:
if a_counter[self.pegs[pIndex].hue()] < \
b_counter[self.pegs[pIndex].hue()]:
self.answer_key[cntr] = RED
a_counter[self.pegs[pIndex].hue()] += 1
cntr += 1
cntr = 0
# overwrite correct colors (above) with correct color and location
for peg in range(5):
if self.pegs[peg] == other.pegs[peg]:
self.answer_key[cntr] = GREEN
cntr += 1
return self.answer_key
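# Scoring summary for is_equal(): answer_key has one slot per peg and is
# filled Mastermind-style. Each GREEN slot counts a peg with the right colour
# in the right position, each RED slot counts a colour present in the answer
# but at the wrong position, and WHITE means no match. The slots convey
# counts only; their order is not tied to specific peg positions.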
def tup(self):
"""used to save self.pegs as immutable form to the list of
answers given"""
return tuple(self.pegs)
# pygame helper functions
def submit(screen):
"""check if the submitted answer is the
hidden answer, clear the screen of the guess"""
correct_pegs = TEMP_PEGS.is_equal(HIDDEN_COLORS)
greens = 0
for p in correct_pegs:
if p == GREEN:
greens += 1
if greens == 5: # five greens is equivalent to TEMP_PEGS == HIDDEN_COLORS
win_text = pygame.font.Font('freesansbold.ttf', 25)
win_render = win_text.render("YOU WIN!", True, WHITE, BLACK)
win_rect = win_render.get_rect()
win_rect.center = (340, 354)
screen.blit(win_render, win_rect)
return None, None # end game
else:
ANSWER_TREE.insert(0, (
correct_pegs, TEMP_PEGS.tup()))
if len(ANSWER_TREE) > 8:
max_show = 8 # the screen only has space for 8 answers
else:
max_show = len(ANSWER_TREE)
print_guesses(screen, last_guess=max_show)
# hide previous guess
pygame.draw.rect(screen, NAVY, pygame.Rect(350, 125, 300, 100))
peg_hole(screen, GUESS_PEG1, GREY)
peg_hole(screen, GUESS_PEG2, GREY)
peg_hole(screen, GUESS_PEG3, GREY)
peg_hole(screen, GUESS_PEG4, GREY)
peg_hole(screen, GUESS_PEG5, GREY)
return 0, max_show
def print_guesses(screen, first_guess=0, last_guess=1, answer_tree_x=44,
answer_tree_y=115):
"""print a given set of 8 or fewer answers to the screen"""
for print_index in range(len(ANSWER_TREE)):
if first_guess <= print_index <= last_guess:
for a_color in range(5):
loc = (answer_tree_x, answer_tree_y)
peg = ANSWER_TREE[print_index][1]
try:
pygame.draw.circle(screen, peg[a_color].hue(),
loc, GUESS_RADIUS)
except AttributeError: # for accidental submissions
warning_box("Please select all five colors", screen)
answer_tree_x -= 50 * a_color
for bad_color in range(a_color+1):
pygame.draw.circle(screen, GREY,
(answer_tree_x, answer_tree_y),
GUESS_RADIUS)
answer_tree_x += 50
return None
answer_tree_x += 50
pegs = ANSWER_TREE[print_index][0]
for answer in pegs: # print is_equal
test_text = pygame.font.Font('freesansbold.ttf', 25)
test_button = test_text.render("|", True, answer, GREY)
test_rect = test_button.get_rect()
test_rect.center = (answer_tree_x, answer_tree_y)
screen.blit(test_button, test_rect)
answer_tree_x += 5
answer_tree_y += 40
answer_tree_x = 44
def warning_box(text, screen):
"""print given text to the warning box at the top of the screen"""
text_list = []
temp_text = ""
next_line = 50
warn_location = (375, 25, 220, 125)
text_center = 485
while len(text) > 0:
temp_text += text[0]
try:
text = text[1:]
# limit length of line to 10 characters
if len(temp_text) >= 10 and temp_text[-1] == " ":
text_list.append(temp_text)
temp_text = ""
except IndexError:
pass
temp_text += text
text_list.append(temp_text)
warning_text = [""] * len(text_list)
warning_frame = [""] * len(text_list)
warning_rect = [""] * len(text_list)
pygame.draw.rect(screen, GREY, pygame.Rect(warn_location))
for line in range(len(text_list)):
warning_text[line] = pygame.font.Font('freesansbold.ttf', 20)
warning_frame[line] = warning_text[line].render(text_list[line],
True, RED, GREY)
warning_rect[line] = warning_frame[line].get_rect()
warning_rect[line].center = (text_center, next_line)
screen.blit(warning_frame[line], warning_rect[line])
next_line += 20
def is_color(pos, col_index, attempt, screen):
"""add peg to guess, and verify no more than 5 pegs are added"""
attempt_x, attempt_y = attempt
total_colors = 5
nocollide_flag = False
warn_txt = "To exchange a peg, click on the " \
"peg you wish to edit first."
loc_color_switcher = {0: RED, 1: GREEN, 2: BLUE, 3: YELLOW, 4: PURPLE,
5: TAN, 6: ORANGE, 7: WHITE, 8: BLACK}
x_corr_switcher = {0: TEMP_X_1, 1: TEMP_X_2, 2: TEMP_X_3, 3: TEMP_X_4,
4: TEMP_X_5}
for i in range(9):
if CIRCLE[i].collidepoint(pos):
if col_index < total_colors:
return loc_color_switcher[i], \
col_index, (attempt_x, attempt_y)
else:
warning_box(warn_txt, screen)
nocollide_flag = True
if nocollide_flag:
try:
for i in range(5):
if TEMP_PEGS.pegs[i].location.collidepoint(pos):
return DARKGREY, i, (x_corr_switcher[i], attempt_y)
except IndexError:
pass
except AttributeError:
pass
return None, col_index, attempt
def draw_shield(size, screen):
"""hide answer"""
left = 15
top = 15
length = 320
pygame.draw.rect(screen, GREY, SHIELD)
peg_hole(screen, SOLVED_PEG1)
peg_hole(screen, SOLVED_PEG2)
peg_hole(screen, SOLVED_PEG3)
peg_hole(screen, SOLVED_PEG4)
peg_hole(screen, SOLVED_PEG5)
if size > 0:
pygame.draw.rect(screen, WHITE, (left, top, length, size))
pygame.display.update()
FPSCLOCK.tick(FPS)
def peg_hole(screen, location, outercolor=DARKGREY, innercolor=BLACK,
big_radius=10, small_radius=5):
"""Draw pegs"""
pygame.draw.circle(screen, outercolor, location, big_radius, 0)
pygame.draw.circle(screen, innercolor, location, small_radius, 0)
def get_pegs(screen, prevcolor, colors=20):
"""used in windows and linux machines to animate shield closing"""
if colors > 0:
pygame.draw.rect(screen, prevcolor, SHIELD)
randcolor = random.choice(PEGS)
pygame.time.wait(100)
pygame.display.update()
get_pegs(screen, randcolor, colors - 1)
else:
pygame.draw.rect(screen, WHITE, SHIELD)
pygame.display.update()
def end_game(chk1, chk2):
"""check if game won; chk1 and chk2 come from submit function"""
if chk1 is None and chk2 is None:
return False
return True
def blit_win(screen):
"""finish game"""
pygame.draw.rect(screen, GREY, SHIELD)
peg_hole(screen, SOLVED_PEG1, outercolor=HIDDEN_COLORS[0].hue(),
big_radius=GUESS_RADIUS, small_radius=0)
peg_hole(screen, SOLVED_PEG2, outercolor=HIDDEN_COLORS[1].hue(),
innercolor=HIDDEN_COLORS[1].hue(), big_radius=GUESS_RADIUS,
small_radius=0)
peg_hole(screen, SOLVED_PEG3, outercolor=HIDDEN_COLORS[2].hue(),
innercolor=HIDDEN_COLORS[2].hue(), big_radius=GUESS_RADIUS,
small_radius=0)
peg_hole(screen, SOLVED_PEG4, outercolor=HIDDEN_COLORS[3].hue(),
innercolor=HIDDEN_COLORS[3].hue(), big_radius=GUESS_RADIUS,
small_radius=0)
peg_hole(screen, SOLVED_PEG5, outercolor=HIDDEN_COLORS[4].hue(),
innercolor=HIDDEN_COLORS[4].hue(), big_radius=GUESS_RADIUS,
small_radius=0)
warning_box("Congratulations! You WON!\n New Game? Press 'Y' or 'N'",
screen)
for event in pygame.event.get():
if event.type == KEYUP and event.key == K_y:
new_game(screen)
elif event.type == KEYUP and event.key == K_n:
Main.menu()
elif event.type == QUIT or (event.type == KEYUP and (
event.key == K_q or
event.key == K_ESCAPE)):
saved_game = shelve.open("SavedGame")
saved_game['Story_Mode_Finished'] = 1
# save location story mode
saved_game.close()
pygame.quit()
sys.exit()
pygame.display.update()
def new_game(screen, independant=True):
"""Start a new game"""
global CIRCLE, SHOW_LAST, ANSWER_TREE
global HIDDEN_COLORS, TEMP_PEGS
pygame.init()
Main.resetglobals()
reset_master_globals()
pygame.display.set_caption('Brain Games')
HIDDEN_COLORS = AnswerRow()
CIRCLE = []
draw_screen(screen)
for coverage in range(0, SHIELD_HEIGHT, 4):
draw_shield(coverage, screen)
ANSWER_TREE = []
TEMP_PEGS = GuessRow()
SHOW_LAST = len(ANSWER_TREE)
peg_color = WHITE
get_pegs(screen, peg_color)
play = True
first = last = 0
while True:
for event in pygame.event.get():
if event.type == MOUSEBUTTONUP and \
menu_rect.collidepoint(event.pos):
Main.menu()
elif event.type == MOUSEBUTTONUP and \
instr_rect.collidepoint(event.pos):
instructions()
draw_screen(screen)
if len(ANSWER_TREE) > 8:
max_show = 8
else:
max_show = len(ANSWER_TREE)
print_guesses(screen=screen, last_guess=max_show)
if play:
play, first,
and not volume.deviceType == 'quorum':
continue
if volume.deviceType == 'iscsi':
if volume.useVirtio:
if disk.source.name__ and disk.source.name_ in volume.installPath:
return disk, disk.target.dev_
else:
if disk.source.dev__ and volume.volumeUuid in disk.source.dev_:
return disk, disk.target.dev_
elif volume.deviceType == 'file':
if disk.source.file__ and disk.source.file_ == volume.installPath:
return disk, disk.target.dev_
elif volume.deviceType == 'ceph':
if disk.source.name__ and disk.source.name_ in volume.installPath:
return disk, disk.target.dev_
elif volume.deviceType == 'scsilun':
if disk.source.dev__ and volume.installPath in disk.source.dev_:
return disk, disk.target.dev_
elif volume.deviceType == 'block':
if disk.source.dev__ and disk.source.dev_ in volume.installPath:
return disk, disk.target.dev_
elif volume.deviceType == 'quorum':
logger.debug("quorum file path is %s" % disk.backingStore.source.file_)
if disk.backingStore.source.file_ and disk.backingStore.source.file_ in volume.installPath:
disk.driver.type_ = "qcow2"
disk.source = disk.backingStore.source
return disk, disk.backingStore.source.file_
if not is_exception:
return None, None
logger.debug('%s is not found on the vm[uuid:%s], xml: %s' % (volume.installPath, self.uuid, self.domain_xml))
raise kvmagent.KvmError('unable to find volume[installPath:%s] on vm[uuid:%s]' % (volume.installPath, self.uuid))
def _is_ft_vm(self):
return any(disk.type_ == "quorum" for disk in self.domain_xmlobject.devices.get_child_node_as_list('disk'))
def resize_volume(self, volume, size):
device_id = volume.deviceId
target_disk, disk_name = self._get_target_disk(volume)
alias_name = target_disk.alias.name_
r, o, e = bash.bash_roe("virsh qemu-monitor-command %s block_resize drive-%s %sB --hmp"
% (self.uuid, alias_name, size))
logger.debug("resize volume[%s] of vm[%s]" % (alias_name, self.uuid))
if r != 0:
raise kvmagent.KvmError(
'unable to resize volume[id:{1}] of vm[uuid:{0}] because {2}'.format(device_id, self.uuid, e))
def take_live_volumes_delta_snapshots(self, vs_structs):
"""
:type vs_structs: list[VolumeSnapshotJobStruct]
:rtype: list[VolumeSnapshotResultStruct]
"""
disk_names = []
return_structs = []
memory_snapshot_struct = None
snapshot = etree.Element('domainsnapshot')
disks = e(snapshot, 'disks')
logger.debug(snapshot)
if len(vs_structs) == 0:
return return_structs
def get_size(install_path):
"""
:rtype: long
"""
return VmPlugin._get_snapshot_size(install_path)
logger.debug(vs_structs)
need_memory_snapshot = False
for vs_struct in vs_structs:
if vs_struct.live is False or vs_struct.full is True:
raise kvmagent.KvmError("volume %s is not live or full snapshot specified, "
"can not proceed")
if vs_struct.memory:
e(snapshot, 'memory', None, attrib={'snapshot': 'external', 'file': vs_struct.installPath})
need_memory_snapshot = True
snapshot_dir = os.path.dirname(vs_struct.installPath)
if not os.path.exists(snapshot_dir):
os.makedirs(snapshot_dir)
memory_snapshot_struct = vs_struct
continue
target_disk, disk_name = self._get_target_disk(vs_struct.volume)
if target_disk is None:
logger.debug("can not find %s" % vs_struct.volume.deviceId)
continue
snapshot_dir = os.path.dirname(vs_struct.installPath)
if not os.path.exists(snapshot_dir):
os.makedirs(snapshot_dir)
disk_names.append(disk_name)
d = e(disks, 'disk', None, attrib={'name': disk_name, 'snapshot': 'external', 'type': 'file'})
e(d, 'source', None, attrib={'file': vs_struct.installPath})
e(d, 'driver', None, attrib={'type': 'qcow2'})
return_structs.append(VolumeSnapshotResultStruct(
vs_struct.volumeUuid,
target_disk.source.file_,
vs_struct.installPath,
get_size(target_disk.source.file_)))
self.refresh()
for disk in self.domain_xmlobject.devices.get_child_node_as_list('disk'):
if disk.target.dev_ not in disk_names:
e(disks, 'disk', None, attrib={'name': disk.target.dev_, 'snapshot': 'no'})
xml = etree.tostring(snapshot)
logger.debug('creating live snapshot for vm[uuid:{0}] volumes[id:{1}]:\n{2}'.format(self.uuid, disk_names, xml))
snap_flags = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC
if not need_memory_snapshot:
snap_flags |= libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY
try:
self.domain.snapshotCreateXML(xml, snap_flags)
if memory_snapshot_struct:
return_structs.append(VolumeSnapshotResultStruct(
memory_snapshot_struct.volumeUuid,
memory_snapshot_struct.installPath,
memory_snapshot_struct.installPath,
get_size(memory_snapshot_struct.installPath)))
return return_structs
except libvirt.libvirtError as ex:
logger.warn(linux.get_exception_stacktrace())
raise kvmagent.KvmError(
'unable to take live snapshot of vm[uuid:{0}] volumes[id:{1}], {2}'.format(self.uuid, disk_names, str(ex)))
def take_volume_snapshot(self, volume, install_path, full_snapshot=False):
device_id = volume.deviceId
target_disk, disk_name = self._get_target_disk(volume)
snapshot_dir = os.path.dirname(install_path)
if not os.path.exists(snapshot_dir):
os.makedirs(snapshot_dir)
previous_install_path = target_disk.source.file_
back_file_len = len(self._get_backfile_chain(previous_install_path))
# for RHEL, base image's back_file_len == 1; for ubuntu back_file_len == 0
first_snapshot = full_snapshot and (back_file_len == 1 or back_file_len == 0)
def take_delta_snapshot():
snapshot = etree.Element('domainsnapshot')
disks = e(snapshot, 'disks')
d = e(disks, 'disk', None, attrib={'name': disk_name, 'snapshot': 'external', 'type': 'file'})
e(d, 'source', None, attrib={'file': install_path})
e(d, 'driver', None, attrib={'type': 'qcow2'})
# QEMU 2.3 by default creates snapshots on all devices,
# but we only need one for this disk
self.refresh()
for disk in self.domain_xmlobject.devices.get_child_node_as_list('disk'):
if disk.target.dev_ != disk_name:
e(disks, 'disk', None, attrib={'name': disk.target.dev_, 'snapshot': 'no'})
xml = etree.tostring(snapshot)
logger.debug('creating snapshot for vm[uuid:{0}] volume[id:{1}]:\n{2}'.format(self.uuid, device_id, xml))
snap_flags = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA
try:
self.domain.snapshotCreateXML(xml, snap_flags)
return previous_install_path, install_path
except libvirt.libvirtError as ex:
logger.warn(linux.get_exception_stacktrace())
raise kvmagent.KvmError(
'unable to take snapshot of vm[uuid:{0}] volume[id:{1}], {2}'.format(self.uuid, device_id, str(ex)))
def take_full_snapshot():
self.block_stream_disk(volume)
return take_delta_snapshot()
if first_snapshot:
# the first snapshot is always a full snapshot;
# at this point a delta snapshot simply returns the original volume as the full snapshot
return take_delta_snapshot()
if full_snapshot:
return take_full_snapshot()
else:
return take_delta_snapshot()
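# Summary of the branching above: the first snapshot in a chain is always
# taken as a delta, with the original volume itself acting as the full
# snapshot; a later full snapshot first flattens the backing chain via
# block_stream_disk() and then takes a delta on top of the flattened image.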
def block_stream_disk(self, volume):
target_disk, disk_name = self._get_target_disk(volume)
install_path = target_disk.source.file_
logger.debug('start block stream for disk %s' % disk_name)
self.domain.blockRebase(disk_name, None, 0, 0)
logger.debug('block stream for disk %s in progress' % disk_name)
def wait_job(_):
logger.debug('block stream is waiting for %s blockRebase job completion' % disk_name)
return not self._wait_for_block_job(disk_name, abort_on_error=True)
if not linux.wait_callback_success(wait_job, timeout=21600, ignore_exception_in_callback=True):
raise kvmagent.KvmError('block stream failed')
def wait_backing_file_cleared(_):
return not linux.qcow2_get_backing_file(install_path)
if not linux.wait_callback_success(wait_backing_file_cleared, timeout=60, ignore_exception_in_callback=True):
raise kvmagent.KvmError('block stream succeeded, but backing file is not cleared')
def list_blk_sources(self):
"""list domain blocks (aka. domblklist) -- but with sources only"""
tree = etree.fromstring(self.domain_xml)
res = []
for disk in tree.findall("devices/disk"):
for src in disk.findall("source"):
src_file = src.get("file")
if src_file is None:
continue
res.append(src_file)
return res
def migrate(self, cmd):
if self.state == Vm.VM_STATE_SHUTDOWN:
raise kvmagent.KvmError('vm[uuid:%s] is stopped, cannot live migrate,' % cmd.vmUuid)
current_hostname = linux.get_host_name()
if cmd.migrateFromDestination:
hostname = cmd.destHostIp.replace('.', '-')
else:
hostname = cmd.srcHostIp.replace('.', '-')
if current_hostname == 'localhost.localdomain' or current_hostname == 'localhost':
# set the hostname, otherwise the migration will fail
shell.call('hostname %s.zstack.org' % hostname)
destHostIp = cmd.destHostIp
destUrl = "qemu+tcp://{0}/system".format(destHostIp)
tcpUri = "tcp://{0}".format(destHostIp)
flag = (libvirt.VIR_MIGRATE_LIVE |
libvirt.VIR_MIGRATE_PEER2PEER |
libvirt.VIR_MIGRATE_UNDEFINE_SOURCE)
if cmd.autoConverge:
flag |= libvirt.VIR_MIGRATE_AUTO_CONVERGE
if cmd.xbzrle:
flag |= libvirt.VIR_MIGRATE_COMPRESSED
if cmd.storageMigrationPolicy == 'FullCopy':
flag |= libvirt.VIR_MIGRATE_NON_SHARED_DISK
elif cmd.storageMigrationPolicy == 'IncCopy':
flag |= libvirt.VIR_MIGRATE_NON_SHARED_INC
# to work around a libvirt bug (cf. RHBZ#1494454)
if LIBVIRT_MAJOR_VERSION >= 4:
if any(s.startswith('/dev/') for s in self.list_blk_sources()):
flag |= libvirt.VIR_MIGRATE_UNSAFE
if cmd.useNuma:
flag |= libvirt.VIR_MIGRATE_PERSIST_DEST
stage = get_task_stage(cmd)
timeout = 1800 if cmd.timeout is None else cmd.timeout
class MigrateDaemon(plugin.TaskDaemon):
def __init__(self, domain):
super(MigrateDaemon, self).__init__(cmd, 'MigrateVm', timeout)
self.domain = domain
def _get_percent(self):
try:
stats = self.domain.jobStats()
if libvirt.VIR_DOMAIN_JOB_DATA_REMAINING in stats and libvirt.VIR_DOMAIN_JOB_DATA_TOTAL in stats:
remain = stats[libvirt.VIR_DOMAIN_JOB_DATA_REMAINING]
total = stats[libvirt.VIR_DOMAIN_JOB_DATA_TOTAL]
if total == 0:
return
percent = min(99, 100.0 - remain * 100.0 / total)
return get_exact_percent(percent, stage)
except libvirt.libvirtError:
pass
except:
logger.debug(linux.get_exception_stacktrace())
def _cancel(self):
logger.debug('cancelling vm[uuid:%s] migration' % cmd.vmUuid)
self.domain.abortJob()
def __exit__(self, exc_type, exc_val, exc_tb):
super(MigrateDaemon, self).__exit__(exc_type, exc_val, exc_tb)
if exc_type == libvirt.libvirtError:
raise kvmagent.KvmError(
'unable to migrate vm[uuid:%s] to %s, %s' % (cmd.vmUuid, destUrl, str(exc_val)))
with MigrateDaemon(self.domain):
logger.debug('migrating vm[uuid:{0}] to dest url[{1}]'.format(self.uuid, destUrl))
self.domain.migrateToURI2(destUrl, tcpUri, None, flag, None, 0)
try:
logger.debug('migrating vm[uuid:{0}] to dest url[{1}]'.format(self.uuid, destUrl))
if not linux.wait_callback_success(self.wait_for_state_change, callback_data=None, timeout=timeout):
try: self.domain.abortJob()
except: pass
raise kvmagent.KvmError('timeout after %d seconds' % timeout)
except kvmagent.KvmError:
raise
except:
logger.debug(linux.get_exception_stacktrace())
logger.debug('successfully migrated vm[uuid:{0}] to dest url[{1}]'.format(self.uuid, destUrl))
def _interface_cmd_to_xml(self, cmd, action=None):
vhostSrcPath = cmd.addons['vhostSrcPath'] if cmd.addons else None
brMode = cmd.addons['brMode'] if cmd.addons else None
interface = Vm._build_interface_xml(cmd.nic, None, vhostSrcPath, action, brMode)
def addon():
if cmd.addons and cmd.addons['NicQos']:
qos = cmd.addons['NicQos']
Vm._add_qos_to_interface(interface, qos)
addon()
return etree.tostring(interface)
def _wait_vm_run_until_seconds(self, sec):
vm_pid = linux.find_process_by_cmdline([kvmagent.get_qemu_path(), self.uuid])
if not vm_pid:
raise Exception('cannot find pid for vm[uuid:%s]' % self.uuid)
up_time = linux.get_process_up_time_in_second(vm_pid)
def wait(_):
return linux.get_process_up_time_in_second(vm_pid) > sec
if up_time < sec and not linux.wait_callback_success(wait, timeout=60):
raise Exception("vm[uuid:%s] seems hang, its process[pid:%s] up-time is not increasing after %s seconds" %
(self.uuid, vm_pid, 60))
def attach_iso(self, cmd):
iso = cmd.iso
if iso.deviceId >= len(self.ISO_DEVICE_LETTERS):
err = 'vm[uuid:%s] exceeds max iso limit, device id[%s], but only 0 ~ %d are allowed' % (self.uuid, iso.deviceId, len(self.ISO_DEVICE_LETTERS) - 1)
logger.warn(err)
raise kvmagent.KvmError(err)
device_letter = self.ISO_DEVICE_LETTERS[iso.deviceId]
dev = self._get_iso_target_dev(device_letter)
bus = self._get_controller_type()
if iso.path.startswith('ceph'):
ic = IsoCeph()
ic.iso = iso
cdrom = ic.to_xmlobject(dev, bus)
else:
if iso.path.startswith('sharedblock'):
iso.path = shared_block_to_file(iso.path)
cdrom = etree.Element('disk', {'type': 'file', 'device': 'cdrom'})
e(cdrom, 'driver', None, {'name': 'qemu', 'type': 'raw'})
e(cdrom, 'source', None, {'file': iso.path})
e(cdrom, 'target', None, {'dev': dev, 'bus': bus})
e(cdrom, 'readonly', None)
xml = etree.tostring(cdrom)
if LIBVIRT_MAJOR_VERSION >= 4:
addr = find_domain_cdrom_address(self.domain.XMLDesc(0), dev)
ridx = xml.rindex('<')
xml = xml[:ridx] + addr.dump() + xml[ridx:]
logger.debug('attaching ISO to the vm[uuid:%s]:\n%s' % (self.uuid, xml))
try:
self.domain.updateDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_LIVE)
except libvirt.libvirtError as ex:
err = str(ex)
logger.warn('unable to attach the iso to the VM[uuid:%s], %s' % (self.uuid, err))
if "QEMU command 'change': error connecting: Operation not supported" in err:
raise Exception('cannot hotplug ISO to the VM[uuid:%s]. It is a libvirt bug: %s.'
' you can power-off the vm and attach again.' %
(self.uuid, 'https://bugzilla.redhat.com/show_bug.cgi?id=1541702'))
elif 'timed out waiting for disk tray status update' in err:
raise Exception(
'unable to attach the iso to the VM[uuid:%s]. It seems met some internal error,'
                        ' you can reboot the vm
<filename>plotting/attenuation.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CERN@school: Analysis functions for the attenuation experiment.
See the README.md file for more information.
"""
#...for the logging.
import logging as lg
#...for the MATH.
import math
#...for even more MATH.
import numpy as np
#...for the least squares fitting.
from scipy import optimize
from scipy.optimize import curve_fit
# Import the plotting libraries.
import pylab as plt
#...for the colours. Oh, the colours!
from matplotlib.colors import LogNorm
# Load the LaTeX text plot libraries.
from matplotlib import rc
# Uncomment to use LaTeX for the plot text.
#rc('font',**{'family':'serif','serif':['Computer Modern']})
#rc('text', usetex=True)
#...for the chi^2 method.
from plotting.stats import chi2
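# Attenuation model behind the fit below: the transmitted count is expected to follow
# B_i = B_0 * exp(-mu * d_i), so taking logs gives ln(B_i) = -mu * d_i + ln(B_0).
# Fitting ln(B_i) vs. d_i to straight_line therefore yields the attenuation coefficient
# as mu = -m (slope) and the initial count as B_0 = exp(c) (intercept), which is how the
# curve_fit results are unpacked in DataPoints further down.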
def straight_line(x, m, c):
return (x * m) + c #np.log(27003)
class DataPoint:
def __init__(self, d_i, B_i, B_0):
""" Constructor. """
## The thickness of the material.
self.__d_i = float(d_i)
## The number of particles detected.
self.__B_i = float(B_i)
self.__ln_B_i = None
if self.__B_i > 0.0:
self.__ln_B_i = np.log(self.__B_i)
## The initial number of particles.
self.__B_0 = float(B_0)
## The estimated attenuation coefficient from this data point.
self.__mu_est = None
## The error on B_i (y) - binomial.
self.__y_error = np.sqrt(self.__B_i * (1.0 - (self.__B_i/self.__B_0)))
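        # Treating each of the B_0 incident particles as an independent Bernoulli trial
        # with success probability p = B_i/B_0, the detected count is Binomial(B_0, p),
        # whose standard deviation sqrt(B_0 * p * (1 - p)) reduces to the expression above.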
## The upper y error.
self.__y_error_upper = None
## The lower y error.
self.__y_error_lower = None
if self.__d_i > 0.0:
mu = (1.0 / self.__d_i)
mu *= np.log(self.__B_0 / self.__B_i)
self.__mu_est = mu
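            # Single-point estimate obtained by inverting B_i = B_0 * exp(-mu * d_i).
            # For example, if B_i is half of B_0 at d_i = 2 mm, mu = ln(2) / 2 ~ 0.35 mm^-1.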
lg.info(" * d_i: %5.3f B_i: % 8d B_0: % 8d" % (self.__d_i, self.__B_i, self.__B_0))
def __lt__(self, other):
return self.get_thickness() < other.get_thickness()
def get_thickness(self):
return self.__d_i
def get_count(self):
return self.__B_i
def get_predicted_count(self, mu, B_0):
return B_0 * (np.exp(-mu * self.__d_i))
def get_log_count(self):
return self.__ln_B_i
def get_predicted_log_count(self, mu, B_0):
return np.log(B_0) - (mu*self.__d_i)
def get_initial(self):
return self.__B_0
def get_success_rate(self):
return self.__B_i / self.__B_0
def get_estimated_attenuation_coefficient(self):
return self.__mu_est
def get_y_error(self):
return self.__y_error
def get_y_error_upper(self):
return self.__y_error_upper
def get_y_error_lower(self):
return self.__y_error_lower
def get_log_y_error_upper(self, mu, B_0):
err = np.sqrt(self.__B_i * (1.0 - (self.__B_i/self.__B_0)))
err_pc = 100.0 * err / float(self.__B_i)
upper_bound = self.__B_i + err
ln_upper_bound = np.log(upper_bound)
lower_bound = self.__B_i - err
ln_lower_bound = np.log(lower_bound)
lg.info(" *")
lg.info(" * Calculating error on (%f, %d):" % (self.__d_i, self.__B_i))
lg.info(" *")
lg.info(" *--> \sigma_{B_i} = sqrt(B_i * (1 - B_i/B_0)) = %f (%f %%)" % (err, err_pc))
lg.info(" *--> { %f ---> %f" % (upper_bound, ln_upper_bound))
lg.info(" *--> B_i = %f ---> %f" % (self.__B_i, np.log(self.__B_i)))
lg.info(" *--> { %f ---> %f" % (lower_bound, ln_lower_bound))
lg.info(" *")
return ln_upper_bound - np.log(self.__B_i)
def get_log_y_error_lower(self, mu, B_0):
err = np.sqrt(self.__B_i * (1.0 - (self.__B_i/self.__B_0)))
err_pc = 100.0 * err / float(self.__B_i)
upper_bound = self.__B_i + err
ln_upper_bound = np.log(upper_bound)
lower_bound = self.__B_i - err
ln_lower_bound = np.log(lower_bound)
lg.info(" *")
lg.info(" * Calculating error on (%f, %d):" % (self.__d_i, self.__B_i))
lg.info(" *")
lg.info(" *--> \sigma_{B_i} = sqrt(B_i * (1 - B_i/B_0)) = %f (%f %%)" % (err, err_pc))
lg.info(" *--> { %f ---> %f" % (upper_bound, ln_upper_bound))
lg.info(" *--> B_i = %f ---> %f" % (self.__B_i, np.log(self.__B_i)))
lg.info(" *--> { %f ---> %f" % (lower_bound, ln_lower_bound))
lg.info(" *")
return np.log(self.__B_i) - ln_lower_bound
def get_thickness_error(self):
return 0.01 # [mm]
class DataPoints:
""" A wrapper class for the data points in the attenuation experiment. """
def __init__(self, data_points):
## A list of DataPoints.
self.__dps = data_points
# Calculate all of the properties of the data points.
# Get the estimated attenuation coefficient and B_0 by fitting
# ln(B_i) vs. d_i to a straight line.
## The estimate of the attenuation coefficient.
self.__mu_est = None
## The estimated error on the attenuation coefficient (MLL).
self.__mu_est_err_mll = None
## The estimate of the mean free path.
self.__mfp_est = None
## The estimated error on the mean free path (MLL).
self.__mfp_est_err_mll = None
## The estimated initial attempts (fit).
self.__B_0_est = None
# First, let's use the curve_fit function to fit the points
# to a straight line.
## An array of the x values (thicknesses).
self.__xs = np.array(self.get_thicknesses())
        ## An array of the y values (log of the particle counts).
self.__ys = np.array(self.get_log_of_successes())
## A list of the estimated parameters, m and c.
self.__parameter_estimates = None
## The covariance matrix of the parameter estimates.
self.__covariance_matrix = None
# Perform the fitting.
self.__parameter_estimates, self.__covariance_matrix = curve_fit(straight_line, self.__xs, self.__ys)
# Assign the estimate values and errors.
self.__mu_est = -1.0 * self.__parameter_estimates[0]
#
self.__mfp_est = 1.0 / self.__mu_est
#
self.__B_0_est = np.exp(self.__parameter_estimates[1])
# Now use the Maximum Log Likelihood method to estimate the error.
# Loop over the data points
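        # As implemented below, the resulting error estimate is
        #   sigma_mu = 1 / sqrt( sum_i d_i^2 * B_i * B_0 / (B_0 - B_i) ),
        # and the mean-free-path error follows by propagating through <x> = 1/mu,
        # i.e. sigma_<x> = sigma_mu / mu^2.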
sum_of_terms = 0.0
lg.info(" *")
lg.info(" * Looping over the data points:")
lg.info(" *")
for dp in sorted(self.__dps):
lg.info(" * | d_i = % 5.2f [mm] | B_i = % 8d |" % (dp.get_thickness(), dp.get_count()))
B_i_times_B_0 = dp.get_count() * self.__B_0_est
B_0_minus_B_i = self.__B_0_est - dp.get_count()
count_frac = float(B_i_times_B_0) / float(B_0_minus_B_i)
d_i_squared = dp.get_thickness() * dp.get_thickness()
d_i_squared_times_count_frac = d_i_squared * count_frac
sum_of_terms += d_i_squared_times_count_frac
lg.info(" * |-----------------------------------|")
lg.info(" * | d_i^{2} = %f" % (d_i_squared))
lg.info(" * | |")
lg.info(" * | | B_i * B_0 = %d" % (B_i_times_B_0))
lg.info(" * | | B_0 - B_i = %d" % (B_0_minus_B_i))
lg.info(" * | |-->Y/Z = %f" % (count_frac))
lg.info(" * | |")
lg.info(" * | *-->X*Y/Z = %f" % (d_i_squared_times_count_frac))
lg.info(" * |")
lg.info(" *")
lg.info(" * Sum of terms = %f [mm^{2}] " % (sum_of_terms))
lg.info(" *")
#
self.__mu_est_err_mll = 1.0/(np.sqrt(sum_of_terms))
#
self.__mu_est_err_mll_pc = 100.0 * (self.__mu_est_err_mll/self.__mu_est)
#
self.__mfp_est_err_mll = (1.0/(self.__mu_est * self.__mu_est)) * self.__mu_est_err_mll
#
self.__mfp_est_err_mll_pc = 100.0 * (self.__mfp_est_err_mll / self.__mfp_est)
#
lg.info(" * 1/sqrt(sum) = %f [mm^{-1}]" % (self.__mu_est_err_mll))
lg.info(" *")
lg.info(" *")
lg.info(" * from curve_fit:")
lg.info(" *")
lg.info(" *--> \mu (MLL) = (% 10.5f \pm % 10.5f) [mm^{-1}] % 6.2f %%" % \
(self.__mu_est, self.__mu_est_err_mll, self.__mu_est_err_mll_pc))
lg.info(" *")
lg.info(" *--> <x> = 1 / \mu = (% 10.5f \pm % 10.5f) [mm] % 6.2f %%" % \
(self.__mfp_est, self.__mfp_est_err_mll, self.__mfp_est_err_mll_pc))
lg.info(" *")
lg.info(" *--> B_0 = % 10.2f particles" % (self.__B_0_est))
lg.info(" *")
# Calculate the Chi^2 values for the estimated distribution.
#chi2_est, n_deg_est, chi2_div_est = chi2(self.get_successes(), self.get_predicted_successes(), 2)
self.__chi_squared_value, self.__chi_squared_dof, chi2_div_est = \
chi2(self.get_log_of_successes(), self.get_predicted_log_of_successes(), 2)
lg.info(" * Estimated distribution (\hat{\mu}, \hat{B_{0}}):")
lg.info(" *--> \Chi^2 = % 7.5f" % (self.__chi_squared_value))
lg.info(" *--> N_freedom = % d" % (self.__chi_squared_dof))
lg.info(" *")
def get_thicknesses(self):
return [dp.get_thickness() for dp in self.__dps]
def get_successes(self):
return [dp.get_count() for dp in self.__dps]
def get_predicted_successes(self):
return [dp.get_predicted_count(self.__mu_est, self.__B_0_est) for dp in self.__dps]
def get_log_of_successes(self):
return [dp.get_log_count() for dp in self.__dps]
def get_predicted_log_of_successes(self):
return [dp.get_predicted_log_count(self.__mu_est, self.__B_0_est) for dp in self.__dps]
def get_successes_upper_errors(self):
        return [dp.get_y_error_upper() for dp in self.__dps]
def get_log_of_successes_upper_errors(self):
return [dp.get_log_y_error_upper(self.__mu_est, self.__B_0_est) for dp in self.__dps]
def get_successes_lower_errors(self):
        return [dp.get_y_error_lower() for dp in self.__dps]
def get_log_of_successes_lower_errors(self):
return [dp.get_log_y_error_lower(self.__mu_est, self.__B_0_est) for dp in self.__dps]
def get_estimated_attenuation_coefficient(self):
return self.__mu_est
def get_error_on_the_estimated_attenuation_coefficient(self):
""" Get the standard error on the est. attenuation coefficient. """
# Maximum Log Likelihood method.
return self.__mu_est_err_mll
def get_estimated_mean_free_path(self):
return self.__mfp_est
def get_error_on_the_estimated_mean_free_path(self):
return self.__mfp_est_err_mll
def get_estimated_initial_count(self):
return self.__B_0_est
def get_log_of_the_estimated_initial_count(self):
return np.log(self.__B_0_est)
def get_chi_squared_fit_value(self):
return self.__chi_squared_value
def get_chi_squared_fit_dof(self):
return self.__chi_squared_dof
def write_html_table(self):
ipg = ""
ipg += " <table>\n"
#
# The headings.
ipg += " <tr>"
ipg += "<th colspan='3'>d<sub>i</sub> / mm</th>"
ipg += "<th colspan='3'>B<sub>i</sub></th>"
ipg += "</tr>\n"
# Loop over the data points to get the table rows.
for dp in self.__dps:
ipg += " <tr>"
#
# The thickness values.
ipg += "<td style='font-family:Monospace'>% 4.2f</td>" % (dp.get_thickness())
ipg += "<td>±</td>"
ipg += "<td style='font-family:Monospace'>% 4.2f</td>" % (dp.get_thickness_error())
#
# The counts.
ipg += "<td style='text-align:right; font-family:Monospace;'>%d</a></td>" % (dp.get_count())
ipg += "<td>±</td>"
ipg += "<td style='font-family:Monospace'>% 2d</td>" % (dp.get_y_error())
ipg += "</tr>\n"
ipg += " </table>\n"
return ipg
def write_latex_table(self):
ts = ""
for dp in self.__dps:
ts += "% 4.2f & % 6d & $\\pm$ & % 2d \\\\\n" % \
(dp.get_thickness(), \
dp.get_count() , dp.get_y_error())
return ts
class AttenuationPlot:
""" Wrapper class for the attenuation plot. """
#def __init__(self, dat, sim, **kwargs):
def __init__(self, data_points, **kwargs):
lg.info(" *")
lg.info(" * Initialising AttenuationPlot object...")
lg.info(" *")
self.__dps = data_points
# GETTING READY TO MAKE THE PLOTS
#=================================
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
import filecmp
import json
import pytest
import oci
import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage
import os
import random
import shutil
import six
import string
from tests import util
from tests import test_config_container
from mimetypes import guess_type
OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET = 100
OBJECTS_TO_CREATE_IN_FOLDER_FOR_BULK_PUT = 20
CONTENT_STRING_LENGTH = 5000
MID_SIZED_FILE_IN_MEBIBTYES = 20
LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES = 150 # Default multipart is 128MiB
# Holds the objects we create and their content so that we can verify results
bulk_get_object_to_content = {}
bulk_get_prefix_to_object = {
'a/b/c/d': [],
'a/b/c': [],
'a/b': [],
'/a': [],
'': []
}
bulk_get_bucket_name = None
bulk_put_large_files = set()
bulk_put_mid_sized_files = set()
root_bulk_put_folder = None
bulk_put_bucket_name = None
@pytest.fixture
def vcr_fixture(request):
with test_config_container.create_vcr(cassette_library_dir='services/object_storage/tests/cassettes').use_cassette('object_storage_bulk_operations_{name}.yml'.format(name=request.function.__name__)):
yield
# Generate test data for different operations:
#
# Bulk Get: create a new bucket and populate it with some objects, then tear it all down afterwards
# Bulk Put: create a folder structure containing small and large files, then tear it all down afterwards
# Bulk Delete: uses the folders and files generated for bulk put
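# Rough sketch of what the bulk-get bucket ends up containing: the loop below cycles
# object names through the prefixes '', '/a/', 'a/b/', 'a/b/c/' and 'a/b/c/d/' (one per
# i % 5 case), so each prefix receives 20 of the 100 objects and the bulk-download tests
# can assert per-directory counts against bulk_get_prefix_to_object.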
@pytest.fixture(scope='module', autouse=True)
def generate_test_data(object_storage_client):
global bulk_get_object_to_content, bulk_get_bucket_name, root_bulk_put_folder, bulk_put_large_files, bulk_put_mid_sized_files, bulk_put_bucket_name
# Create a test bucket
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkGetTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
bulk_get_bucket_name = create_bucket_request.name
    # Create items at various hierarchy levels (to be surfaced as different directories on disk)
for i in range(OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET):
if i % 5 == 4:
object_name = 'a/b/c/d/Object_{}'.format(i)
bulk_get_prefix_to_object['a/b/c/d'].append(object_name)
elif i % 5 == 3:
object_name = 'a/b/c/Object_{}'.format(i)
bulk_get_prefix_to_object['a/b/c'].append(object_name)
elif i % 5 == 2:
object_name = 'a/b/Object_{}'.format(i)
bulk_get_prefix_to_object['a/b'].append(object_name)
elif i % 5 == 1:
# This is equivalent to a/ on the file system because we drop the leading slash (we drop path separators from the front to avoid unexpected results)
object_name = '/a/Object_{}'.format(i)
bulk_get_prefix_to_object['/a'].append(object_name)
else:
# At the root of the bucket
object_name = 'Object_{}'.format(i)
bulk_get_prefix_to_object[''].append(object_name)
object_content = generate_random_string(CONTENT_STRING_LENGTH)
object_storage_client.put_object(util.NAMESPACE, create_bucket_request.name, object_name, object_content)
bulk_get_object_to_content[object_name] = object_content
# makedirs creates all subfolders recursively
root_bulk_put_folder = 'tests/temp/bulk_put_{}'.format(util.random_number_string())
bulk_put_folder_leaf = '{}/subfolder1/subfolder2/subfolder3'.format(root_bulk_put_folder)
if not os.path.exists(bulk_put_folder_leaf):
os.makedirs(bulk_put_folder_leaf)
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkPutTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
bulk_put_bucket_name = create_bucket_request.name
subfolders = ['', 'subfolder1', 'subfolder1/subfolder2', 'subfolder1/subfolder2/subfolder3']
for subfolder in subfolders:
if subfolder == '':
full_folder = root_bulk_put_folder
else:
full_folder = os.path.join(root_bulk_put_folder, subfolder)
for i in range(OBJECTS_TO_CREATE_IN_FOLDER_FOR_BULK_PUT + 1):
file_path = '{}/object_{}'.format(full_folder, i)
if i != 0 and i % OBJECTS_TO_CREATE_IN_FOLDER_FOR_BULK_PUT == 0:
# Put in one big file per subfolder
util.create_large_file(file_path, LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
bulk_put_large_files.add(file_path)
elif i != 0 and i % 10 == 0:
# Put in the occasional file with a reasonable size so that we can force multipart
util.create_large_file(file_path, MID_SIZED_FILE_IN_MEBIBTYES)
bulk_put_mid_sized_files.add(file_path)
else:
with open(file_path, 'w') as f:
f.write(generate_random_string(CONTENT_STRING_LENGTH))
yield
# Tear down stuff by deleting all the things and then deleting the buckets
delete_bucket_and_all_items(object_storage_client, bulk_get_bucket_name)
delete_bucket_and_all_items(object_storage_client, bulk_put_bucket_name)
# Remove all directories recursively
shutil.rmtree(root_bulk_put_folder)
@util.skip_while_rerecording
def test_normalize_object_name_path():
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('/this/is/a/path')
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('/this/is/a/path', '/')
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('\\this\\is\\a\\path', '\\')
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('\\this/is/a\\path', '\\')
assert 'thisisapath' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('thisisapath')
assert 'thisisapath' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('thisisapath', '/')
assert 'thisisapath' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('thisisapath', '\\')
@util.skip_while_rerecording
def test_get_all_objects_in_bucket(vcr_fixture):
download_folder = 'tests/temp/get_all_{}'.format(bulk_get_bucket_name)
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder])
print(result.output)
# Ensure that content matches
for object_name in bulk_get_object_to_content:
if object_name[0] == '/' or object_name[0] == '\\':
file_path = os.path.join(download_folder, object_name[1:])
else:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
assert len(bulk_get_object_to_content) == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_directory_and_subdirectories(vcr_fixture):
download_folder = 'tests/temp/get_directory_and_subdirectories_{}'.format(bulk_get_bucket_name)
# This should get us a/b/<object>, a/b/c/<object> and a/b/c/d/<object>
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--prefix', 'a/b'])
for object_name in bulk_get_prefix_to_object['a/b']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
for object_name in bulk_get_prefix_to_object['a/b/c']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
for object_name in bulk_get_prefix_to_object['a/b/c/d']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
assert len(bulk_get_prefix_to_object['a/b']) + len(bulk_get_prefix_to_object['a/b/c']) + len(bulk_get_prefix_to_object['a/b/c/d']) == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_directory_no_subdirectory(vcr_fixture):
download_folder = 'tests/temp/get_directory_only_{}'.format(bulk_get_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--prefix', 'a/b/c/', '--delimiter', '/'])
for object_name in bulk_get_prefix_to_object['a/b/c']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
assert len(bulk_get_prefix_to_object['a/b/c']) == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_files_skipped():
download_folder = 'tests/temp/skip_and_replace_{}'.format(bulk_get_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder])
# Sanity check
assert len(bulk_get_object_to_content) == get_count_of_files_in_folder_and_subfolders(download_folder)
# We should skip over all objects since there is no --overwrite. There should be prompts
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' in result.output
assert len(parsed_result['skipped-objects']) == len(bulk_get_object_to_content)
# We should skip over all objects since we say --no-overwrite. Additionally there should be no prompts
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--no-overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' not in result.output
assert len(parsed_result['skipped-objects']) == len(bulk_get_object_to_content)
# We should skip over no objects since we --overwrite
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert len(parsed_result['skipped-objects']) == 0
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_no_objects(vcr_fixture):
download_folder = 'tests/temp/no_objects_{}'.format(bulk_get_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--prefix', 'batman'])
assert 0 == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_multipart(object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkGetMultipartsTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
large_file_root_dir = os.path.join('tests', 'temp', 'multipart_get_large_files')
if not os.path.exists(large_file_root_dir):
os.makedirs(large_file_root_dir)
util.create_large_file(os.path.join(large_file_root_dir, '1.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '2.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '3.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '4.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '5.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '6.bin'), 1) # Creates a 1 MiB file for variety
invoke([
'os', 'object', 'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', create_bucket_request.name,
'--src-dir', large_file_root_dir
])
large_file_verify_dir = os.path.join('tests', 'temp', 'multipart_get_large_files_verify')
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name, '--download-dir', large_file_verify_dir, '--multipart-download-threshold', '128'])
assert get_count_of_files_in_folder_and_subfolders(large_file_verify_dir) == 6
assert filecmp.cmp(os.path.join(large_file_root_dir, '1.bin'), os.path.join(large_file_verify_dir, '1.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '2.bin'), os.path.join(large_file_verify_dir, '2.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '3.bin'), os.path.join(large_file_verify_dir, '3.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '4.bin'), os.path.join(large_file_verify_dir, '4.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '5.bin'), os.path.join(large_file_verify_dir, '5.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '6.bin'), os.path.join(large_file_verify_dir, '6.bin'))
shutil.rmtree(large_file_root_dir)
shutil.rmtree(large_file_verify_dir)
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
# Since we've created a reasonable number of objects in this test suite, it's a good opportunity to test using the --all and --limit parameters
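# The expectations below follow from the CLI paging behaviour exercised here: with --all the
# listing is drained page by page, so no 'next-start-with' continuation token appears in the
# output, whereas --limit can stop before the bucket is exhausted, in which case the token is
# returned so a caller could resume.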
@util.skip_while_rerecording
def test_list_all_objects_operations(vcr_fixture):
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--all'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET
assert 'next-start-with' not in result.output
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--all', '--page-size', '20'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET
assert 'next-start-with' not in result.output
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--limit', '47'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == 47
assert 'next-start-with' in result.output
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--limit', '33', '--page-size', '3'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == 33
assert 'next-start-with' in result.output
# Bulk puts objects, uses multipart where appropriate (when we breach the default of 128MiB)
@util.skip_while_rerecording
def test_bulk_put_default_options():
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder])
# No failures or skips and we uploaded everything
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
# Pull everything down and verify that the files match (everything in source appears in destination and they are equal)
download_folder = 'tests/temp/verify_files_{}'.format(bulk_put_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--download-dir', download_folder])
object_name_set = set()
for dir_name, subdir_list, file_list in os.walk(root_bulk_put_folder):
for file in file_list:
source_file_path = os.path.join(dir_name, file)
downloaded_file_path = source_file_path.replace(root_bulk_put_folder, download_folder)
assert os.path.exists(downloaded_file_path)
assert filecmp.cmp(source_file_path, downloaded_file_path, shallow=False)
# Sanity check that we're reporting back that we uploaded the right files
assert get_object_name_from_path(root_bulk_put_folder, source_file_path) in parsed_result['uploaded-objects']
object_name_set.add(get_object_name_from_path(root_bulk_put_folder, source_file_path))
# If we try and put it in the same bucket without --overwrite then everything should be skipped. There should be prompts
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' in result.output
assert set(parsed_result['skipped-objects']) == object_name_set
assert parsed_result['upload-failures'] == {}
assert parsed_result['uploaded-objects'] == {}
# If we say to --no-overwrite then everything should be skipped. There should be no prompts
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder, '--no-overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you | |
#! /usr/local/bin/python3
""" Populate tables of program requirements and mappings of courses to those requirements.
A "program" is a requirement block with a block_type of MAJOR, MINOR, or CONC, but these blocks
may reference OTHER blocks. DEGREE, LIBL, REQUISITE, and SCHOOL blocks are not handled here.
[There are eight active LIBL blocks at Baruch, one active REQUISITE block at Baruch, and one
active SCHOOL block ("Hold for future use") at BMCC.]
Extract both the context (the label structure) and the specificity (how many alternatives there
are) for each course.
Block and CopyRules augment the top-level document when encountered.
BlockType, noncourse, and Remarks are all irrelevant for present purposes.
For Conditionals, the condition string serves as the name of the requirements; for bare Else
clauses, the complement of the If clause's condition serves as the name.
Specificity depends on the structure of the course_list, the group (and area) structure, and
conditional factors.
Assumes that all parse_trees for the institution are up to date.
Ignores blocks that are not current and trees with an 'error' key.
"""
import os
import sys
import csv
import json
from argparse import ArgumentParser
from collections import namedtuple
from pgconnection import PgConnection
from dgw_parser import dgw_parser
from body_qualifiers import format_body_qualifiers
from header_productions import format_header_productions
from quarantine_manager import QuarantineManager
from pprint import pprint
DEBUG = os.getenv('DEBUG_REQUIREMENT_MAPPINGS')
quarantined_dict = QuarantineManager()
number_names = ['none', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
'ten', 'eleven', 'twelve']
# Create list of active programs
active_programs = []
conn = PgConnection()
cursor = conn.cursor()
cursor.execute("""
select institution, academic_plan, plan_type from cuny_programs where program_status = 'A'
""")
for row in cursor.fetchall():
plan_type = ('MAJOR' if row.plan_type == 'MAJ'
               else 'MINOR' if row.plan_type == 'MIN' else row.plan_type)
active_programs.append((row.institution, plan_type, row.academic_plan))
cursor.execute("select institution, subplan, subplan_type from cuny_subplans where status = 'A'")
for row in cursor.fetchall():
subplan_type = 'CONC' if row.subplan_type in ['MIN', 'OPT', 'SPC', 'TRK'] else row.subplan_type
active_programs.append((row.institution, subplan_type, row.subplan))
conn.close()
# Information about active courses found in course lists.
ActiveCourse = namedtuple('ActiveCourse',
'course_id offer_nbr discipline catalog_number title credits '
'course_qualifiers')
# A Requirement’s context is a list of labels, conditions, and group info; active_courses is a list
# of ActiveCourse tuples. If the Requirement is disjunctive (OR), the length of the active_courses
# list gives the number of alternative courses that can satisfy the requirement (“may take”). But if
# the Requirement is conjunctive (AND), the course is required (“must take”). But note that this
# doesn’t tell whether the requirement itself is required or optional: that depends on its group and
# conditional contexts, if any.
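# Illustrative (hypothetical) example: a rule such as "one course from ENG 2100 or ENG 2150"
# would be recorded with is_disjunctive=True and two ActiveCourse alternatives ("may take
# either"), whereas "ENG 2100 and ENG 2150" would be conjunctive, with each course required
# ("must take") while course_alternatives still counts the listed courses.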
Requirement = namedtuple('Requirement',
'institution requirement_id requirement_name '
'num_classes num_credits is_disjunctive active_courses '
'program_qualifiers requirement_qualifiers')
# emit()
# -------------------------------------------------------------------------------------------------
def emit(requirement: Requirement, program_qualifiers: list, context: list) -> None:
""" Update the database.
TABLE program_requirements (
id serial primary key,
institution text not null,
requirement_id text not null,
requirement_name text not null,
num_courses_required text not null,
course_alternatives text not null,
conjunction text,
num_credits_required text not null,
credit_alternatives text not null,
context jsonb not null,
program_qualifiers jsonb not null,
requirement_qualifiers jsonb not null, ...
TABLE course_requirement_mappings (
course_id integer,
offer_nbr integer,
program_requirement_id integer references program_requirements(id) on delete cascade,
course_qualifiers jsonb not null, ...
"""
if DEBUG:
print(f'*** emit({requirement=}, {context=})', file=sys.stderr)
assert len(context) > 0, f'emit with no context'
conn = PgConnection()
cursor = conn.cursor()
# The first item in the context list is the name of the requirement.
try:
context_0 = context.pop(0)
institution, requirement_id, block_type, block_value = context_0.split()
except ValueError as ve:
exit(f'“{context_0}” does not split into 4 parts')
and_or = 'OR' if requirement.is_disjunctive else 'AND'
course_alternatives = len(requirement.active_courses)
  # The number of credit alternatives can be a range because some courses carry a range of credits.
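  # For example, a variable-credit course recorded as '3.0:4.0' contributes 3.0 to the minimum
  # and 4.0 to the maximum, so together with a fixed 3.0-credit course the requirement would
  # report credit_alternatives of '6.0 to 7.0'.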
min_credit_alternatives = 0.0
max_credit_alternatives = 0.0
for course in requirement.active_courses:
if ':' in course.credits:
min_credits, max_credits = course.credits.split(':')
min_credit_alternatives += float(min_credits)
max_credit_alternatives += float(max_credits)
else:
num_credits = float(course.credits)
min_credit_alternatives += num_credits
max_credit_alternatives += num_credits
if min_credit_alternatives == max_credit_alternatives:
credit_alternatives = f'{min_credit_alternatives:0,.1f}'
else:
credit_alternatives = f'{min_credit_alternatives:0,.1f} to {max_credit_alternatives:0,.1f}'
if DEBUG:
print(institution, requirement_id, requirement.requirement_name, requirement.num_classes,
course_alternatives, and_or, requirement.num_credits, credit_alternatives, context,
file=sys.stderr)
# See if the requirement already exists
assert isinstance(requirement.requirement_name, str), (f'Not a string: '
f'{requirement.requirement_name}')
cursor.execute(f"""
select id
from program_requirements
where institution = %s
and requirement_id = %s
and requirement_name = %s
""", (institution, requirement_id, requirement.requirement_name))
if cursor.rowcount == 0:
# Not yet: add it:
# Note that the same requirement can appear in different contexts, such as when there are
# different ways of satisfying it depending on a student's concentration. This is normal.
cursor.execute(f"""insert into program_requirements values
(default, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) on conflict do nothing
returning id
""", (institution, requirement_id, requirement.requirement_name,
requirement.num_classes, course_alternatives, and_or,
requirement.num_credits, credit_alternatives, json.dumps(context),
json.dumps(requirement.program_qualifiers),
json.dumps(requirement.requirement_qualifiers)))
assert cursor.rowcount == 1
program_requirement_id = int(cursor.fetchone().id)
for course in requirement.active_courses:
# Convert the with-clause expression string into a list
if course.course_qualifiers is None:
course_qualifiers = []
else:
course_qualifiers = course.course_qualifiers.split(',')
# Check if the course mapping already exists
cursor.execute(f"""select course_qualifiers
from course_requirement_mappings
where program_requirement_id = {program_requirement_id}
and course_id = {course.course_id}
and offer_nbr = {course.offer_nbr}
""")
if cursor.rowcount > 0:
# Yes, check for anomalies
if cursor.rowcount == 1:
row = cursor.fetchone()
if row.course_qualifiers != course_qualifiers:
print(f'{institution} {requirement_id} “{requirement.requirement_name}” '
f'{row.course_qualifiers=} <> {course_qualifiers=}', file=sys.stderr)
else:
        print(f'Impossible situation: {cursor.rowcount} rows in course_requirement_mappings with '
              f'same {institution=}, {requirement_id=} {requirement.requirement_name=}',
file=sys.stderr)
else:
# Safe to insert the mapping for this course
cursor.execute(f"""insert into course_requirement_mappings values(
{course.course_id},
{course.offer_nbr},
{program_requirement_id},
%s) on conflict do nothing
""", (json.dumps(course_qualifiers), ))
assert cursor.rowcount == 1
conn.commit()
conn.close()
# iter_list()
# -------------------------------------------------------------------------------------------------
def iter_list(items: list,
program_qualifiers: list,
requirement_qualifiers: list,
calling_context: list) -> None:
"""
"""
if DEBUG:
print(f'*** iter_list({len(items)=}, {program_qualifiers}, {requirement_qualifiers=}, '
f'{calling_context=})',
file=sys.stderr)
local_context = calling_context + []
for value in items:
if isinstance(value, list):
iter_list(value, program_qualifiers, requirement_qualifiers, local_context)
elif isinstance(value, dict):
iter_dict(value, program_qualifiers, requirement_qualifiers, local_context)
else:
      # Maybe it's a remark?
print(f'iter_list: Neither list nor dict: {value=} {len(local_context)=}', file=sys.stderr)
return None
# iter_dict()
# -------------------------------------------------------------------------------------------------
def iter_dict(item: dict,
program_qualifiers: list,
requirement_qualifiers: list,
calling_context: list) -> None:
""" If there is a course list, emit the context in which it occurs, the nature of the requirement,
and the courses. The context is a list of labels (no remarks), augmented with information
for conditionals (condition, if-true, if-false) and groups (m of n groups required; this is
group # i of n)
Otherwise, augment the context and process sub-lists and sub-dicts.
"""
assert isinstance(item, dict), (f'{type(item)} is not dict in iter_dict. {item=}')
if DEBUG:
print(f'*** iter_dict({item.keys()=}, {program_qualifiers=}, {requirement_qualifiers=}, '
f'{calling_context=})',
file=sys.stderr)
local_qualifiers = requirement_qualifiers + format_body_qualifiers(item)
local_context = calling_context + []
if 'label' in item.keys():
requirement_name = item.pop('label')
else:
requirement_name = None
ignored_keys = ['allow_credits', 'allow_classes', 'blocktype', 'copy_rules', 'remark']
for key in item.keys():
if key in ignored_keys:
continue
# Subsets, Groups, and Conditionals
if key == 'subset':
""" subset : BEGINSUB
( conditional_body => conditional
| block => ignore
| blocktype => ignore
| class_credit_body => requirements
| copy_rules => ignore
| course_list
| group_requirement => group_requirements
| noncourse => ignore
| rule_complete => ignore
)+
ENDSUB qualifier* (remark | label)*;
"""
subset = item['subset']
# There should be a non-empty label naming the subset requirement.
subset_context = []
if 'label' in subset.keys():
label_str = subset.pop('label')
if label_str:
subset_context = [label_str]
if len(subset_context) == 0:
print(f'Subset with no label {calling_context}', file=sys.stderr)
# There might be qualifiers: format will pop them
subset_qualifiers = format_body_qualifiers(subset)
# Now see what else is there
for subset_key, subset_value in subset.items():
if subset_key in ['conditional', 'course_list', 'group_requirements', 'requirements']:
if isinstance(subset_value, dict):
iter_dict(subset_value,
program_qualifiers,
local_qualifiers + subset_qualifiers,
local_context + subset_context)
elif isinstance(subset_value, list):
iter_list(subset_value,
program_qualifiers,
local_qualifiers + subset_qualifiers,
local_context + subset_context)
else:
print(f'{subset_key} is neither list nor dict in {local_context + subset_context}',
file=sys.stderr)
if key == 'conditional':
conditional = item['conditional']
condition_label = []
condition = conditional['condition']
label = conditional['label']
if label:
condition_label.append(label)
if_true = conditional['if_true']
iter_list(if_true,
program_qualifiers,
local_qualifiers,
local_context + condition_label + [f'{condition} is true'])
# Else clause is optional
if 'if_false' in conditional.keys():
if_false = conditional['if_false']
iter_list(if_false,
program_qualifiers,
local_qualifiers,
local_context + condition_label + [f'{condition} is not true'])
if key == 'group_requirements':
group_requirements = item['group_requirements']
      # Each group
from __future__ import print_function
import os
import sys
import time
import pickle
import itertools
import numpy as np
import theano
import lasagne
from lasagne.utils import floatX
from utils import BColors, print_net_architecture
import theano.tensor as T
from data_pool import DataPool
from batch_iterators import threaded_generator_from_iterator
class Network(object):
"""
Neural Network
"""
def __init__(self, net, print_architecture=True):
"""
Constructor
"""
self.net = net
self.compute_output = None
self.compute_output_dict = dict()
self.saliency_function = None
# get input shape of network
l_in = lasagne.layers.helper.get_all_layers(self.net)[0]
self.input_shape = l_in.output_shape
if print_architecture:
print_net_architecture(net, detailed=True)
def fit(self, data, training_strategy, dump_file=None, log_file=None):
""" Train model """
print("Training neural network...")
col = BColors()
# create data pool if raw data is given
if "X_train" in data:
data_pools = dict()
data_pools['train'] = DataPool(data['X_train'], data['y_train'])
data_pools['valid'] = DataPool(data['X_valid'], data['y_valid'])
else:
data_pools = data
# check if out_path exists
if dump_file is not None:
out_path = os.path.dirname(dump_file)
if out_path != '' and not os.path.exists(out_path):
os.mkdir(out_path)
# log model evolution
if log_file is not None:
out_path = os.path.dirname(log_file)
if out_path != '' and not os.path.exists(out_path):
os.mkdir(out_path)
# adaptive learning rate
learn_rate = training_strategy.ini_learning_rate
learning_rate = theano.shared(floatX(learn_rate))
learning_rate.set_value(training_strategy.adapt_learn_rate(training_strategy.ini_learning_rate, 0))
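        # Keeping the rate in a theano shared variable lets it be changed between epochs via
        # set_value() (see the adapt_learn_rate calls further down) without recompiling the
        # training functions that close over it.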
# initialize evaluation output
pred_tr_err, pred_val_err, overfitting = [], [], []
tr_accs, va_accs = [], []
print("Compiling theano train functions...")
iter_funcs = self._create_iter_functions(y_tensor_type=training_strategy.y_tensor_type,
objective=training_strategy.objective, learning_rate=learning_rate,
l_2=training_strategy.L2,
compute_updates=training_strategy.update_parameters,
use_weights=training_strategy.use_weights,
debug_mode=training_strategy.debug_mode,
layer_update_filter=training_strategy.layer_update_filter)
print("Starting training...")
now = time.time()
try:
# initialize early stopping
last_improvement = 0
best_model = lasagne.layers.get_all_param_values(self.net)
# iterate training epochs
best_va_dice = 0.0
prev_tr_loss, prev_va_loss = 1e7, 1e7
prev_acc_tr, prev_acc_va = 0.0, 0.0
for epoch in self._train(iter_funcs, data_pools, training_strategy.build_train_batch_iterator(),
training_strategy.build_valid_batch_iterator(), training_strategy.report_dices,
debug_mode=training_strategy.debug_mode):
print("Epoch {} of {} took {:.3f}s".format(epoch['number'], training_strategy.max_epochs, time.time() - now))
now = time.time()
# --- collect train output ---
tr_loss, va_loss = epoch['train_loss'], epoch['valid_loss']
train_acc, valid_acc = epoch['train_acc'], epoch['valid_acc']
train_dices, valid_dices = epoch['train_dices'], epoch['valid_dices']
overfit = epoch['overfitting']
# prepare early stopping
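                # Early stopping bookkeeping: an epoch counts as an improvement by validation
                # accuracy or loss (depending on best_model_by_accurary); the best weights and
                # optimizer state are snapshotted so that, once patience runs out, training can
                # either stop or reload them and continue with a reduced learn rate if
                # refinement steps remain.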
if training_strategy.best_model_by_accurary:
improvement = valid_acc > prev_acc_va
else:
improvement = va_loss < prev_va_loss
if improvement:
last_improvement = 0
best_model = lasagne.layers.get_all_param_values(self.net)
best_epoch = epoch['number']
best_opt_state = [_u.get_value() for _u in iter_funcs['updates'].keys()]
# dump net parameters during training
if dump_file is not None:
with open(dump_file, 'wb') as fp:
pickle.dump(best_model, fp)
last_improvement += 1
# print train output
txt_tr = 'costs_tr %.5f ' % tr_loss
if tr_loss < prev_tr_loss:
txt_tr = col.print_colored(txt_tr, BColors.OKGREEN)
prev_tr_loss = tr_loss
txt_tr_acc = '(%.3f)' % train_acc
if train_acc > prev_acc_tr:
txt_tr_acc = col.print_colored(txt_tr_acc, BColors.OKGREEN)
prev_acc_tr = train_acc
txt_tr += txt_tr_acc + ', '
txt_val = 'costs_val %.5f ' % va_loss
if va_loss < prev_va_loss:
txt_val = col.print_colored(txt_val, BColors.OKGREEN)
prev_va_loss = va_loss
txt_va_acc = '(%.3f)' % valid_acc
if valid_acc > prev_acc_va:
txt_va_acc = col.print_colored(txt_va_acc, BColors.OKGREEN)
prev_acc_va = valid_acc
txt_val += txt_va_acc + ', '
print(' lr: %.7f, patience: %d' % (learn_rate, training_strategy.patience - last_improvement + 1))
print(' ' + txt_tr + txt_val + 'tr/val %.3f' % overfit)
# report dice coefficients
if training_strategy.report_dices:
train_str = ' train |'
for key in np.sort(train_dices.keys()):
train_str += ' %.2f: %.3f |' % (key, train_dices[key])
print(train_str)
train_acc = np.max(train_dices.values())
valid_str = ' valid |'
for key in np.sort(valid_dices.keys()):
txt_va_dice = ' %.2f: %.3f |' % (key, valid_dices[key])
if valid_dices[key] > best_va_dice and valid_dices[key] == np.max(valid_dices.values()):
best_va_dice = valid_dices[key]
txt_va_dice = col.print_colored(txt_va_dice, BColors.OKGREEN)
valid_str += txt_va_dice
print(valid_str)
valid_acc = np.max(valid_dices.values())
# collect model evolution data
tr_accs.append(train_acc)
va_accs.append(valid_acc)
pred_tr_err.append(tr_loss)
pred_val_err.append(va_loss)
overfitting.append(overfit)
# save results
exp_res = dict()
exp_res['pred_tr_err'] = pred_tr_err
exp_res['tr_accs'] = tr_accs
exp_res['pred_val_err'] = pred_val_err
exp_res['va_accs'] = va_accs
exp_res['overfitting'] = overfitting
if log_file is not None:
with open(log_file, 'w') as fp:
pickle.dump(exp_res, fp)
# --- early stopping: preserve best model ---
if last_improvement > training_strategy.patience:
print(col.print_colored("Early Stopping!", BColors.WARNING))
status = "Epoch: %d, Best Validation Loss: %.5f: Acc: %.5f" % (
best_epoch, prev_va_loss, prev_acc_va)
print(col.print_colored(status, BColors.WARNING))
if training_strategy.refinement_strategy.n_refinement_steps <= 0:
break
else:
status = "Loading best parameters so far and refining (%d) with decreased learn rate ..." % \
training_strategy.refinement_strategy.n_refinement_steps
print(col.print_colored(status, BColors.WARNING))
# reset net to best weights
lasagne.layers.set_all_param_values(self.net, best_model)
# reset optimizer
for _u, value in zip(iter_funcs['updates'].keys(), best_opt_state):
_u.set_value(value)
# update learn rate
learn_rate = training_strategy.refinement_strategy.adapt_learn_rate(learn_rate)
training_strategy.patience = training_strategy.refinement_strategy.refinement_patience
last_improvement = 0
# maximum number of epochs reached
if epoch['number'] >= training_strategy.max_epochs:
break
# update learning rate
learn_rate = training_strategy.adapt_learn_rate(learn_rate, epoch['number'])
learning_rate.set_value(learn_rate)
except KeyboardInterrupt:
pass
# set net to best weights
lasagne.layers.set_all_param_values(self.net, best_model)
# return best validation loss
if training_strategy.best_model_by_accurary:
return prev_acc_va
else:
return prev_va_loss
def predict_proba(self, input):
"""
Predict on test samples
"""
# prepare input for prediction
if not isinstance(input, list):
input = [input]
# reshape to network input
if input[0].ndim < len(self.input_shape):
input[0] = input[0].reshape([1] + list(input[0].shape))
if self.compute_output is None:
self.compute_output = self._compile_prediction_function()
return self.compute_output(*input)
def predict(self, input):
"""
Predict class labels on test samples
"""
return np.argmax(self.predict_proba(input), axis=1)
def compute_layer_output(self, input, layer):
"""
Compute output of given layer
layer: either a string (name of layer) or a layer object
"""
# prepare input for prediction
if not isinstance(input, list):
input = [input]
# reshape to network input
if input[0].ndim < len(self.input_shape):
input[0] = input[0].reshape([1] + list(input[0].shape))
# get layer by name
if not isinstance(layer, lasagne.layers.Layer):
for l in lasagne.layers.helper.get_all_layers(self.net):
if l.name == layer:
layer = l
break
# compile prediction function for target layer
if layer not in self.compute_output_dict:
self.compute_output_dict[layer] = self._compile_prediction_function(target_layer=layer)
return self.compute_output_dict[layer](*input)
def compute_saliency(self, input, nonlin=lasagne.nonlinearities.rectify):
"""
Compute saliency maps using guided backprop
"""
# prepare input for prediction
if not isinstance(input, list):
input = [input]
# reshape to network input
if input[0].ndim < len(self.input_shape):
input[0] = input[0].reshape([1] + list(input[0].shape))
if not self.saliency_function:
self.saliency_function = self._compile_saliency_function(nonlin)
return self.saliency_function(*input)
def save(self, file_path):
"""
Save model to disk
"""
with open(file_path, 'w') as fp:
params = lasagne.layers.get_all_param_values(self.net)
pickle.dump(params, fp, -1)
def load(self, file_path):
"""
load model from disk
"""
with open(file_path, 'r') as fp:
params = pickle.load(fp)
lasagne.layers.set_all_param_values(self.net, params)
def _compile_prediction_function(self, target_layer=None):
"""
Compile theano prediction function
"""
        # get network output and compile the prediction function
if target_layer is None:
target_layer = self.net
# collect input vars
all_layers = lasagne.layers.helper.get_all_layers(target_layer)
input_vars = []
for l in all_layers:
if isinstance(l, lasagne.layers.InputLayer):
input_vars.append(l.input_var)
net_output = lasagne.layers.get_output(target_layer, deterministic=True)
return theano.function(inputs=input_vars, outputs=net_output)
def _create_iter_functions(self, y_tensor_type, objective, learning_rate, l_2, compute_updates, use_weights,
debug_mode, layer_update_filter):
""" Create functions for training, validation and testing to iterate one epoch. """
# init target tensor
targets = y_tensor_type('y')
weights = y_tensor_type('w').astype("float32")
# get input layer
all_layers = lasagne.layers.helper.get_all_layers(self.net)
# collect input vars
input_vars = []
for l in all_layers:
if isinstance(l, lasagne.layers.InputLayer):
input_vars.append(l.input_var)
# compute train costs
tr_output = lasagne.layers.get_output(self.net, deterministic=False)
if use_weights:
tr_cost = objective(tr_output, targets, weights)
tr_input = input_vars + [targets, weights]
else:
tr_cost = objective(tr_output, targets)
tr_input = input_vars + [targets]
# regularization costs
tr_reg_cost = 0
# regularize RNNs
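        # For layers named "norm_reg_rnn" a norm-stabilising penalty is added: beta times the
        # mean over timesteps of (||h_t|| - ||h_{t-1}||)^2, computed with theano.scan over the
        # hidden-state sequence H. This discourages the hidden-state norm from drifting across
        # timesteps; the commented-out block below is a vectorised variant of the same idea.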
for l in all_layers:
# if l.name == "norm_reg_rnn":
#
# H = lasagne.layers.get_output(l, deterministic=False)
# H_l2 = T.sqrt(T.sum(H ** 2, axis=-1))
# norm_diffs = (H_l2[:, 1:] - H_l2[:, :-1]) ** 2
# norm_preserving_loss = T.mean(norm_diffs)
#
# beta = 1.0
# tr_cost += beta * norm_preserving_loss
if l.name == "norm_reg_rnn":
H = lasagne.layers.get_output(l, deterministic=False)
steps = T.arange(1, l.output_shape[1])
def compute_norm_diff(k, H):
n0 = ((H[:, k - 1, :]) ** 2).sum(1).sqrt()
n1 = ((H[:, k, :]) ** 2).sum(1).sqrt()
return (n1 - n0) ** 2
norm_diffs, _ = theano.scan(fn=compute_norm_diff, outputs_info=None,
non_sequences=[H], sequences=[steps])
beta = 1.0
norm_preserving_loss = T.mean(norm_diffs)
tr_reg_cost += beta * norm_preserving_loss
# compute validation costs
va_output = lasagne.layers.get_output(self.net, deterministic=True)
# estimate accuracy
if y_tensor_type == T.ivector:
va_acc = 100.0 * T.mean(T.eq(T.argmax(va_output, axis=1), targets), dtype=theano.config.floatX)
tr_acc = 100.0 * T.mean(T.eq(T.argmax(tr_output, axis=1), targets), dtype=theano.config.floatX)
elif y_tensor_type == T.vector:
va_acc = 100.0 * T.mean(T.eq(T.ge(va_output.flatten(), 0.5), targets), dtype=theano.config.floatX)
tr_acc = 100.0 * T.mean(T.eq(T.ge(tr_output.flatten(), 0.5), targets), dtype=theano.config.floatX)
else:
va_acc, tr_acc = None, None
# collect all parameters of net and compute updates
all_params = lasagne.layers.get_all_params(self.net, trainable=True)
# filter parameters to update by layer name
if layer_update_filter:
all_params = [p for p in all_params if layer_update_filter in p.name]
# add weight decay
if l_2 is not None:
all_layers = lasagne.layers.get_all_layers(self.net)
tr_reg_cost += l_2 * lasagne.regularization.regularize_layer_params(all_layers, lasagne.regularization.l2)
# compute updates
all_grads = lasagne.updates.get_or_compute_grads(tr_cost + tr_reg_cost, all_params)
        updates
<filename>plotext/_figure.py
from plotext._utility.color import no_color_name, color_code
from plotext._utility.data import brush, transpose, replace
from plotext._utility.string import only_spaces
from plotext._utility.color import uncolorize
from plotext._matrices import figure_matrices
from plotext._default import figure_default
from plotext._utility.file import save_text
from plotext._subplot import subplot_class
from plotext._utility.plot import *
from time import time
##############################################
########## Figure Container ############
##############################################
class figure_class():
def __init__(self):
self.default = figure_default()
self.matrices = figure_matrices()
self.set_size(None, None) # the figure width and height
self.limit_size = self.default.limit_size # if True, the figure can expand beyond terminal size
self.rows, self.cols = [1, 1] # the number of rows and cols of the matrix of subplots
self.set_subplots()
self.row, self.col = [0, 0] # the active plot coordinates in the matrix
self.set_subplot(1, 1) # set the current subplot to work on
self.canvas = ''
self.time = None
def set_size(self, width = None, height = None):
self.width = None if width is None else int(width)
self.height = None if height is None else int(height)
self.size = [self.width, self.height]
def set_subplots(self, rows = None, cols = None): # it sets the figure grid size
rows = 1 if rows is None else rows
cols = 1 if cols is None else cols
if rows > self.default.rows_max:
raise ValueError("Subplots rows above limit of " + str(self.default.rows_max))
if cols > self.default.cols_max:
raise ValueError("Subplots cols above limit of " + str(self.default.cols_max))
rows = rows if rows < self.default.rows_max else self.default.rows_max
cols = cols if cols < self.default.cols_max else self.default.cols_max
self.rows, self.cols = [rows, cols]
self.subplots = [[subplot_class(r, c) for c in range(self.cols)] for r in range(self.rows)]
self.set_subplot(1, 1) # automatically sets the current subplot to work on to the first
def set_subplot(self, row = None, col = None): # it sets the current subplot to work on
row = 1 if row is None else row
col = 1 if col is None else col
self.row, self.col = [row - 1, col - 1]
self.subplot = self.subplots[self.row][self.col]
def save_fig(self, path):
import os
_, extension = os.path.splitext(path)
if extension == ".html":
text = self.matrices.to_html()
else:
text = uncolorize(self.matrices.to_canvas())
save_text(path, text)
##############################################
########### Clear Functions ############
##############################################
def clear_figure(self):
self.__init__()
def colorless(self):
row, col = self.row, self.col
for r in range(self.rows):
for c in range(self.cols):
self.set_subplot(r + 1, c + 1)
self.clear_color()
        self.set_subplot(row + 1, col + 1)
def clear_plot(self):
self.subplot.__init__(self.row, self.col)
def clear_data(self):
self.subplot.data_init()
        #self.set_size(None, None) # useful for streaming data
        self.subplot.set_size(None, None) # useful for streaming data
def clear_color(self):
self.subplot.color_sequence = [no_color_name] * len(self.subplot.color_sequence)
self.subplot.color = [no_color_name] * len(self.subplot.color)
self.subplot.axes_color = no_color_name
self.subplot.ticks_color = no_color_name
self.subplot.canvas_color = no_color_name
def clear_terminal(self, lines = None):
if lines is None:
write('\033c')
else:
for r in range(lines):
write("\033[A") # moves the curson up
write("\033[2K") # clear the entire line
##############################################
########### Set Functions ##############
##############################################
def plot_size(self, width = None, height = None):
width = None if width is None else width
height = None if height is None else height
self.subplot.set_size(width, height)
    def limitsize(self, limit_xsize = None, limit_ysize = None): # can't call it limit_size here because of an internal parameter with the same name
limit_xsize = self.default.limit_size[0] if limit_xsize is None else bool(limit_xsize)
limit_ysize = limit_xsize if limit_ysize is None else bool(limit_ysize)
self.limit_size = [limit_xsize, limit_ysize]
def span(self, colspan = None, rowspan = None):
colspan = 1 if colspan is None or colspan <= 0 else colspan
rowspan = 1 if rowspan is None or rowspan <= 0 else rowspan
colspan = min(colspan, self.cols - self.col)
rowspan = min(rowspan, self.rows - self.row)
self.subplot.rowspan = rowspan
self.subplot.colspan = colspan
def title(self, title = None):
title = None if title is None else str(title).strip()
spaces = only_spaces(title)
title = None if spaces else title
self.subplot.title = title
def xlabel(self, xlabel = None, xside = None):
xlabel = None if xlabel is None else str(xlabel).strip()
spaces = only_spaces(xlabel)
xlabel = None if spaces else xlabel
pos = self.subplot.xside_to_pos(xside)
self.subplot.xlabel[pos] = xlabel
def ylabel(self, ylabel = None, yside = None):
ylabel = None if ylabel is None else str(ylabel).strip()
spaces = only_spaces(ylabel)
ylabel = None if spaces else ylabel
pos = self.subplot.yside_to_pos(yside)
self.subplot.ylabel[pos] = ylabel
def xaxis(self, state = None, xside = None):
pos = self.subplot.xside_to_pos(xside)
state = self.subplot.default.xaxes[pos] if state is None else bool(state)
self.subplot.xaxes[pos] = state
def yaxis(self, state = None, yside = None):
pos = self.subplot.yside_to_pos(yside)
state = self.subplot.default.yaxes[pos] if state is None else bool(state)
self.subplot.yaxes[pos] = state
def frame(self, state = True):
[self.xaxis(state, side) for side in self.subplot.default.xside]
[self.yaxis(state, side) for side in self.subplot.default.yside]
def grid(self, horizontal = None, vertical = None):
horizontal = self.subplot.default.grid[0] if horizontal is None else bool(horizontal)
vertical = horizontal if vertical is None else bool(vertical)
self.subplot.grid = [horizontal, vertical]
def canvas_color(self, color = None):
code = color_code(color, 0)
nocolor = code[0] == 3
color = self.subplot.default.canvas_color if color is None or nocolor else color
self.subplot.canvas_color = color
def ticks_color(self, color = None):
code = color_code(color, 1)
nocolor = code[0] == 3
color = self.subplot.default.ticks_color if color is None or nocolor else color
self.subplot.ticks_color = color
def axes_color(self, color = None):
code = color_code(color, 0)
nocolor = code[0] == 3
color = self.subplot.default.axes_color if color is None or nocolor else color
self.subplot.axes_color = color
def xlim(self, lower = None, upper = None, xside = None):
lower = None if lower is None else float(lower)
upper = None if upper is None else float(upper)
xlim = [lower, upper]
xlim = xlim if xlim == [None] * 2 else [min(xlim), max(xlim)]
pos = self.subplot.xside_to_pos(xside)
self.subplot.xlim[pos] = xlim
def ylim(self, left = None, right = None, yside = None):
left = None if left is None else float(left)
right = None if right is None else float(right)
ylim = [left, right]
ylim = ylim if ylim == [None] * 2 else [min(ylim), max(ylim)]
pos = self.subplot.yside_to_pos(yside)
self.subplot.ylim[pos] = ylim
def xscale(self, scale = None, xside = None):
default_case = (scale is None or scale not in self.subplot.default.xscale)
scale = self.subplot.default.xscale[0] if default_case else scale
pos = self.subplot.xside_to_pos(xside)
self.subplot.xscale[pos] = scale
def yscale(self, scale = None, yside = None):
default_case = (scale is None or scale not in self.subplot.default.yscale)
scale = self.subplot.default.yscale[0] if default_case else scale
pos = self.subplot.yside_to_pos(yside)
self.subplot.yscale[pos] = scale
def xfrequency(self, frequency = None, xside = None):
pos = self.subplot.xside_to_pos(xside)
frequency = self.subplot.default.xfrequency[pos] if frequency is None else int(frequency)
if frequency == 0:
self.xticks([], [], xside)
self.subplot.xfrequency[pos] = frequency
def yfrequency(self, frequency = None, yside = None):
pos = self.subplot.yside_to_pos(yside)
frequency = self.subplot.default.yfrequency[pos] if frequency is None else int(frequency)
if frequency == 0:
self.yticks([], [], yside)
self.subplot.yfrequency[pos] = frequency
def xticks(self, ticks = None, labels = None, xside = None):
pos = self.subplot.xside_to_pos(xside)
ticks = self.subplot.default.xticks[pos] if ticks is None else list(ticks)
labels = ticks if labels is None else list(labels)
labels = list(map(str, labels))
ticks = list(map(_utility.string_to_time, ticks)) if len(ticks) > 0 and type(ticks[0]) == str else ticks
ticks, labels = brush(ticks, labels)
self.subplot.xticks[pos] = ticks
self.subplot.xlabels[pos] = labels
self.subplot.xfrequency[pos] = self.subplot.xfrequency[pos] if ticks is None else len(ticks)
def yticks(self, ticks = None, labels = None, yside = None):
pos = self.subplot.yside_to_pos(yside)
ticks = self.subplot.default.yticks[pos] if ticks is None else list(ticks)
labels = ticks if labels is None else list(labels)
labels = list(map(str, labels))
ticks, labels = brush(ticks, labels)
self.subplot.yticks[pos] = ticks
self.subplot.ylabels[pos] = labels
self.subplot.yfrequency[pos] = self.subplot.yfrequency[pos] if ticks is None else len(ticks)
##############################################
########### Show Functions #############
##############################################
def build(self):
t = time()
self.get_size_max()
self.get_size_matrices()
self.set_size(0, 0)
for row in range(1, self.rows + 1):
for col in range(1, self.cols + 1):
self.set_subplot(row, col)
self.set_subplot_size()
self.subplot.correct_frequency()
self.subplot.adjust_height()
self.subplot.set_scale()
self.subplot.get_xlim()
self.subplot.get_ylim()
self.subplot.get_height_canvas()
self.subplot.get_yticks()
self.subplot.get_width_canvas()
self.subplot.adjust_width()
self.subplot.get_xticks()
self.subplot.get_relative_ticks()
self.subplot.create_matrices()
self.subplot.add_grid()
self.subplot.add_extra_lines()
self.subplot.update_matrix()
self.subplot.add_legend()
self.subplot.add_yaxis()
self.subplot.add_xaxis()
self.subplot.add_labels()
self.set_size(sum(self.widths[0]), sum(transpose(self.heights)[0]))
self.join_matrices()
self.to_canvas()
self.time = time() - t
return self.canvas
def show(self):
self.build()
write(self.canvas)
def get_size_max(self):
term_size = replace(terminal_size(), self.default.terminal_size)
self.width_max = term_size[0] if self.limit_size[0] else self.default.terminal_infinite_size[0]
self.height_max = term_size[1] if self.limit_size[1] else self.default.terminal_infinite_size[1]
        self.height_max -= 2 # the last terminal row is always occupied; the other is reserved for IPython
# -*- coding: utf-8 -*-
"""
hyper/http20/stream
~~~~~~~~~~~~~~~~~~~
Objects that make up the stream-level abstraction of hyper's HTTP/2 support.
These objects are not expected to be part of the public HTTP/2 API: they're
intended purely for use inside hyper's HTTP/2 abstraction.
Conceptually, a single HTTP/2 connection is made up of many streams: each
stream is an independent, bi-directional sequence of HTTP headers and data.
Each stream is identified by a monotonically increasing integer, assigned to
the stream by the endpoint that initiated the stream.
"""
from . import exceptions
from ..common.headers import HTTPHeaderMap
from .util import h2_safe_headers
import logging
log = logging.getLogger(__name__)
# Define the largest chunk of data we'll send in one go. Realistically, we
# should take the MSS into account but that's pretty dull, so let's just say
# 1kB and call it a day.
MAX_CHUNK = 1024
class Stream(object):
"""
A single HTTP/2 stream.
A stream is an independent, bi-directional sequence of HTTP headers and
    data. Each stream is identified by a single integer. From an HTTP
perspective, a stream _approximately_ matches a single request-response
pair.
"""
def __init__(self,
stream_id,
window_manager,
connection,
send_outstanding_data,
recv_cb,
close_cb):
self.stream_id = stream_id
self.headers = HTTPHeaderMap()
self.events = []
# Set to a key-value set of the response headers once their
# HEADERS..CONTINUATION frame sequence finishes.
self.response_headers = None
# Set to a key-value set of the response trailers once their
# HEADERS..CONTINUATION frame sequence finishes.
self.response_trailers = None
# A dict mapping the promised stream ID of a pushed resource to a
# key-value set of its request headers. Entries are added once their
# PUSH_PROMISE..CONTINUATION frame sequence finishes.
self.promised_headers = {}
# Unconsumed response data chunks. Empties after every call to _read().
self.data = []
# Whether the remote side has completed the stream.
self.remote_closed = False
# Whether we have closed the stream.
self.local_closed = False
# There are two flow control windows: one for data we're sending,
# one for data being sent to us.
self._in_window_manager = window_manager
# Save off a reference to the state machine wrapped with lock.
self._conn = connection
# Save off a data callback.
self._send_outstanding_data = send_outstanding_data
self._recv_cb = recv_cb
self._close_cb = close_cb
def add_header(self, name, value, replace=False):
"""
Adds a single HTTP header to the headers to be sent on the request.
"""
if not replace:
self.headers[name] = value
else:
self.headers.replace(name, value)
def send_headers(self, end_stream=False):
"""
Sends the complete saved header block on the stream.
"""
headers = self.get_headers()
with self._conn as conn:
conn.send_headers(self.stream_id, headers, end_stream)
self._send_outstanding_data()
if end_stream:
self.local_closed = True
def send_data(self, data, final):
"""
Send some data on the stream. If this is the end of the data to be
sent, the ``final`` flag _must_ be set to True. If no data is to be
sent, set ``data`` to ``None``.
"""
# Define a utility iterator for file objects.
def file_iterator(fobj):
while True:
data = fobj.read(MAX_CHUNK)
yield data
if len(data) < MAX_CHUNK:
break
# Build the appropriate iterator for the data, in chunks of CHUNK_SIZE.
if hasattr(data, 'read'):
chunks = file_iterator(data)
else:
chunks = (data[i:i+MAX_CHUNK]
for i in range(0, len(data), MAX_CHUNK))
for chunk in chunks:
self._send_chunk(chunk, final)
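    # Illustrative example (added comment, not in the original source): sending
    # 2500 bytes with final=True produces chunks of 1024, 1024 and 452 bytes;
    # only the last chunk can carry END_STREAM, since _send_chunk() only sets it
    # when the chunk is shorter than MAX_CHUNK.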
def _read(self, amt=None):
"""
Read data from the stream. Unlike a normal read behaviour, this
function returns _at least_ ``amt`` data, but may return more.
"""
def listlen(list):
return sum(map(len, list))
# Keep reading until the stream is closed or we get enough data.
while (not self.remote_closed and
(amt is None or listlen(self.data) < amt)):
self._recv_cb(stream_id=self.stream_id)
result = b''.join(self.data)
self.data = []
return result
def _read_one_frame(self):
"""
Reads a single data frame from the stream and returns it.
"""
# Keep reading until the stream is closed or we have a data frame.
while not self.remote_closed and not self.data:
self._recv_cb(stream_id=self.stream_id)
try:
return self.data.pop(0)
except IndexError:
return None
def receive_response(self, event):
"""
Receive response headers.
"""
# TODO: If this is called while we're still sending data, we may want
# to stop sending that data and check the response. Early responses to
# big uploads are almost always a problem.
self.response_headers = HTTPHeaderMap(event.headers)
def receive_trailers(self, event):
"""
Receive response trailers.
"""
self.response_trailers = HTTPHeaderMap(event.headers)
def receive_push(self, event):
"""
Receive the request headers for a pushed stream.
"""
self.promised_headers[event.pushed_stream_id] = event.headers
def receive_data(self, event):
"""
Receive a chunk of data.
"""
size = event.flow_controlled_length
increment = self._in_window_manager._handle_frame(size)
# Append the data to the buffer.
self.data.append(event.data)
if increment:
try:
with self._conn as conn:
conn.increment_flow_control_window(
increment, stream_id=self.stream_id
)
except exceptions.StreamClosedError:
# We haven't got to it yet, but the stream is already
# closed. We don't need to increment the window in this
# case!
pass
else:
self._send_outstanding_data()
def receive_end_stream(self, event):
"""
        The remote peer has ended the stream; all data has now been received.
"""
self.remote_closed = True
def receive_reset(self, event):
"""
Stream forcefully reset.
"""
self.remote_closed = True
self._close_cb(self.stream_id)
def get_headers(self):
"""
Provides the headers to the connection object.
"""
# Strip any headers invalid in H2.
return h2_safe_headers(self.headers)
def getheaders(self):
"""
Once all data has been sent on this connection, returns a key-value set
of the headers of the response to the original request.
"""
# Keep reading until all headers are received.
while self.response_headers is None:
self._recv_cb(stream_id=self.stream_id)
# Find the Content-Length header if present.
self._in_window_manager.document_size = (
int(self.response_headers.get(b'content-length', [0])[0])
)
return self.response_headers
def gettrailers(self):
"""
Once all data has been sent on this connection, returns a key-value set
of the trailers of the response to the original request.
.. warning:: Note that this method requires that the stream is
totally exhausted. This means that, if you have not
completely read from the stream, all stream data will be
read into memory.
:returns: The key-value set of the trailers, or ``None`` if no trailers
were sent.
"""
# Keep reading until the stream is done.
while not self.remote_closed:
self._recv_cb(stream_id=self.stream_id)
return self.response_trailers
def get_pushes(self, capture_all=False):
"""
Returns a generator that yields push promises from the server. Note
that this method is not idempotent; promises returned in one call will
not be returned in subsequent calls. Iterating through generators
returned by multiple calls to this method simultaneously results in
undefined behavior.
:param capture_all: If ``False``, the generator will yield all buffered
push promises without blocking. If ``True``, the generator will
first yield all buffered push promises, then yield additional ones
as they arrive, and terminate when the original stream closes.
"""
while True:
for pair in self.promised_headers.items():
yield pair
self.promised_headers = {}
if not capture_all or self.remote_closed:
break
self._recv_cb(stream_id=self.stream_id)
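    # Illustrative usage (added comment, not in the original source):
    #   for promised_stream_id, request_headers in stream.get_pushes(capture_all=True):
    #       handle_push(promised_stream_id, request_headers)  # hypothetical handler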
def close(self, error_code=None):
"""
Closes the stream. If the stream is currently open, attempts to close
it as gracefully as possible.
:param error_code: (optional) The error code to reset the stream with.
:returns: Nothing.
"""
# FIXME: I think this is overbroad, but for now it's probably ok.
if not (self.remote_closed and self.local_closed):
try:
with self._conn as conn:
conn.reset_stream(self.stream_id, error_code or 0)
except exceptions.ProtocolError:
# If for any reason we can't reset the stream, just
# tolerate it.
pass
else:
self._send_outstanding_data(tolerate_peer_gone=True)
self.remote_closed = True
self.local_closed = True
self._close_cb(self.stream_id)
@property
def _out_flow_control_window(self):
"""
The size of our outbound flow control window.
"""
with self._conn as conn:
return conn.local_flow_control_window(self.stream_id)
def _send_chunk(self, data, final):
"""
Implements most of the sending logic.
Takes a single chunk of size at most MAX_CHUNK, wraps it in a frame and
sends it. Optionally sets the END_STREAM flag if this is the last chunk
(determined by being of size less than MAX_CHUNK) and no more data is
to be sent.
"""
# If we don't fit in the connection window, try popping frames off the
# connection in hope that one might be a window update frame.
while len(data) > self._out_flow_control_window:
self._recv_cb()
# If the length of the data is less than MAX_CHUNK, we're probably
# at the end of the file. If this is the end of the data, mark it
# as END_STREAM.
end_stream = False
if len(data) < MAX_CHUNK and final:
end_stream = True
        # Send the chunk.
# Shmup - Part 22
# simple start/end screens
# by KidsCanCode 2015
# A space shmup in multiple parts
# For educational purposes only
# Art from Kenney.nl
# Frozen Jam by tgfcoder <https://twitter.com/tgfcoder> licensed under CC-BY-3
import pygame as pg
import random
import sys
from os import path
sound_dir = path.join(path.dirname(__file__), 'snd')
img_dir = path.join(path.dirname(__file__), 'img')
# define some colors (R, G, B)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
# game settings
WIDTH = 480
HEIGHT = 600
FPS = 60
TITLE = "SHMUP"
BGCOLOR = BLACK
POWERUP_TIME = 5000
def draw_text(surf, text, size, x, y):
# generic function to draw some text
font_name = pg.font.match_font('arial')
font = pg.font.Font(font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
surf.blit(text_surface, text_rect)
def wait_for_key():
# utility function to pause waiting for a keypress
# still allow Esc to exit
# Actually, we look for KEYUP event, not KEYPRESS
if len(pg.event.get(pg.QUIT)) > 0:
pg.quit()
sys.exit()
keyup_events = pg.event.get(pg.KEYUP)
if len(keyup_events) == 0:
return None
if keyup_events[0].key == pg.K_ESCAPE:
pg.quit()
sys.exit()
return keyup_events[0].key
def draw_shield_bar(surf, x, y, pct):
if pct < 0:
pct = 0
BAR_LENGTH = 100
BAR_HEIGHT = 10
fill = (pct / 100) * BAR_LENGTH
outline_rect = pg.Rect(x, y, BAR_LENGTH, BAR_HEIGHT)
fill_rect = pg.Rect(x, y, fill, BAR_HEIGHT)
pg.draw.rect(surf, GREEN, fill_rect)
pg.draw.rect(surf, WHITE, outline_rect, 2)
def draw_lives(surf, img, x, y, lives):
for i in range(lives):
img_rect = img.get_rect()
img_rect.x = x + 30 * i
img_rect.y = y
surf.blit(img, img_rect)
############ DEFINE SPRITES ############
class Player(pg.sprite.Sprite):
# player sprite - moves left/right, shoots
def __init__(self, game, *groups):
pg.sprite.Sprite.__init__(self, *groups)
self.game = game
self.image = pg.transform.scale(game.player_image, (50, 38))
self.rect = self.image.get_rect()
self.radius = 22
# uncomment to test the radius
# pg.draw.circle(self.image, RED, self.rect.center, self.radius)
self.rect.centerx = WIDTH / 2
self.rect.bottom = HEIGHT - 10
self.speedx = 0
self.shield = 100
self.shoot_delay = 250
self.last_shot = pg.time.get_ticks()
self.power = 1
self.power_time = pg.time.get_ticks()
self.lives = 3
self.hidden = False
self.hide_timer = pg.time.get_ticks()
def hide(self):
# hide player temporarily
self.hidden = True
self.hide_timer = pg.time.get_ticks()
self.loc = self.rect.center
self.rect.center = (WIDTH / 2, HEIGHT + 200)
def update(self):
# unhide if hidden
if self.hidden and pg.time.get_ticks() - self.hide_timer > 1000:
self.hidden = False
self.rect.center = self.loc
# timeout for powerups
if self.power >= 2 and pg.time.get_ticks() - self.power_time > POWERUP_TIME:
self.power -= 1
self.power_time = pg.time.get_ticks()
# only move if arrow key is pressed
self.speedx = 0
keystate = pg.key.get_pressed()
if keystate[pg.K_LEFT]:
self.speedx = -5
if keystate[pg.K_RIGHT]:
self.speedx = 5
if keystate[pg.K_SPACE]:
self.shoot()
# move the sprite
self.rect.x += self.speedx
# stop at the edges
if self.rect.right > WIDTH:
self.rect.right = WIDTH
if self.rect.left < 0:
self.rect.left = 0
def powerup(self):
self.game.power_sound.play()
self.power += 1
self.power_time = pg.time.get_ticks()
def shoot(self):
now = pg.time.get_ticks()
if not self.hidden and now - self.last_shot > self.shoot_delay:
self.last_shot = now
if self.power == 1:
self.shoot_delay = 250
Bullet(self.game.bullet_image, self.rect.centerx, self.rect.top,
[self.game.all_sprites, self.game.bullets])
self.game.pew_sound.play()
if self.power == 2:
self.shoot_delay = 250
Bullet(self.game.bullet_image, self.rect.left, self.rect.centery,
[self.game.all_sprites, self.game.bullets])
Bullet(self.game.bullet_image, self.rect.right, self.rect.centery,
[self.game.all_sprites, self.game.bullets])
self.game.pew_sound.play()
if self.power >= 3:
self.shoot_delay = 150
Bullet(self.game.bullet_image, self.rect.left, self.rect.centery,
[self.game.all_sprites, self.game.bullets])
Bullet(self.game.bullet_image, self.rect.right, self.rect.centery,
[self.game.all_sprites, self.game.bullets])
Bullet(self.game.bullet_image, self.rect.centerx, self.rect.top,
[self.game.all_sprites, self.game.bullets])
self.game.pew_sound.play()
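    # Summary of the power levels above (added comment, not in the original source):
    #   power 1  -> one centre bullet, 250 ms between shots
    #   power 2  -> two wing bullets, 250 ms between shots
    #   power 3+ -> two wing bullets plus a centre bullet, 150 ms between shots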
class Mob(pg.sprite.Sprite):
# mob sprite - spawns above top and moves downward
def __init__(self, images, *groups):
pg.sprite.Sprite.__init__(self, *groups)
self.image0 = random.choice(images)
self.image0.set_colorkey(BLACK)
self.image = self.image0.copy()
self.rect = self.image.get_rect()
self.radius = int(self.rect.width * 0.85 / 2)
# uncomment to test the radius
# pg.draw.circle(self.image, RED, self.rect.center, self.radius)
self.rect.x = random.randrange(WIDTH - self.rect.width)
self.rect.y = random.randrange(-80, -50)
self.speedx = random.randrange(-3, 3)
self.speedy = random.randrange(1, 8)
self.rot = 0
self.rot_speed = random.randrange(-10, 10)
self.last_update = pg.time.get_ticks()
def rotate(self):
now = pg.time.get_ticks()
if now - self.last_update > 50:
self.last_update = now
self.rot = (self.rot + self.rot_speed) % 360
new_image = pg.transform.rotate(self.image0, self.rot)
old_center = self.rect.center
self.image = new_image
self.rect = self.image.get_rect()
self.rect.center = old_center
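    # Note (added comment): re-centering on old_center is needed because
    # pg.transform.rotate returns a larger surface, so a freshly computed rect
    # would otherwise make the sprite drift every rotation step.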
def update(self):
self.rotate()
self.rect.x += self.speedx
self.rect.y += self.speedy
if self.rect.top > HEIGHT or self.rect.right < 0 or self.rect.left > WIDTH:
self.rect.y = random.randrange(-80, -50)
self.rect.x = random.randrange(WIDTH - self.rect.width)
self.speedy = random.randrange(1, 8)
class Bullet(pg.sprite.Sprite):
def __init__(self, img, x, y, *groups):
pg.sprite.Sprite.__init__(self, *groups)
self.image = img
self.image.set_colorkey(BLACK)
self.rect = self.image.get_rect()
self.rect.bottom = y
self.rect.centerx = x
self.speedy = -10
def update(self):
self.rect.y += self.speedy
# kill if off top of screen
if self.rect.bottom < 0:
self.kill()
class Powerup(pg.sprite.Sprite):
def __init__(self, images, *groups):
pg.sprite.Sprite.__init__(self, *groups)
self.type = random.choice(['shield', 'gun'])
self.image = images[self.type]
self.image.set_colorkey(BLACK)
self.rect = self.image.get_rect()
self.rect.bottom = -20
self.rect.x = random.randrange(WIDTH - self.rect.width)
self.speedy = 3
def update(self):
self.rect.y += self.speedy
# kill if off bottom of screen
if self.rect.top > HEIGHT:
self.kill()
class Explosion(pg.sprite.Sprite):
def __init__(self, anim, center, size, *groups):
pg.sprite.Sprite.__init__(self, *groups)
self.anim = anim
self.size = size
self.image = anim[self.size][0]
self.rect = self.image.get_rect()
self.rect.center = center
self.frame = 0
self.last_update = pg.time.get_ticks()
self.frame_rate = 75
def update(self):
now = pg.time.get_ticks()
if now - self.last_update > self.frame_rate:
self.last_update = now
self.frame += 1
if self.frame == len(self.anim[self.size]):
self.kill()
else:
center = self.rect.center
self.image = self.anim[self.size][self.frame]
self.rect = self.image.get_rect()
self.rect.center = center
# initialize pg
pg.init()
pg.mixer.init()
class Game:
# The Game object will initialize the game, run the game loop,
# and display start/end screens
def __init__(self):
# initialize the game and create the window
self.screen = pg.display.set_mode((WIDTH, HEIGHT))
pg.display.set_caption(TITLE)
# start the clock
self.clock = pg.time.Clock()
self.load_data()
def new(self):
# initialize all your variables and do all the setup for a new game
self.all_sprites = pg.sprite.Group()
self.mobs = pg.sprite.Group()
self.bullets = pg.sprite.Group()
self.powerups = pg.sprite.Group()
self.player = Player(self, [self.all_sprites])
for i in range(15):
Mob(self.meteor_images, [self.all_sprites, self.mobs])
self.score = 0
self.last_powerup = pg.time.get_ticks()
pg.mixer.music.play(loops=-1)
def load_data(self):
# load all your assets (sounds, images, etc.)
self.pew_sound = pg.mixer.Sound(path.join(sound_dir, 'pew.wav'))
self.shield_sound = pg.mixer.Sound(path.join(sound_dir, 'pow4.wav'))
self.power_sound = pg.mixer.Sound(path.join(sound_dir, 'pow5.wav'))
self.player_die_sound = pg.mixer.Sound(path.join(sound_dir, 'rumble1.ogg'))
self.expl_sounds = []
for snd in ['expl3.wav', 'expl6.wav']:
self.expl_sounds.append(pg.mixer.Sound(path.join(sound_dir, snd)))
pg.mixer.music.load(path.join(sound_dir, 'tgfcoder-FrozenJam-SeamlessLoop.ogg'))
pg.mixer.music.set_volume(0.4)
self.background = pg.image.load(path.join(img_dir, 'starfield.png')).convert()
self.background_rect = self.background.get_rect()
self.player_image = pg.image.load(path.join(img_dir, 'playerShip1_orange.png')).convert()
self.player_image.set_colorkey(BLACK)
self.player_mini_image = pg.transform.scale(self.player_image, (25, 19))
self.bullet_image = pg.image.load(path.join(img_dir, 'laserRed16.png')).convert()
meteor_list = ['meteorBrown_med3.png', 'meteorBrown_med1.png',
'meteorBrown_small2.png', 'meteorBrown_tiny1.png']
self.meteor_images = []
for img in meteor_list:
self.meteor_images.append(pg.image.load(path.join(img_dir, img)).convert())
self.powerup_images = {}
self.powerup_images['shield'] = pg.image.load(path.join(img_dir, 'shield_gold.png')).convert()
self.powerup_images['gun'] = pg.image.load(path.join(img_dir, 'bolt_gold.png')).convert()
self.explosion_anim = {}
self.explosion_anim['lg'] = []
self.explosion_anim['sm'] = []
self.explosion_anim['player'] = []
for i in range(9):
img = pg.image.load(path.join(img_dir, 'regularExplosion0{}.png'.format(i))).convert()
img.set_colorkey(BLACK)
img1 = pg.transform.scale(img, (75, 75))
self.explosion_anim['lg'].append(img1)
img2 = pg.transform.scale(img, (32, 32))
self.explosion_anim['sm'].append(img2)
img = pg.image.load(path.join(img_dir, 'sonicExplosion0{}.png'.format(i))).convert()
img.set_colorkey(BLACK)
self.explosion_anim['player'].append(img)
def run(self):
# The Game loop - set self.running to False to end the game
self.running = True
while self.running:
self.clock.tick(FPS)
self.events()
self.update()
self.draw()
def quit(self):
pg.quit()
sys.exit()
def update(self):
# the update part of the game loop
self.all_sprites.update()
# check if bullets hit mobs
hits = pg.sprite.groupcollide(self.mobs, self.bullets, True, True)
for hit in hits:
# more points for smaller hits
self.score += 25 - hit.radius
Explosion(self.explosion_anim, hit.rect.center, 'lg', [self.all_sprites])
random.choice(self.expl_sounds).play()
Mob(self.meteor_images, [self.all_sprites, self.mobs])
# check if mobs hit player
hits = pg.sprite.spritecollide(self.player, self.mobs, True, pg.sprite.collide_circle)
for hit in hits:
self.player.shield -= hit.radius * 2
Explosion(self.explosion_anim, hit.rect.center, 'sm', [self.all_sprites])
Mob(self.meteor_images, [self.all_sprites, self.mobs])
if self.player.shield <= 0:
# spawn a player explosion and delete the player sprite
self.player_die_sound.play()
self.death_explosion = Explosion(self.explosion_anim, self.player.rect.center, 'player', [self.all_sprites])
self.player.hide()
self.player.lives -= 1
self.player.shield = 100
self.player.power = 1
# if player died and explosion finished
if self.player.lives == 0 and not self.death_explosion.alive():
self.running = False
pg.mixer.music.stop()
# check if player hits powerup
hits = pg.sprite.spritecollide(self.player, self.powerups, True)
for hit in hits:
if hit.type == 'shield':
self.player.shield += 20
self.shield_sound.play()
if self.player.shield > 100:
self.player.shield = 100
if hit.type == 'gun':
self.player.powerup()
# spawn a powerup (maybe)
now = pg.time.get_ticks()
if now - self.last_powerup > 3000 and random.random() > 0.99:
self.last_powerup = now
Powerup(self.powerup_images, [self.all_sprites, self.powerups])
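        # Note (added comment): random.random() > 0.99 is roughly a 1% chance per
        # frame, so at 60 FPS a powerup spawns within a second or two on average
        # once the 3000 ms cooldown has elapsed.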
def draw(self):
# draw everything to the screen
self.screen.fill(BGCOLOR)
self.screen.blit(self.background, self.background_rect)
self.all_sprites.draw(self.screen)
score_text = str(self.score)
draw_text(self.screen, score_text, 18, WIDTH / 2, 10)
draw_shield_bar(self.screen, 5, 5, self.player.shield)
draw_lives(self.screen, self.player_mini_image, WIDTH - 100, 5, self.player.lives)
fps_txt = "FPS: {:.2f}".format(self.clock.get_fps())
pg.display.set_caption(fps_txt)
pg.display.flip()
def events(self):
# catch all events here
        for event
"""
In large part lifted from
https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py
but with 1d convolutions and arbitrary kernel sizes
"""
from typing import Callable, List, Literal, Optional
import torch
import torch.nn as nn
from torch import Tensor
def convN(
in_planes: int,
out_planes: int,
kernel_size: int = 3,
stride: int = 1,
groups: int = 1,
dilation: int = 1,
) -> nn.Conv1d:
"""1d convolution with padding"""
if not kernel_size % 2:
raise ValueError("Can't use even sized kernels")
return nn.Conv1d(
in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
padding=dilation * int(kernel_size // 2),
groups=groups,
bias=False,
dilation=dilation,
)
def conv1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv1d:
"""kernel-size 1 convolution"""
return nn.Conv1d(
in_planes, out_planes, kernel_size=1, stride=stride, bias=False
)
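# Illustrative sketch (added comment, not in the original source; assumes torch
# is importable). With stride=1 the padding chosen by convN preserves the time
# dimension:
#   x = torch.randn(1, 2, 128)            # (batch, ifos, time)
#   y = convN(2, 64, kernel_size=7)(x)    # padding = 1 * (7 // 2) = 3
#   assert y.shape == (1, 64, 128)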
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
kernel_size: int = 3,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm1d
if groups != 1 or base_width != 64:
raise ValueError(
"BasicBlock only supports groups=1 and base_width=64"
)
if dilation > 1:
raise NotImplementedError(
"Dilation > 1 not supported in BasicBlock"
)
# Both self.conv1 and self.downsample layers
# downsample the input when stride != 1
self.conv1 = convN(inplanes, planes, kernel_size, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = convN(planes, planes, kernel_size)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""
Bottleneck blocks implement one extra convolution
compared to basic blocks. In this layers, the `planes`
parameter is generally meant to _downsize_ the number
of feature maps first, which then get expanded out to
`planes * Bottleneck.expansion` feature maps at the
output of the layer.
"""
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
kernel_size: int = 3,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm1d
width = int(planes * (base_width / 64.0)) * groups
# conv1 does no downsampling, just reduces the number of
# feature maps from inplanes to width (where width == planes)
# if groups == 1 and base_width == 64
self.conv1 = convN(inplanes, width, kernel_size)
self.bn1 = norm_layer(width)
# conv2 keeps the same number of feature maps,
# but downsamples along the time axis if stride
# or dilation > 1
self.conv2 = convN(width, width, kernel_size, stride, groups, dilation)
self.bn2 = norm_layer(width)
# conv3 expands the feature maps back out to planes * expansion
self.conv3 = conv1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
"""1D ResNet architecture
Simple extension of ResNet to 1D convolutions with
arbitrary kernel sizes to support the longer timeseries
used in BBH detection.
Args:
num_ifos:
The number of interferometers used for BBH
detection. Sets the channel dimension of the
input tensor
layers:
A list representing the number of residual
blocks to include in each "layer" of the
network. Total layers (e.g. 50 in ResNet50)
is `2 + sum(layers) * factor`, where factor
is `2` for vanilla `ResNet` and `3` for
`BottleneckResNet`.
kernel_size:
The size of the convolutional kernel to
use in all residual layers. _NOT_ the size
of the input kernel to the network, which
is determined at run-time.
zero_init_residual:
Flag indicating whether to initialize the
weights of the batch-norm layer in each block
to 0 so that residuals are initialized as
identities. Can improve training results.
groups:
Number of convolutional groups to use in all
layers. Grouped convolutions induce local
connections between feature maps at subsequent
layers rather than global. Generally won't
need this to be >1, and wil raise an error if
>1 when using vanilla `ResNet`.
width_per_group:
Base width of each of the feature map groups,
which is scaled up by the typical expansion
factor at each layer of the network. Meaningless
for vanilla `ResNet`.
stride_type:
Whether to achieve downsampling on the time axis
by strided or dilated convolutions for each layer.
If left as `None`, strided convolutions will be
used at each layer. Otherwise, `stride_type` should
be one element shorter than `layers` and indicate either
`stride` or `dilation` for each layer after the first.
norm_layer:
The layer type to use for normalization after each
convolution. If left as `None`, defaults to 1D batch norm
(`torch.nn.BatchNorm1d`).
"""
block = BasicBlock
def __init__(
self,
num_ifos: int,
layers: List[int],
kernel_size: int = 3,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
# TODO: use Literal["stride", "dilation"] once typeo fix is in
stride_type: Optional[List[str]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm1d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
# TODO: should we support passing a single string
# for simplicity here?
if stride_type is None:
# each element in the tuple indicates if we should replace
# the stride with a dilated convolution instead
stride_type = ["stride"] * (len(layers) - 1)
if len(stride_type) != (len(layers) - 1):
raise ValueError(
"'stride_type' should be None or a "
"{}-element tuple, got {}".format(len(layers) - 1, stride_type)
)
self.groups = groups
self.base_width = width_per_group
# start with a basic conv-bn-relu-maxpool block
# to reduce the dimensionality before the heavy
# lifting starts
self.conv1 = nn.Conv1d(
num_ifos,
self.inplanes,
kernel_size=7,
stride=2,
padding=3,
bias=False,
)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
# now create layers of residual blocks where each
# layer uses the same number of feature maps for
# all its blocks (some power of 2 times 64).
# Don't downsample along the time axis in the first
# layer, but downsample in all the rest (either by
# striding or dilating depending on the stride_type
# argument)
residual_layers = [self._make_layer(64, layers[0], kernel_size)]
it = zip(layers[1:], stride_type)
for i, (num_blocks, stride) in enumerate(it):
block_size = 64 * 2 ** (i + 1)
layer = self._make_layer(
block_size,
num_blocks,
kernel_size,
stride=2,
stride_type=stride,
)
residual_layers.append(layer)
self.residual_layers = nn.ModuleList(residual_layers)
# Average pool over each feature map to create a
# single value for each feature map that we'll use
# in the fully connected head
self.avgpool = nn.AdaptiveAvgPool1d(1)
# use a fully connected layer to map from the
# feature maps to the binary output that we need
self.fc = nn.Linear(block_size * self.block.expansion, 1)
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(
m.weight, mode="fan_out", nonlinearity="relu"
)
elif isinstance(m, (nn.BatchNorm1d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros,
# and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to
# https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(
self,
planes: int,
blocks: int,
kernel_size: int = 3,
stride: int = 1,
stride_type: Literal["stride", "dilation"] = "stride",
) -> nn.Sequential:
block = self.block
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if stride_type == "dilation":
self.dilation *= stride
stride = 1
elif stride_type != "stride":
raise ValueError("Unknown stride type {stride}")
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
| |
#!/usr/bin/env python3
# pcrunner/main.py
# vim: ai et ts=4 sw=4 sts=4 ft=python fileencoding=utf-8
'''
pcrunner.main
-------------
Main entry point for the pcrunner command.
'''
import argparse
import io
import itertools
import logging
import logging.handlers
import os
import re
import shlex
import stat
import subprocess
import sys
import threading
import time
from glob import glob
from queue import Queue
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from . import __version__, configuration
from .daemon import Daemon
from .exception import PostFailed, PostResultTooBig
logger = logging.getLogger(__name__)
class PassiveCheckRunnerDaemon(Daemon):
def __init__(self, pcrunner):
self.pcrunner = pcrunner
self.pid_file = pcrunner.pid_file
def run(self):
self.pcrunner.run()
class Check(object):
def __init__(self, result_type, name, command, hostname):
self.result_type = result_type
self.name = name
self.command = command
self.hostname = hostname
self.pid = None
self.process = None
self.status_code = 3
self.terminated = False
self.stdout = ''
self.stderr = ''
self.performance_data = ''
self.starttime = 0
self.endtime = 0
def start(self):
# should be called once
assert self.endtime == 0
self.starttime = time.time()
def end(self):
# should be called once
assert self.endtime == 0
self.endtime = time.time()
def run(self):
"""
        Run the command and save execution data
"""
# Start the time
self.start()
logger.debug('check %s: started at %s', self.name, self.starttime)
try:
if os.name == 'nt':
cmd = self.command
else:
cmd = shlex.split(self.command, posix=True)
# Start process
logger.debug('check %s: start subprocess %s', self.name, cmd)
self.process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
except OSError as error:
self.end()
self.status_code = 3
self.stdout = ' '
self.stderr = '{0}'.format(error)
logger.error(
'check %s: failed: duration: %.4f command: %s'
'return code %d stdout: %s stderr: %s',
self.name,
self.duration,
self.command,
self.status_code,
self.stdout,
self.stderr,
)
else:
            # Process started
self.pid = self.process.pid
logger.debug('check %s: subprocess PID: %d', self.name, self.pid)
# Wait for output
stdout, stderr = self.process.communicate()
            # Process ended, stop the time
self.end()
if self.terminated:
                # Time must have run out
# check got terminated
self.status_code = 3
self.stdout = ''
self.stderr = 'terminated, max time reached'
logger.error('check %s: %s ', self.name, self.stderr)
else:
self.status_code = self.process.returncode
self.stdout = ' '.join(str(stdout).splitlines())
self.stderr = ' '.join(str(stderr).splitlines())
logger.debug(
'check %s: finished: PID: %d return code %d',
self.name,
self.pid,
self.status_code,
)
def terminate(self):
"""
Terminates check if still running.
"""
if self.pid is not None and self.endtime == 0:
# Check started but not ended
logger.debug(
'check %s: terminated duration: %.4f PID: %d',
self.name,
self.duration,
self.pid,
)
try:
self.process.terminate()
except OSError as error:
logger.critical(
                    'check %s: termination failed PID %d error: %s',
self.name,
self.pid,
error,
)
logger.critical('Exiting main program now')
sys.exit(3)
else:
self.terminated = True
elif self.endtime:
logger.debug(
'check %s: already finished, not terminating: '
'PID: %d return code %d',
self.name,
self.pid,
self.status_code,
)
else:
logger.debug('check %s: not started, not terminating', self.name)
@property
def duration(self):
return self.endtime - self.starttime
@property
def elapsed(self):
return time.time() - self.starttime
@property
def plugin_output(self):
'''
        Checks (loosely) if the performance data is in the form:
        rx_errors=0;;;0;tx_errors=0;;;0;
        Otherwise the '|' and everything after it is removed.
'''
res = ' '.join(
(self.stdout, self.stderr, self.performance_data)
).strip()
if '|' in res:
output, perf = res.split('|', 1)
s = re.search(r'.+=[\w\.;=]*', perf)
if s:
res = '{0}|{1}'.format(output, s.group())
                logger.debug('check %s: valid perf data: %s', self.name, res)
else:
logger.warning(
'check %s: invalid perf data: %s',
self.name,
res,
)
res = output
return res
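    # Illustrative example (added comment, not in the original source):
    #   'DISK OK | /=2643MB;;;0;5948'  -> kept, the perf data matches key=value
    #   'DISK OK | some free text'     -> trimmed to 'DISK OK', perf data dropped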
def __repr__(self):
'''
Representation in NSCA format
'''
if self.result_type == 'PROCESS_SERVICE_CHECK_RESULT':
return '[{0:.0f}] {1};{2};{3};{4};{5}'.format(
self.endtime,
self.result_type,
self.hostname,
self.name,
self.status_code,
self.plugin_output,
)
else:
return '[{0:.0f}] {1};{2};{3};{4}'.format(
self.endtime,
self.result_type,
self.hostname,
self.status_code,
self.plugin_output,
)
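    # Illustrative example (added comment, not in the original source): a service
    # check named 'disk' on host 'web01' that exited 0 with output 'OK' renders as
    #   [1457000000] PROCESS_SERVICE_CHECK_RESULT;web01;disk;0;OK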
class CheckRun(object):
def __init__(self, hostname):
self.hostname = hostname
class PassiveCheckRunner(object):
def __init__(
self,
nsca_web_url,
nsca_web_username,
nsca_web_password,
hostname,
command_file,
result_file,
result_dir,
max_procs,
interval,
lines_per_post,
pid_file,
http_timeout,
max_line_size,
):
self.nsca_web_url = nsca_web_url
self.nsca_web_username = nsca_web_username
        self.nsca_web_password = nsca_web_password
self.hostname = hostname
self.command_file = command_file
self.result_file = result_file
self.result_dir = result_dir
self.max_procs = max_procs
self.interval = interval
self.lines_per_post = lines_per_post
self.pid_file = pid_file
self.http_timeout = http_timeout
self.max_line_size = max_line_size
self.timeout = self.interval - 10
self.check_pcrunner = Check(
'PROCESS_SERVICE_CHECK_RESULT',
'pcrunner',
'pcrunner',
self.hostname,
)
self.current_check_results = []
self.check_results_external_commands = []
# Get commands
self.check_command_list = configuration.read_check_commands(
self.command_file
)
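    # Note (added comment): with, for example, interval = 60 s the runner gives
    # checks self.timeout = 50 s to finish before they are terminated.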
def __repr__(self):
mesg = '<pcrunner nsca_web_url: {0} nsca_web_username: {1}'
mesg += ' hostname: {2} command_file: {3} result_file: {4}'
mesg += ' max_procs:{5} timeout:{6} interval:{7} lines_per_post:{8}>'
return mesg.format(
self.nsca_web_url,
self.nsca_web_username,
self.hostname,
self.command_file,
self.result_file,
self.max_procs,
self.timeout,
self.interval,
self.lines_per_post,
)
def get_checks(self):
self.checks = []
for args in self.check_command_list:
args['hostname'] = self.hostname
self.checks.append(Check(**args))
self.number_of_checks = len(self.checks)
def kill_running_checks(self):
# don't block if start_queue is empty
        if self.start_queue is not None and not self.start_queue.empty():
            # Remove checks that did not get started from start_queue.
            # Put them on a temporary list to empty start_queue a.s.a.p.
while True:
                # Get all checks from start_queue until we hit a None
check = self.start_queue.get()
if check is None:
# Put 'None' that got just removed on the queue again
# to make all threads end
self.start_queue.put(None)
break
self.checks_not_started.append(check)
self.number_of_checks_not_started = len(self.checks_not_started)
# Kill running checks.
while not self.run_queue.empty():
check = self.run_queue.get()
logger.debug('check %s: terminate', check.name)
check.terminate()
if check.terminated:
logger.debug('check %s: terminated', check.name)
self.number_of_checks_terminated += 1
logger.debug('check %s: on finished queue', check.name)
logger.error('%d checks terminated', self.number_of_checks_terminated)
# Write status_code and stderr for checks that did not start.
for check in self.checks_not_started:
check.start()
check.end()
check.status_code = 3
check.stdout = ''
check.stderr = 'check not started, max time exceeded'
# Move check result queue.
self.finished_queue.put(check)
logger.debug('check %s: on finished queue', check.name)
logger.error(check.stderr)
logger.error(
'%d checks not started', self.number_of_checks_not_started
)
def check_results_from_finished_queue(self):
self.current_check_results = []
while not self.finished_queue.empty():
check = self.finished_queue.get()
logger.debug('format check result: %s', check)
self.current_check_results.append('{0}\n'.format(check))
def post(self, lines):
results = ''.join(lines).encode('utf-8')
number_of_lines = len(lines)
if len(results) > self.lines_per_post * self.max_line_size:
raise PostResultTooBig
values = {
'username': self.nsca_web_username,
            'password': self.nsca_web_password,
'input': results,
}
data = urlencode(values).encode('utf-8')
data_len = len(data)
headers = {
'User-Agent': 'pcrunner',
'Content-length': data_len,
'Content-type': 'application/x-www-form-urlencoded',
}
request = Request(self.nsca_web_url, data, headers)
try:
logger.debug(
'Posting %d results to: %s with length %d.',
number_of_lines,
self.nsca_web_url,
data_len,
)
response = urlopen(request, timeout=self.http_timeout)
except Exception as error:
logger.error(
'Failed to post %d results to %s: %s',
number_of_lines,
self.nsca_web_url,
error,
)
raise PostFailed
else:
http_response_code = response.getcode()
logger.debug('HTTP return code: %s', http_response_code)
if http_response_code != 200:
logger.error('HTTP return code: %s', http_response_code)
raise PostFailed
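    # Illustrative example (added comment; the numbers are made up, not taken from
    # any configuration): with lines_per_post=100 and max_line_size=8192, a batch
    # whose encoded results exceed 819200 bytes raises PostResultTooBig before any
    # HTTP request is made.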
def post_results_previous_run(self):
'''
If a previous result file exists post the results that are found in
this file in chunks of number of lines per post. If post fails save
failed checks in ``self.results_post_failed``.
'''
try:
with io.open(self.result_file, 'r', encoding='utf-8') as fd:
# There are results which are not posted in previous run.
# Try post them.
logger.debug(
'result file %s exists, try post old results',
self.result_file,
)
# Iterate through result file.
                # Post lines_per_post lines at a time.
while True:
lines = list(itertools.islice(fd, self.lines_per_post))
if not lines:
break
try:
self.post(lines)
except PostFailed:
self.results_post_failed += lines
# Get rest of the lines
self.results_post_failed += list(
itertools.islice(fd, None)
)
logger.debug(
'%d lines of old check results saved for later'
' posting',
len(self.results_post_failed),
)
break
# Remove current result file.
# failed results are saved in self.results_post_failed
logger.debug('remove current result file: %s', self.result_file)
try:
os.remove(self.result_file)
except OSError as error:
logger.error(error)
except IOError:
# There is no result file: no old results to post.
logger.debug(
'No result file (%s) of previous run.', self.result_file
)
def read_results_from_spool_dir(self):
if self.result_dir:
logger.debug(
                'reading result files from spool directory %s',
self.result_dir,
)
epoch_time_fmt = 10 * '[0-9]'
result_files = glob(
'{0}/{1}*'.format(self.result_dir, epoch_time_fmt)
)
for result_file in result_files:
with io.open(result_file, 'r', encoding='utf-8') as fd:
logger.debug('reading results from %s', result_file)
for line in fd.readlines():
if len(line) < self.max_line_size:
self.check_results_external_commands.append(line)
else:
logger.warning(
                                'line in result file %s exceeds max length '
                                'of %s',
result_file,
self.max_line_size,
)
try:
os.remove(result_file)
logger.debug('deleting %s', result_file)
except OSError as error:
logger.error(error)
else:
logger.debug(
'No result directory configured: not reading results'
' from external commands.'
)
def write_failed_results(self):
logger.debug(
'Saving %d results to file: %s',
len(self.results_post_failed),
self.result_file,
)
try:
with io.open(self.result_file, 'w', encoding='utf-8') as fd:
fd.write(''.join(self.results_post_failed))
except Exception as error:
logger.error(error)
def post_results(self):
self.results_post_failed = []
check_results = []
# If there is a previous result file post it
self.post_results_previous_run()
# Get results from external commands
self.read_results_from_spool_dir()
# Combine and sort results from current and external commands
check_results = (
self.current_check_results + self.check_results_external_commands
)
# make sure it's sorted
| |
#!/usr/bin/python
#
# File: DockSim.py
# Author: <NAME>
# Email: <EMAIL>
# Date: Dec 20, 2015
#----------------------------------------------------------------------------
from __future__ import print_function, division
from collections import namedtuple
from math import sqrt, trunc
StateVec = namedtuple('StateVec', 'phase distTraveled currVelocity fuelRemaining tEnd')
#----------------------------------------------------------------------------
class FlightParams(object):
""" An object to hold the flight profile parameters
tAft is the duration of the acceleration burn, in seconds
tCoast is the duration of the coast phase, in seconds
tFore is the duration of the deceleration burn, in seconds
aAft is the force of acceleration, in m/sec^2
aFore is the force of deceleration, in m/sec^2
rFuel is the rate of fuel consumption in kg/sec
qFuel is the initial amount of fuel, in kg
dist is the initial distance to the dock, in m
        vMin is the minimum successful docking velocity, in m/s
        vMax is the maximum successful docking velocity, in m/s
vInit is the ship's initial velocity, in m/s
tSim is the maximum duration of the simulation in seconds (an int)
The user flight profile parameters: tAft, tCoast, and tFore, are
forced to be values representable as ddd.d. tSim is forced to
be an int.
"""
def __init__(self, tAft, tCoast, tFore, aAft, aFore,
rFuel, qFuel, dist, vMin, vMax, vInit, tSim):
self.tAft = (trunc(tAft * 10) % 10000)/10.0
self.tCoast = (trunc(tCoast * 10) % 10000)/10.0
self.tFore = (trunc(tFore * 10) % 10000)/10.0
self.aAft = aAft
self.aFore = aFore
self.rFuel = rFuel
self.qFuel = qFuel
self.dist = dist
self.vMin = vMin
self.vMax = vMax
self.vInit = vInit
self.tSim = int(tSim)
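    # Illustrative example (added comment, not in the original source):
    # FlightParams(tAft=123.456, ...) stores tAft == 123.4, because
    # trunc(123.456 * 10) % 10000 == 1234 and 1234 / 10.0 == 123.4.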
#----------------------------------------------------------------------------
class DockSim(object):
""" DockSim contains the flight profile simulation parameters and computes
simulation output values.
"""
# Flight parameters
# (TODO: should come from MS Settings table)
MAX_V_DOCK = 0.1 # max terminal velocity for successful dock in m/sec
MIN_V_DOCK = 0.01 # min terminal velocity for successful dock in m/sec
INITIAL_V = 0.0 # velocity at start of simulation in m/sec
# Longest flight time allowed
# (must be greater than maximum burn length self.qFuel/self.rFuel)
# (TODO: should come from MS Settings table)
MAX_FLIGHT_DURATION_S = 1000 * 60 # 1000 minutes
# Flight phases
START_PHASE = 0
ACCEL_PHASE = 1
COAST_PHASE = 2
DECEL_PHASE = 3
GLIDE_PHASE = 4
END_PHASE = 5
PHASE_STR = { START_PHASE: "START",
ACCEL_PHASE: "ACCELERATE",
COAST_PHASE: "COAST",
DECEL_PHASE: "DECELERATE",
GLIDE_PHASE: "GLIDE",
END_PHASE : "END",
}
# Status value returned at end of travel interval computation
INTERVAL_DNF = 0 # Did not finish
INTERVAL_DEST = 1 # Dest reached
INTERVAL_END = 2 # End of time interval reached
# Final simulation result conditions
OUTCOME_DNF = "OUTCOME_DNF"
OUTCOME_NO_FUEL = "OUTCOME_NO_FUEL"
OUTCOME_TOO_SLOW = "OUTCOME_TOO_SLOW"
OUTCOME_TOO_FAST = "OUTCOME_TOO_FAST"
OUTCOME_SUCCESS = "OUTCOME_SUCCESS"
def __init__(self, fp):
""" Store the simulation parameters.
fp is a FlightParams namedtuple.
Raises ValueError if any of the flight characteristics are out of
range, but allows the user-supplied time values to be anything.
"""
# User-supplied flight profile parameters
self.tAft = fp.tAft # sec (aft acceleration burn)
self.tCoast = fp.tCoast # sec (coasting interval)
self.tFore = fp.tFore # sec (forward deceleration burn)
# Capsule flight characteristics parameters
self.aAft = fp.aAft # m/sec^2 (aft acceleration)
self.aFore = fp.aFore # m/sec^2 (forward deceleration)
self.rFuel = fp.rFuel # kg/sec (fuel consumption rate)
self.qFuel = fp.qFuel # kg (initial fuel quantity)
self.dist = fp.dist # m (initial distance to dock)
self.vMin = fp.vMin # m/s (min docking velocity)
self.vMax = fp.vMax # m/s (max docking velocity)
self.v0 = fp.vInit # m/sec (initial velocity)
# Validate some parameters
if self.rFuel <= 0.0:
raise ValueError("Fuel consumption rate must be greater than 0 if you hope to get anywhere")
if self.qFuel <= 0.0:
raise ValueError("Fuel quantity must be greater than 0 if you hope to get anywhere")
if self.dist <= 0.0:
raise ValueError("Distance to travel must be greater than 0")
if self.aFore <= 0.0:
raise ValueError("Fore thruster (nose maneuvering jets) acceleration must be greater than 0")
if self.aAft <= 0.0:
raise ValueError("Aft thruster (rear engine) acceleration must be greater than 0")
def outcome(self, state):
""" Determine the nature of the failure from the final state """
status = self.OUTCOME_SUCCESS
if state.currVelocity <= 0.0:
status = self.OUTCOME_DNF
elif state.fuelRemaining <= 0.0:
status = self.OUTCOME_NO_FUEL
elif state.currVelocity < self.vMin:
status = self.OUTCOME_TOO_SLOW
elif state.currVelocity > self.vMax:
status = self.OUTCOME_TOO_FAST
return status
def accelVelocity(self):
""" Return the velocity at the end of the acceleration phase """
return self.shipState(self.tAft).currVelocity
def coastVelocity(self):
""" Return the velocity during the coast phase """
return self.shipState(self.tAft + self.tCoast).currVelocity
def decelVelocity(self):
""" Return the velocity at the end of the deceleration phase """
return self.shipState(self.tAft + self.tCoast + self.tFore).currVelocity
def terminalVelocity(self):
""" Return the terminal velocity of the maneuver. """
return self.shipState(self.flightDuration()).currVelocity
def safeDockingVelocity(self, v):
""" Return True if v is in the safe docking range """
return v >= self.vMin and v <= self.vMax
def dockIsSuccessful(self):
""" Return True if the ship docks with a terminal velocity
between self.vMin and self.vMax.
"""
return self.safeDockingVelocity(self.terminalVelocity())
def distanceTraveled(self, dt, v0, a=0.0):
""" Compute the distance traveled.
dt is the amount of time traveled, in seconds
v0 is the velocity at the start of the time interval, in m/s
a is the amount of constant acceleration being applied during
the interval, in m/s^2
Returns the distance traveled during the timeInterval, in meters
computed by the formula d = v0 * dt + 0.5 * a * dt**2
"""
return (v0 + 0.5 * a * dt) * dt
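    # Worked example (added comment): v0 = 2 m/s, a = 0.5 m/s^2, dt = 4 s gives
    # d = (2 + 0.5 * 0.5 * 4) * 4 = 12 m, matching v0*dt + 0.5*a*dt**2.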
def velocity(self, dt, v0, a):
""" Compute resulting velocity from initial velocity, accel, and time interval """
return v0 + a * dt
def fuelConsumed(self, dt):
""" Compute amount of fuel consumed by a burn of dt """
return dt * self.rFuel # time * rate of consumption
def timeToTravel(self, d, v0, a):
""" Return the time it takes to traverse a distance, d.
d is the distance to be traversed, in meters (d >= 0)
v0 is the initial velocity, in m/s
a is the constant acceleration, in m/s**2
Returns the positive time in seconds to go the distance d,
or the negative time it takes for the velocity to go to 0
if a negative acceleration causes the velocity to go negative,
or None if v0 <= 0
Note: This may handle more cases than it needs to, but that's
okay.
"""
if a == 0.0:
if v0 == 0.0:
return None
else:
return d/v0
else:
disc = v0**2 - 2.0 * a * (-d)
if disc < 0.0:
# Negative acceleration will cause the velocity to go negative,
# also resulting in no real solution for the time
# so instead we will return the time it takes the velocity to go to zero
return v0/a # either v0 or a is negative
else:
return (-v0 + sqrt(disc)) / a
def timeToStop(self, v0, a):
""" Return the time it takes for velocity to go to zero.
v0 must be >= 0.0 or ValueError is raised.
a must be < 0.0 or ValueError is raised.
Returns the time in seconds for the initial velocity to be
reduced to zero.
"""
if a >= 0.0:
raise ValueError("a must be < 0.0")
if v0 < 0.0:
raise ValueError("v0 must be >= 0.0")
# Use: v = v0 + a * t
# Solve for v = 0:
# v0 + a * t = 0
# t = -v0/a
return -v0/a
def timeUntilFuelRunsOut(self, qFuel):
""" Return the burn time until fuel is completely consumed.
qFuel is the amount of fuel, in kg.
Assumes constant burn rate, self.rFuel.
Returns the time in seconds of the maximum burn.
"""
return qFuel/self.rFuel
def computeNoThrustTravelInterval(self, dt, v0, distToDest, qFuel):
""" Compute distance traveled, ending | |
# Source: profesia/luft - luft/common/column.py
# -*- coding: utf-8 -*-
"""Column."""
from typing import List, Optional, Union
from luft.common.config import EMBULK_TYPE_MAPPER
from luft.common.logger import setup_logger
# Setup logger
logger = setup_logger('common', 'INFO')
class Column:
"""Column."""
def __init__(self, name: str, data_type: str, rename: Optional[str] = None,
escape: Optional[bool] = False, mandatory: Optional[bool] = False,
pk: Optional[bool] = False, default_value: Optional[str] = None,
ignored: Optional[bool] = False, tech_column: Optional[bool] = False, metadata: Optional[bool] = False):
"""Create column.
Parameters:
name (str): column name.
rename (str): rename column in SQL.
data_type (str): data type. Varchar, integer, etc.
escape (bool): whether to escape column name.
mandatory (bool): whether column is mandatory.
pk (bool): whether column is primary key.
default_value (str): default fixed value.
ignored (bool): whether column is ignored in historization phase.
tech_column (bool): whether column is just a technical column, prefixed DW_.
metadata (bool): whether column is metadata from Elasticsearch.
"""
self.name = name
self.rename = rename
self.data_type = data_type
self.escape = escape
self.mandatory = mandatory
self.pk = pk
self.default_value = default_value
self.ignored = ignored
self.tech_column = tech_column
self.metadata = metadata
self.index = 0
def set_index(self, index: int):
"""Set index of column."""
self.index = index
def get_name(self, col_type: str = 'all', filter_ignored: bool = True,
include_tech: bool = True) -> Optional[str]:
"""Return column name or rename.
Parameters:
col_type (str): what type of columns should be returned. Default `all`.
Values:
- all - primary and nonprimary keys are returned
- pk - only primary keys are returned
- nonpk - only nonprimary keys are returned
filter_ignored (bool): whether ignored column should be filtered out from result.
Default True.
include_tech (bool): whether technical columns should be included in result. Columns
prefixed with DW_.
Returns:
(str): column name
"""
# Decide if value should be returned
if self._should_return(col_type, filter_ignored, include_tech):
name = self.rename or self.name
return name
return None
def get_index(self, col_type: str = 'all', filter_ignored: bool = True,
include_tech: bool = True) -> Optional[str]:
"""Get indexed value for Loading into Snowflake.
E.g. `$1 as Column_Name`.
Parameters:
col_type (str): what type of columns should be returned. Default `all`.
Values:
- all - primary and nonprimary keys are returned
- pk - only primary keys are returned
- nonpk - only nonprimary keys are returned
filter_ignored (bool): whether ignored column should be filtered out from result.
Default True.
include_tech (bool): whether technical columns should be included in result. Columns
prefixed with DW_.
Returns:
(str): index
"""
if self._should_return(col_type, filter_ignored, include_tech):
return f'${self.index} AS {self.get_name()}'
return None
def get_aliased_name(self, col_type: str = 'all', filter_ignored: bool = True,
include_tech: bool = True) -> Optional[str]:
"""Return full aliased column name.
E.g. `col_name as Column_Name`.
Parameters:
col_type (str): what type of columns should be returned. Default `all`.
Values:
- all - primary and nonprimary keys are returned
- pk - only primary keys are returned
- nonpk - only nonprimary keys are returned
filter_ignored (bool): whether ignored column should be filtered out from result.
Default True.
include_tech (bool): whether technical columns should be included in result. Columns
prefixed with DW_.
Returns:
(str): aliased name
"""
# Decide if value should be returned
if self._should_return(col_type, filter_ignored, include_tech):
return (f'{self._get_value_part()}'
f' AS {self.get_name()}')
return None
def get_def(self, col_type: str = 'all', filter_ignored: bool = True,
include_tech: bool = True,
supported_types: Union[List[str], None] = None) -> Optional[str]:
"""Return column sql definition.
E.g. `col_name VARCHAR NOT NULL`.
Parameters:
col_type (str): what type of columns should be returned. Default `all`.
Values:
- all - primary and nonprimary keys are returned
- pk - only primary keys are returned
- nonpk - only nonprimary keys are returned
filter_ignored (bool): whether ignored column should be filtered out from result.
Default True.
include_tech (bool): whether technical columns should be included in result. Columns
prefixed with DW_.
Returns:
(str): sql column definition
"""
if self._should_return(col_type, filter_ignored, include_tech):
return (f'{self.get_name()}'
f' {self._get_type(supported_types=supported_types)}'
f'{self._get_mandatory_def()}')
return None
def get_coalesce(self, col_type: str = 'all', filter_ignored: bool = True,
include_tech: bool = True) -> Optional[str]:
"""Return coalesce of two columns.
E.g. `COALESCE(t.col_name, s.col_name) AS col_name`.
Parameters:
col_type (str): what type of columns should be returned. Default `all`.
Values:
- all - primary and nonprimary keys are returned
- pk - only primary keys are returned
- nonpk - only nonprimary keys are returned
filter_ignored (bool): whether ignored column should be filtered out from result.
Default True.
include_tech (bool): whether technical columns should be included in result. Columns
prefixed with DW_.
Returns:
(str): sql column definition
"""
if self._should_return(col_type, filter_ignored, include_tech):
if self.pk:
return f'COALESCE(s.{self.get_name()}, t.{self.get_name()}) ' \
f'AS {self.get_name()}'
return f'CASE WHEN s.DW_LOAD_DATE IS NOT NULL THEN s.{self.get_name()} ' \
f'ELSE t.{self.get_name()} END AS {self.get_name()}'
return None
def get_join(self, col_type: str = 'all', filter_ignored: bool = True,
include_tech: bool = True) -> Optional[str]:
"""Get join condition of tables s and t.
E.g. `s.col_name = t.col_name`.
Parameters:
col_type (str): what type of columns should be returned. Default `all`.
Values:
- all - primary and nonprimary keys are returned
- pk - only primary keys are returned
- nonpk - only nonprimary keys are returned
filter_ignored (bool): whether ignored column should be filtered out from result.
Default True.
include_tech (bool): whether technical columns should be included in result. Columns
prefixed with DW_.
Returns:
(str): sql join
"""
if self._should_return(col_type, filter_ignored, include_tech):
return f's.{self.get_name()} = t.{self.get_name()}'
return None
def get_embulk_column_option(self, col_type: str = 'all', filter_ignored: bool = True,
include_tech: bool = True) -> Optional[str]:
"""Get column option for Embulk.
E.g. `col_name {value_type: string}`.
Parameters:
col_type (str): what type of columns should be returned. Default `all`.
Values:
- all - primary and nonprimary keys are returned
- pk - only primary keys are returned
- nonpk - only nonprimary keys are returned
filter_ignored (bool): whether ignored column should be filtered out from result.
Default True.
include_tech (bool): whether technical columns should be included in result. Columns
prefixed with DW_.
Returns:
(str): embulk type
"""
# Decide if value should be returned
if self._should_return(col_type, filter_ignored, include_tech):
return f'{self.get_name()}: {{value_type: {self._embulk_column_mapper()}}}'
return None
def _get_value_part(self, col_type: str = 'all', filter_ignored: bool = True,
include_tech: bool = True) -> Optional[str]:
"""Return value part. It is column name or constant. Used in aliasing: 4 AS COLUMN_NAME."""
value = self.default_value or self.name.upper()
# Decide if value should be returned
if self._should_return(col_type, filter_ignored, include_tech):
escape_symbol = '`' if self.escape else ''
return f'{escape_symbol}{value}{escape_symbol}'
return None
def _get_clean_data_type(self):
"""Return data type without bracket. E.g. string(10) will be just string."""
return self.data_type.split('(')[0]
def _get_type(self, col_type: str = 'all', filter_ignored: bool = True,
include_tech: bool = True, without_length: bool = True,
supported_types: Union[List[str], None] = None) -> Optional[str]:
"""Return column type if it is valid data type."""
supported_types = supported_types or []
if self._should_return(col_type, filter_ignored, include_tech):
clean_type = self._get_clean_data_type().upper()
if clean_type in supported_types:
if without_length:
return clean_type.upper()
else:
return self.data_type.upper()
raise TypeError(
f'Column type `{clean_type}` is not supported data type.')
return None
def _get_mandatory_def(self, col_type: str = 'all', filter_ignored: bool = True,
include_tech: bool = True) -> Optional[str]:
"""Return mandatory definition."""
if self._should_return(col_type, filter_ignored, include_tech):
return ' NOT NULL' if self.mandatory else ''
return None
def _embulk_column_mapper(self) -> str:
"""Map SQL data type to Embulk.
Returns:
(str): Embulk data type
"""
clean_type = self._get_clean_data_type().lower()
try:
# Decide if value should be returned
return EMBULK_TYPE_MAPPER[clean_type]
except KeyError:
raise TypeError(
f'Column type `{clean_type}` does not exist in column_type_mapper!')
# pylint: disable=R0911
def _should_return(self, col_type: str = 'all', filter_ignored: bool = True,
include_tech: bool = True) -> bool:
"""Decice if column should be returned.
Parameters:
col_type (str): what type of columns should be returned. Default `all`.
Values:
- all - primary and nonprimary keys are returned
- pk - only primary keys are returned
- nonpk - only nonprimary keys are returned
filter_ignored (bool): whether ignored column should be filtered out from result.
Default True.
include_tech (bool): whether technical columns should be included in result. Columns
prefixed with DW_.
Returns:
(bool): whether column should be returned
"""
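# Illustrative usage sketch (not part of the original module): shows the SQL
# fragments the accessors above are expected to produce. It assumes the body of
# _should_return (truncated above) returns True for ordinary, non-ignored,
# non-technical columns, and that the module is importable as luft.common.column.
from luft.common.column import Column

customer_id = Column(name='customer_id', data_type='integer', pk=True, mandatory=True)
email = Column(name='email', data_type='varchar', rename='email_address')
email.set_index(2)

print(customer_id.get_join())                             # s.customer_id = t.customer_id
print(customer_id.get_def(supported_types=['INTEGER']))   # customer_id INTEGER NOT NULL
print(email.get_aliased_name())                           # EMAIL AS email_address
print(email.get_index())                                  # $2 AS email_address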
import re, sys
from requests.structures import CaseInsensitiveDict
from .stashbox import StashBoxInterface
from . import gql_fragments
from . import log as stash_logger
from .types import PhashDistance
from .classes import GQLWrapper
class StashInterface(GQLWrapper):
port = ""
url = ""
headers = {
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json",
"Accept": "application/json",
"Connection": "keep-alive",
"DNT": "1"
}
cookies = {}
def __init__(self, conn:dict={}, fragments:dict={}):
global log
conn = CaseInsensitiveDict(conn)
log = conn.get("Logger", stash_logger)
# Session cookie for authentication
self.cookies = {}
if conn.get("SessionCookie"):
self.cookies['session'] = conn['SessionCookie']['Value']
scheme = conn.get('Scheme', 'http')
domain = conn.get('Domain', 'localhost')
self.port = conn.get('Port', 9999)
# Stash GraphQL endpoint
self.url = f'{scheme}://{domain}:{self.port}/graphql'
log.debug(f"Using stash GraphQl endpoint at {self.url}")
try:
# test query to ensure good connection
self.call_gql("query Configuration {configuration{general{stashes{path}}}}")
except Exception as e:
log.error(f"Could not connect to Stash at {self.url}")
log.error(e)
sys.exit()
self.sbox_endpoints = {}
self.fragments = fragments
self.fragments.update(gql_fragments.STASHAPP)
def __match_alias_item(self, search, items):
item_matches = {}
for item in items:
if re.match(rf'{search}$', item["name"], re.IGNORECASE):
log.debug(f'matched "{search}" to "{item["name"]}" ({item["id"]}) using primary name')
item_matches[item["id"]] = item
break
if not item["aliases"]:
continue
for alias in item["aliases"]:
if re.match(rf'{search}$', alias.strip(), re.IGNORECASE):
log.info(f'matched "{search}" to "{item["name"]}" ({item["id"]}) using alias')
item_matches[item["id"]] = item
return list(item_matches.values())
def __match_performer_alias(self, search, performers):
item_matches = {}
for item in performers:
if re.match(rf'{search}$', item["name"], re.IGNORECASE):
log.info(f'matched "{search}" to "{item["name"]}" ({item["id"]}) using primary name')
item_matches[item["id"]] = item
break
if not item["aliases"]:
continue
for alias in item["aliases"]:
parsed_alias = alias.strip()
if ":" in alias:
parsed_alias = alias.split(":")[-1].strip()
if re.match(rf'{search}$', parsed_alias, re.IGNORECASE):
log.info(f'matched "{search}" to "{item["name"]}" ({item["id"]}) using alias')
item_matches[item["id"]] = item
return list(item_matches.values())
def call_gql(self, query, variables={}):
return self._callGraphQL(query, variables)
def graphql_configuration(self):
query = """
query Configuration {
configuration {
...ConfigData
}
}
"""
result = self._callGraphQL(query)
return result['configuration']
def metadata_scan(self, paths:list=[]):
query = """
mutation metadataScan($input:ScanMetadataInput!) {
metadataScan(input: $input)
}
"""
variables = {
'input': {
'paths' : paths,
'useFileMetadata': False,
'stripFileExtension': False,
'scanGeneratePreviews': False,
'scanGenerateImagePreviews': False,
'scanGenerateSprites': False,
'scanGeneratePhashes': True
}
}
result = self._callGraphQL(query, variables)
return result
# Tag CRUD
def find_tag(self, tag_in, create=False):
# assume input is a tag ID if int
if isinstance(tag_in, int):
query = "query FindTag($id: ID!) { findTag(id: $id) { ...stashTag } }"
variables = {"id": tag_in }
result = self._callGraphQL(query, variables)
return result["findTag"]
name = None
if isinstance(tag_in, dict):
if tag_in.get("stored_id"):
return self.find_tag(int(tag_in["stored_id"]))
if tag_in.get("name"):
name = tag_in["name"]
if isinstance(tag_in, str):
name = tag_in
if not name:
log.warning(f'find_tag expects int, str, or dict not {type(tag_in)} "{tag_in}"')
return
for tag in self.find_tags(q=name):
if tag["name"].lower() == name.lower():
return tag
if any(name.lower() == a.lower() for a in tag["aliases"] ):
return tag
if create:
return self.create_tag({"name":name})
def create_tag(self, tag):
query = """
mutation tagCreate($input:TagCreateInput!) {
tagCreate(input: $input){
...stashTag
}
}
"""
variables = {'input': tag}
result = self._callGraphQL(query, variables)
return result["tagCreate"]
# TODO update_tag()
def destroy_tag(self, tag_id):
query = """
mutation tagDestroy($input: TagDestroyInput!) {
tagDestroy(input: $input)
}
"""
variables = {'input': {
'id': tag_id
}}
self._callGraphQL(query, variables)
# Tags CRUD
def find_tags(self, q="", f={}, fragment=None):
query = """
query FindTags($filter: FindFilterType, $tag_filter: TagFilterType) {
findTags(filter: $filter, tag_filter: $tag_filter) {
count
tags {
...stashTag
}
}
}
"""
if fragment:
query = re.sub(r'\.\.\.stashTag', fragment, query)
variables = {
"filter": {
"direction": "ASC",
"per_page": -1,
"q": q,
"sort": "name"
},
"tag_filter": f
}
result = self._callGraphQL(query, variables)
return result["findTags"]["tags"]
# Performer CRUD
def find_performer(self, performer_data, create_missing=False):
# assume input is a performer ID if int
if isinstance(performer_data, int):
query = "query FindPerformer($id: ID!) { findPerformer(id: $id) { ...stashPerformer } }"
variables = {"id": performer_data }
result = self._callGraphQL(query, variables)
return result["findPerformer"]
name = None
if isinstance(performer_data, dict):
if performer_data.get("stored_id"):
return self.find_performer(int(performer_data["stored_id"]))
if performer_data.get("name"):
name = performer_data["name"]
if isinstance(performer_data, str):
name = performer_data
if not name:
log.warning(f'find_performer expects int, str, or dict not {type(performer_data)} "{performer_data}"')
return
name = name.strip()
performer_data = {"name": name}
performers = self.find_performers(q=name)
for p in performers:
if not p.get("aliases"):
continue
alias_delim = re.search(r'(\/|\n|,|;)', p["aliases"])
if alias_delim:
p["aliases"] = p["aliases"].split(alias_delim.group(1))
elif len(p["aliases"]) > 0:
p["aliases"] = [p["aliases"]]
else:
log.warning(f'Could not determine delim for aliases "{p["aliases"]}"')
performer_matches = self.__match_performer_alias(name, performers)
# ambiguous: multiple matches for a single-word name, so return None
if len(performer_matches) > 1 and name.count(' ') == 0:
return None
elif len(performer_matches) > 0:
return performer_matches[0]
if create_missing:
log.info(f'Create missing performer: "{name}"')
return self.create_performer(performer_data)
def create_performer(self, performer_data):
query = """
mutation($input: PerformerCreateInput!) {
performerCreate(input: $input) {
id
}
}
"""
variables = {'input': performer_data}
result = self._callGraphQL(query, variables)
return result['performerCreate']['id']
def update_performer(self, performer_data):
query = """
mutation performerUpdate($input:PerformerUpdateInput!) {
performerUpdate(input: $input) {
id
}
}
"""
variables = {'input': performer_data}
result = self._callGraphQL(query, variables)
return result['performerUpdate']['id']
# TODO delete_performer()
# Performers CRUD
def find_performers(self, q="", f={}, fragment=None):
query = """
query FindPerformers($filter: FindFilterType, $performer_filter: PerformerFilterType) {
findPerformers(filter: $filter, performer_filter: $performer_filter) {
count
performers {
...stashPerformer
}
}
}
"""
if fragment:
query = re.sub(r'\.\.\.stashPerformer', fragment, query)
variables = {
"filter": {
"q": q,
"per_page": -1,
"sort": "name",
"direction": "ASC"
},
"performer_filter": f
}
result = self._callGraphQL(query, variables)
return result['findPerformers']['performers']
# Studio CRUD
def find_studio(self, studio, create_missing=False, domain_pattern=r'[^.]*\.[^.]{2,3}(?:\.[^.]{2,3})?$'):
if not studio.get("name"):
return None
name = studio["name"]
studio_matches = []
if re.match(domain_pattern, name):
url_search = self.find_studios(f={
"url":{ "value": name, "modifier": "INCLUDES" }
})
for s in url_search:
if re.search(rf'{name}',s["url"]):
log.info(f'matched "{name}" to {s["url"]} using URL')
studio_matches.append(s)
name_results = self.find_studios(q=name)
studio_matches.extend(self.__match_alias_item(name, name_results))
if len(studio_matches) > 1 and name.count(' ') == 0:
return None
elif len(studio_matches) > 0:
return studio_matches[0]
if create_missing:
log.info(f'Create missing studio: "{name}"')
return self.create_studio(studio)
def create_studio(self, studio):
query = """
mutation($name: String!) {
studioCreate(input: { name: $name }) {
id
}
}
"""
variables = {
'name': studio['name']
}
result = self._callGraphQL(query, variables)
studio['id'] = result['studioCreate']['id']
return self.update_studio(studio)
def update_studio(self, studio):
query = """
mutation StudioUpdate($input:StudioUpdateInput!) {
studioUpdate(input: $input) {
id
}
}
"""
variables = {'input': studio}
result = self._callGraphQL(query, variables)
return result["studioUpdate"]["id"]
# TODO delete_studio()
def get_studio(self, studio, get_root_parent=False):
query = """
query FindStudio($studio_id: ID!) {
findStudio(id: $studio_id) {
...stashStudio
}
}
"""
variables = {
"studio_id": studio.get("id")
}
result = self._callGraphQL(query, variables)
studio = result['findStudio']
if get_root_parent and studio and studio.get("parent_studio"):
return self.get_studio(studio["parent_studio"], get_root_parent=True)
return studio
def find_studios(self, q="", f={}, fragment=None):
query = """
query FindStudios($filter: FindFilterType, $studio_filter: StudioFilterType) {
findStudios(filter: $filter, studio_filter: $studio_filter) {
count
studios {
...stashStudio
}
}
}
"""
if fragment:
query = re.sub(r'\.\.\.stashStudio', fragment, query)
variables = {
"filter": {
"q": q,
"per_page": -1,
"sort": "name",
"direction": "ASC"
},
"studio_filter": f
}
result = self._callGraphQL(query, variables)
return result['findStudios']['studios']
# Movie CRUD
def find_movie(self, movie, create_missing=False):
name = movie["name"]
movies = self.find_movies(q=name)
movie_matches = self.__match_alias_item(name, movies)
if len(movie_matches) > 0:
if len(movie_matches) == 1:
return movie_matches[0]
else:
log.warning(f'Too many matches for movie "{name}"')
return None
if create_missing:
log.info(f'Creating missing Movie "{name}"')
return self.create_movie(movie)
def create_movie(self, movie):
name = movie["name"]
query = """
mutation($name: String!) {
movieCreate(input: { name: $name }) {
id
}
}
"""
variables = {'name': name}
result = self._callGraphQL(query, variables)
movie['id'] = result['movieCreate']['id']
return self.update_movie(movie)
def update_movie(self, movie):
query = """
mutation MovieUpdate($input:MovieUpdateInput!) {
movieUpdate(input: $input) {
id
}
}
"""
variables = {'input': movie}
result = self._callGraphQL(query, variables)
return result['movieUpdate']['id']
# TODO delete_movie()
# Movies CRUD
def find_movies(self, q="", f={}, fragment=None):
query = """
query FindMovies($filter: FindFilterType, $movie_filter: MovieFilterType) {
findMovies(filter: $filter, movie_filter: $movie_filter) {
count
movies {
...stashMovie
}
}
}
"""
if fragment:
query = re.sub(r'\.\.\.stashMovie', fragment, query)
variables = {
"filter": {
"per_page": -1,
"q": q
},
"movie_filter": f
}
result = self._callGraphQL(query, variables)
return result['findMovies']['movies']
#Gallery CRUD
def create_gallery(self, path:str=""):
if path:
return self.metadata_scan([path])
# TODO find_gallery()
def update_gallery(self, gallery_data):
query = """
mutation GalleryUpdate($input:GalleryUpdateInput!) {
galleryUpdate(input: $input) {
id
}
}
"""
variables = {'input': gallery_data}
result = self._callGraphQL(query, variables)
return result["galleryUpdate"]["id"]
# TODO delete_gallery()
# BULK Gallery
def find_galleries(self, q="", f={}, fragment=None):
query = """
query FindGalleries($filter: FindFilterType, $gallery_filter: GalleryFilterType) {
findGalleries(gallery_filter: $gallery_filter, filter: $filter) {
count
galleries {
...stashGallery
}
}
}
"""
if fragment:
query = re.sub(r'\.\.\.stashGallery', fragment, query)
variables = {
"filter": {
"q": q,
"per_page": -1,
"sort": "path",
"direction": "ASC"
},
"gallery_filter": f
}
result = self._callGraphQL(query, variables)
return result['findGalleries']['galleries']
# Scene CRUD
def create_scene(self, path:str=""):
if path:
return self.metadata_scan([path])
def find_scene(self, id:int, fragment=None):
query = """
query FindScene($scene_id: ID) {
findScene(id: $scene_id) {
...stashScene
}
}
"""
if fragment:
query = re.sub(r'\.\.\.stashScene', fragment, query)
variables = {"scene_id": id}
result = self._callGraphQL(query, variables)
return result['findScene']
def update_scene(self, update_input):
query = """
mutation sceneUpdate($input:SceneUpdateInput!) {
sceneUpdate(input: $input) {
id
}
}
"""
variables = {'input': update_input}
result = self._callGraphQL(query, variables)
return result["sceneUpdate"]["id"]
def destroy_scene(self, scene_id, delete_file=False):
query = """
mutation SceneDestroy($input:SceneDestroyInput!) {
sceneDestroy(input: $input)
}
"""
variables = {
"input": {
"delete_file": delete_file,
"delete_generated": True,
"id": scene_id
}
}
result = self._callGraphQL(query, variables)
return result['sceneDestroy']
# BULK Scenes
def create_scenes(self, paths:list=[]):
return self.metadata_scan(paths)
def find_scenes(self, f={}, filter={"per_page": -1}, fragment=None):
query = """
query FindScenes($filter: FindFilterType, $scene_filter: SceneFilterType, $scene_ids: [Int!]) {
findScenes(filter: $filter, scene_filter: $scene_filter, scene_ids: $scene_ids) {
count
scenes {
...stashScene
}
}
}
"""
if fragment:
query = re.sub(r'\.\.\.stashScene', fragment, query)
variables = {
"filter": filter,
"scene_filter": f
}
result = self._callGraphQL(query, variables)
return result['findScenes']['scenes']
def update_scenes(self, updates_input):
query = """
mutation BulkSceneUpdate($input:BulkSceneUpdateInput!) {
bulkSceneUpdate(input: $input) {
id
}
}
"""
variables = {'input': updates_input}
result = self._callGraphQL(query, variables)
return result["bulkSceneUpdate"]
def destroy_scenes(self, scene_ids, delete_file=False):
query = """
mutation ScenesDestroy($input:ScenesDestroyInput!) {
scenesDestroy(input: $input)
}
"""
variables = {
"input": {
"delete_file": delete_file,
"delete_generated": True,
"ids": scene_ids
}
}
result = self._callGraphQL(query, variables)
return result['scenesDestroy']
def merge_scene_markers(self, target_scene_id: int, source_scene_ids: list):
def get_scene_markers(scene_id) -> list:
query = """
query GetSceneMarkers($scene_id: ID) {
findScene(id: $scene_id) {
scene_markers {
title
seconds
primary_tag { id }
tags { id }
}
}
}
"""
variables = { "scene_id": scene_id }
return self._callGraphQL(query, variables)["findScene"]["scene_markers"]
def create_scene_marker(marker_create_input:dict):
query = """
mutation SceneMarkerCreate($marker_input: SceneMarkerCreateInput!) {
sceneMarkerCreate(input: $marker_input) {
id
}
}
"""
variables = { "marker_input": marker_create_input }
return self._callGraphQL(query, variables)["sceneMarkerCreate"]
existing_marker_timestamps = [marker["seconds"] for marker in get_scene_markers(target_scene_id)]
markers_to_merge = []
for source_scene_id in source_scene_ids:
markers_to_merge.extend(get_scene_markers(source_scene_id))
created_markers = []
for marker in markers_to_merge:
if marker["seconds"] in existing_marker_timestamps:
# skip existing marker
# TODO merge missing data between markers
continue
marker_id = create_scene_marker({
"title": marker["title"],
"seconds": marker["seconds"],
"scene_id": target_scene_id,
"primary_tag_id": marker["primary_tag"]["id"],
"tag_ids": [t["id"] for t in marker["tags"]],
})
created_markers.append(marker_id)
return created_markers
def merge_scenes(self, target_scene_id:int, source_scene_ids:list, exclusions={}):
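# exclusions is an optional dict of id lists keyed by "gallery_ids", "performer_ids"
# and "tag_ids"; any ids listed there are skipped when copying data from the source scenes.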
min_scene_fragment="""
title
details
url
date
rating
studio { id }
galleries { id }
performers { id }
tags { id }
movies { movie { id } scene_index }
"""
merged_markers = self.merge_scene_markers(target_scene_id, source_scene_ids)
log.info(f"Merged {len(merged_markers)} markers from {source_scene_ids} to {target_scene_id}")
target_meta = self.find_scene(target_scene_id, fragment=min_scene_fragment)
for source_id in source_scene_ids:
source_data = self.find_scene(source_id, fragment=min_scene_fragment)
scene_update = {
"ids": [target_scene_id],
"gallery_ids": {
"ids": [ g["id"] for g in source_data["galleries"] if g["id"] not in exclusions.get("gallery_ids",[]) ],
"mode": "ADD"
},
"performer_ids": {
"ids": [ p["id"] for p in source_data["performers"] if p["id"] not in exclusions.get("performer_ids",[]) ],
"mode": "ADD"
},
"tag_ids": {
"ids": [ t["id"] for t in source_data["tags"] if t["id"] not in exclusions.get("tag_ids",[]) ],
"mode": "ADD"
},
"movie_ids": {
"ids": [ sm["movie"]["id"] for sm in source_data["movies"] ],
"mode": "ADD"
},
}
if source_data.get("studio"):
scene_update["studio_id"] = source_data["studio"]["id"]
if source_data.get("date") and target_meta.get("date", "9999-99-99") > source_data["date"]:
scene_update["date"] = source_data["date"]
if source_data.get("url"):
scene_update["url"] = source_data["url"]
updated_scene_ids = self.update_scenes(scene_update)
return updated_scene_ids
# Scraper Operations
def reload_scrapers(self):
query = """
mutation ReloadScrapers {
reloadScrapers
}
"""
result = self._callGraphQL(query)
return result["reloadScrapers"]
def list_performer_scrapers(self, type):
query = """
query ListPerformerScrapers {
listPerformerScrapers {
id
name
performer {
supported_scrapes
}
}
}
"""
ret = []
result = self._callGraphQL(query)
for r in result["listPerformerScrapers"]:
if type in r["performer"]["supported_scrapes"]:
ret.append(r["id"])
return ret
def list_scene_scrapers(self, type):
query = """
query listSceneScrapers {
listSceneScrapers {
id
name
scene{
supported_scrapes
}
}
}
"""
ret = []
result = self._callGraphQL(query)
for r in result["listSceneScrapers"]:
if type in r["scene"]["supported_scrapes"]:
ret.append(r["id"])
return ret
def list_gallery_scrapers(self, type):
query = """
query ListGalleryScrapers {
listGalleryScrapers {
id
name
gallery {
supported_scrapes
}
}
}
"""
ret = []
result = self._callGraphQL(query)
for r in result["listGalleryScrapers"]:
if type in r["gallery"]["supported_scrapes"]:
ret.append(r["id"])
return ret
def list_movie_scrapers(self, type):
query = """
query listMovieScrapers {
listMovieScrapers {
id
name
movie {
supported_scrapes
}
}
}
"""
ret = []
result = self._callGraphQL(query)
for r in result["listMovieScrapers"]:
if type in r["movie"]["supported_scrapes"]:
ret.append(r["id"])
return ret
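# Illustrative usage sketch (not part of the original module): connects to a local
# Stash instance and exercises a few of the helpers defined above. The import path
# and connection values are assumptions; adjust them for a real deployment.
from stashapi.stashapp import StashInterface  # import path is an assumption

stash = StashInterface({"Scheme": "http", "Domain": "localhost", "Port": 9999})
tag = stash.find_tag("my tag", create=True)             # create the tag if it does not exist
performers = stash.find_performers(q="Jane")            # free-text performer search
scenes = stash.find_scenes(filter={"per_page": 25})     # first 25 scenes with the default sort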
#d64r2 = self._res_block(Concatenate()([d64, d64r, interpolated64]), (3, 3), batch_norm=True, activation='lrelu', name=name+'_d64_r2')
#d64r3 = self._res_block(d64r2, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d64_r3')
#img64 = Conv2D(3, (3, 3), padding='same', strides=(1, 1), activation='tanh', name=name+'_img64', kernel_initializer=TruncatedNormal(stddev=0.02), bias_initializer=Zeros())(d64r3)
# size128
#deconv3
d128 = Conv2DTranspose(base_filters, (3, 3), padding='same', strides=(2, 2), name=name+'_d128', use_bias=False, kernel_initializer=RandomNormal(stddev=0.02))(Concatenate()([d64r,f64,c64r,mc_in_img64]))
d128 = BatchNormalization(epsilon=1e-5, name=name+'_d128_bn')(d128)
d128 = self._add_activation(d128, 'relu')
#d128r = self._res_block(Concatenate()([c128r, mc_in_img128, f128]), (5, 5), batch_norm=True, activation='lrelu', name=name+'_d128_r')
d128r = self._res_block(d128, (5, 5), batch_norm=True, activation='lrelu', name=name+'_d128_r')
#interpolated128 = Lambda(lambda x: tf.image.resize_bilinear(x, [128, 128]))(img64) # Use Lambda layer to wrap tensorflow func, resize_bilinear
#print('img32',img32)
#print('d32',d32)
#print('interpolated64',interpolated64)
#print('d128',d128)
in_leye = Input(shape=(multipie_gen.EYE_H, multipie_gen.EYE_W, 3))
in_reye = Input(shape=(multipie_gen.EYE_H, multipie_gen.EYE_W, 3))
in_nose = Input(shape=(multipie_gen.NOSE_H, multipie_gen.NOSE_W, 3))
in_mouth = Input(shape=(multipie_gen.MOUTH_H, multipie_gen.MOUTH_W, 3))
front_leye_img, front_leye_feat, front_reye_img, front_reye_feat, front_nose_img, front_nose_feat, front_mouth_img, front_mouth_feat\
= self.parts_rotator()([in_leye, in_reye, in_nose, in_mouth])
combined_parts_img = combine_parts([128, 128], front_leye_img, front_reye_img, front_nose_img, front_mouth_img)
combined_parts_feat = combine_parts([128, 128], front_leye_feat, front_reye_feat, front_nose_feat, front_mouth_feat)
#conv5
d128r1c = Conv2D(base_filters, (5, 5), padding='same', strides=(1, 1), name=name+'_d128_r1c', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(Concatenate()([d128r, f128, c128r, combined_parts_feat, combined_parts_img, mc_in_img128]))
#d128r2 = self._res_block(Concatenate()([d128, d128r, interpolated128, combined_parts_feat, combined_parts_img]), (3, 3), batch_norm=True, activation='lrelu', name=name+'_d128_r2')
d128r1c = BatchNormalization(epsilon=1e-5, name=name+'_d128r1c_bn')(d128r1c)  # batch-normalize the conv5 output
d128r1c = self._add_activation(d128r1c, 'relu')
d128r2c = self._res_block(d128r1c, (5, 5), batch_norm=True, activation='lrelu', name=name+'_d128_r2c')
#conv6
d128r2c2 = Conv2D(base_filters//2, (3, 3), padding='same', strides=(1, 1), name=name+'_d128_r2c2', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(d128r2c)
d128r2c2 = BatchNormalization(epsilon=1e-5, name=name+'_d128r2c2_bn')(d128r2c2)  # batch-normalize the conv6 output
d128r2c2 = self._add_activation(d128r2c2, 'relu')
d128r3c2= self._res_block(d128r2c2, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d128_r3c2')
#conv7
img128= Conv2D(3, (3, 3), padding='same', strides=(1, 1), name=name+'_img128', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(d128r3c2)
#d128r2c = Conv2D(base_filters, (5, 5), padding='same', strides=(1, 1), name=name+'_d128_r2c', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(d128r2)
#d128r2c = BatchNormalization(epsilon=1e-5, name=name+'_d128_r2c_bn')(d128r2c)
#d128r2c = self._add_activation(d128r2c, 'lrelu')
#d128r3c = self._res_block(d128r2c, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d128_r3c')
#d128r3c2 = Conv2D(base_filters//2, (3, 3), padding='same', strides=(1, 1), name=name+'_d128_r3c2', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(d128r3c)
#d128r3c2 = BatchNormalization(epsilon=1e-5, name=name+'_d128_r3c2_bn')(d128r3c2)
#d128r3c2 = self._add_activation(d128r3c2, 'lrelu')
#img128 = Conv2D(3, (3, 3), padding='same', strides=(1, 1), activation='tanh', name=name+'_img128', kernel_initializer=TruncatedNormal(stddev=0.02), bias_initializer=Zeros())(d128r3c2)
#print('d128r2c', d128r2c)
#print('d128r3c', d128r3c)
#print('d128r3c2',d128r3c2)
#print('Input',Input)
#print('test_out',fc2_with_noise)
#print('f8',f8)
#print('f32',f32)
#print('img128',img128)
#ret_model = CloudableModel(inputs=[in_img, in_leye, in_reye, in_nose, in_mouth, in_noise], outputs=[img128, img64, img32, fc2, front_leye_img, front_reye_img, front_nose_img, front_mouth_img], name=full_name)
ret_model = CloudableModel(inputs=[in_img, in_leye, in_reye, in_nose, in_mouth, in_noise], outputs=[img128, img64, img32, fc2, front_leye_img, front_reye_img, front_nose_img, front_mouth_img], name=full_name)
#ret_model.summary()
#
#img32=in_img
#img64=c128
#img128=in_img
#
return ret_model
def build_classifier(self, name='classifier'):
"""
build classifier model.
"""
full_name = name
# shorten name
name = name[0]
in_feat = Input(shape=(256,))
X = Dropout(0.7)(in_feat)
clas = Dense(multipie_gen.NUM_SUBJECTS, activation='softmax', kernel_initializer=RandomNormal(stddev=0.02), kernel_regularizer=regularizers.l2(0.005),
use_bias=False, name=name+'_dense')(X)
ret_classifier = CloudableModel(inputs=in_feat, outputs=clas, name=full_name)
#ret_classifier.summary()
return ret_classifier
def build_train_generator_model(self):
"""
build train model for generator.
this model wraps generator and classifier, adds interface for loss functions.
"""
in_img = Input(shape=(multipie_gen.IMG_H, multipie_gen.IMG_W, 3))
in_leye = Input(shape=(multipie_gen.EYE_H, multipie_gen.EYE_W, 3))
in_reye = Input(shape=(multipie_gen.EYE_H, multipie_gen.EYE_W, 3))
in_nose = Input(shape=(multipie_gen.NOSE_H, multipie_gen.NOSE_W, 3))
in_mouth = Input(shape=(multipie_gen.MOUTH_H, multipie_gen.MOUTH_W, 3))
in_noise = Input(shape=(100,))
img128, img64, img32, fc2, front_leye_img, front_reye_img, front_nose_img, front_mouth_img\
= self.generator()([in_img, in_leye, in_reye, in_nose, in_mouth, in_noise])
subject_id = self.classifier()(fc2)
#img128_gray = Lambda(lambda x: tf.image.rgb_to_grayscale(x))(img128)
#lcnn_vec, lcnn_map = self.lcnn.extractor()(img128_gray)
# add name label to connect with each loss functions
img128_px = Lambda(lambda x:x, name = "00img128px")(img128)
img128_sym = Lambda(lambda x:x, name = "01img128sym")(img128)
img128_ip = Lambda(lambda x:x, name = "02ip")(img128)
img128_adv = Lambda(lambda x:x, name = "03adv")(img128)
img128_tv = Lambda(lambda x:x, name = "04tv")(img128)
img64_px = Lambda(lambda x:x, name = "05img64px")(img64)
img64_sym = Lambda(lambda x:x, name = "06img64sym")(img64)
img32_px = Lambda(lambda x:x, name = "07img32px")(img32)
img32_sym = Lambda(lambda x:x, name = "08img32sym")(img32)
subject_id = Lambda(lambda x:x, name = "09classify")(subject_id)
leye = Lambda(lambda x:x, name = "10leye")(front_leye_img)
reye = Lambda(lambda x:x, name = "11reye")(front_reye_img)
nose = Lambda(lambda x:x, name = "12nose")(front_nose_img)
mouth = Lambda(lambda x:x, name = "13mouth")(front_mouth_img)
ret_model = CloudableModel(inputs=[in_img, in_leye, in_reye, in_nose, in_mouth, in_noise],
outputs=[img128_px, img128_sym, img128_ip, img128_adv, img128_tv, img64_px, img64_sym, img32_px, img32_sym, subject_id, leye, reye, nose, mouth],
name='train_genarator_model')
#ret_model.summary()
return ret_model
def build_parts_rotator(self, base_filters=64):
"""
build models for all each part rotator.
"""
leye_rotator = self.build_part_rotator('leye', base_filters=base_filters, in_h=multipie_gen.EYE_H , in_w=multipie_gen.EYE_W)
reye_rotator = self.build_part_rotator('reye', base_filters=base_filters, in_h=multipie_gen.EYE_H , in_w=multipie_gen.EYE_W)
nose_rotator = self.build_part_rotator('nose', base_filters=base_filters, in_h=multipie_gen.NOSE_H , in_w=multipie_gen.NOSE_W)
mouth_rotator = self.build_part_rotator('mouth', base_filters=base_filters, in_h=multipie_gen.MOUTH_H , in_w=multipie_gen.MOUTH_W)
in_leye = Input(shape=(multipie_gen.EYE_H, multipie_gen.EYE_W, 3))
in_reye = Input(shape=(multipie_gen.EYE_H, multipie_gen.EYE_W, 3))
in_nose = Input(shape=(multipie_gen.NOSE_H, multipie_gen.NOSE_W, 3))
in_mouth = Input(shape=(multipie_gen.MOUTH_H, multipie_gen.MOUTH_W, 3))
out_leye_img, out_leye_feat = leye_rotator(in_leye)
out_reye_img, out_reye_feat = reye_rotator(in_reye)
out_nose_img, out_nose_feat = nose_rotator(in_nose)
out_mouth_img, out_mouth_feat = mouth_rotator(in_mouth)
ret_model = CloudableModel(inputs=[in_leye, in_reye, in_nose, in_mouth],
outputs=[out_leye_img, out_leye_feat, out_reye_img, out_reye_feat, out_nose_img, out_nose_feat, out_mouth_img, out_mouth_feat], name='parts_rotator')
#ret_model.summary()
return ret_model
def build_part_rotator(self, name, in_h, in_w, base_filters=64):
"""
build model for one part rotator.
"""
in_img = Input(shape=(in_h, in_w, 3))
c0 = Conv2D(base_filters, (3, 3), padding='same', strides=(1, 1), name=name+'_c0', kernel_initializer=TruncatedNormal(stddev=0.02), bias_initializer=Zeros())(in_img)
c0r = self._res_block(c0, (3, 3), batch_norm=True, activation='lrelu', name=name+'_c0_r')
c1 = Conv2D(base_filters*2, (3, 3), padding='same', strides=(2, 2), name=name+'_c1', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(c0r)
c1 = BatchNormalization(name=name+'_c1_bn')(c1)
c1 = self._add_activation(c1, 'lrelu')
c1r = self._res_block(c1, (3, 3), batch_norm=True, activation='lrelu', name=name+'_c1_r')
c2 = Conv2D(base_filters*4, (3, 3), padding='same', strides=(2, 2), name=name+'_c2', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(c1r)
c2 = BatchNormalization(name=name+'_c2_bn')(c2)
c2 = self._add_activation(c2, 'lrelu')
c2r = self._res_block(c2, (3, 3), batch_norm=True, activation='lrelu', name=name+'_c2_r')
c3 = Conv2D(base_filters*8, (3, 3), padding='same', strides=(2, 2), name=name+'_c3', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(c2r)
c3 = BatchNormalization(name=name+'_c3_bn')(c3)
c3 = self._add_activation(c3, 'lrelu')
c3r = self._res_block(c3, (3, 3), batch_norm=True, activation='lrelu', name=name+'_c3_r')
c3r2 = self._res_block(c3r, (3, 3), batch_norm=True, activation='lrelu', name=name+'_c3_r2')
d1 = Conv2DTranspose(base_filters*4, (3, 3), padding='same', strides=(2, 2), name=name+'_d1', use_bias=True, kernel_initializer=RandomNormal(stddev=0.02))(c3r2)
d1 = BatchNormalization(name=name+'_d1_bn')(d1)
d1 = self._add_activation(d1, 'lrelu')
after_select_d1 = Conv2D(base_filters*4, (3, 3), padding='same', strides=(1, 1), name=name+'_asd1', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(Concatenate()([d1, c2r]))
after_select_d1 = BatchNormalization(name=name+'_asd1_bn')(after_select_d1)
after_select_d1 = self._add_activation(after_select_d1, 'lrelu')
d1r = self._res_block(after_select_d1, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d1_r')
d2 = Conv2DTranspose(base_filters*2, (3, 3), padding='same', strides=(2, 2), name=name+'_d2', use_bias=False, kernel_initializer=RandomNormal(stddev=0.02))(d1r)
d2 = BatchNormalization(name=name+'_d2_bn')(d2)
d2 = self._add_activation(d2, 'lrelu')
after_select_d2 = Conv2D(base_filters*2, (3, 3), padding='same', strides=(1, 1), name=name+'_asd2', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(Concatenate()([d2, c1r]))
after_select_d2 = BatchNormalization(name=name+'_asd2_bn')(after_select_d2)
after_select_d2 = self._add_activation(after_select_d2, 'lrelu')
d2r = self._res_block(after_select_d2, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d2_r')
d3 = Conv2DTranspose(base_filters, (3, 3), padding='same', strides=(2, 2), name=name+'_d3', use_bias=False, kernel_initializer=RandomNormal(stddev=0.02))(d2r)
d3 = BatchNormalization(name=name+'_d3_bn')(d3)
d3 = self._add_activation(d3, 'lrelu')
after_select_d3 = Conv2D(base_filters, (3, 3), padding='same', strides=(1, 1), name=name+'_asd3', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(Concatenate()([d3, c0r]))
after_select_d3 = BatchNormalization(name=name+'_asd3_bn')(after_select_d3)
after_select_d3 = self._add_activation(after_select_d3, 'lrelu')
part_feat = self._res_block(after_select_d3, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d3_r')
part_img = Conv2D(3, (3, 3), padding='same', strides=(1, 1), activation='tanh', name=name+'_c4', kernel_initializer=TruncatedNormal(stddev=0.02), bias_initializer=Zeros())(part_feat)
ret_model = CloudableModel(inputs=[in_img], outputs=[part_img, part_feat], name= name + '_rotator')
#ret_model.summary()
return ret_model
def build_discriminator(self, name='discriminator', base_filters=64):
"""
build model for discriminator.
"""
full_name = name
# shorten name
name = name[0]
in_img = Input(shape=(multipie_gen.IMG_H, multipie_gen.IMG_W, 3))
c64 = Conv2D(base_filters, (3, 3), padding='same', strides=(2, 2), name=name+'_c64', kernel_initializer=TruncatedNormal(stddev=0.02), bias_initializer=Zeros())(in_img)
c64 = self._add_activation(c64, 'lrelu')
c32 = Conv2D(base_filters*2, (3, 3), padding='same', strides=(2, 2), name=name+'_c32', use_bias=True, kernel_initializer=TruncatedNormal(stddev=0.02))(c64)
#c32 = BatchNormalization(center=True, scale=True, name=name+'_c32_bn')(c32)
c32 = self._add_activation(c32, 'lrelu')
c16 = Conv2D(base_filters*4, (3, 3), padding='same', strides=(2, 2), name=name+'_c16', use_bias=True, kernel_initializer=TruncatedNormal(stddev=0.02))(c32)
#c16 = BatchNormalization(center=True, scale=True, name=name+'_c16_bn')(c16)
c16 = self._add_activation(c16, 'lrelu')
c8 = Conv2D(base_filters*8, (3, 3), padding='same', strides=(2, 2), name=name+'_c8', use_bias=True, kernel_initializer=TruncatedNormal(stddev=0.02))(c16)
#c8 = BatchNormalization(center=True, scale=True, name=name+'_c8_bn')(c8)
c8 = self._add_activation(c8, 'lrelu')
c8r = self._res_block(c8, (3, 3), batch_norm=False, activation='lrelu', name=name+'_c8_r')
c4 = Conv2D(base_filters*8, (3, 3), padding='same', strides=(2, 2), name=name+'_c4', use_bias=True, kernel_initializer=TruncatedNormal(stddev=0.02))(c8r)
#c4 = BatchNormalization(center=True, scale=True, name=name+'_c4_bn')(c4)
c4 = self._add_activation(c4, 'lrelu')
c4r = self._res_block(c4, (3, 3), batch_norm=False, activation='lrelu', name=name+'_c4_r')
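# 1x1 sigmoid head: each spatial position of the 4x4 output map is scored real/fake
# independently (a PatchGAN-style discriminator output rather than a single scalar).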
feat = Conv2D(1, (1, 1), padding='same', strides=(1, 1), name=name+'_c4_r_c', activation='sigmoid', kernel_initializer=TruncatedNormal(stddev=0.02), bias_initializer=Zeros())(c4r)
ret_model = CloudableModel(inputs=in_img, outputs=feat, name=full_name)
#ret_model.summary()
return ret_model
class SaveWeightsCallback(Callback):
def __init__(self, target_models, out_dir, period):
"""
Args:
target_models (list): list of save target models
out_dir (str): output dir
period (int): save interval epochs
"""
self.target_models = target_models
self.out_dir = out_dir
self.period = period
def on_epoch_end(self, epoch, logs):
if (epoch + 1) % self.period == 0:
for target_model in self.target_models:
out_model_dir = '{}{}/'.format(self.out_dir, target_model.name)
tf.gfile.MakeDirs(out_model_dir)
target_model.save_weights(out_model_dir + 'epoch{epoch:04d}_loss{loss:.3f}.hdf5'.format(epoch=epoch + 1, loss=logs['loss']), overwrite=True)
def train_gan(self, gen_datagen_creator, gen_train_batch_size, gen_valid_batch_size,
disc_datagen_creator, disc_batch_size, disc_gt_shape,
optimizer,
gen_steps_per_epoch=300, disc_steps_per_epoch=10, epochs=100,
out_dir='../out/', out_period=5, is_output_img=False,
lr=0.001, decay=0, lambda_128=1, lambda_64=1, lambda_32=1.5,
lambda_sym=3e-1, lambda_ip=1e1, lambda_adv=2e1, lambda_tv=1e-3,
lambda_class=4e-1, lambda_parts=3):
"""
train both generator and discriminator as GAN.
Args:
gen_datagen_creator (func):
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:<EMAIL>)
import pyserum.enums
import typing
from decimal import Decimal
from pyserum._layouts.instructions import INSTRUCTIONS_LAYOUT as PYSERUM_INSTRUCTIONS_LAYOUT, InstructionType as PySerumInstructionType
from pyserum.enums import OrderType as PySerumOrderType, Side as PySerumSide
from pyserum.instructions import settle_funds as pyserum_settle_funds, SettleFundsParams as PySerumSettleFundsParams
from pyserum.market import Market as PySerumMarket
from pyserum.open_orders_account import make_create_account_instruction as pyserum_make_create_account_instruction
from solana.keypair import Keypair
from solana.publickey import PublicKey
from solana.system_program import CreateAccountParams, create_account
from solana.sysvar import SYSVAR_RENT_PUBKEY
from solana.transaction import AccountMeta, TransactionInstruction
from spl.token.constants import ACCOUNT_LEN, TOKEN_PROGRAM_ID
from spl.token.instructions import CloseAccountParams, InitializeAccountParams, TransferParams, close_account, create_associated_token_account, initialize_account, transfer
from .account import Account
from .combinableinstructions import CombinableInstructions
from .constants import SYSTEM_PROGRAM_ADDRESS
from .context import Context
from .group import Group
from .layouts import layouts
from .orders import Order, OrderType, Side
from .perpmarket import PerpMarket
from .perpmarketdetails import PerpMarketDetails
from .rootbank import NodeBank, RootBank
from .token import Token
from .tokenaccount import TokenAccount
from .tokeninfo import TokenInfo
from .wallet import Wallet
# 🥭 Instructions
#
# This file contains the low-level instruction functions that build the raw instructions
# to send to Solana.
#
# One important distinction between these functions and the more common `create instruction functions` in
# Solana is that these functions *all return a CombinableInstructions bundle of instructions and signers*.
#
# It's likely that some operations will require actions split across multiple instructions because of
# instruction size limitations, so all our functions are prepared for this without having to change
# the function signature in future.
#
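# For example (illustrative sketch, not part of the original file; `context`, `wallet`,
# `token` and `source` are assumed to already exist), the results of the builders below
# can simply be added together into a single bundle of signers and instructions:
#
#   create = build_create_spl_account_instructions(context, wallet, token)
#   fund = build_transfer_spl_tokens_instructions(context, wallet, token, source,
#                                                 create.signers[0].public_key, Decimal(1))
#   combined = create + fund
#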
# # 🥭 build_create_solana_account_instructions function
#
# Creates a plain Solana account owned by the given program, funded for rent exemption. Additional
# lamports can be added too, but that's usually not necessary.
#
def build_create_solana_account_instructions(context: Context, wallet: Wallet, mango_program_address: PublicKey, size: int, lamports: int = 0) -> CombinableInstructions:
minimum_balance = context.client.get_minimum_balance_for_rent_exemption(size)
account = Keypair()
create_instruction = create_account(
CreateAccountParams(wallet.address, account.public_key, lamports + minimum_balance, size, mango_program_address))
return CombinableInstructions(signers=[account], instructions=[create_instruction])
# # 🥭 build_create_spl_account_instructions function
#
# Creates and initializes an SPL token account. Can add additional lamports too but that's usually not
# necessary.
#
# Prefer `build_create_associated_spl_account_instructions()` over this function. This function should be
# reserved for cases where you specifically don't want the associated token account.
#
def build_create_spl_account_instructions(context: Context, wallet: Wallet, token: Token, lamports: int = 0) -> CombinableInstructions:
create_account_instructions = build_create_solana_account_instructions(
context, wallet, TOKEN_PROGRAM_ID, ACCOUNT_LEN, lamports)
initialize_instruction = initialize_account(InitializeAccountParams(
TOKEN_PROGRAM_ID, create_account_instructions.signers[0].public_key, token.mint, wallet.address))
return create_account_instructions + CombinableInstructions(signers=[], instructions=[initialize_instruction])
# # 🥭 build_create_associated_spl_account_instructions function
#
# Creates and initializes an 'associated' SPL token account. This is the usual way of creating a
# token account now. `build_create_spl_account_instructions()` should be reserved for cases where
# you specifically don't want the associated token account.
#
def build_create_associated_spl_account_instructions(context: Context, wallet: Wallet, token: Token) -> CombinableInstructions:
create_account_instructions = create_associated_token_account(wallet.address, wallet.address, token.mint)
return CombinableInstructions(signers=[], instructions=[create_account_instructions])
# # 🥭 build_transfer_spl_tokens_instructions function
#
# Creates an instruction to transfer SPL tokens from one account to another.
#
def build_transfer_spl_tokens_instructions(context: Context, wallet: Wallet, token: Token, source: PublicKey, destination: PublicKey, quantity: Decimal) -> CombinableInstructions:
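# Scale the user-facing quantity into raw token units using the mint's decimals;
# SPL token transfer instructions take integer raw amounts.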
amount = int(quantity * (10 ** token.decimals))
instructions = [transfer(TransferParams(TOKEN_PROGRAM_ID, source, destination, wallet.address, amount, []))]
return CombinableInstructions(signers=[], instructions=instructions)
# # 🥭 build_close_spl_account_instructions function
#
# Creates an instruction to close an SPL token account and transfer any remaining lamports to the wallet.
#
def build_close_spl_account_instructions(context: Context, wallet: Wallet, address: PublicKey) -> CombinableInstructions:
return CombinableInstructions(signers=[], instructions=[close_account(CloseAccountParams(TOKEN_PROGRAM_ID, address, wallet.address, wallet.address))])
# # 🥭 build_create_serum_open_orders_instructions function
#
# Creates a Serum openorders-creating instruction.
#
def build_create_serum_open_orders_instructions(context: Context, wallet: Wallet, market: PySerumMarket) -> CombinableInstructions:
new_open_orders_account = Keypair()
minimum_balance = context.client.get_minimum_balance_for_rent_exemption(layouts.OPEN_ORDERS.sizeof())
instruction = pyserum_make_create_account_instruction(
owner_address=wallet.address,
new_account_address=new_open_orders_account.public_key,
lamports=minimum_balance,
program_id=market.state.program_id(),
)
return CombinableInstructions(signers=[new_open_orders_account], instructions=[instruction])
# # 🥭 build_serum_place_order_instructions function
#
# Creates a Serum order-placing instruction using V3 of the NewOrder instruction.
#
def build_serum_place_order_instructions(context: Context, wallet: Wallet, market: PySerumMarket, source: PublicKey, open_orders_address: PublicKey, order_type: OrderType, side: Side, price: Decimal, quantity: Decimal, client_id: int, fee_discount_address: typing.Optional[PublicKey]) -> CombinableInstructions:
serum_order_type: PySerumOrderType = PySerumOrderType.POST_ONLY if order_type == OrderType.POST_ONLY else PySerumOrderType.IOC if order_type == OrderType.IOC else PySerumOrderType.LIMIT
serum_side: PySerumSide = PySerumSide.SELL if side == Side.SELL else PySerumSide.BUY
instruction = market.make_place_order_instruction(
source,
wallet.to_deprecated_solana_account(),
serum_order_type,
serum_side,
float(price),
float(quantity),
client_id,
open_orders_address,
fee_discount_address
)
return CombinableInstructions(signers=[], instructions=[instruction])
# # 🥭 build_serum_consume_events_instructions function
#
# Creates an event-consuming 'crank' instruction.
#
def build_serum_consume_events_instructions(context: Context, market_address: PublicKey, event_queue_address: PublicKey, open_orders_addresses: typing.Sequence[PublicKey], limit: int = 32) -> CombinableInstructions:
instruction = TransactionInstruction(
keys=[
AccountMeta(pubkey=pubkey, is_signer=False, is_writable=True)
for pubkey in [*open_orders_addresses, market_address, event_queue_address]
],
program_id=context.serum_program_address,
data=PYSERUM_INSTRUCTIONS_LAYOUT.build(
dict(instruction_type=PySerumInstructionType.CONSUME_EVENTS, args=dict(limit=limit))
),
)
# The interface accepts (and currently requires) two accounts at the end, but
# it doesn't actually use them.
random_account = Keypair().public_key
instruction.keys.append(AccountMeta(random_account, is_signer=False, is_writable=True))
instruction.keys.append(AccountMeta(random_account, is_signer=False, is_writable=True))
return CombinableInstructions(signers=[], instructions=[instruction])
# # 🥭 build_serum_settle_instructions function
#
# Creates a 'settle' instruction.
#
def build_serum_settle_instructions(context: Context, wallet: Wallet, market: PySerumMarket, open_orders_address: PublicKey, base_token_account_address: PublicKey, quote_token_account_address: PublicKey) -> CombinableInstructions:
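# Derive the market's vault-signer program address from the market key and its
# vault_signer_nonce; Serum requires this PDA to authorize moving funds out of the vaults.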
vault_signer = PublicKey.create_program_address(
[bytes(market.state.public_key()), market.state.vault_signer_nonce().to_bytes(8, byteorder="little")],
market.state.program_id(),
)
instruction = pyserum_settle_funds(
PySerumSettleFundsParams(
market=market.state.public_key(),
open_orders=open_orders_address,
owner=wallet.address,
base_vault=market.state.base_vault(),
quote_vault=market.state.quote_vault(),
base_wallet=base_token_account_address,
quote_wallet=quote_token_account_address,
vault_signer=vault_signer,
program_id=market.state.program_id(),
)
)
return CombinableInstructions(signers=[], instructions=[instruction])
# # 🥭 build_spot_settle_instructions function
#
# Creates a 'settle' instruction for spot markets.
#
# /// Settle all funds from serum dex open orders
# ///
# /// Accounts expected by this instruction (18):
# ///
# /// 0. `[]` mango_group_ai - MangoGroup that this mango account is for
# /// 1. `[]` mango_cache_ai - MangoCache for this MangoGroup
# /// 2. `[signer]` owner_ai - MangoAccount owner
# /// 3. `[writable]` mango_account_ai - MangoAccount
# /// 4. `[]` dex_prog_ai - program id of serum dex
# /// 5. `[writable]` spot_market_ai - dex MarketState account
# /// 6. `[writable]` open_orders_ai - open orders for this market for this MangoAccount
# /// 7. `[]` signer_ai - MangoGroup signer key
# /// 8. `[writable]` dex_base_ai - base vault for dex MarketState
# /// 9. `[writable]` dex_quote_ai - quote vault for dex MarketState
# /// 10. `[]` base_root_bank_ai - MangoGroup base vault acc
# /// 11. `[writable]` base_node_bank_ai - MangoGroup quote vault acc
# /// 12. `[]` quote_root_bank_ai - MangoGroup quote vault acc
# /// 13. `[writable]` quote_node_bank_ai - MangoGroup quote vault acc
# /// 14. `[writable]` base_vault_ai - MangoGroup base vault acc
# /// 15. `[writable]` quote_vault_ai - MangoGroup quote vault acc
# /// 16. `[]` dex_signer_ai - dex PySerumMarket signer account
# /// 17. `[]` spl token program
def build_spot_settle_instructions(context: Context, wallet: Wallet, account: Account,
market: PySerumMarket, group: Group, open_orders_address: PublicKey,
base_rootbank: RootBank, base_nodebank: NodeBank,
quote_rootbank: RootBank, quote_nodebank: NodeBank) -> CombinableInstructions:
vault_signer = PublicKey.create_program_address(
[bytes(market.state.public_key()), market.state.vault_signer_nonce().to_bytes(8, byteorder="little")],
market.state.program_id(),
)
settle_instruction = TransactionInstruction(
keys=[
AccountMeta(is_signer=False, is_writable=False, pubkey=group.address),
AccountMeta(is_signer=False, is_writable=True, pubkey=group.cache),
AccountMeta(is_signer=True, is_writable=False, pubkey=wallet.address),
AccountMeta(is_signer=False, is_writable=True, pubkey=account.address),
AccountMeta(is_signer=False, is_writable=False, pubkey=context.serum_program_address),
AccountMeta(is_signer=False, is_writable=True, pubkey=market.state.public_key()),
AccountMeta(is_signer=False, is_writable=True, pubkey=open_orders_address),
AccountMeta(is_signer=False, is_writable=False, pubkey=group.signer_key),
AccountMeta(is_signer=False, is_writable=True, pubkey=market.state.base_vault()),
AccountMeta(is_signer=False, is_writable=True, pubkey=market.state.quote_vault()),
AccountMeta(is_signer=False, is_writable=False, pubkey=base_rootbank.address),
AccountMeta(is_signer=False, is_writable=True, pubkey=base_nodebank.address),
AccountMeta(is_signer=False, is_writable=False, pubkey=quote_rootbank.address),
AccountMeta(is_signer=False, is_writable=True, pubkey=quote_nodebank.address),
AccountMeta(is_signer=False, is_writable=True, pubkey=base_nodebank.vault),
AccountMeta(is_signer=False, is_writable=True, pubkey=quote_nodebank.vault),
AccountMeta(is_signer=False, is_writable=False, pubkey=vault_signer),
AccountMeta(is_signer=False, is_writable=False, pubkey=TOKEN_PROGRAM_ID)
],
program_id=context.mango_program_address,
data=layouts.SETTLE_FUNDS.build(dict())
)
return CombinableInstructions(signers=[], instructions=[settle_instruction])
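# # 🥭 Example: bundling spot-settle instructions with signers (sketch)
#
# A minimal, hypothetical helper showing how the builder above is typically used:
# the wallet's signing keypair and the settle instruction are combined into one
# CombinableInstructions object so they can be submitted as a single transaction.
# The CombinableInstructions.from_wallet() constructor is assumed here; only the
# composition pattern is the point.
#
def example_spot_settlement_bundle(context: Context, wallet: Wallet, account: Account,
                                   market: PySerumMarket, group: Group,
                                   open_orders_address: PublicKey,
                                   base_rootbank: RootBank, base_nodebank: NodeBank,
                                   quote_rootbank: RootBank, quote_nodebank: NodeBank) -> CombinableInstructions:
    # Signers first, then the settle instruction built above; addition preserves order.
    signers = CombinableInstructions.from_wallet(wallet)
    settle = build_spot_settle_instructions(context, wallet, account, market, group,
                                            open_orders_address, base_rootbank, base_nodebank,
                                            quote_rootbank, quote_nodebank)
    return signers + settle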
# # 🥭 build_compound_serum_place_order_instructions function
#
# This function puts a trade on the Serum orderbook and then cranks and settles.
# It follows the pattern described here:
# https://solanadev.blogspot.com/2021/05/order-techniques-with-project-serum.html
#
# Here's an example (Raydium?) transaction that does this:
# https://solanabeach.io/transaction/3Hb2h7QMM3BbJCK42BUDuVEYwwaiqfp2oQUZMDJvUuoyCRJD5oBmA3B8oAGkB9McdCFtwdT2VrSKM2GCKhJ92FpY
#
# Basically, it tries to send a 'buy/sell' and a settle all in one transaction.
#
# It does this by:
# * Sending a Place Order (V3) instruction
# * Sending a Consume Events (crank) instruction
# * Sending a Settle Funds instruction
# all in the same transaction. With V3 Serum, this should consistently settle funds to the wallet
# immediately if the order is filled (either because it's IOC or because it matches an order on the
# orderbook).
#
def build_compound_serum_place_order_instructions(context: Context, wallet: Wallet, market: PySerumMarket, source: PublicKey, open_orders_address: PublicKey, all_open_orders_addresses: typing.Sequence[PublicKey], order_type: OrderType, side: Side, price: Decimal, quantity: Decimal, client_id: int, base_token_account_address: PublicKey, quote_token_account_address: PublicKey, fee_discount_address: typing.Optional[PublicKey], consume_limit: int = 32) -> CombinableInstructions:
place_order = build_serum_place_order_instructions(
context, wallet, market, source, open_orders_address, order_type, side, price, quantity, client_id, fee_discount_address)
consume_events = build_serum_consume_events_instructions(
context, market.state.public_key(), market.state.event_queue(), all_open_orders_addresses, consume_limit)
settle = build_serum_settle_instructions(
context, wallet, market, open_orders_address, base_token_account_address, quote_token_account_address)
return place_order + consume_events + settle
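# # 🥭 Example invocation (sketch)
#
# A hypothetical call site, shown as comments because the surrounding objects
# (context, wallet, market, token accounts, open orders addresses) are created
# elsewhere. The enum members (OrderType.IOC, Side.BUY) and the account variable
# names are illustrative assumptions; the parameter names match the signature above.
#
#     combined = build_compound_serum_place_order_instructions(
#         context, wallet, market,
#         source=quote_token_account,            # the paying token account for a buy
#         open_orders_address=open_orders,
#         all_open_orders_addresses=[open_orders],
#         order_type=OrderType.IOC, side=Side.BUY,
#         price=Decimal("40.5"), quantity=Decimal("0.1"), client_id=1,
#         base_token_account_address=base_token_account,
#         quote_token_account_address=quote_token_account,
#         fee_discount_address=None)
#
# The result is still a CombinableInstructions object, so callers can prepend their
# signers and submit place-order, crank and settle as one transaction.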
# # 🥭 build_cancel_perp_order_instructions function
#
# Builds the instructions necessary for cancelling a perp order.
#
def build_cancel_perp_order_instructions(context: Context, wallet: Wallet, account: Account, perp_market_details: PerpMarketDetails, order: Order, invalid_id_ok: bool) -> CombinableInstructions:
    # Prefer
def softmax_nodes(graph, feat):
    """Apply batch-wise graph-level softmax over all the values of node field
    :attr:`feat` in :attr:`graph`.
    Parameters
    ----------
    graph : DGLGraph
        The graph.
feat : str
The feature field.
Returns
-------
tensor
The tensor obtained.
Examples
--------
>>> import dgl
>>> import torch as th
Create two :class:`~dgl.DGLGraph` objects and initialize their
node features.
>>> g1 = dgl.DGLGraph() # Graph 1
>>> g1.add_nodes(2)
>>> g1.ndata['h'] = th.tensor([[1., 0.], [2., 0.]])
>>> g2 = dgl.DGLGraph() # Graph 2
>>> g2.add_nodes(3)
>>> g2.ndata['h'] = th.tensor([[1., 0.], [2., 0.], [3., 0.]])
Softmax over node attribute :attr:`h` in a batched graph.
>>> bg = dgl.batch([g1, g2], node_attrs='h')
>>> dgl.softmax_nodes(bg, 'h')
tensor([[0.2689, 0.5000], # [0.2689, 0.7311] = softmax([1., 2.])
[0.7311, 0.5000], # [0.5000, 0.5000] = softmax([0., 0.])
[0.0900, 0.3333], # [0.0900, 0.2447, 0.6652] = softmax([1., 2., 3.])
[0.2447, 0.3333], # [0.3333, 0.3333, 0.3333] = softmax([0., 0., 0.])
[0.6652, 0.3333]])
Softmax over node attribute :attr:`h` in a single graph.
>>> dgl.softmax_nodes(g1, 'h')
tensor([[0.2689, 0.5000], # [0.2689, 0.7311] = softmax([1., 2.])
            [0.7311, 0.5000]])  # [0.5000, 0.5000] = softmax([0., 0.])
Notes
-----
    If the input graph has batch size greater than one, the softmax is applied to
    each individual graph in the batched graph separately.
"""
return _softmax_on(graph, 'nodes', feat)
def softmax_edges(graph, feat):
"""Apply batch-wise graph-level softmax over all the values of edge field
:attr:`feat` in :attr:`graph`.
Parameters
----------
graph : DGLGraph
The graph.
feat : str
The feature field.
Returns
-------
tensor
The tensor obtained.
Examples
--------
>>> import dgl
>>> import torch as th
Create two :class:`~dgl.DGLGraph` objects and initialize their
edge features.
>>> g1 = dgl.DGLGraph() # Graph 1
>>> g1.add_nodes(2)
>>> g1.add_edges([0, 1], [1, 0])
>>> g1.edata['h'] = th.tensor([[1., 0.], [2., 0.]])
>>> g2 = dgl.DGLGraph() # Graph 2
>>> g2.add_nodes(3)
>>> g2.add_edges([0, 1, 2], [1, 2, 0])
>>> g2.edata['h'] = th.tensor([[1., 0.], [2., 0.], [3., 0.]])
Softmax over edge attribute :attr:`h` in a batched graph.
>>> bg = dgl.batch([g1, g2], edge_attrs='h')
>>> dgl.softmax_edges(bg, 'h')
tensor([[0.2689, 0.5000], # [0.2689, 0.7311] = softmax([1., 2.])
[0.7311, 0.5000], # [0.5000, 0.5000] = softmax([0., 0.])
[0.0900, 0.3333], # [0.0900, 0.2447, 0.6652] = softmax([1., 2., 3.])
[0.2447, 0.3333], # [0.3333, 0.3333, 0.3333] = softmax([0., 0., 0.])
[0.6652, 0.3333]])
Softmax over edge attribute :attr:`h` in a single graph.
>>> dgl.softmax_edges(g1, 'h')
tensor([[0.2689, 0.5000], # [0.2689, 0.7311] = softmax([1., 2.])
            [0.7311, 0.5000]])  # [0.5000, 0.5000] = softmax([0., 0.])
Notes
-----
    If the input graph has batch size greater than one, the softmax is applied to
    each example in the batch separately.
"""
return _softmax_on(graph, 'edges', feat)
def broadcast_nodes(graph, feat_data):
"""Broadcast :attr:`feat_data` to all nodes in :attr:`graph`, and return a
tensor of node features.
Parameters
----------
graph : DGLGraph
The graph.
feat_data : tensor
The feature to broadcast. Tensor shape is :math:`(*)` for single graph, and
:math:`(B, *)` for batched graph.
Returns
-------
tensor
The node features tensor with shape :math:`(N, *)`.
Examples
--------
>>> import dgl
>>> import torch as th
Create two :class:`~dgl.DGLGraph` objects and initialize their
node features.
>>> g1 = dgl.DGLGraph() # Graph 1
>>> g1.add_nodes(2)
>>> g2 = dgl.DGLGraph() # Graph 2
>>> g2.add_nodes(3)
>>> bg = dgl.batch([g1, g2])
>>> feat = th.rand(2, 5)
>>> feat
tensor([[0.4325, 0.7710, 0.5541, 0.0544, 0.9368],
[0.2721, 0.4629, 0.7269, 0.0724, 0.1014]])
Broadcast feature to all nodes in the batched graph, feat[i] is broadcast to nodes
in the i-th example in the batch.
>>> dgl.broadcast_nodes(bg, feat)
tensor([[0.4325, 0.7710, 0.5541, 0.0544, 0.9368],
[0.4325, 0.7710, 0.5541, 0.0544, 0.9368],
[0.2721, 0.4629, 0.7269, 0.0724, 0.1014],
[0.2721, 0.4629, 0.7269, 0.0724, 0.1014],
[0.2721, 0.4629, 0.7269, 0.0724, 0.1014]])
    Broadcast feature to all nodes in a single graph.
>>> dgl.broadcast_nodes(g1, feat[0])
tensor([[0.4325, 0.7710, 0.5541, 0.0544, 0.9368],
[0.4325, 0.7710, 0.5541, 0.0544, 0.9368]])
Notes
-----
    feat[i] is broadcast to the nodes of the i-th graph in the batched graph.
"""
return _broadcast_on(graph, 'nodes', feat_data)
def broadcast_edges(graph, feat_data):
"""Broadcast :attr:`feat_data` to all edges in :attr:`graph`, and return a
tensor of edge features.
Parameters
----------
graph : DGLGraph
The graph.
feat_data : tensor
The feature to broadcast. Tensor shape is :math:`(*)` for single
graph, and :math:`(B, *)` for batched graph.
Returns
-------
tensor
The edge features tensor with shape :math:`(E, *)`
Examples
--------
>>> import dgl
>>> import torch as th
Create two :class:`~dgl.DGLGraph` objects and initialize their
edge features.
>>> g1 = dgl.DGLGraph() # Graph 1
>>> g1.add_nodes(2)
>>> g1.add_edges([0, 1], [1, 0])
>>> g2 = dgl.DGLGraph() # Graph 2
>>> g2.add_nodes(3)
>>> g2.add_edges([0, 1, 2], [1, 2, 0])
>>> bg = dgl.batch([g1, g2])
>>> feat = th.rand(2, 5)
>>> feat
tensor([[0.4325, 0.7710, 0.5541, 0.0544, 0.9368],
[0.2721, 0.4629, 0.7269, 0.0724, 0.1014]])
Broadcast feature to all edges in the batched graph, feat[i] is broadcast to edges
in the i-th example in the batch.
>>> dgl.broadcast_edges(bg, feat)
tensor([[0.4325, 0.7710, 0.5541, 0.0544, 0.9368],
[0.4325, 0.7710, 0.5541, 0.0544, 0.9368],
[0.2721, 0.4629, 0.7269, 0.0724, 0.1014],
[0.2721, 0.4629, 0.7269, 0.0724, 0.1014],
[0.2721, 0.4629, 0.7269, 0.0724, 0.1014]])
    Broadcast feature to all edges in a single graph.
>>> dgl.broadcast_edges(g1, feat[0])
tensor([[0.4325, 0.7710, 0.5541, 0.0544, 0.9368],
[0.4325, 0.7710, 0.5541, 0.0544, 0.9368]])
Notes
-----
    feat[i] is broadcast to the edges of the i-th graph in the batched graph.
"""
return _broadcast_on(graph, 'edges', feat_data)
def topk_nodes(graph, feat, k, descending=True, idx=None):
"""Return graph-wise top-k node features of field :attr:`feat` in
:attr:`graph` ranked by keys at given index :attr:`idx`. If :attr:
`descending` is set to False, return the k smallest elements instead.
    If idx is set to None, the function returns the top-k values along each feature
    dimension independently, which is equivalent to calling
    :code:`torch.topk(graph.ndata[feat], k, dim=0)`
    for each example of the input graph.
Parameters
----------
graph : DGLGraph
The graph.
feat : str
The feature field.
k : int
The k in "top-k"
descending : bool
Controls whether to return the largest or smallest elements.
idx : int or None, defaults to None
The index of keys we rank :attr:`feat` on, if set to None, we sort
the whole :attr:`feat`.
Returns
-------
tuple of tensors
The first tensor returns top-k node features of each single graph of
the input graph:
a tensor with shape :math:`(B, K, D)` would be returned, where
:math:`B` is the batch size of the input graph.
The second tensor returns the top-k node indices of each single graph
of the input graph:
        a tensor with shape :math:`(B, K)` (:math:`(B, K, D)` if :attr:`idx`
        is set to None) would be returned, where
:math:`B` is the batch size of the input graph.
Examples
--------
>>> import dgl
>>> import torch as th
Create two :class:`~dgl.DGLGraph` objects and initialize their
node features.
>>> g1 = dgl.DGLGraph() # Graph 1
>>> g1.add_nodes(4)
>>> g1.ndata['h'] = th.rand(4, 5)
>>> g1.ndata['h']
tensor([[0.0297, 0.8307, 0.9140, 0.6702, 0.3346],
[0.5901, 0.3030, 0.9280, 0.6893, 0.7997],
[0.0880, 0.6515, 0.4451, 0.7507, 0.5297],
[0.5171, 0.6379, 0.2695, 0.8954, 0.5197]])
>>> g2 = dgl.DGLGraph() # Graph 2
>>> g2.add_nodes(5)
>>> g2.ndata['h'] = th.rand(5, 5)
>>> g2.ndata['h']
tensor([[0.3168, 0.3174, 0.5303, 0.0804, 0.3808],
[0.1323, 0.2766, 0.4318, 0.6114, 0.1458],
[0.1752, 0.9105, 0.5692, 0.8489, 0.0539],
[0.1931, 0.4954, 0.3455, 0.3934, 0.0857],
[0.5065, 0.5182, 0.5418, 0.1520, 0.3872]])
Top-k over node attribute :attr:`h` in a batched graph.
>>> bg = dgl.batch([g1, g2], node_attrs='h')
>>> dgl.topk_nodes(bg, 'h', 3)
(tensor([[[0.5901, 0.8307, 0.9280, 0.8954, 0.7997],
[0.5171, 0.6515, 0.9140, 0.7507, 0.5297],
[0.0880, 0.6379, 0.4451, 0.6893, 0.5197]],
[[0.5065, 0.9105, 0.5692, 0.8489, 0.3872],
[0.3168, 0.5182, 0.5418, 0.6114, 0.3808],
[0.1931, 0.4954, 0.5303, 0.3934, 0.1458]]]), tensor([[[1, 0, 1, 3, 1],
[3, 2, 0, 2, 2],
[2, 3, 2, 1, 3]],
[[4, 2, 2, 2, 4],
[0, 4, 4, 1, 0],
[3, 3, 0, 3, 1]]]))
Top-k over node attribute :attr:`h` along index -1 in a batched graph.
(used in SortPooling)
>>> dgl.topk_nodes(bg, 'h', 3, idx=-1)
(tensor([[[0.5901, 0.3030, 0.9280, 0.6893, 0.7997],
[0.0880, 0.6515, 0.4451, 0.7507, 0.5297],
[0.5171, 0.6379, 0.2695, 0.8954, 0.5197]],
[[0.5065, 0.5182, 0.5418, 0.1520, 0.3872],
[0.3168, 0.3174, 0.5303, 0.0804, 0.3808],
[0.1323, 0.2766, 0.4318, 0.6114, 0.1458]]]), tensor([[1, 2, 3],
[4, 0, 1]]))
Top-k over node attribute :attr:`h` in a single graph.
>>> dgl.topk_nodes(g1, 'h', 3)
(tensor([[[0.5901, 0.8307, 0.9280, 0.8954, 0.7997],
[0.5171, 0.6515, 0.9140, 0.7507, 0.5297],
[0.0880, 0.6379, 0.4451, 0.6893, 0.5197]]]), tensor([[[1, 0, 1, 3, 1],
            [3, 2, 0, 2, 2],
            [2, 3, 2, 1, 3]]]))
    """
    # Delegates to the shared top-k helper, mirroring the _softmax_on/_broadcast_on
    # pattern above (the helper's exact name and signature are assumed).
    return _topk_on(graph, 'nodes', feat, k, descending=descending, idx=idx)
# Source repository: TugberkArkose/MLScheduler
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0704087,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.25799,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.397975,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.188804,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.32694,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.187509,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.703254,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.125609,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.81006,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.075186,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00684429,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.075131,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0506177,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.150317,
'Execution Unit/Register Files/Runtime Dynamic': 0.057462,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.200837,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.549203,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 1.97329,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000128293,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000128293,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000111033,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 4.25945e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000727128,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00109475,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00125542,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0486601,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.0952,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.117729,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.165272,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.46623,
'Instruction Fetch Unit/Runtime Dynamic': 0.334011,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.130932,
'L2/Runtime Dynamic': 0.037829,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.29559,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.04058,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0665964,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0665963,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.61135,
'Load Store Unit/Runtime Dynamic': 1.4356,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.164215,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.32843,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0582806,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0602397,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.192448,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0193218,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.451703,
'Memory Management Unit/Runtime Dynamic': 0.0795615,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 20.032,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.262307,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0128108,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.09465,
          'Renaming Unit/Int Front End RAT/Subthreshold
# Source repository: jdsika/TUM_HOly
# File: openrave/python/ikfast_generator_cpp_sympy0_6.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (Lesser GPL)
#
# Copyright (C) 2009-2012 <NAME>
#
# ikfast is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ikfast is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""generates C++ code from the IKFastSolver AST.
"""
from __future__ import with_statement # for python 2.5
from sympy import __version__ as sympy_version
if sympy_version >= '0.7.0':
raise ImportError('ikfast needs sympy 0.6.x')
import sys, copy, time, datetime
try:
from openravepy.metaclass import AutoReloader
except:
class AutoReloader:
pass
# import the correct iktypes from openravepy (if present)
try:
from openravepy import IkParameterization
IkType = IkParameterization.Type
except:
class IkType:
Transform6D=0x67000001
Rotation3D=0x34000002
Translation3D=0x33000003
Direction3D=0x23000004
Ray4D=0x46000005
Lookat3D=0x23000006
TranslationDirection5D=0x56000007
TranslationXY2D=0x22000008
TranslationXYOrientation3D=0x33000009
TranslationLocalGlobal6D=0x3600000a
TranslationXAxisAngle4D=0x4400000b
TranslationYAxisAngle4D=0x4400000c
TranslationZAxisAngle4D=0x4400000d
TranslationXAxisAngleZNorm4D=0x4400000e
TranslationYAxisAngleXNorm4D=0x4400000f
TranslationZAxisAngleYNorm4D=0x44000010
from sympy import *
try:
import re # for indenting
except ImportError:
pass
try:
from itertools import izip, combinations
except ImportError:
def combinations(items,n):
if n == 0: yield[]
else:
for i in xrange(len(items)):
for cc in combinations(items[i+1:],n-1):
yield [items[i]]+cc
import logging
log = logging.getLogger('ikfast')
from sympy.core import function # for sympy 0.7.1+
class fmod(function.Function):
nargs = 2
is_real = True
is_Function = True
class atan2check(atan2):
nargs = 2
is_real = True
is_Function = True
def evalNumbers(expr):
"""Replaces all numbers with symbols, this is to make gcd faster when fractions get too big"""
if expr.is_number:
return expr.evalf()
elif expr.is_Mul:
result = S.One
for arg in expr.args:
result *= evalNumbers(arg)
elif expr.is_Add:
result = S.Zero
for arg in expr.args:
result += evalNumbers(arg)
elif expr.is_Pow:
# don't replace the exponent
result = evalNumbers(expr.base)**expr.exp
elif expr.is_Function:
args = [evalNumbers(arg) for arg in expr.args]
return expr.func(*args)
else:
result = expr
return result
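# Illustrative doctest-style sketch (not part of the original module): evalNumbers
# walks Add/Mul/Pow/Function nodes and turns exact numbers into floats while
# leaving symbols untouched, e.g.
#
#     >>> from sympy import Rational, Symbol
#     >>> x = Symbol('x')
#     >>> evalNumbers(Rational(1, 3)*x + Rational(1, 2))
#     0.333333333333333*x + 0.5
#
# (The printed precision depends on the sympy 0.6.x default for evalf().)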
def customcse(rawexprs,symbols=None):
if not hasattr(rawexprs,'__iter__') and not hasattr(rawexprs,'__array__'):
rawexprs = [rawexprs]
if symbols is None:
symbols = cse_main.numbered_symbols('x')
# fractions can get big, so evaluate as many decimals as possible
complexitysubs = [(Symbol('POW'),1),(Symbol('ADD'),1),(Symbol('MUL'),1)]
reduced_exprs = []
allexprs = []
for expr in rawexprs:
evalexpr = evalNumbers(expr)
complexity = evalexpr.count_ops().subs(complexitysubs)
        # need to threshold complexity, otherwise cse will not terminate
if complexity > 300:
reduced_exprs.append(evalexpr)
else:
allexprs.append(evalexpr)
reduced_exprs.append(None)
newreplacements = []
if len(allexprs)>0:
replacements,reduced_exprs2 = cse(allexprs,symbols=symbols)
# have to maintain the same order
for expr in reduced_exprs2:
for i in range(len(reduced_exprs)):
if reduced_exprs[i] is None:
reduced_exprs[i] = expr
break
assert(all([expr is not None for expr in reduced_exprs]))
        # look for any expressions of the form (x**(1/a))**b; the consumer usually wants x^(b/a)
for r in replacements:
newr = r[1]
if newr.is_Pow and newr.exp.is_number and newr.base.is_Symbol:
baseexpr = newr.base.subs(replacements)
if baseexpr.is_Pow and baseexpr.exp.is_number:
newreplacements.append((r[0],baseexpr.base**(newr.exp*baseexpr.exp)))
continue
newreplacements.append((r[0],newr))
return newreplacements,reduced_exprs
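# Illustrative sketch (not part of the original module): customcse behaves like
# sympy's cse() but pre-evaluates numbers and skips any expression whose operation
# count exceeds the threshold above. A typical call looks roughly like this, with
# the default numbered x0, x1, ... substitution symbols:
#
#     >>> from sympy import Symbol
#     >>> a, b = Symbol('a'), Symbol('b')
#     >>> replacements, reduced = customcse([(a + b)**2 + (a + b), (a + b)*3])
#     >>> # 'replacements' pairs each xN symbol with the subexpression it replaces;
#     >>> # 'reduced' holds the rewritten input expressions in their original order.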
class CodeGenerator(AutoReloader):
"""Generates C++ code from an AST generated by IKFastSolver.
"""
def __init__(self,kinematicshash='',version=''):
self.symbolgen = cse_main.numbered_symbols('x')
self.strprinter = printing.StrPrinter()
self.freevars = None # list of free variables in the solution
self.freevardependencies = None # list of variables depending on the free variables
self.functions = dict()
self.kinematicshash=kinematicshash
self.resetequations() # dictionary of symbols already written
self.version=version
def resetequations(self):
self.dictequations = [[],[]]
def copyequations(self,dictequations=None):
if dictequations is None:
dictequations=self.dictequations
return [copy.copy(dictequations[0]),copy.copy(dictequations[1])]
def generate(self, solvertree):
code = """/// autogenerated analytical inverse kinematics code from ikfast program part of OpenRAVE
/// \\author <NAME>
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// ikfast version %s generated on %s
/// To compile with gcc:
/// gcc -lstdc++ ik.cpp
/// To compile without any main function as a shared object (might need -llapack):
/// gcc -fPIC -lstdc++ -DIKFAST_NO_MAIN -DIKFAST_CLIBRARY -shared -Wl,-soname,libik.so -o libik.so ik.cpp
#define IKFAST_HAS_LIBRARY
#include "ikfast.h"
using namespace ikfast;
// check if the included ikfast version matches what this file was compiled with
#define IKFAST_COMPILE_ASSERT(x) extern int __dummy[(int)x]
IKFAST_COMPILE_ASSERT(IKFAST_VERSION==%s);
#include <cmath>
#include <vector>
#include <limits>
#include <algorithm>
#include <complex>
#define IKFAST_STRINGIZE2(s) #s
#define IKFAST_STRINGIZE(s) IKFAST_STRINGIZE2(s)
#ifndef IKFAST_ASSERT
#include <stdexcept>
#include <sstream>
#include <iostream>
#ifdef _MSC_VER
#ifndef __PRETTY_FUNCTION__
#define __PRETTY_FUNCTION__ __FUNCDNAME__
#endif
#endif
#ifndef __PRETTY_FUNCTION__
#define __PRETTY_FUNCTION__ __func__
#endif
#define IKFAST_ASSERT(b) { if( !(b) ) { std::stringstream ss; ss << "ikfast exception: " << __FILE__ << ":" << __LINE__ << ": " <<__PRETTY_FUNCTION__ << ": Assertion '" << #b << "' failed"; throw std::runtime_error(ss.str()); } }
#endif
#if defined(_MSC_VER)
#define IKFAST_ALIGNED16(x) __declspec(align(16)) x
#else
#define IKFAST_ALIGNED16(x) x __attribute((aligned(16)))
#endif
#define IK2PI ((IkReal)6.28318530717959)
#define IKPI ((IkReal)3.14159265358979)
#define IKPI_2 ((IkReal)1.57079632679490)
#ifdef _MSC_VER
#ifndef isnan
#define isnan _isnan
#endif
#endif // _MSC_VER
// lapack routines
extern "C" {
void dgetrf_ (const int* m, const int* n, double* a, const int* lda, int* ipiv, int* info);
void zgetrf_ (const int* m, const int* n, std::complex<double>* a, const int* lda, int* ipiv, int* info);
void dgetri_(const int* n, const double* a, const int* lda, int* ipiv, double* work, const int* lwork, int* info);
void dgesv_ (const int* n, const int* nrhs, double* a, const int* lda, int* ipiv, double* b, const int* ldb, int* info);
void dgetrs_(const char *trans, const int *n, const int *nrhs, double *a, const int *lda, int *ipiv, double *b, const int *ldb, int *info);
void dgeev_(const char *jobvl, const char *jobvr, const int *n, double *a, const int *lda, double *wr, double *wi,double *vl, const int *ldvl, double *vr, const int *ldvr, double *work, const int *lwork, int *info);
}
using namespace std; // necessary to get std math routines
#ifdef IKFAST_NAMESPACE
namespace IKFAST_NAMESPACE {
#endif
inline float IKabs(float f) { return fabsf(f); }
inline double IKabs(double f) { return fabs(f); }
inline float IKsqr(float f) { return f*f; }
inline double IKsqr(double f) { return f*f; }
inline float IKlog(float f) { return logf(f); }
inline double IKlog(double f) { return log(f); }
// allows asin and acos to exceed 1
#ifndef IKFAST_SINCOS_THRESH
#define IKFAST_SINCOS_THRESH ((IkReal)0.000001)
#endif
// used to check input to atan2 for degenerate cases
#ifndef IKFAST_ATAN2_MAGTHRESH
#define IKFAST_ATAN2_MAGTHRESH ((IkReal)2e-6)
#endif
// minimum distance of separate solutions
#ifndef IKFAST_SOLUTION_THRESH
#define IKFAST_SOLUTION_THRESH ((IkReal)1e-6)
#endif
inline float IKasin(float f)
{
IKFAST_ASSERT( f > -1-IKFAST_SINCOS_THRESH && f < 1+IKFAST_SINCOS_THRESH ); // any more error implies something is wrong with the solver
if( f <= -1 ) return float(-IKPI_2);
else if( f >= 1 ) return float(IKPI_2);
return asinf(f);
}
inline double IKasin(double f)
{
IKFAST_ASSERT( f > -1-IKFAST_SINCOS_THRESH && f < 1+IKFAST_SINCOS_THRESH ); // any more error implies something is wrong with the solver
if( f <= -1 ) return -IKPI_2;
else if( f >= 1 ) return IKPI_2;
return asin(f);
}
// return positive value in [0,y)
inline float IKfmod(float x, float y)
{
while(x < 0) {
x += y;
}
return fmodf(x,y);
}
// return positive value in [0,y)
inline double IKfmod(double x, double y)
{
while(x < 0) {
x += y;
}
return fmod(x,y);
}
inline float IKacos(float f)
{
IKFAST_ASSERT( f > -1-IKFAST_SINCOS_THRESH && f < 1+IKFAST_SINCOS_THRESH ); // any more error implies something is wrong with the solver
if( f <= -1 ) return float(IKPI);
else if( f >= 1 ) return float(0);
return acosf(f);
}
inline double IKacos(double f)
{
IKFAST_ASSERT( f > -1-IKFAST_SINCOS_THRESH && f < 1+IKFAST_SINCOS_THRESH ); // any more error implies something is wrong with the solver
if( f <= -1 ) return IKPI;
else if( f >= 1 ) return 0;
return acos(f);
}
inline float IKsin(float f) { return sinf(f); }
inline double IKsin(double f) { return sin(f); }
inline float IKcos(float f) { return cosf(f); }
inline double IKcos(double f) { return cos(f); }
inline float IKtan(float f) { return tanf(f); }
inline double IKtan(double f) { return tan(f); }
inline float IKsqrt(float f) { if( f <= 0.0f ) return 0.0f; return sqrtf(f); }
inline double IKsqrt(double f) { if( f <= 0.0 ) return 0.0; return sqrt(f); }
inline float IKatan2(float fy, float fx) {
if( isnan(fy) ) {
IKFAST_ASSERT(!isnan(fx)); // if both are nan, probably wrong value will be returned
return float(IKPI_2);
}
else if( isnan(fx) ) {
return 0;
}
return atan2f(fy,fx);
}
inline double IKatan2(double fy, double fx) {
    if( isnan(fy) ) {
        IKFAST_ASSERT(!isnan(fx)); // if both are nan, probably wrong value will be returned
        return IKPI_2;
    }
    else if( isnan(fx) ) {
        return 0;
    }
    return atan2(fy,fx);
}
# Source repository: lasconic/randomsheetmusic
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: musicxml/mxObjects.py
# Purpose: MusicXML objects for conversion to and from music21
#
# Authors: <NAME>
#
# Copyright: Copyright © 2009-2014 <NAME> and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
This module defines an object representation of MusicXML, used for converting to and from
MusicXML and music21.
'''
import sys
import copy
import unittest
#import codecs
from music21 import defaults
from music21 import common
from music21 import exceptions21
from music21 import xmlnode
from music21.ext import six
from music21 import environment
_MOD = 'musicxml.py'
environLocal = environment.Environment(_MOD)
#-------------------------------------------------------------------------------
# store the highest version number of m21 that pickled mxl object files
# are compatible; compatible pickles (always written with the m21 version)
# are >= to this value
# if changes are made here that are not compatible, the m21 version number
# needs to be increased and this number needs to be set to that value
VERSION_MINIMUM = (1, 9, 0)
# new objects to add: octave-shift, in direction-type
# bracket, in direction-type
# Notations -> ornaments, trill-mark/wavy-line
# notations -> glissando
# dashes, in direction-type
#-------------------------------------------------------------------------------
# notes
# problem with element tree:
# http://effbot.org/zone/element.htm
# Note that the standard element writer creates a compact output.
# There is no built-in support for pretty printing or user-defined
# namespace prefixes in the current version, so the output may not
# always be suitable for human consumption (to the extent XML is
# suitable for human consumption, that is).
# unicode and python issues
# http://evanjones.ca/python-utf8.html
# TODO: deal with grace notes, in particular duration handling
# TODO: add print <print new-system="yes"/>
# tests matching
# 01a, 01b, 01c, 01d
# 02a, 02c, 02d, 02e
# tests that do not match
# 02b-Rest-PitchedRest.xml
# rests currently do not store display settings
#-------------------------------------------------------------------------------
# these single-entity tags are bundled together.
# order here may matter in performance
DYNAMIC_MARKS = ['p', 'pp', 'ppp', 'pppp', 'ppppp', 'pppppp',
'f', 'ff', 'fff', 'ffff', 'fffff', 'ffffff',
'mp', 'mf', 'sf', 'sfp', 'sfpp', 'fp', 'rf', 'rfz', 'sfz', 'sffz', 'fz']
ARTICULATION_MARKS = ['staccato', 'accent', 'strong-accent', 'tenuto',
'detached-legato', 'staccatissimo', 'spiccato',
'scoop', 'plop', 'doit', 'falloff', 'breath-mark',
'caesura', 'stress', 'unstress']
TECHNICAL_MARKS = ['up-bow', 'down-bow', 'harmonic', 'open-string',
'thumb-position', 'fingering', 'pluck', 'double-tongue',
'triple-tongue', 'stopped', 'snap-pizzicato', 'fret',
'string', 'hammer-on', 'pull-off', 'tap', 'heel',
'toe', 'fingernails']
# 'bend' : not implemented as needs many sub components
#-------------------------------------------------------------------------------
def yesNoToBoolean(value):
if value in ['yes', True]:
return True
return False
def booleanToYesNo(value):
if value:
return 'yes'
return 'no'
#-------------------------------------------------------------------------------
class TagException(exceptions21.Music21Exception):
pass
class TagLibException(exceptions21.Music21Exception):
pass
class MusicXMLException(exceptions21.Music21Exception):
pass
class DocumentException(exceptions21.Music21Exception):
pass
#-------------------------------------------------------------------------------
class Tag(common.SlottedObject):
'''Object to store tags as encountered by SAX. Tags can be open or closed based on the
status attribute. Tags can store character data collected between their defined tags.
These objects are used only for finding and collecting tag attributes and elements.
As we do not need character data for all tags, tags have an optional flag to select
    if they are to collect character data.
'''
__slots__ = ('tag', 'cdFlag', 'status', 'charData', 'className', 'count')
def __init__(self, tag, cdFlag=False, className=None):
'''
>>> t = musicxml.mxObjects.Tag('note')
>>> t.start()
>>> t.start() # catch double starts
Traceback (most recent call last):
TagException: Tag (note) is already started.
Calling the tag returns the charData
>>> t.cdFlag = True # can handle charData
>>> t.charData = 'Hello'
>>> t()
'Hello'
'''
self.tag = tag
self.cdFlag = cdFlag # character data flag
self.status = False
self.charData = u''
self.className = className
self.count = 0 # Used for statistics/debugging -- audit; not otherwise used
def start(self):
if self.status: # already open
raise TagException('Tag (%s) is already started.' % self.tag)
self.status = True
def end(self):
if not self.status:
raise TagException('Tag (%s) is already ended.' % self.tag)
self.status = False
# when not doing audit checks, no need to count
#self.count += 1 # increment on close
def clear(self):
self.charData = u''
def __eq__(self, other):
if other == self.tag: return True
else: return False
def __ne__(self, other):
if other != self.tag: return 1
else: return False
def __call__(self):
return self.charData
def __str__(self):
return "%s: %s" % (self.tag, self.charData)
class TagLib(object):
'''
An object to store all MusicXML tags as :class:`~music21.musicxml.base.Tag` objects.
Tag objects are used just to identify tags, store element contents and status in SAX parsing.
With this design some tags (called simple elements) can be used simply in
SAX parsing as structural monitors, but not be instantiated as objects for content
delivery.
'''
def __init__(self):
'''
>>> tl = musicxml.mxObjects.TagLib()
>>> tl['voice'].tag
'voice'
>>> tl['voice'].status # open or closed
False
>>> tl.audit()
(True, 'TagLib audit: no errors found.')
>>> tl['note'].start()
>>> tl.audit()
(False, 'TagLib audit: 1 errors found:\\ntag <note> left open')
'''
self._t = {}
# store tag, charDataBool, className
# charDataBool is if this tag stores char data
# order here is based on most-often used, found through empirical tests
# all tags under collection must be defined here, even if they do not
# have an object but are defined only as simple entities
_tags = [
('voice', True),
('note', False, Note),
('duration', True), # no object, just a tag
('type', True),
('beam', True, Beam),
('step', True),
('stem', True),
('pitch', False, Pitch),
('octave', True),
('alter', True),
('notations', False, Notations),
('measure', False, Measure),
('slur', False, Slur),
('articulations', False, Articulations),
('rest', False, Rest),
('accidental', True, Accidental),
('direction', False, Direction),
('direction-type', False, DirectionType),
('dot', False, Dot),
('dynamics', False, Dynamics),
('tied', False, Tied),
('tie', False, Tie),
('chord', False),
('lyric', False, Lyric),
('syllabic', True),
('text', True),
('trill-mark', False, TrillMark),
('mordent', False, Mordent),
('inverted-mordent', False, InvertedMordent),
('turn', False, Turn),
('delayed-turn', False, DelayedTurn),
('inverted-turn', False, InvertedTurn),
('accidental-mark', True, AccidentalMark),
('shake', False, Shake),
('schleifer', False, Schleifer),
('tremolo', False, Tremolo),
('attributes', False, Attributes),
('divisions', True),
('staff-details', False, StaffDetails),
('staff-lines', True),
('staff-size', True),
('forward', False, Forward),
('backup', False, Backup),
('grace', False, Grace),
# this position is not based on measured tag usage
('sound', False, Sound),
('words', True, Words),
('offset', True), # no object
('defaults', False, Defaults),
('scaling', False, Scaling),
('millimeters', True),
('tenths', True),
('print', False, Print),
('page-layout', False, PageLayout),
('page-margins', False, PageMargins),
('page-height', True),
('page-width', True),
('system-layout', False, SystemLayout),
('system-margins', False, SystemMargins),
('right-margin', True),
('left-margin', True),
('top-margin', True),
('bottom-margin', True),
('system-distance', True),
('top-system-distance', True),
('staff-layout', False, StaffLayout),
('staff-distance', True),
('metronome', False, Metronome), # no char data
('beat-unit', True, BeatUnit),
('beat-unit-dot', False, BeatUnitDot),
('per-minute', True, PerMinute),
('time-modification', False, TimeModification),
('actual-notes', True),
('normal-notes', True),
('normal-type', True),
('normal-dot', True),
('tuplet', False, Tuplet),
('notehead', True, Notehead),
('technical', False, Technical),
('wedge', False, Wedge),
('octave-shift', False, OctaveShift),
('bracket', False, Bracket),
('wavy-line', False, WavyLine),
('glissando', True, Glissando),
('dashes', False, Dashes),
('ornaments', False, Ornaments),
('part', False, Part),
('key', False, Key),
('fifths', True),
('mode', True),
('cancel', True),
('key-step', True, KeyStep),
('key-alter', True, KeyAlter),
('key-octave', True, KeyOctave),
('transpose', False, Transpose),
('diatonic', True),
('chromatic', True),
('octave-change', True),
('time', False, Time),
('beats', True, Beats),
('beat-type', True, BeatType),
('clef', False, Clef),
('sign', True),
('line', True),
('clef-octave-change', True),
('staff', True),
('fermata', True, Fermata),
('barline', False, Barline),
('ending', False, Ending),
('segno', False, Segno),
('coda', False, Coda),
('bar-style', True),
('repeat', False, Repeat),
('measure-style', False, MeasureStyle),
('multiple-rest', True),
('staves', True),
('display-step', True, DisplayStep),
('display-octave', True, DisplayOctave),
]
_tags += DYNAMIC_MARKS
_tags += ARTICULATION_MARKS
_tags += TECHNICAL_MARKS
_tags += [('other-dynamics', True, DynamicMark),
('other-articulation', True, ArticulationMark),
('other-technical', True, TechnicalMark),
('score-partwise', False),
('score-timewise', False),
('movement-title', True),
('movement-number', True),
('work', False, Work),
('work-title', True),
('work-number', True),
('opus', False),
('identification', False, Identification),
('rights', True),
('creator', True, Creator),
| |
str(default_val)
output = output.strip()
if not output_re or re.match(output_re, output):
break
else:
print "Invalid input, must match %s" % output_re
return output
def ConfigureHostnames(config):
"""This configures the hostnames stored in the config."""
if flags.FLAGS.external_hostname:
hostname = flags.FLAGS.external_hostname
else:
try:
hostname = socket.gethostname()
except (OSError, IOError):
print "Sorry, we couldn't guess your hostname.\n"
hostname = RetryQuestion("Please enter your hostname e.g. "
"grr.example.com", "^[\\.A-Za-z0-9-]+$", hostname)
print """\n\n-=Server URL=-
The Server URL specifies the URL that the clients will connect to
communicate with the server. For best results this should be publicly
accessible. By default this will be port 8080 with the URL ending in /control.
"""
frontend_url = RetryQuestion("Frontend URL", "^http://.*/control$",
"http://%s:8080/control" % hostname)
config.Set("Client.control_urls", [frontend_url])
frontend_port = urlparse.urlparse(frontend_url).port or config_lib.CONFIG.Get(
"Frontend.bind_port")
config.Set("Frontend.bind_port", frontend_port)
print """\n\n-=AdminUI URL=-:
The UI URL specifies where the Administrative Web Interface can be found.
"""
ui_url = RetryQuestion("AdminUI URL", "^http[s]*://.*$",
"http://%s:8000" % hostname)
config.Set("AdminUI.url", ui_url)
ui_port = urlparse.urlparse(ui_url).port or config_lib.CONFIG.Get(
"AdminUI.port")
config.Set("AdminUI.port", ui_port)
def ConfigureDatastore(config):
"""Set the datastore to use by prompting the user to choose."""
print """
1. SQLite (Default) - This datastore is stored on the local file system. If you
configure GRR to run as non-root be sure to allow that user access to the files.
2. MySQL - This datastore uses MySQL and requires MySQL 5.6 server or later
to be running and a user with the ability to create the GRR database and tables.
The MySQL client binaries are required for use with the MySQLdb python module as
well.
"""
datastore = RetryQuestion("Datastore", "^[1-2]$", "1")
if datastore == "1":
config.Set("Datastore.implementation", "SqliteDataStore")
datastore_location = RetryQuestion(
"Datastore Location", "^/[A-Za-z0-9/.-]+$",
config_lib.CONFIG.Get("Datastore.location"))
config.Set("Datastore.location", datastore_location)
if datastore == "2":
config.Set("Datastore.implementation", "MySQLAdvancedDataStore")
mysql_host = RetryQuestion("MySQL Host", "^[\\.A-Za-z0-9-]+$",
config_lib.CONFIG.Get("Mysql.host"))
config.Set("Mysql.host", mysql_host)
mysql_port = RetryQuestion("MySQL Port (0 for local socket)",
"^[0-9]+$",
config_lib.CONFIG.Get("Mysql.port"))
config.Set("Mysql.port", mysql_port)
mysql_database = RetryQuestion("MySQL Database", "^[A-Za-z0-9-]+$",
config_lib.CONFIG.Get("Mysql.database_name"))
config.Set("Mysql.database_name", mysql_database)
mysql_username = RetryQuestion(
"MySQL Username", "[A-Za-z0-9-]+$",
config_lib.CONFIG.Get("Mysql.database_username"))
config.Set("Mysql.database_username", mysql_username)
mysql_password = getpass.getpass(
prompt="Please enter password for database user %s: " % mysql_username)
config.Set("Mysql.database_password", mysql_password)
print """\n\n***WARNING***
Do not continue until a MySQL 5.6 server is installed and running with a user
created with the ability to create the GRR database and tables and the Python
MySQLdb module has been installed on the GRR server.
E.g: apt-get install mysql-server-5.6 python-mysqldb
"""
while raw_input("Are you ready to continue?[Yn]: ").upper() != "Y":
pass
def ConfigureEmails(config):
"""Configure email notification addresses."""
print """\n\n-=Monitoring/Email Domain=-
Emails concerning alerts or updates must be sent to this domain.
"""
domain = RetryQuestion("Email Domain e.g example.com",
"^([\\.A-Za-z0-9-]+)*$",
config_lib.CONFIG.Get("Logging.domain"))
config.Set("Logging.domain", domain)
print """\n\n-=Alert Email Address=-
Address where monitoring events get sent, e.g. crashed clients, broken server
etc.
"""
email = RetryQuestion("Alert Email Address", "", "grr-monitoring@%s" % domain)
config.Set("Monitoring.alert_email", email)
print """\n\n-=Emergency Email Address=-
Address where high priority events such as an emergency ACL bypass are sent.
"""
emergency_email = RetryQuestion("Emergency Access Email Address", "",
"grr-emergency@%s" % domain)
config.Set("Monitoring.emergency_access_email", emergency_email)
def ConfigureBaseOptions(config):
"""Configure the basic options required to run the server."""
print "We are now going to configure the server using a bunch of questions."
print """\n\n-=GRR Datastore=-
For GRR to work each GRR server has to be able to communicate with the
datastore. To do this we need to configure a datastore.\n"""
existing_datastore = config_lib.CONFIG.Get("Datastore.implementation")
if not existing_datastore or existing_datastore == "FakeDataStore":
ConfigureDatastore(config)
else:
print """Found existing settings:
Datastore: %s""" % existing_datastore
if existing_datastore == "SqliteDataStore":
print """ Datastore Location: %s
""" % config_lib.CONFIG.Get("Datastore.location")
if existing_datastore == "MySQLAdvancedDataStore":
print """ MySQL Host: %s
MySQL Port: %s
MySQL Database: %s
MySQL Username: %s
""" % (config_lib.CONFIG.Get("Mysql.host"),
config_lib.CONFIG.Get("Mysql.port"),
config_lib.CONFIG.Get("Mysql.database_name"),
config_lib.CONFIG.Get("Mysql.database_username"))
if raw_input("Do you want to keep this configuration?"
" [Yn]: ").upper() == "N":
ConfigureDatastore(config)
print """\n\n-=GRR URLs=-
For GRR to work each client has to be able to communicate with the
server. To do this we normally need a public dns name or IP address to
communicate with. In the standard configuration this will be used to host both
the client facing server and the admin user interface.\n"""
existing_ui_urn = config_lib.CONFIG.Get("AdminUI.url", default=None)
existing_frontend_urn = config_lib.CONFIG.Get("Client.control_urls",
default=None)
if not existing_frontend_urn or not existing_ui_urn:
ConfigureHostnames(config)
else:
print """Found existing settings:
AdminUI URL: %s
Frontend URL(s): %s
""" % (existing_ui_urn, existing_frontend_urn)
if raw_input(
"Do you want to keep this configuration? [Yn]: ").upper() == "N":
ConfigureHostnames(config)
print """\n\n-=GRR Emails=-
GRR needs to be able to send emails for various logging and
alerting functions. The email domain will be appended to GRR user names
when sending emails to users.\n"""
existing_log_domain = config_lib.CONFIG.Get("Logging.domain", default=None)
existing_al_email = config_lib.CONFIG.Get("Monitoring.alert_email",
default=None)
existing_em_email = config_lib.CONFIG.Get("Monitoring.emergency_access_email",
default=None)
if not existing_log_domain or not existing_al_email or not existing_em_email:
ConfigureEmails(config)
else:
print """Found existing settings:
Email Domain: %s
Alert Email Address: %s
Emergency Access Email Address: %s
""" % (existing_log_domain, existing_al_email, existing_em_email)
if raw_input("Do you want to keep this configuration?"
" [Yn]: ").upper() == "N":
ConfigureEmails(config)
config.Write()
print ("Configuration parameters set. You can edit these in %s" %
config_lib.CONFIG.Get("Config.writeback"))
def AddUsers(token=None):
# Now initialize with our modified config.
startup.Init()
print "\nStep 3: Adding Admin User"
try:
AddUser("admin", labels=["admin"], token=token,
password=flags.FLAGS.admin_password)
except UserError:
if flags.FLAGS.noprompt:
UpdateUser("admin", password=flags.<PASSWORD>,
add_labels=["admin"], token=token)
else:
if ((raw_input("User 'admin' already exists, do you want to "
"reset the password? [yN]: ").upper() or "N") == "Y"):
UpdateUser("admin", password=True, add_labels=["admin"], token=token)
def ManageBinaries(config=None, token=None):
"""Load memory drivers and repack templates into installers."""
print "\nStep 4: Uploading Memory Drivers to the Database"
LoadMemoryDrivers(flags.FLAGS.share_dir, token=token)
print "\nStep 5: Repackaging clients with new configuration."
# We need to update the config to point to the installed templates now.
config.Set("ClientBuilder.executables_path", os.path.join(
flags.FLAGS.share_dir, "executables"))
# Build debug binaries, then build release binaries.
maintenance_utils.RepackAllBinaries(upload=True, debug_build=True,
token=token)
maintenance_utils.RepackAllBinaries(upload=True, token=token)
print "\nInitialization complete, writing configuration."
config.Write()
print "Please restart the service for it to take effect.\n\n"
def Initialize(config=None, token=None):
"""Initialize or update a GRR configuration."""
print "Checking write access on config %s" % config.parser
if not os.access(config.parser.filename, os.W_OK):
raise IOError("Config not writeable (need sudo?)")
print "\nStep 0: Importing Configuration from previous installation."
options_imported = 0
prev_config_file = config.Get("ConfigUpdater.old_config", default=None)
if prev_config_file and os.access(prev_config_file, os.R_OK):
print "Found config file %s." % prev_config_file
if raw_input("Do you want to import this configuration?"
" [yN]: ").upper() == "Y":
options_imported = ImportConfig(prev_config_file, config)
else:
print "No old config file found."
print "\nStep 1: Key Generation"
if config.Get("PrivateKeys.server_key", default=None):
if options_imported > 0:
print ("Since you have imported keys from another installation in the "
"last step,\nyou probably do not want to generate new keys now.")
if (raw_input("You already have keys in your config, do you want to"
" overwrite them? [yN]: ").upper() or "N") == "Y":
flags.FLAGS.overwrite = True
GenerateKeys(config)
else:
GenerateKeys(config)
print "\nStep 2: Setting Basic Configuration Parameters"
ConfigureBaseOptions(config)
AddUsers(token=token)
ManageBinaries(config, token=token)
def InitializeNoPrompt(config=None, token=None):
"""Initialize GRR with no prompts, assumes SQLite db.
Args:
config: config object
token: auth token
Raises:
ValueError: if hostname and password not supplied.
IOError: if config is not writeable
This method does the minimum work necessary to configure GRR without any user
prompting, relying heavily on config default values. The user must supply the
external hostname and admin password; everything else is set automatically.
"""
if not (flags.FLAGS.external_hostname and flags.FLAGS.admin_password):
raise ValueError(
"If interactive prompting is disabled, external_hostname and "
"admin_password must be set.")
print "Checking write access on config %s" % config.parser
if not os.access(config.parser.filename, os.W_OK):
raise IOError("Config not writeable (need sudo?)")
config_dict = {}
GenerateKeys(config)
config_dict["Datastore.implementation"] = "SqliteDataStore"
hostname = flags.FLAGS.external_hostname
config_dict["Client.control_urls"] = ["http://%s:%s/control" % (
hostname, config.Get("Frontend.bind_port"))]
config_dict["AdminUI.url"] = "http://%s:%s" % (
hostname, config.Get("AdminUI.port"))
config_dict["Logging.domain"] = hostname
config_dict["Monitoring.alert_email"] = "grr-monitoring@%s" % hostname
config_dict["Monitoring.emergency_access_email"] = (
"grr-emergency@%s" % hostname)
print "Setting configuration as:\n\n%s" % config_dict
for key, value in config_dict.iteritems():
config.Set(key, value)
config.Write()
print ("Configuration parameters set. You can edit these in %s" %
config_lib.CONFIG.Get("Config.writeback"))
AddUsers(token=token)
ManageBinaries(config, token=token)
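# A hedged sketch of how a no-prompt run might be invoked. The flag names come
# from the checks above (noprompt, external_hostname, admin_password, and the
# "initialize" subparser); the tool name and values are illustrative only:
#   grr_config_updater initialize --noprompt \
#       --external_hostname=grr.example.com \
#       --admin_password=changeme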
def UploadRaw(file_path, aff4_path, token=None):
"""Upload a file to the datastore."""
full_path = rdfvalue.RDFURN(aff4_path).Add(os.path.basename(file_path))
fd = aff4.FACTORY.Create(full_path, "AFF4Image", mode="w", token=token)
fd.Write(open(file_path).read(1024 * 1024 * 30))
fd.Close()
return str(fd.urn)
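# Illustrative call (paths and token are hypothetical); the file's basename is
# appended to the target AFF4 path by the code above:
#   UploadRaw("/tmp/notes.txt", "aff4:/config/notes", token=GetToken())
#   # -> "aff4:/config/notes/notes.txt"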
def GetToken():
# Extend for user authorization
return access_control.ACLToken(username="GRRConsole").SetUID()
def main(unused_argv):
"""Main."""
token = GetToken()
config_lib.CONFIG.AddContext("Commandline Context")
config_lib.CONFIG.AddContext("ConfigUpdater Context")
if flags.FLAGS.subparser_name == "initialize":
startup.ConfigInit()
if flags.FLAGS.noprompt:
InitializeNoPrompt(config_lib.CONFIG, token=token)
else:
Initialize(config_lib.CONFIG, token=token)
return
else:
startup.Init()
try:
print "Using configuration %s" % config_lib.CONFIG.parser
except AttributeError:
raise RuntimeError("No valid config specified.")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import frappe
import json
from frappe import _
def safe_str(obj):
""" return the byte string representation of obj """
try:
return str(obj)
except UnicodeEncodeError:
# obj is unicode; keep it as text so that later u"" concatenations work
return unicode(obj)
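# For example, safe_str(5) returns "5", while safe_str(u"Колекция") keeps the
# unicode text intact instead of raising UnicodeEncodeError.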
@frappe.whitelist()
def get_bomed_items(sales_order_name):
'''Return items with a BOM that do not already have a linked Sales Order'''
items = []
sales_order_items = frappe.db.sql("""SELECT * FROM `tabSales Order Item` WHERE `parent`=%s""", (sales_order_name), as_dict=True)
for item in sales_order_items:
bom = get_default_bom_item_object(safe_str(item["item_code"]))
if bom:
items.append(dict(
item_code=safe_str(item["item_code"]),
sales_order=sales_order_name,
warehouse=safe_str(item["warehouse"])
))
return items
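# Shape of the value returned to the client (all values below are hypothetical):
#   [{"item_code": "SOFA-001", "sales_order": "SO-00042", "warehouse": "Stores - DD"}]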
def get_default_bom_item_object(item_code):
bom = frappe.get_all('BOM', dict(item=item_code, is_active=True),
order_by='is_default desc')
bom = bom[0].name if bom else None
return bom
@frappe.whitelist()
def make_boms(items, sales_order, company):
'''Make BOMs against the given Sales Order for the given `items`'''
items = json.loads(items).get('items')
out = []
for i in items:
if not i.get("qty"):
frappe.throw(_("Please select Qty against item {0}").format(i.get("item_code")))
bom = frappe.new_doc("BOM")
bom.item = i['item_code']
bom.quantity = i['qty']
bom.company = company
bom.sales_order = sales_order
bom.fg_warehouse = 'Stores - DD'
bom.append("items", {
'item_code': '-15',
'qty': 1,
'conversion_factor': 1.0,
'rate': 0.01,
'amount': 1
})
bom.ignore_permissions = True
bom.insert()
bom.save()
bom.submit()
out.append(bom)
return [p.name for p in out]
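# make_boms expects `items` as a JSON string of the form sent by the client-side
# dialog; a hypothetical call (names and values are illustrative):
#   payload = json.dumps({"items": [{"item_code": "SOFA-001", "qty": 2}]})
#   make_boms(payload, sales_order="SO-00042", company="Example Ltd")
#   # -> list of the names of the newly submitted BOMs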
@frappe.whitelist()
def get_sales_order_item(item_code, sales_order):
soi = frappe.db.sql("""SELECT * FROM `tabSales Order Item` WHERE `item_code`=%s AND `parent`=%s""", (item_code, sales_order), as_dict=True)
return soi[0]
@frappe.whitelist()
def get_attachments_of_quotation(quotation_name, sales_order_name):
existing_attachments = frappe.db.sql("""SELECT * FROM `tabFile` WHERE `attached_to_doctype`='Sales Order' AND `attached_to_name`=%s""", (sales_order_name), as_dict=True)
if len(existing_attachments) > 0:
return False
qattachments = frappe.db.sql("""SELECT * FROM `tabFile` WHERE `attached_to_doctype`='Quotation' AND `attached_to_name`=%s""", (quotation_name), as_dict=True)
sattachments = []
for qattachment in qattachments:
sattachment = frappe.new_doc("File")
sattachment.update(qattachment)
sattachment.name = None
sattachment.attached_to_name = sales_order_name
sattachment.attached_to_doctype = "Sales Order"
sattachment.save(ignore_permissions=True)
sattachments.append(sattachment)
frappe.db.commit()
return sattachments
@frappe.whitelist()
def copy_quotation_attachments(quotation_name, sales_order_name):
existing_attachments = frappe.db.sql("""SELECT * FROM `tabSales Order Attachment` WHERE `parenttype`='Sales Order' AND `parent`=%s""", (sales_order_name), as_dict=True)
if len(existing_attachments) > 0:
return False
qattachments = frappe.db.sql("""SELECT * FROM `tabQuotation Attachment` WHERE `parenttype`='Quotation' AND `parent`=%s""", (quotation_name), as_dict=True)
sattachments = []
id = 1
for qattachment in qattachments:
sattachment = frappe.new_doc("Sales Order Attachment")
sattachment.modified = qattachment.modified
sattachment.modified_by = qattachment.modified_by
sattachment.attachment_name = qattachment.attachment_name
sattachment.owner = qattachment.owner
sattachment.attachment = qattachment.attachment
sattachment.width_percentige = qattachment.width_percentige
sattachment.idx = id
id = id + 1
sattachment.name = None
sattachment.parent = sales_order_name
sattachment.parenttype = "Sales Order"
sattachment.parentfield = "sales_order_attachment"
sattachment.docstatus = 0
sattachment.save(ignore_permissions=True)
sattachments.append(sattachment)
frappe.db.commit()
return sattachments
def divan_pillow_collection(item, number):
html = "<tr>"
html += '<td style="width: 95px;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;">'
html += u"Колекция " + safe_str(number) + ": "
html += "</td>"
html += '<td style="border-bottom: 1px dotted black;min-width: 200px;text-align: center;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"><b>'
if item["divan_pcollection_" + safe_str(number) + "_supplier"]:
html += " " + safe_str(item["divan_pcollection_" + safe_str(number) + "_supplier"])
if item["divan_pcollection_" + safe_str(number) + "_name"]:
html += " " + safe_str(item["divan_pcollection_" + safe_str(number) + "_name"])
if item["divan_pcollection_" + safe_str(number) + "_design"]:
html += " " + safe_str(item["divan_pcollection_" + safe_str(number) + "_design"])
if item["divan_pcollection_" + safe_str(number) + "_damaska_color"]:
html += " " + safe_str(item["divan_pcollection_" + safe_str(number) + "_damaska_color"])
html += '</b></td>'
html += '<td style="padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"></td></td>'
if item["divan_pcollection_" + safe_str(number) + "_number"]:
html += u'<td style="padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"><b>Брой:</b>'
html += safe_str(item["divan_pcollection_" + safe_str(number) + "_number"])
html += "</td>"
else:
html += '<td style="padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"></td>'
html += "</tr><tr>"
html += u'<td style="width: 90px;text-align: right;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;">поръчан на: </td>'
if item["divan_pcollection_" + safe_str(number) + "_ordered_on"]:
html += '<td style="text-align: left;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"><div style="border-bottom: 1px dotted black; width:70px;height: 18px;"><b>'
html += item["divan_pcollection_" + safe_str(number) + "_ordered_on"].strftime("%d") + "." + item["divan_pcollection_" + safe_str(number) + "_ordered_on"].strftime("%m") + "." + item["divan_pcollection_" + safe_str(number) + "_ordered_on"].strftime("%Y")
html += '</b></div></td>'
else:
html += '<td style="text-align: left;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"><div style="border-bottom: 1px dotted black; width:70px;height: 18px;"></div></td>'
html += u'<td colspan="2" style="text-align: right;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"><b>при нас на</b>: </td>'
if item["divan_pcollection_" + safe_str(number) + "_arraiving_on"]:
html += '<td style="text-align: left;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"><div style="border-bottom: 1px dotted black; width:70px;height: 18px;"><b>'
html += item["divan_pcollection_" + safe_str(number) + "_arraiving_on"].strftime("%d") + "." + item["divan_pcollection_" + safe_str(number) + "_arraiving_on"].strftime("%m") + "." + item["divan_pcollection_" + safe_str(number) + "_arraiving_on"].strftime("%Y")
html += '</b></div></td>'
else:
html += '<td style="padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"><div style="border-bottom: 1px dotted black; width:70px;height: 18px;"></div></td>'
return html + "</tr>"
def collection(item, number):
html = "<tr>"
html += '<td style="width: 95px;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;">'
html += u"Колекция " + safe_str(number) + ": "
html += "</td>"
html += '<td style="border-bottom: 1px dotted black;min-width: 200px;text-align: center;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"><b>'
if item["supplier_" + safe_str(number)]:
html += " " + safe_str(item["supplier_" + safe_str(number)])
if item["name_" + safe_str(number)]:
html += " " + safe_str(item["name_" + safe_str(number)])
if item["design_" + safe_str(number)]:
html += " " + safe_str(item["design_" + safe_str(number)])
if item["color_" + safe_str(number)]:
html += " " + safe_str(item["color_" + safe_str(number)])
html += '</b></td>'
html += '<td style="padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"></td></td>'
if item["purpose_" + safe_str(number)]:
html += u'<td style="padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;">за:<b>'
html += safe_str(item["purpose_" + safe_str(number)])
html += "</b></td>"
else:
html += '<td style="padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"></td>'
html += "</tr><tr>"
html += u'<td style="width: 90px;text-align: right;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;">поръчан на<: </td>'
if item["ordered_on_" + safe_str(number)]:
html += '<td style="text-align: left;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"><div style="border-bottom: 1px dotted black; width:70px;height: 18px;"><b>'
html += item["ordered_on_" + safe_str(number)].strftime("%d") + "." + item["ordered_on_" + safe_str(number)].strftime("%m") + "." + item["ordered_on_" + safe_str(number)].strftime("%Y")
html += '</b></div></td>'
else:
html += '<td style="text-align: left;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"><div style="border-bottom: 1px dotted black; width:70px;height: 18px;"></div></td>'
html += u'<td colspan="2" style="text-align: right;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;">при нас на: </td>'
if item["arraiving_on_" + safe_str(number)]:
html += '<td style="text-align: left;padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"><div style="border-bottom: 1px dotted black; width:70px;height: 18px;"><b>'
html += item["arraiving_on_" + safe_str(number)].strftime("%d") + "." + item["arraiving_on_" + safe_str(number)].strftime("%m") + "." + item["arraiving_on_" + safe_str(number)].strftime("%Y")
html += '</b></div></td>'
else:
html += '<td style="padding-bottom: 0px !important;padding-top: 0px !important;margin-top: 0px !important;margin-bottom: 0px !important;"><div style="border-bottom: 1px dotted black; width:70px;height: 18px;"></div></td>'
return html+"</tr>"
@frappe.whitelist()
def make_report(names):
import json
if names[0] != "[":
names = [names]
else:
names = json.loads(names)
# setup html tags
html = "<html><head><title>Print Report</title></head><body style='margin: 0; padding-left: 100px; padding-right: 100px;'>"
# get all docs
docs = frappe.db.get_values("Sales Order", {"name":("in", names)}, "*", as_dict=True, order_by="delivery_date")
# header
html += u"<h1 align=center> Поръчки "
if len(docs) > 0:
if docs[0].delivery_date:
if len(docs) > 1 and docs[len(docs) - 1].delivery_date:
html += u"от "
else:
html += u"за ";
html += safe_str(docs[0].delivery_date.strftime("%d") + "-" + docs[0].delivery_date.strftime("%m") + "-" + docs[0].delivery_date.strftime("%Y")) + u"г. "
if len(docs) > 1 and docs[len(docs) - 1].delivery_date:
html += u"до " + safe_str(docs[len(docs) - 1].delivery_date.strftime("%d") + "-" + docs[len(docs) - 1].delivery_date.strftime("%m") + "-" + docs[len(docs) - 1].delivery_date.strftime("%Y")) + u"г. "
html += "</h1><br/><br/>"
# doc info and attachments
for doc in docs:
attachments = frappe.db.sql("""SELECT * FROM `tabFile` WHERE `attached_to_doctype`='Sales Order' AND `attached_to_name`=%s;""", (doc.name), as_dict=True)
items = frappe.db.sql("""SELECT * FROM `tabSales Order Item` WHERE `parent`=%s;""", (doc.name), as_dict=True)
# doc name
html += u"<font style='font-weight: bold;background-color: yellow;'> ⏺ </font>"
html += "<font style='font-weight: bold;background-color: yellow;'>" + doc.title + "</font>"
# doc date
info = False
if doc.delivery_date:
if not info:
html += " - "
info = True
html += u"Срок "
date = safe_str(doc.delivery_date.strftime("%d") + "-" + doc.delivery_date.strftime("%m") + "-" + doc.delivery_date.strftime("%Y"))
import datetime
d = datetime.datetime.now()
now = '-'.join(safe_str(x) for x in (d.day, d.month, d.year))
html += "<font color='"
if datetime.datetime.strptime(now, "%d-%m-%Y") >= datetime.datetime.strptime(date, "%d-%m-%Y"):
html += "red"
else:
html += "blue"
html += "'>"+date + u"г. </font>"
# doc item info and type image
html += "<br/>"
for item in items:
# doc item name
html += "<div style='padding-left: 30px; padding-right: | |
# -*- encoding: utf-8
from sqlalchemy.testing import eq_, is_
from sqlalchemy import schema
from sqlalchemy.sql import table, column, quoted_name
from sqlalchemy.dialects import mssql
from sqlalchemy.dialects.mssql import mxodbc
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import sql
from sqlalchemy import Integer, String, Table, Column, select, MetaData,\
update, delete, insert, extract, union, func, PrimaryKeyConstraint, \
UniqueConstraint, Index, Sequence, literal
from sqlalchemy import testing
from sqlalchemy.dialects.mssql import base
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = mssql.dialect()
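# Every test below follows the same pattern: assert_compile() renders the given
# construct with self.__dialect__ (the MS SQL dialect here) and compares the
# generated SQL string against the expected literal.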
def test_true_false(self):
self.assert_compile(
sql.false(), "0"
)
self.assert_compile(
sql.true(),
"1"
)
def test_select(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.select(),
'SELECT sometable.somecolumn FROM sometable')
def test_select_with_nolock(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.select().with_hint(t, 'WITH (NOLOCK)'),
'SELECT sometable.somecolumn FROM sometable WITH (NOLOCK)')
def test_select_with_nolock_schema(self):
m = MetaData()
t = Table('sometable', m, Column('somecolumn', Integer),
schema='test_schema')
self.assert_compile(
t.select().with_hint(t, 'WITH (NOLOCK)'),
'SELECT test_schema.sometable.somecolumn '
'FROM test_schema.sometable WITH (NOLOCK)')
def test_select_w_order_by_collate(self):
m = MetaData()
t = Table('sometable', m, Column('somecolumn', String))
self.assert_compile(
select([t]).
order_by(
t.c.somecolumn.collate("Latin1_General_CS_AS_KS_WS_CI").asc()),
"SELECT sometable.somecolumn FROM sometable "
"ORDER BY sometable.somecolumn COLLATE "
"Latin1_General_CS_AS_KS_WS_CI ASC"
)
def test_join_with_hint(self):
t1 = table('t1',
column('a', Integer),
column('b', String),
column('c', String),
)
t2 = table('t2',
column("a", Integer),
column("b", Integer),
column("c", Integer),
)
join = t1.join(t2, t1.c.a == t2.c.a).\
select().with_hint(t1, 'WITH (NOLOCK)')
self.assert_compile(
join,
'SELECT t1.a, t1.b, t1.c, t2.a, t2.b, t2.c '
'FROM t1 WITH (NOLOCK) JOIN t2 ON t1.a = t2.a'
)
def test_insert(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.insert(),
'INSERT INTO sometable (somecolumn) VALUES '
'(:somecolumn)')
def test_update(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.update(t.c.somecolumn == 7),
'UPDATE sometable SET somecolumn=:somecolum'
'n WHERE sometable.somecolumn = '
':somecolumn_1', dict(somecolumn=10))
def test_insert_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.insert().
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"INSERT INTO sometable WITH (PAGLOCK) "
"(somecolumn) VALUES (:somecolumn)"
)
def test_update_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.update().where(t.c.somecolumn == "q").
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"UPDATE sometable WITH (PAGLOCK) "
"SET somecolumn=:somecolumn "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_update_exclude_hint(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.update().where(t.c.somecolumn == "q").
values(somecolumn="x").
with_hint("XYZ", "mysql"),
"UPDATE sometable SET somecolumn=:somecolumn "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_delete_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.delete().where(t.c.somecolumn == "q").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"DELETE FROM sometable WITH (PAGLOCK) "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_delete_exclude_hint(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.delete().
where(t.c.somecolumn == "q").
with_hint("XYZ", dialect_name="mysql"),
"DELETE FROM sometable WHERE "
"sometable.somecolumn = :somecolumn_1"
)
def test_delete_extra_froms(self):
t1 = table('t1', column('c1'))
t2 = table('t2', column('c1'))
q = sql.delete(t1).where(t1.c.c1 == t2.c.c1)
self.assert_compile(
q, "DELETE FROM t1 FROM t1, t2 WHERE t1.c1 = t2.c1"
)
def test_delete_extra_froms_alias(self):
a1 = table('t1', column('c1')).alias('a1')
t2 = table('t2', column('c1'))
q = sql.delete(a1).where(a1.c.c1 == t2.c.c1)
self.assert_compile(
q, "DELETE FROM a1 FROM t1 AS a1, t2 WHERE a1.c1 = t2.c1"
)
self.assert_compile(sql.delete(a1), "DELETE FROM t1 AS a1")
def test_update_from_hint(self):
t = table('sometable', column('somecolumn'))
t2 = table('othertable', column('somecolumn'))
for darg in ("*", "mssql"):
self.assert_compile(
t.update().where(t.c.somecolumn == t2.c.somecolumn).
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=t2,
dialect_name=darg),
"UPDATE sometable SET somecolumn=:somecolumn "
"FROM sometable, othertable WITH (PAGLOCK) "
"WHERE sometable.somecolumn = othertable.somecolumn"
)
def test_update_to_select_schema(self):
meta = MetaData()
table = Table(
"sometable", meta,
Column("sym", String),
Column("val", Integer),
schema="schema"
)
other = Table(
"#other", meta,
Column("sym", String),
Column("newval", Integer)
)
stmt = table.update().values(
val=select([other.c.newval]).
where(table.c.sym == other.c.sym).as_scalar())
self.assert_compile(
stmt,
"UPDATE [schema].sometable SET val="
"(SELECT [#other].newval FROM [#other] "
"WHERE [schema].sometable.sym = [#other].sym)",
)
stmt = table.update().values(val=other.c.newval).\
where(table.c.sym == other.c.sym)
self.assert_compile(
stmt,
"UPDATE [schema].sometable SET val="
"[#other].newval FROM [schema].sometable, "
"[#other] WHERE [schema].sometable.sym = [#other].sym",
)
# TODO: not supported yet.
# def test_delete_from_hint(self):
# t = table('sometable', column('somecolumn'))
# t2 = table('othertable', column('somecolumn'))
# for darg in ("*", "mssql"):
# self.assert_compile(
# t.delete().where(t.c.somecolumn==t2.c.somecolumn).
# with_hint("WITH (PAGLOCK)",
# selectable=t2,
# dialect_name=darg),
# ""
# )
def test_strict_binds(self):
"""test the 'strict' compiler binds."""
from sqlalchemy.dialects.mssql.base import MSSQLStrictCompiler
mxodbc_dialect = mxodbc.dialect()
mxodbc_dialect.statement_compiler = MSSQLStrictCompiler
t = table('sometable', column('foo'))
for expr, compile in [
(
select([literal("x"), literal("y")]),
"SELECT 'x' AS anon_1, 'y' AS anon_2",
),
(
select([t]).where(t.c.foo.in_(['x', 'y', 'z'])),
"SELECT sometable.foo FROM sometable WHERE sometable.foo "
"IN ('x', 'y', 'z')",
),
(
t.c.foo.in_([None]),
"sometable.foo IN (NULL)"
)
]:
self.assert_compile(expr, compile, dialect=mxodbc_dialect)
def test_in_with_subqueries(self):
"""Test removal of legacy behavior that converted "x==subquery"
to use IN.
"""
t = table('sometable', column('somecolumn'))
self.assert_compile(t.select().where(t.c.somecolumn
== t.select()),
'SELECT sometable.somecolumn FROM '
'sometable WHERE sometable.somecolumn = '
'(SELECT sometable.somecolumn FROM '
'sometable)')
self.assert_compile(t.select().where(t.c.somecolumn
!= t.select()),
'SELECT sometable.somecolumn FROM '
'sometable WHERE sometable.somecolumn != '
'(SELECT sometable.somecolumn FROM '
'sometable)')
@testing.uses_deprecated
def test_count(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.count(),
'SELECT count(sometable.somecolumn) AS '
'tbl_row_count FROM sometable')
def test_noorderby_insubquery(self):
"""test that the ms-sql dialect removes ORDER BY clauses from
subqueries"""
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
q = select([table1.c.myid],
order_by=[table1.c.myid]).alias('foo')
crit = q.c.myid == table1.c.myid
self.assert_compile(select(['*'], crit),
"SELECT * FROM (SELECT mytable.myid AS "
"myid FROM mytable) AS foo, mytable WHERE "
"foo.myid = mytable.myid")
def test_force_schema_quoted_name_w_dot_case_insensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema=quoted_name("foo.dbo", True)
)
self.assert_compile(
select([tbl]),
"SELECT [foo.dbo].test.id FROM [foo.dbo].test"
)
def test_force_schema_quoted_w_dot_case_insensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema=quoted_name("foo.dbo", True)
)
self.assert_compile(
select([tbl]),
"SELECT [foo.dbo].test.id FROM [foo.dbo].test"
)
def test_force_schema_quoted_name_w_dot_case_sensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema=quoted_name("Foo.dbo", True)
)
self.assert_compile(
select([tbl]),
"SELECT [Foo.dbo].test.id FROM [Foo.dbo].test"
)
def test_force_schema_quoted_w_dot_case_sensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema="[Foo.dbo]"
)
self.assert_compile(
select([tbl]),
"SELECT [Foo.dbo].test.id FROM [Foo.dbo].test"
)
def test_schema_autosplit_w_dot_case_insensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema="foo.dbo"
)
self.assert_compile(
select([tbl]),
"SELECT foo.dbo.test.id FROM foo.dbo.test"
)
def test_schema_autosplit_w_dot_case_sensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema="Foo.dbo"
)
self.assert_compile(
select([tbl]),
"SELECT [Foo].dbo.test.id FROM [Foo].dbo.test"
)
def test_owner_database_pairs(self):
dialect = mssql.dialect()
for identifier, expected_schema, expected_owner in [
("foo", None, "foo"),
("foo.bar", "foo", "bar"),
("Foo.Bar", "Foo", "Bar"),
("[Foo.Bar]", None, "Foo.Bar"),
("[Foo.Bar].[bat]", "Foo.Bar", "bat"),
]:
schema, owner = base._owner_plus_db(dialect, identifier)
eq_(owner, expected_owner)
eq_(schema, expected_schema)
def test_delete_schema(self):
metadata = MetaData()
tbl = Table('test', metadata, Column('id', Integer,
primary_key=True), schema='paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM paj.test WHERE paj.test.id = '
':id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM paj.test WHERE paj.test.id IN '
'(SELECT paj.test.id FROM paj.test '
'WHERE paj.test.id = :id_1)')
def test_delete_schema_multipart(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer,
primary_key=True),
schema='banana.paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM banana.paj.test WHERE '
'banana.paj.test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM banana.paj.test WHERE '
'banana.paj.test.id IN (SELECT banana.paj.test.id '
'FROM banana.paj.test WHERE '
'banana.paj.test.id = :id_1)')
def test_delete_schema_multipart_needs_quoting(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema='banana split.paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM [banana split].paj.test WHERE '
'[banana split].paj.test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM [banana split].paj.test WHERE '
'[banana split].paj.test.id IN ('
'SELECT [banana split].paj.test.id FROM '
'[banana split].paj.test WHERE '
'[banana split].paj.test.id = :id_1)')
def test_delete_schema_multipart_both_need_quoting(self):
metadata = MetaData()
tbl = Table('test', metadata, Column('id', Integer,
primary_key=True),
schema='banana split.paj with a space')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM [banana split].[paj with a '
'space].test WHERE [banana split].[paj '
'with a space].test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(
tbl.delete().where(tbl.c.id.in_(s)),
"DELETE FROM [banana split].[paj with a space].test "
"WHERE [banana split].[paj with a space].test.id IN "
"(SELECT [banana split].[paj with a space].test.id "
"FROM [banana split].[paj with a space].test "
"WHERE [banana split].[paj with a space].test.id = :id_1)"
)
def test_union(self):
t1 = table(
't1', column('col1'), column('col2'),
column('col3'), column('col4'))
t2 = table(
't2', column('col1'), column('col2'),
column('col3'), column('col4'))
s1, s2 = select(
[t1.c.col3.label('col3'), t1.c.col4.label('col4')],
t1.c.col2.in_(['t1col2r1', 't1col2r2'])), \
select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
t2.c.col2.in_(['t2col2r2', 't2col2r3']))
u = union(s1, s2, order_by=['col3', 'col4'])
self.assert_compile(u,
'SELECT t1.col3 AS col3, t1.col4 AS col4 '
'FROM t1 WHERE t1.col2 IN (:col2_1, '
':col2_2) UNION SELECT t2.col3 AS col3, '
't2.col4 AS col4 FROM t2 WHERE t2.col2 IN '
'(:col2_3, :col2_4) ORDER BY col3, col4')
self.assert_compile(u.alias('bar').select(),
'SELECT bar.col3, bar.col4 FROM (SELECT '
't1.col3 AS col3, t1.col4 AS col4 FROM t1 '
'WHERE t1.col2 IN | |
3150, 2647, 3150, 3151, 2757, 3150, 3151,
3152, 2856, 2941, 2942, 2943, 2944, 220, 2527,
3150, 2648, 3150, 3151, 2758, 3150, 3151, 3152,
2857, 3150, 3151, 3152, 3153, 2945, 3018, 3019,
3020, 3021, 3022, 232, 239, 250, 240, 250,
250, 241, 250, 250, 250, 242, 250, 250,
250, 250, 243, 250, 250, 250, 250, 250,
244, 83, 84, 85, 86, 87, 88, 17,
178, 2473, 190, 29, 178, 2589, 2710, 190,
2484, 2820, 2605, 202, 202, 41, 178, 2589,
2710, 190, 2590, 5966, 2711, 2820, 2821, 202,
2495, 2919, 2616, 2919, 2920, 2726, 214, 214,
214, 53, 178, 2589, 2710, 190, 2590, 5966,
2711, 2820, 2821, 202, 2591, 5967, 2712, 5976,
6021, 2822, 2919, 2920, 2921, 214, 2506, 3007,
2627, 3007, 3008, 2737, 3007, 3008, 3009, 2836,
226, 226, 226, 226, 65, 178, 2589, 2710,
190, 2590, 5966, 2711, 2820, 2821, 202, 2591,
5967, 2712, 5976, 6021, 2822, 2919, 2920, 2921,
214, 2592, 5968, 2713, 5977, 6022, 2823, 5985,
6030, 6066, 2922, 3007, 3008, 3009, 3010, 226,
2517, 3084, 2638, 3084, 3085, 2748, 3084, 3085,
3086, 2847, 3084, 3085, 3086, 3087, 2935, 238,
238, 238, 238, 238, 77, 178, 2589, 2710,
190, 2590, 5966, 2711, 2820, 2821, 202, 2591,
5967, 2712, 5976, 6021, 2822, 2919, 2920, 2921,
214, 2592, 5968, 2713, 5977, 6022, 2823, 5985,
6030, 6066, 2922, 3007, 3008, 3009, 3010, 226,
2593, 5969, 2714, 5978, 6023, 2824, 5986, 6031,
6067, 2923, 5993, 6038, 6074, 1603, 3011, 3084,
3085, 3086, 3087, 3088, 238, 2528, 3150, 2649,
3150, 3151, 2759, 3150, 3151, 3152, 2858, 3150,
3151, 3152, 3153, 2946, 3150, 3151, 3152, 3153,
3154, 3023, 250, 250, 250, 250, 250, 250,
89, 173, 2534, 2655, 185, 2535, 3205, 2656,
2765, 2766, 197, 2536, 3205, 2657, 3205, 3206,
2767, 2864, 2865, 2866, 209, 2537, 3205, 2658,
3205, 3206, 2768, 3205, 3206, 3207, 2867, 2952,
2953, 2954, 2955, 221, 2538, 3205, 2659, 3205,
3206, 2769, 3205, 3206, 3207, 2868, 3205, 3206,
3207, 3208, 2956, 3029, 3030, 3031, 3032, 3033,
233, 2539, 3205, 2660, 3205, 3206, 2770, 3205,
3206, 3207, 2869, 3205, 3206, 3207, 3208, 2957,
3205, 3206, 3207, 3208, 3209, 3034, 3095, 3096,
3097, 3098, 3099, 3100, 245, 251, 262, 252,
262, 262, 253, 262, 262, 262, 254, 262,
262, 262, 262, 255, 262, 262, 262, 262,
262, 256, 262, 262, 262, 262, 262, 262,
257, 95, 96, 97, 98, 99, 100, 101,
18, 178, 2474, 190, 30, 178, 2589, 2710,
190, 2485, 2820, 2606, 202, 202, 42, 178,
2589, 2710, 190, 2590, 5966, 2711, 2820, 2821,
202, 2496, 2919, 2617, 2919, 2920, 2727, 214,
214, 214, 54, 178, 2589, 2710, 190, 2590,
5966, 2711, 2820, 2821, 202, 2591, 5967, 2712,
5976, 6021, 2822, 2919, 2920, 2921, 214, 2507,
3007, 2628, 3007, 3008, 2738, 3007, 3008, 3009,
2837, 226, 226, 226, 226, 66, 178, 2589,
2710, 190, 2590, 5966, 2711, 2820, 2821, 202,
2591, 5967, 2712, 5976, 6021, 2822, 2919, 2920,
2921, 214, 2592, 5968, 2713, 5977, 6022, 2823,
5985, 6030, 6066, 2922, 3007, 3008, 3009, 3010,
226, 2518, 3084, 2639, 3084, 3085, 2749, 3084,
3085, 3086, 2848, 3084, 3085, 3086, 3087, 2936,
238, 238, 238, 238, 238, 78, 178, 2589,
2710, 190, 2590, 5966, 2711, 2820, 2821, 202,
2591, 5967, 2712, 5976, 6021, 2822, 2919, 2920,
2921, 214, 2592, 5968, 2713, 5977, 6022, 2823,
5985, 6030, 6066, 2922, 3007, 3008, 3009, 3010,
226, 2593, 5969, 2714, 5978, 6023, 2824, 5986,
6031, 6067, 2923, 5993, 6038, 6074, 6102, 3011,
3084, 3085, 3086, 3087, 3088, 238, 2529, 3150,
2650, 3150, 3151, 2760, 3150, 3151, 3152, 2859,
3150, 3151, 3152, 3153, 2947, 3150, 3151, 3152,
3153, 3154, 3024, 250, 250, 250, 250, 250,
250, 90, 178, 2589, 2710, 190, 2590, 5966,
2711, 2820, 2821, 202, 2591, 5967, 2712, 5976,
6021, 2822, 2919, 2920, 2921, 214, 2592, 5968,
2713, 5977, 6022, 2823, 5985, 6030, 6066, 2922,
3007, 3008, 3009, 3010, 226, 2593, 5969, 2714,
5978, 6023, 2824, 5986, 6031, 6067, 2923, 5993,
6038, 6074, 6102, 3011, 3084, 3085, 3086, 3087,
3088, 238, 2594, 5970, 2715, 5979, 6024, 2825,
5987, 6032, 6068, 2924, 5994, 6039, 6075, 6103,
3012, 6000, 6045, 6081, 6109, 1604, 3089, 3150,
3151, 3152, 3153, 3154, 3155, 250, 2540, 3205,
2661, 3205, 3206, 2771, 3205, 3206, 3207, 2870,
3205, 3206, 3207, 3208, 2958, 3205, 3206, 3207,
3208, 3209, 3035, 3205, 3206, 3207, 3208, 3209,
3210, 3101, 262, 262, 262, 262, 262, 262,
262, 102, 174, 2545, 2666, 186, 2546, 3249,
2667, 2776, 2777, 198, 2547, 3249, 2668, 3249,
3250, 2778, 2875, 2876, 2877, 210, 2548, 3249,
2669, 3249, 3250, 2779, 3249, 3250, 3251, 2878,
2963, 2964, 2965, 2966, 222, 2549, 3249, 2670,
3249, 3250, 2780, 3249, 3250, 3251, 2879, 3249,
3250, 3251, 3252, 2967, 3040, 3041, 3042, 3043,
3044, 234, 2550, 3249, 2671, 3249, 3250, 2781,
3249, 3250, 3251, 2880, 3249, 3250, 3251, 3252,
2968, 3249, 3250, 3251, 3252, 3253, 3045, 3106,
3107, 3108, 3109, 3110, 3111, 246, 2551, 3249,
2672, 3249, 3250, 2782, 3249, 3250, 3251, 2881,
3249, 3250, 3251, 3252, 2969, 3249, 3250, 3251,
3252, 3253, 3046, 3249, 3250, 3251, 3252, 3253,
3254, 3112, 3161, 3162, 3163, 3164, 3165, 3166,
3167, 258, 263, 274, 264, 274, 274, 265,
274, 274, 274, 266, 274, 274, 274, 274,
267, 274, 274, 274, 274, 274, 268, 274,
274, 274, 274, 274, 274, 269, 274, 274,
274, 274, 274, 274, 274, 270, 107, 108,
109, 110, 111, 112, 113, 114, 19, 178,
2475, 190, 31, 178, 2589, 2710, 190, 2486,
2820, 2607, 202, 202, 43, 178, 2589, 2710,
190, 2590, 5966, 2711, 2820, 2821, 202, 2497,
2919, 2618, 2919, 2920, 2728, 214, 214, 214,
55, 178, 2589, 2710, 190, 2590, 5966, 2711,
2820, 2821, 202, 2591, 5967, 2712, 5976, 6021,
2822, 2919, 2920, 2921, 214, 2508, 3007, 2629,
3007, 3008, 2739, 3007, 3008, 3009, 2838, 226,
226, 226, 226, 67, 178, 2589, 2710, 190,
2590, 5966, 2711, 2820, 2821, 202, 2591, 5967,
2712, 5976, 6021, 2822, 2919, 2920, 2921, 214,
2592, 5968, 2713, 5977, 6022, 2823, 5985, 6030,
6066, 2922, 3007, 3008, 3009, 3010, 226, 2519,
3084, 2640, 3084, 3085, 2750, 3084, 3085, 3086,
2849, 3084, 3085, 3086, 3087, 2937, 238, 238,
238, 238, 238, 79, 178, 2589, 2710, 190,
2590, 5966, 2711, 2820, 2821, 202, 2591, 5967,
2712, 5976, 6021, 2822, 2919, 2920, 2921, 214,
2592, 5968, 2713, 5977, 6022, 2823, 5985, 6030,
6066, 2922, 3007, 3008, 3009, 3010, 226, 2593,
5969, 2714, 5978, 6023, 2824, 5986, 6031, 6067,
2923, 5993, 6038, 6074, 6102, 3011, 3084, 3085,
3086, 3087, 3088, 238, 2530, 3150, 2651, 3150,
3151, 2761, 3150, 3151, 3152, 2860, 3150, 3151,
3152, 3153, 2948, 3150, 3151, 3152, 3153, 3154,
3025, 250, 250, 250, 250, 250, 250, 91,
178, 2589, 2710, 190, 2590, 5966, 2711, 2820,
2821, 202, 2591, 5967, 2712, 5976, 6021, 2822,
2919, 2920, 2921, 214, 2592, 5968, 2713, 5977,
6022, 2823, 5985, 6030, 6066, 2922, 3007, 3008,
3009, 3010, 226, 2593, 5969, 2714, 5978, 6023,
2824, 5986, 6031, 6067, 2923, 5993, 6038, 6074,
6102, 3011, 3084, 3085, 3086, 3087, 3088, 238,
2594, 5970, 2715, 5979, 6024, 2825, 5987, 6032,
6068, 2924, 5994, 6039, 6075, 6103, 3012, 6000,
6045, 6081, 6109, 6130, 3089, 3150, 3151, 3152,
3153, 3154, 3155, 250, 2541, 3205, 2662, 3205,
3206, 2772, 3205, 3206, 3207, 2871, 3205, 3206,
3207, 3208, 2959, 3205, 3206, 3207, 3208, 3209,
3036, 3205, 3206, 3207, 3208, 3209, 3210, 3102,
262, 262, 262, 262, 262, 262, 262, 103,
178, 2589, 2710, 190, 2590, 5966, 2711, 2820,
2821, 202, 2591, 5967, 2712, 5976, 6021, 2822,
2919, 2920, 2921, 214, 2592, 5968, 2713, 5977,
6022, 2823, 5985, 6030, 6066, 2922, 3007, 3008,
3009, 3010, 226, 2593, 5969, 2714, 5978, 6023,
2824, 5986, 6031, 6067, 2923, 5993, 6038, 6074,
6102, 3011, 3084, 3085, 3086, 3087, 3088, 238,
2594, 5970, 2715, 5979, 6024, 2825, 5987, 6032,
6068, | |
import unittest
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.metadata_etl.layouts import (
AddFieldsToPageLayout,
AddRecordPlatformActionListItem,
AddRelatedLists,
)
from cumulusci.tasks.salesforce.tests.util import create_task
from cumulusci.utils.xml import metadata_tree
MD = "{%s}" % metadata_tree.METADATA_NAMESPACE
LAYOUT_XML = """<?xml version="1.0" encoding="UTF-8"?>
<Layout xmlns="http://soap.sforce.com/2006/04/metadata">
<layoutSections>
<customLabel>false</customLabel>
<detailHeading>false</detailHeading>
<editHeading>true</editHeading>
<label>Information</label>
<layoutColumns>
<layoutItems>
<behavior>Readonly</behavior>
<field>Name</field>
</layoutItems>
</layoutColumns>
<layoutColumns/>
<style>TwoColumnsTopToBottom</style>
</layoutSections>
{relatedLists}
</Layout>
"""
RELATED_LIST = """ <relatedLists>
<fields>FULL_NAME</fields>
<fields>CONTACT.TITLE</fields>
<fields>CONTACT.EMAIL</fields>
<fields>CONTACT.PHONE1</fields>
<relatedList>RelatedContactList</relatedList>
</relatedLists>
"""
class TestAddRelatedLists:
def test_adds_related_list(self):
task = create_task(
AddRelatedLists,
{
"managed": True,
"api_version": "47.0",
"api_names": "bar,foo",
"related_list": "TEST",
"fields": "foo__c,bar__c",
},
)
tree = metadata_tree.fromstring(
LAYOUT_XML.format(relatedLists=RELATED_LIST).encode("utf-8")
)
element = tree._element
assert len(element.findall(f".//{MD}relatedLists[{MD}relatedList='TEST']")) == 0
task._transform_entity(tree, "Layout")
assert len(element.findall(f".//{MD}relatedLists[{MD}relatedList='TEST']")) == 1
field_elements = element.findall(
f".//{MD}relatedLists[{MD}relatedList='TEST']/{MD}fields"
)
field_names = {elem.text for elem in field_elements}
assert field_names == set(["foo__c", "bar__c"])
def test_excludes_buttons(self):
task = create_task(
AddRelatedLists,
{
"managed": True,
"api_version": "47.0",
"api_names": "bar,foo",
"related_list": "TEST",
"fields": "foo__c,bar__c",
"exclude_buttons": "New,Edit",
},
)
tree = metadata_tree.fromstring(
LAYOUT_XML.format(relatedLists=RELATED_LIST).encode("utf-8")
)
assert (
len(tree._element.findall(f".//{MD}relatedLists[{MD}relatedList='TEST']"))
== 0
)
result = task._transform_entity(tree, "Layout")
assert (
len(result._element.findall(f".//{MD}relatedLists[{MD}relatedList='TEST']"))
== 1
)
button_elements = result._element.findall(
f".//{MD}relatedLists[{MD}relatedList='TEST']/{MD}excludeButtons"
)
excluded_buttons = {elem.text for elem in button_elements}
assert excluded_buttons == set(["New", "Edit"])
def test_includes_buttons(self):
task = create_task(
AddRelatedLists,
{
"managed": True,
"api_version": "47.0",
"api_names": "bar,foo",
"related_list": "TEST",
"fields": "foo__c,bar__c",
"custom_buttons": "MyCustomNewAction,MyCustomEditAction",
},
)
tree = metadata_tree.fromstring(
LAYOUT_XML.format(relatedLists=RELATED_LIST).encode("utf-8")
)
assert (
len(tree._element.findall(f".//{MD}relatedLists[{MD}relatedList='TEST']"))
== 0
)
result = task._transform_entity(tree, "Layout")
element = result._element
assert len(element.findall(f".//{MD}relatedLists[{MD}relatedList='TEST']")) == 1
button_elements = element.findall(
f".//{MD}relatedLists[{MD}relatedList='TEST']/{MD}customButtons"
)
custom_buttons = {elem.text for elem in button_elements}
assert custom_buttons == set(["MyCustomNewAction", "MyCustomEditAction"])
def test_adds_related_list_no_existing(self):
task = create_task(
AddRelatedLists,
{
"managed": True,
"api_version": "47.0",
"api_names": "bar,foo",
"related_list": "TEST",
"fields": "foo__c,bar__c",
},
)
tree = metadata_tree.fromstring(
LAYOUT_XML.format(relatedLists="").encode("utf-8")
)
element = tree._element
assert len(element.findall(f".//{MD}relatedLists[{MD}relatedList='TEST']")) == 0
task._transform_entity(tree, "Layout")
assert len(element.findall(f".//{MD}relatedLists[{MD}relatedList='TEST']")) == 1
field_elements = element.findall(
f".//{MD}relatedLists[{MD}relatedList='TEST']/{MD}fields"
)
field_names = {elem.text for elem in field_elements}
assert field_names == set(["foo__c", "bar__c"])
def test_skips_existing_related_list(self):
task = create_task(
AddRelatedLists,
{
"managed": True,
"api_version": "47.0",
"api_names": "bar,foo",
"related_list": "RelatedContactList",
"fields": "foo__c,bar__c",
},
)
tree = metadata_tree.fromstring(
LAYOUT_XML.format(relatedLists=RELATED_LIST).encode("utf-8")
)
result = task._transform_entity(tree, "Layout")
assert result is None
##### TestAddRecordPlatformActionListItem
# Mocked empty page layout (no action list)
# Included common elements to better emulate a 'real' page layout.
MOCK_EMPTY_LAYOUT = """<?xml version="1.0" encoding="UTF-8"?>
<Layout xmlns="http://soap.sforce.com/2006/04/metadata">
<excludeButtons>Submit</excludeButtons>
<layoutSections>
<customLabel>false</customLabel>
<detailHeading>false</detailHeading>
<editHeading>true</editHeading>
<label>Information</label>
<layoutColumns>
<layoutItems>
<behavior>Required</behavior>
<field>Name</field>
</layoutItems>
<layoutItems>
<emptySpace>true</emptySpace>
</layoutItems>
<layoutItems>
<emptySpace>true</emptySpace>
</layoutItems>
</layoutColumns>
<style>TwoColumnsTopToBottom</style>
</layoutSections>
<miniLayout>
<fields>Name</fields>
<relatedLists>
<fields>NAME</fields>
<fields>STATUS</fields>
<relatedList>MOCKOBJECT</relatedList>
</relatedLists>
</miniLayout>
<relatedLists>
<fields>FULL_NAME</fields>
<fields>CONTACT.TITLE</fields>
<fields>CONTACT.EMAIL</fields>
<fields>CONTACT.PHONE1</fields>
<relatedList>RelatedContactList</relatedList>
</relatedLists>
<relatedLists>
<relatedList>RelatedFileList</relatedList>
</relatedLists>
<showEmailCheckbox>false</showEmailCheckbox>
<showHighlightsPanel>false</showHighlightsPanel>
<showInteractionLogPanel>false</showInteractionLogPanel>
<showRunAssignmentRulesCheckbox>false</showRunAssignmentRulesCheckbox>
<showSubmitAndAttachButton>false</showSubmitAndAttachButton>
{action_list_scenario}
</Layout>
"""
# Mocked existing action list
# For different scenarios change action_list_context (Record, Listview, etc)
# and the optional_first/last_action_items for inserting an existing item(s)
MOCK_EXISTING_ACTION_LIST = """
<platformActionList>
<actionListContext>{action_list_context}</actionListContext>
{optional_first_action_item}
<platformActionListItems>
<actionName>Edit</actionName>
<actionType>StandardButton</actionType>
<sortOrder>{}</sortOrder>
</platformActionListItems>
<platformActionListItems>
<actionName>FeedItem.TextPost</actionName>
<actionType>QuickAction</actionType>
<sortOrder>{}</sortOrder>
</platformActionListItems>
{optional_last_action_item}
</platformActionList>
"""
# Empty action item and empty action list
EMPTY_ACTION_ITEM = """
<platformActionListItems>
<actionName>{action_name}</actionName>
<actionType>{action_type}</actionType>
<sortOrder>{expected_order}</sortOrder>
</platformActionListItems>
"""
EMPTY_ACTION_LIST = """
<platformActionList>
<actionListContext>{action_list_context}</actionListContext>
{optional_action_item}
</platformActionList>
"""
class TestAddRecordPlatformActionListItem:
def test_adds_action_item_to_existing_list_place_last(self):
# options scenario:
# adding Quick Action to layout with existing action item list
# not setting place_first, so should be last in action list
options = {
"action_type": "QuickAction",
"action_name": "pkg__mockObject.TestQuickAction",
}
task = create_task(AddRecordPlatformActionListItem, options)
assert not task._place_first
# Mocks: build our existing action list and create our metadata tree
# "Record" context
# default sort order (0, 1)
# no optional
mock_action_list = MOCK_EXISTING_ACTION_LIST.format(
0,
1,
action_list_context="Record",
optional_first_action_item="",
optional_last_action_item="",
)
metadata = metadata_tree.fromstring(
MOCK_EMPTY_LAYOUT.format(action_list_scenario=mock_action_list).encode(
"utf-8"
)
)
mock_action_list_size = len(
metadata._get_child("platformActionList").findall("platformActionListItems")
)
# Creating expected action item/list xml and metadata
# Expected context = "Record"
# The action list items' <sortOrder> values (the positional *args) are derived
# from mock_action_list_size / range, and optional_last_action_item places the
# expected_action_item at the end of the list
expected_action_item = EMPTY_ACTION_ITEM.format(
expected_order=mock_action_list_size, **options
)
expected_action_list = MOCK_EXISTING_ACTION_LIST.format(
*range(0, mock_action_list_size + 1),
action_list_context="Record",
optional_first_action_item="",
optional_last_action_item=expected_action_item,
)
expected_metadata = metadata_tree.fromstring(
MOCK_EMPTY_LAYOUT.format(action_list_scenario=expected_action_list).encode(
"utf-8"
)
)
# run test actual
actual_metadata = task._transform_entity(metadata, "Layout")
# Assert our transformed metadata is the same as our expected
# This confirms, action list item size, sortOrder, and record context
assert actual_metadata.tostring() == expected_metadata.tostring()
def test_adds_action_item_to_existing_list_place_first(self):
# options scenario:
# adding Quick Action to layout with existing Record context action item list
# place_first = true, so new action should end up first in action list
options = {
"action_type": "QuickAction",
"action_name": "pkg__mockObject.TestQuickAction",
"place_first": True,
}
task = create_task(AddRecordPlatformActionListItem, options)
# Mocks: build our existing action list and create our metadata tree
# "Record" context
# default sort order (0, 1)
# no optional
mock_action_list = MOCK_EXISTING_ACTION_LIST.format(
0,
1,
action_list_context="Record",
optional_first_action_item="",
optional_last_action_item="",
)
metadata = metadata_tree.fromstring(
MOCK_EMPTY_LAYOUT.format(action_list_scenario=mock_action_list).encode(
"utf-8"
)
)
mock_action_list_size = len(
metadata._get_child("platformActionList").findall("platformActionListItems")
)
# Creating expected action item/list xml and metadata
# Expected context = "Record"
# our action_list_items <sortOrder> is being set dynamically from the mock_action_list_size (if we need to change later)
# setting our expected new action item to sortOrder 0 since placement should be first
# using our optional_first_action_item to set our expected_action_item
expected_action_item = EMPTY_ACTION_ITEM.format(expected_order=0, **options)
expected_action_list = MOCK_EXISTING_ACTION_LIST.format(
*range(1, mock_action_list_size + 1),
action_list_context="Record",
optional_first_action_item=expected_action_item,
optional_last_action_item="",
)
expected_metadata = metadata_tree.fromstring(
MOCK_EMPTY_LAYOUT.format(action_list_scenario=expected_action_list).encode(
"utf-8"
)
)
# run test actual
actual_metadata = task._transform_entity(metadata, "Layout")
# Assert our transformed metadata is the same as our expected
# This confirms, action list item size, sortOrder, and record context
assert actual_metadata.tostring() == expected_metadata.tostring()
def test_does_not_add_action_if_already_exists(self):
# options scenario:
# attempting to add Quick Action to layout with quick action already existing
options = {
"action_type": "QuickAction",
"action_name": "pkg__mockObject.TestQuickAction",
}
task = create_task(AddRecordPlatformActionListItem, options)
# Mocks: build our existing action list and create our metadata tree
# "Record" context
# default sort order (0, 1)
mock_action_item = EMPTY_ACTION_ITEM.format(expected_order=2, **options)
mock_action_list = MOCK_EXISTING_ACTION_LIST.format(
0,
1,
action_list_context="Record",
optional_first_action_item="",
optional_last_action_item=mock_action_item,
)
metadata = metadata_tree.fromstring(
MOCK_EMPTY_LAYOUT.format(action_list_scenario=mock_action_list).encode(
"utf-8"
)
)
# run test
actual_metadata = task._transform_entity(metadata, "Layout")
# should not transform, and metadata should be none
assert actual_metadata is None
def test_creates_new_action_list_when_none_present(self):
# options scenario:
# adding Quick Action to layout without existing action list
# place_first = true, so should be first in action list
options = {
"action_type": "QuickAction",
"action_name": "pkg__mockObject.TestQuickAction",
"place_first": True,
}
task = create_task(AddRecordPlatformActionListItem, options)
# Mocks: build our existing action list and create our metadata tree
# This is an empty layout without any actionList
metadata = metadata_tree.fromstring(
MOCK_EMPTY_LAYOUT.format(action_list_scenario="").encode("utf-8")
)
# Creating expected action item/list xml and metadata
# Expected action list context = "Record"
# Should only contain one action item, specified in options
expected_action_item = EMPTY_ACTION_ITEM.format(expected_order=0, **options)
expected_action_list = EMPTY_ACTION_LIST.format(
action_list_context="Record", optional_action_item=expected_action_item
)
expected_metadata = metadata_tree.fromstring(
MOCK_EMPTY_LAYOUT.format(action_list_scenario=expected_action_list).encode(
"utf-8"
)
)
# run test actual
actual_metadata = task._transform_entity(metadata, "Layout")
# Assert our transformed metadata is the same as our expected
# This confirms, action list item size, sortOrder, and record context
assert actual_metadata.tostring() == expected_metadata.tostring()
def test_adds_new_action_list_when_existing_list_is_not_record_context(self):
# options scenario:
# adding Quick Action to layout with existing action item list
options = {
"action_type": "QuickAction",
"action_name": "pkg__mockObject.TestQuickAction",
}
task = create_task(AddRecordPlatformActionListItem, options)
# Mocks: build our existing action list and create our metadata tree
# "Listview" context (which should trigger creation of new)
# default sort order (0, 1)
# no additional action items.
mock_action_list = MOCK_EXISTING_ACTION_LIST.format(
0,
1,
action_list_context="Listview",
optional_first_action_item="",
optional_last_action_item="",
)
metadata = metadata_tree.fromstring(
MOCK_EMPTY_LAYOUT.format(action_list_scenario=mock_action_list).encode(
"utf-8"
)
)
# Creating expected action item/list xml and metadata
# Our expected metadata includes both the mock_action_list with context "Listview",
# and an actionList of context "Record" created during the transform
# our action_list_items <sortOrder> is being set dynamically from the mock_action_list_size (if we need to change later)
# setting our added item to 0 sortOrder since only item
# using option_action_item to set expected action item
expected_action_item = EMPTY_ACTION_ITEM.format(expected_order=0, **options)
expected_action_list = EMPTY_ACTION_LIST.format(
action_list_context="Record", optional_action_item=expected_action_item
)
expected_metadata = metadata_tree.fromstring(
MOCK_EMPTY_LAYOUT.format(
action_list_scenario=str(mock_action_list + "\n" + expected_action_list)
).encode("utf-8")
)
# run test actual
actual_metadata = task._transform_entity(metadata, "Layout")
# Assert our transformed metadata is the same as our expected
# This confirms, action list item size, sortOrder, and record context
assert actual_metadata.tostring() == expected_metadata.tostring()
# Mocked empty page layout
MOCK_ADD_FIELDS_LAYOUT = """<?xml version="1.0" encoding="UTF-8"?>
<Layout xmlns="http://soap.sforce.com/2006/04/metadata">
<excludeButtons>Submit</excludeButtons>
<layoutSections>
<customLabel>false</customLabel>
<detailHeading>false</detailHeading>
<editHeading>true</editHeading>
<label>Information</label>
<layoutColumns>
<layoutItems>
<behavior>Required</behavior>
<field>Name</field>
</layoutItems>
<layoutItems>
<emptySpace>true</emptySpace>
</layoutItems>
<layoutItems>
<emptySpace>true</emptySpace>
</layoutItems>
</layoutColumns>
<layoutColumns>
<layoutItems>
<emptySpace>true</emptySpace>
</layoutItems>
<layoutItems>
<emptySpace>true</emptySpace>
</layoutItems>
</layoutColumns>
<style>TwoColumnsTopToBottom</style>
</layoutSections>
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""A set of constants and methods to manage permissions and security"""
import logging
from typing import Callable, List, Optional, Set, Tuple, TYPE_CHECKING, Union
from flask import current_app, redirect, g, flash, request, session, jsonify
from flask_appbuilder import Model
from flask_appbuilder.security.sqla import models as ab_models
from flask_appbuilder.security.sqla.manager import SecurityManager
from flask_appbuilder.security.views import (
PermissionModelView,
PermissionViewModelView,
RoleModelView,
UserModelView,
expose,
UserDBModelView,
AuthDBView
)
from flask_appbuilder.widgets import ListWidget
from sqlalchemy import or_
from sqlalchemy.orm.query import Query
from flask_appbuilder.security.sqla.models import User
import sqlalchemy as db
from flask_appbuilder.security.manager import BaseSecurityManager
from flask_login import login_user, logout_user
import datetime
import jwt
import requests
import json
from flask_oidc import OpenIDConnect
from flask_appbuilder.security.manager import AUTH_OID
from urllib.parse import quote
from flask_appbuilder.security.views import AuthOIDView
from base64 import b64encode, b64decode
from sqlalchemy.engine.base import Connection
from sqlalchemy.orm.mapper import Mapper
from superset import sql_parse
from superset.connectors.connector_registry import ConnectorRegistry
from superset.exceptions import SupersetSecurityException
from superset.utils.core import DatasourceName
import urllib.parse
from simplecrypt import encrypt, decrypt
if TYPE_CHECKING:
from superset.common.query_context import QueryContext
from superset.connectors.base.models import BaseDatasource
from superset.models.core import Database
from superset.viz import BaseViz
user_x = None
class SupersetSecurityListWidget(ListWidget):
"""
Redeclaring to avoid circular imports
"""
template = "superset/fab_overrides/list.html"
class SupersetRoleListWidget(ListWidget):
"""
Role model view from FAB already uses a custom list widget override
So we override the override
"""
template = "superset/fab_overrides/list_role.html"
def __init__(self, **kwargs):
kwargs["appbuilder"] = current_app.appbuilder
super().__init__(**kwargs)
UserModelView.list_widget = SupersetSecurityListWidget
RoleModelView.list_widget = SupersetRoleListWidget
PermissionViewModelView.list_widget = SupersetSecurityListWidget
PermissionModelView.list_widget = SupersetSecurityListWidget
class SupersetSecurityManager(SecurityManager):
READ_ONLY_MODEL_VIEWS = {"DatabaseAsync", "DatabaseView", "DruidClusterModelView"}
USER_MODEL_VIEWS = {
"UserDBModelView",
"UserLDAPModelView",
"UserOAuthModelView",
"UserOIDModelView",
"UserRemoteUserModelView",
}
GAMMA_READ_ONLY_MODEL_VIEWS = {
"SqlMetricInlineView",
"TableColumnInlineView",
"TableModelView",
"DruidColumnInlineView",
"DruidDatasourceModelView",
"DruidMetricInlineView",
} | READ_ONLY_MODEL_VIEWS
ADMIN_ONLY_VIEW_MENUS = {
"AccessRequestsModelView",
"Manage",
"SQL Lab",
"Queries",
"Refresh Druid Metadata",
"ResetPasswordView",
"RoleModelView",
"Security",
} | USER_MODEL_VIEWS
ALPHA_ONLY_VIEW_MENUS = {"Upload a CSV"}
ADMIN_ONLY_PERMISSIONS = {
"can_sql_json", # TODO: move can_sql_json to sql_lab role
"can_override_role_permissions",
"can_sync_druid_source",
"can_override_role_permissions",
"can_approve",
"can_update_role",
}
READ_ONLY_PERMISSION = {"can_show", "can_list"}
ALPHA_ONLY_PERMISSIONS = {
"muldelete",
"all_database_access",
"all_datasource_access",
}
OBJECT_SPEC_PERMISSIONS = {
"database_access",
"schema_access",
"datasource_access",
"metric_access",
"can_only_access_owned_queries",
}
ACCESSIBLE_PERMS = {"can_userinfo"}
def get_schema_perm(
self, database: Union["Database", str], schema: Optional[str] = None
) -> Optional[str]:
"""
Return the database specific schema permission.
:param database: The Superset database or database name
:param schema: The Superset schema name
:return: The database specific schema permission
"""
if schema:
return f"[{database}].[{schema}]"
return None
def can_access(self, permission_name: str, view_name: str) -> bool:
"""
Return True if the user can access the FAB permission/view, False
otherwise.
Note this method adds protection from has_access failing from missing
permission/view entries.
:param permission_name: The FAB permission name
:param view_name: The FAB view-menu name
        :returns: Whether the user can access the FAB permission/view
"""
user = g.user
if user.is_anonymous:
return self.is_item_public(permission_name, view_name)
return self._has_view_access(user, permission_name, view_name)
def can_only_access_owned_queries(self) -> bool:
"""
Return True if the user can only access owned queries, False otherwise.
        :returns: Whether the user can only access owned queries
"""
return self.can_access(
"can_only_access_owned_queries", "can_only_access_owned_queries"
)
def all_datasource_access(self) -> bool:
"""
Return True if the user can access all Superset datasources, False otherwise.
:returns: Whether the user can access all Superset datasources
"""
return self.can_access("all_datasource_access", "all_datasource_access")
def all_database_access(self) -> bool:
"""
Return True if the user can access all Superset databases, False otherwise.
:returns: Whether the user can access all Superset databases
"""
return self.can_access("all_database_access", "all_database_access")
def database_access(self, database: "Database") -> bool:
"""
Return True if the user can access the Superset database, False otherwise.
:param database: The Superset database
:returns: Whether the user can access the Superset database
"""
return (
self.all_datasource_access()
or self.all_database_access()
or self.can_access("database_access", database.perm)
)
def schema_access(self, datasource: "BaseDatasource") -> bool:
"""
Return True if the user can access the schema associated with the Superset
datasource, False otherwise.
Note for Druid datasources the database and schema are akin to the Druid cluster
and datasource name prefix, i.e., [schema.]datasource, respectively.
:param datasource: The Superset datasource
:returns: Whether the user can access the datasource's schema
"""
return (
self.all_datasource_access()
or self.database_access(datasource.database)
or self.can_access("schema_access", datasource.schema_perm)
)
def datasource_access(self, datasource: "BaseDatasource") -> bool:
"""
Return True if the user can access the Superset datasource, False otherwise.
:param datasource: The Superset datasource
        :returns: Whether the user can access the Superset datasource
"""
return self.schema_access(datasource) or self.can_access(
"datasource_access", datasource.perm
)
def get_datasource_access_error_msg(self, datasource: "BaseDatasource") -> str:
"""
Return the error message for the denied Superset datasource.
:param datasource: The denied Superset datasource
:returns: The error message
"""
return f"""This endpoint requires the datasource {datasource.name}, database or
`all_datasource_access` permission"""
def get_datasource_access_link(self, datasource: "BaseDatasource") -> Optional[str]:
"""
Return the link for the denied Superset datasource.
:param datasource: The denied Superset datasource
:returns: The access URL
"""
from superset import conf
return conf.get("PERMISSION_INSTRUCTIONS_LINK")
def get_table_access_error_msg(self, tables: List[str]) -> str:
"""
Return the error message for the denied SQL tables.
Note the table names conform to the [[cluster.]schema.]table construct.
:param tables: The list of denied SQL table names
:returns: The error message
"""
quoted_tables = [f"`{t}`" for t in tables]
return f"""You need access to the following tables: {", ".join(quoted_tables)},
`all_database_access` or `all_datasource_access` permission"""
def get_table_access_link(self, tables: List[str]) -> Optional[str]:
"""
Return the access link for the denied SQL tables.
Note the table names conform to the [[cluster.]schema.]table construct.
:param tables: The list of denied SQL table names
:returns: The access URL
"""
from superset import conf
return conf.get("PERMISSION_INSTRUCTIONS_LINK")
def _datasource_access_by_name(
self, database: "Database", table_name: str, schema: str = None
) -> bool:
"""
Return True if the user can access the SQL table, False otherwise.
:param database: The SQL database
:param table_name: The SQL table name
:param schema: The Superset schema
        :returns: Whether the user can access the SQL table
"""
from superset import db
if self.database_access(database) or self.all_datasource_access():
return True
schema_perm = self.get_schema_perm(database, schema)
if schema_perm and self.can_access("schema_access", schema_perm):
return True
datasources = ConnectorRegistry.query_datasources_by_name(
db.session, database, table_name, schema=schema
)
for datasource in datasources:
if self.can_access("datasource_access", datasource.perm):
return True
return False
def _get_schema_and_table(
self, table_in_query: str, schema: str
) -> Tuple[str, str]:
"""
Return the SQL schema/table tuple associated with the table extracted from the
SQL query.
Note the table name conforms to the [[cluster.]schema.]table construct.
:param table_in_query: The SQL table name
:param schema: The fallback SQL schema if not present in the table name
:returns: The SQL schema/table tuple
"""
table_name_pieces = table_in_query.split(".")
if len(table_name_pieces) == 3:
return tuple(table_name_pieces[1:]) # type: ignore
elif len(table_name_pieces) == 2:
return tuple(table_name_pieces) # type: ignore
return (schema, table_name_pieces[0])
def _datasource_access_by_fullname(
self, database: "Database", table_in_query: str, schema: str
) -> bool:
"""
Return True if the user can access the table extracted from the SQL query, False
otherwise.
Note the table name conforms to the [[cluster.]schema.]table construct.
:param database: The Superset database
:param table_in_query: The SQL table name
:param schema: The fallback SQL schema, i.e., if not present in the table name
:returns: Whether the user can access the SQL table
"""
table_schema, table_name = self._get_schema_and_table(table_in_query, schema)
return self._datasource_access_by_name(
database, table_name, schema=table_schema
)
def rejected_tables(self, sql: str, database: "Database", schema: str) -> List[str]:
"""
Return the list of rejected SQL table names.
Note the rejected table names conform to the [[cluster.]schema.]table construct.
:param sql: The SQL statement
:param database: The SQL database
:param schema: The SQL database schema
:returns: The rejected table names
"""
superset_query = sql_parse.ParsedQuery(sql)
return [
t
for t in superset_query.tables
if not self._datasource_access_by_fullname(database, t, schema)
]
def _user_datasource_perms(self) -> Set[str]:
"""
Return the set of FAB permission view-menu names the user can access.
:returns: The set of FAB permission view-menu names
"""
        datasource_perms =
# File: tests/framework/unit_tests/TSA/testFourier.py
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Module performs Unit Tests for the TSA.Fourier class.
It can not be considered part of the active code but of the regression test system
"""
import os
import sys
import copy
import numpy as np
# add RAVEN to path
frameworkDir = os.path.abspath(os.path.join(*([os.path.dirname(__file__)] + [os.pardir]*4 + ['framework'])))
if frameworkDir not in sys.path:
sys.path.append(frameworkDir)
from utils.utils import find_crow
find_crow(frameworkDir)
from utils import xmlUtils
from TSA import Fourier
plot = False
print('Module undergoing testing:')
print(Fourier)
print('')
results = {"pass":0,"fail":0}
def checkFloat(comment, value, expected, tol=1e-10, update=True):
"""
This method is aimed to compare two floats given a certain tolerance
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, tol, float, optional, the tolerance
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
if np.isnan(value) and np.isnan(expected):
res = True
elif np.isnan(value) or np.isnan(expected):
res = False
else:
res = abs(value - expected) <= tol
if update:
if not res:
print("checking float",comment,'|',value,"!=",expected)
results["fail"] += 1
else:
results["pass"] += 1
return res
def checkTrue(comment, res, update=True):
"""
This method is a pass-through for consistency and updating
@ In, comment, string, a comment printed out if it fails
@ In, res, bool, the tested value
@ In, update, bool, optional, if False then don't update results counter
    @ Out, res, bool, True if res is True
"""
if update:
if res:
results["pass"] += 1
else:
print("checking bool",comment,'|',res,'is not True!')
results["fail"] += 1
return res
def checkSame(comment, value, expected, update=True):
"""
This method is aimed to compare two identical things
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
res = value == expected
if update:
if res:
results["pass"] += 1
else:
print("checking string",comment,'|',value,"!=",expected)
results["fail"] += 1
return res
def checkArray(comment, first, second, dtype, tol=1e-10, update=True):
"""
This method is aimed to compare two arrays
@ In, comment, string, a comment printed out if it fails
    @ In, first, list(float) or list(str), the array to compare
    @ In, second, list(float) or list(str), the expected array
    @ In, dtype, type, the expected type of the entries (float or str)
    @ In, tol, float, optional, the tolerance
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
res = True
if len(first) != len(second):
res = False
print("checking answer",comment,'|','lengths do not match:',len(first),len(second))
else:
for i in range(len(first)):
if dtype == float:
pres = checkFloat('',first[i],second[i],tol,update=False)
      elif dtype == str:
pres = checkSame('',first[i],second[i],update=False)
if not pres:
print('checking array',comment,'|','entry "{}" does not match: {} != {}'.format(i,first[i],second[i]))
res = False
if update:
if res:
results["pass"] += 1
else:
results["fail"] += 1
return res
def checkNone(comment, entry, update=True):
"""
Checks if entry is None.
@ In, comment, string, a comment printed out if it fails
@ In, entry, object, to test if against None
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if None
"""
res = entry is None
if update:
if res:
results["pass"] += 1
else:
print("checking answer",comment,'|','"{}" is not None!'.format(entry))
results["fail"] += 1
def checkFails(comment, errstr, function, update=True, args=None, kwargs=None):
"""
Checks if expected error occurs
@ In, comment, string, a comment printed out if it fails
@ In, errstr, str, expected fail message
@ In, function, method, method to run to test for failure
@ In, update, bool, optional, if False then don't update results counter
@ In, args, list, arguments to pass to function
@ In, kwargs, dict, keyword arguments to pass to function
@ Out, res, bool, True if failed as expected
"""
print('Error testing ...')
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
function(*args,**kwargs)
res = False
msg = 'Function call did not error!'
except Exception as e:
res = checkSame('',e.args[0],errstr,update=False)
if not res:
msg = 'Unexpected error message. \n Received: "{}"\n Expected: "{}"'.format(e.args[0],errstr)
if update:
if res:
results["pass"] += 1
print(' ... end Error testing (PASSED)')
else:
print("checking error",comment,'|',msg)
results["fail"] += 1
print(' ... end Error testing (FAILED)')
print('')
return res
######################################
# CONSTRUCTION #
######################################
def createFourierXML(targets, periods):
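  # Builds, e.g. for targets ['A','B','C'] and periods [2, 5, 10]:
  #   <Fourier target="A,B,C"><periods>2,5,10</periods></Fourier>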
xml = xmlUtils.newNode('Fourier', attrib={'target':','.join(targets)})
xml.append(xmlUtils.newNode('periods', text=','.join(str(k) for k in periods)))
return xml
def createFromXML(xml):
fourier = Fourier()
inputSpec = Fourier.getInputSpecification()()
inputSpec.parseNode(xml)
fourier.handleInput(inputSpec)
return fourier
def createFourier(targets, periods):
xml = createFourierXML(targets, periods)
fourier = createFromXML(xml)
return fourier
def createFourierSignal(amps, periods, phases, pivot, intercept=0, plot=False):
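  # Constructs signal(t) = intercept + sum_k amps[k]*sin(2*pi*t/periods[k] + phases[k])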
if plot:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
signal = np.zeros(len(pivot)) + intercept
for k, period in enumerate(periods):
new = amps[k] * np.sin(2 * np.pi / period * pivot + phases[k])
if plot:
ax.plot(pivot, new, ':')
signal += new
if plot:
ax.plot(pivot, signal, 'k-')
plt.show()
return signal
###################
# Simple #
###################
# generate signal
targets = ['A', 'B', 'C']
pivot = np.arange(100) / 10.
periods = [2, 5, 10]
amps = [0.5, 1, 2]
phasesA = [0, np.pi, 0]
signalA = createFourierSignal(amps, periods, phasesA, pivot, plot=plot)
phasesB = [np.pi, 0, np.pi/4]
signalB = createFourierSignal(amps, periods, phasesB, pivot, plot=plot)
phasesC = [np.pi, np.pi/4, -np.pi/4]
interceptC = 2
signalC = createFourierSignal(amps, periods, phasesC, pivot, intercept=interceptC, plot=plot)
signals = np.zeros((len(pivot), 3))
signals[:, 0] = signalA
signals[:, 1] = signalB
signals[:, 2] = signalC
fourier = createFourier(targets, periods)
params = fourier.characterize(signals, pivot, targets)
# intercepts
checkFloat('Signal A intercept', params['A']['intercept'], 0)
checkFloat('Signal B intercept', params['B']['intercept'], 0)
checkFloat('Signal C intercept', params['C']['intercept'], interceptC)
# amplitudes
checkFloat('Signal A period 0 amplitude', params['A']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal A period 1 amplitude', params['A']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal A period 2 amplitude', params['A']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal B period 0 amplitude', params['B']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal B period 1 amplitude', params['B']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal B period 2 amplitude', params['B']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal C period 0 amplitude', params['C']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal C period 1 amplitude', params['C']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal C period 2 amplitude', params['C']['coeffs'][periods[2]]['amplitude'], amps[2])
# phases
# check absolute value of phase pi since -pi and pi are often converged on separately
checkFloat('Signal A period 0 phase', params['A']['coeffs'][periods[0]]['phase'] , phasesA[0])
checkFloat('Signal A period 1 phase', abs(params['A']['coeffs'][periods[1]]['phase']), phasesA[1])
checkFloat('Signal A period 2 phase', params['A']['coeffs'][periods[2]]['phase'] , phasesA[2])
checkFloat('Signal B period 0 phase', abs(params['B']['coeffs'][periods[0]]['phase']), phasesB[0])
checkFloat('Signal B period 1 phase', params['B']['coeffs'][periods[1]]['phase'] , phasesB[1])
checkFloat('Signal B period 2 phase', params['B']['coeffs'][periods[2]]['phase'] , phasesB[2])
checkFloat('Signal C period 0 phase', abs(params['C']['coeffs'][periods[0]]['phase']), phasesC[0])
checkFloat('Signal C period 1 phase', params['C']['coeffs'][periods[1]]['phase'] , phasesC[1])
checkFloat('Signal C period 2 phase', params['C']['coeffs'][periods[2]]['phase'] , phasesC[2])
# recreate signals
res = fourier.generate(params, pivot, None)
for tg, target in enumerate(targets):
checkArray(f'Signal {target} replication', res[:, tg], signals[:, tg], float)
##### now redo with non-simultaneous fitting
params = fourier.characterize(signals, pivot, targets, simultFit=False)
# intercepts
checkFloat('Signal A intercept', params['A']['intercept'], 0)
checkFloat('Signal B intercept', params['B']['intercept'], 0)
checkFloat('Signal C intercept', params['C']['intercept'], interceptC)
# amplitudes
checkFloat('Signal A period 0 amplitude', params['A']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal A period 1 amplitude', params['A']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal A period 2 amplitude', params['A']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal B period 0 amplitude', params['B']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal B period 1 amplitude', params['B']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal B period 2 amplitude', params['B']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal C period 0 amplitude', params['C']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal C period 1 amplitude', params['C']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal C period 2 amplitude', params['C']['coeffs'][periods[2]]['amplitude'], amps[2])
# phases
# check absolute value of phase pi since -pi and pi are often converged on separately
checkFloat('Signal A period 0 phase', params['A']['coeffs'][periods[0]]['phase'] , phasesA[0])
checkFloat('Signal A period 1 phase', abs(params['A']['coeffs'][periods[1]]['phase']), phasesA[1])
checkFloat('Signal A period 2 phase', params['A']['coeffs'][periods[2]]['phase'] , phasesA[2])
checkFloat('Signal B period 0 phase', abs(params['B']['coeffs'][periods[0]]['phase']), phasesB[0])
checkFloat('Signal B period 1 phase', params['B']['coeffs'][periods[1]]['phase'] , phasesB[1])
checkFloat('Signal B period 2 phase', params['B']['coeffs'][periods[2]]['phase'] , phasesB[2])
checkFloat('Signal C period 0 phase', abs(params['C']['coeffs'][periods[0]]['phase']), phasesC[0])
checkFloat('Signal C period 1 phase', params['C']['coeffs'][periods[1]]['phase'] , phasesC[1])
checkFloat('Signal C period 2 phase', params['C']['coeffs'][periods[2]]['phase'] , phasesC[2])
# recreate signals
res = fourier.generate(params, pivot, None)
for tg, target in enumerate(targets):
checkArray(f'Signal {target} replication', res[:, tg], signals[:, tg], float)
# check residual
# -> generate random noise to add to signal, then check it is returned in residual
r = np.random.rand(pivot.size,
"_wdens.dat")
if self.solute is not None :
_write_density(self.sdensity, 1.0 / len(self.solute), "_sdens.dat")
self._write_records(postfix="_dt.txt")
vals = np.asarray([entry.value for entry in self.records])
with open(self.out+".txt", "w") as f :
f.write(" ".join("%.3f %.3f"%(av, err) for av, err in zip(vals.mean(axis=0),
vals.std(axis=0)/np.sqrt(vals.shape[0])))+"\n")
class MempropAnalysis(TrajectoryAction):
@staticmethod
def descr() :
return "Analyze common membrane properties"
def add_arguments(self, parser):
        parser.add_argument('--pmask',help="the selection mask for phosphor atoms",default="name P")
        parser.add_argument('--lipidmask',help="the selection mask for lipid residues",default="resname POPC")
        parser.add_argument('--watmask',help="the selection mask for water residues",default="resname SOL")
parser.add_argument('--watvol',type=float,help="the volume of a water molecule in nm3",default=0.0306)
parser.add_argument('--gridout', help="the prefix for the filename of a 2D grid")
        parser.add_argument('--protmask',help="the selection mask for protein residues")
parser.add_argument('-o','--out',help="the output prefix",default="memprop")
def setup(self,args):
self.out = args.out
self.phosphorsel = self.processor.universe.select_atoms(args.pmask)
self.lipidsel = self.processor.universe.select_atoms(args.lipidmask)
watsel = self.processor.universe.select_atoms(args.watmask)
self.nlipid = len(self.lipidsel.residues)
self.nwat = len(watsel.residues)
nphosph = len(self.phosphorsel.residues)
print("Number of lipids (%d), waters (%d) and phosphor atoms (%d)"%(self.nlipid,self.nwat,nphosph))
self.watvol = args.watvol
if self.nlipid == 0 or self.nwat == 0 or nphosph == 0 :
raise Exception("Either number of lipids (%d), water (%d) or phosphor atoms (%d) is zero"%(self.nlipid,self.nwat,nphosph))
self.apllist = []
self.vpllist = []
# Setup edges to cover the entire simulation box
zpos = self.processor.universe.coord._pos[:,2] - self.lipidsel.positions[:,2].mean()
self.resolution = 0.25
self.edges = np.arange(zpos.min(),zpos.max()+self.resolution,self.resolution)
self.density = np.zeros(self.edges.shape[0]+1)
# Setup arrays for RMSF calculations
self.sumcoords2 = np.zeros([nphosph,2])
self.sumcoords = np.zeros([nphosph,2])
self.records = []
self.gridout = args.gridout
if self.gridout is not None :
bounds = np.asarray([[0.0, 0.0, 0.0],self.processor.universe.dimensions[:3]])
self.grid_low = AnalysisGrid(bounds)
self.grid_upp = AnalysisGrid(bounds)
if args.protmask is not None :
self.protsel = self.processor.universe.select_atoms(args.protmask)
self.grid_prot = AnalysisGrid(bounds)
self.protone = np.ones(len(self.protsel))
else :
self.protsel = None
def process(self):
"""
Calculate APL, VPL and accumulate density of phosphor selection
"""
boxnm = self.processor.currbox / 10.0
self.apllist.append(boxnm[0]*boxnm[1]/float(self.nlipid/2))
self.vpllist.append((boxnm[0]*boxnm[1]*boxnm[2] -
self.watvol*self.nwat)/float(self.nlipid))
zpos = self.phosphorsel.positions[:,2] - self.lipidsel.positions[:,2].mean()
for lipdig in np.digitize(zpos,self.edges) :
self.density[lipdig] += 1
self.sumcoords += self.phosphorsel.positions[:,:2]
self.sumcoords2 += self.phosphorsel.positions[:,:2]*self.phosphorsel.positions[:,:2]
self.records.append(MDRecord(self.processor.currtime,[self.apllist[-1],self.vpllist[-1],self._calc_dhh(),self._calc_rmsf()]))
if self.gridout is not None :
mid = self.phosphorsel.center_of_geometry()
sel_low = self.phosphorsel.positions[:,2] < mid[2]
sel_upp = np.logical_not(sel_low)
coords_upp = self.phosphorsel.positions[sel_upp,:]
coords_low = self.phosphorsel.positions[sel_low,:]
self.grid_low.accumulate(coords_low-mid,
self._calc_zdist(coords_low, coords_upp))
self.grid_upp.accumulate(coords_upp-mid,
self._calc_zdist(coords_upp, coords_low))
if self.protsel is not None :
self.grid_prot.accumulate(self.protsel.positions-mid, self.protone)
def finalize(self):
"""
Calculate average APL and VPL as well as distance
between peaks in the phosphor density
"""
dhh = self._calc_dhh()
apl = np.asarray(self.apllist).mean()
vpl = np.asarray(self.vpllist).mean()
rmsf = self._calc_rmsf()
with open(self.out+".txt","w") as f :
f.write("%.3f\t%.3f\t%.3f\t%.3f\n"%(apl, vpl, dhh, rmsf))
self._write_records(postfix="_dt.txt")
if self.gridout is not None:
self.grid_low.average()
self.grid_low.write(self.gridout+"_low.dat")
self.grid_upp.average()
self.grid_upp.write(self.gridout+"_upp.dat")
if self.protsel is not None :
self.grid_prot.average()
self.grid_prot.write(self.gridout+"_prot.dat")
def _calc_dhh(self) :
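        # Head-to-head distance: find the density maximum in each half of the
        # accumulated phosphor z-histogram and convert the bin separation to nm
        # (bins are `resolution` Angstrom wide, hence the factor resolution/10).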
mid = int(self.density.shape[0]/2)
dens_first = self.density[:mid]
dens_last = self.density[mid:]
max_first = np.argmax(dens_first)
max_last = np.argmax(dens_last)
return (max_last + mid - max_first) / 10.0 * self.resolution
def _calc_rmsf(self):
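        # In-plane RMSF: Var = <r^2> - <r>^2 per phosphor atom (x and y summed),
        # averaged over atoms; the factor 0.01 converts Angstrom^2 to nm^2.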
sumcoords = self.sumcoords / float(self.processor.nprocessed)
sumcoords2 = self.sumcoords2 / float(self.processor.nprocessed)
var = sumcoords2 - (sumcoords * sumcoords)
return var.sum(axis=1).mean()*0.01
def _calc_zdist(self, coords1, coords2) :
"""
Calculate the z-distance between all lipids in one leaflet and the closest lipid in the other leaflet
"""
dist = cdist(coords1[:,:2],coords2[:,:2],'sqeuclidean')
j = np.argmin(dist,axis=1)
return np.sqrt((coords2[j,2]-coords1[:,2])**2)*0.1
class MemVoronoiAnalysis(TrajectoryAction) :
@staticmethod
def descr() :
return "Voronoi analysis of a membrane patch"
def add_arguments(self, parser):
        parser.add_argument('--mask',nargs="+",help="the selection mask for the atoms to do analysis on")
parser.add_argument('--head',help="the name of the atom to determine leaflets",default="PO4")
parser.add_argument('-o','--out',help="the output",default="memvoro")
def setup(self, args):
self.atoms = self.processor.universe.select_atoms(
" or ".join("(%s)"%m for m in args.mask))
self.head = self.processor.universe.select_atoms("name %s"%args.head)
self.out = args.out
self.aplrecords = []
self.neighrecords = []
self.resnames = list(set([atom.resname for atom in self.atoms]))
self.respairs = []
for i, resname1 in enumerate(self.resnames):
for resname2 in self.resnames[i:]:
self.respairs.append(resname1+"-"+resname2)
def process(self):
midz = self.head.positions[:,2].mean()
lowsel = self.atoms.positions[:,2] < midz
uppsel = np.logical_not(lowsel)
celldim = [[0.0, self.processor.currbox[0]],
[0.0, self.processor.currbox[1]]]
try :
lareas, lneighbours = self._process_leaflet(self.atoms[lowsel], celldim)
uareas, uneighbours = self._process_leaflet(self.atoms[uppsel], celldim)
except:
pass
else:
areas = 0.01 * 0.5 * (lareas + uareas)
neighbours = 0.5 * (lneighbours + uneighbours)
self.aplrecords.append(MDRecord(self.processor.currtime,areas))
self.neighrecords.append(MDRecord(self.processor.currtime,neighbours))
def _process_leaflet(self, atoms, celldim):
cells = pyvoro.compute_2d_voronoi(atoms.positions[:,:2],
celldim, 2.0, periodic=[True,True])
# Calculate the area per each residue type
areas = {resname : 0 for resname in self.resnames}
nres = {resname : 0.0 for resname in self.resnames}
for atom, cell in zip(atoms, cells):
areas[atom.resname] += cell["volume"]
nres[atom.resname] += 1.0
areaout = np.asarray([areas[resname] / nres[resname] for resname in self.resnames])
# Calculate the neighbors
        vsets = [set((np.round(v[0],3),np.round(v[1],3)) for v in cell["vertices"]) for cell in cells]
emptyset = set([])
neighbors = {respair : 0 for respair in self.respairs}
npairs = {respair : 0 for respair in self.respairs}
for i, ivertices in enumerate(vsets):
counts = {respair : 0 for respair in self.respairs}
for j, jvertices in enumerate(vsets[i+1:],i+1):
if ivertices & jvertices != emptyset :
iresname = atoms[i].resname
jresname = atoms[j].resname
if iresname+"-"+jresname in neighbors:
counts[iresname+"-"+jresname] += 1
else:
counts[jresname+"-"+iresname] += 1
for respair in self.respairs:
if counts[respair] > 0 :
npairs[respair] += 1.0
neighbors[respair] += counts[respair]
neighout = np.asarray([neighbors[respair] / npairs[respair]
for respair in self.respairs])
return areaout, neighout
def finalize(self):
headers = ["Time"]
headers.extend(self.resnames)
self.records = self.aplrecords
self._write_records(postfix="_apl.txt", headers=headers)
headers = ["Time"]
headers.extend(self.respairs)
self.records = self.neighrecords
self._write_records(postfix="_neigh.txt", headers=headers)
class PrincipalAxisAnalysis(TrajectoryAction):
"""
    Class to analyse the principal axis and its angle
Attributes
----------
masses : list of float
the masses of the selected atoms
normal : numpy.ndarray
        the normal against which the angle is calculated
records : list of MDRecord
the recorded alpha (angle) values
selection : MDAnalysis.AtomGroup
the selection to make the analysis of
"""
@staticmethod
def descr() :
return "Analyze the principcal axis and its angle"
def add_arguments(self, parser):
        parser.add_argument('-m','--mask',help="the selection mask",default="name CA")
parser.add_argument('-n','--normal',type=float,nargs=3,help="the normal vector",default=[0.0,0.0,1.0])
parser.add_argument('-o','--out',help="the output filename",default="alpha.txt")
def setup(self,args):
self.selection = self.processor.universe.select_atoms(args.mask)
self.masses = np.asarray([atom.mass for atom in self.selection])
self.normal = np.asarray(args.normal)
self.records = []
self.out = args.out
def process(self):
#xyz = pbc.make_whole_xyz(self.selection.positions,
# self.processor.currbox)
#moi = geo.moment_of_inertia(xyz-xyz.mean(axis=0),self.masses)
#princip = geo.principal_axes(moi)
princip = self.selection.principal_axes(pbc=True)
#alpha = geo.angle(princip[0,:],self.normal)
alpha = mdmath.angle(princip[0,:], self.normal)
dalpha = pbc.unwrap_vector(alpha,np.pi)
alpha = np.abs(alpha-dalpha)*180.0/np.pi
self.records.append(MDRecord(self.processor.currtime,alpha))
def finalize(self):
"""
Write out average alpha and then all alphas to disc
"""
alphas = np.asarray([entry.value for entry in self.records])
print("Mean = %.3f Std = %.3f"%(alphas.mean(),alphas.std()))
self._write_records()
class RmsdAnalysis(TrajectoryAction) :
@staticmethod
def descr() :
return "Root-mean-square-deviation of a selection"
def add_arguments(self, parser):
        parser.add_argument('--sel',help="the selection mask for the atoms to superpose",default="protein and (name C or name CA or name N or name O)")
        parser.add_argument('--mobile',help="the selection mask for the mobile atoms")
parser.add_argument('-o','--out',help="the output",default="rmsd.out")
def setup(self, args):
self.refuni = md.Universe(self.processor.args.struct)
self.sel = args.sel
print("Will superpose %d atoms"%len(
self.processor.universe.select_atoms(args.sel)))
self.out = args.out
self.records = []
if args.mobile is not None:
self.refsel = self.refuni.select_atoms(args.mobile)
self.trjsel = self.processor.universe.select_atoms(args.mobile)
print("Mobile RMSD is on %d atoms"%len(self.refsel))
self.mobrecords = []
self.domobile = True
else:
self.domobile = False
def process(self):
rmsd = align.alignto(self.processor.universe, self.refuni,
select=self.sel)[1]
self.records.append(MDRecord(self.processor.currtime,rmsd*0.1))
if self.domobile :
rmsd = np.sqrt(np.mean((self.refsel.positions - self.trjsel.positions) ** 2))
self.mobrecords.append(MDRecord(self.processor.currtime,rmsd*0.1))
def finalize(self):
self._write_records()
if self.domobile:
self.records = self.mobrecords
r, t = os.path.splitext(self.out)
self.out = r
self._write_records(postfix="_mob"+t)
class RMSFAnalysis(TrajectoryAction):
"""
Class to analyse the RMSF of selected residues
Attributes
----------
alignmask : string
the mask to make the implicit RMSD fit on
protsel : MDAnalysis.AtomGroup
the selection used to select protein residues
refuni : MDAnalysis.Universe
the reference universe used for alignment
sumcoords : numpy.ndarray
the accumulated coordinates of the system
sumcoords2 : numpy.ndarray
the accumulated square of the system coordinates
"""
@staticmethod
def descr() :
return "Root-mean-square-fluctuation of a selection"
def add_arguments(self, parser):
parser.add_argument('--atoms',nargs="+",help="the atom names in the backbone",default=["CA","N","C"])
        parser.add_argument('--fitsel',help="the selection mask for the atoms to superpose",default="protein and (name C or name CA or name N)")
        parser.add_argument('--pmask',help="the selection mask for protein",default="protein")
parser.add_argument('--uselib',choices=["no","dict","me"],help="if to use library vectors",default="no")
parser.add_argument('--resoffset',type=int,help="the residue offset",default=0)
parser.add_argument('-o','--out',help="the output",default="rmsf.txt")
def setup(self, args):
self.refuni = md.Universe(self.processor.args.struct)
self.protsel = self.processor.universe.select_atoms(args.pmask)
self.atoms = args.atoms
self.fitsel = args.fitsel
natm = len(self.processor.universe.atoms)
self.sumcoords2 = np.zeros([natm,3])
self.sumcoords = np.zeros([natm,3])
self.out = args.out
self.uselib = args.uselib
self.resoffset = args.resoffset
def process(self):
rmsd = align.alignto(self.processor.universe, self.refuni, select=self.fitsel)
xyz = self.processor.currsnap._pos
self.sumcoords += xyz
self.sumcoords2 += xyz*xyz
def finalize(self):
nsnap = float(self.processor.currtime / self.processor.dt)
self.sumcoords = self.sumcoords / nsnap
self.sumcoords2 = self.sumcoords2 / nsnap
var = self.sumcoords2 - (self.sumcoords * self.sumcoords)
bfac = (8.0/3.0)*np.pi*np.pi*var.sum(axis=1)*0.01 # Make the variance into nm^2
with open(self.out,'w') as f:
for residue in self.protsel.residues:
if self.uselib == "no" :
rbfac = 0.0
masssum = 0.0
                for
# Copyright 2012 OpenStack LLC.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import argparse
import logging
import re
from cliff import lister
from cliff import show
from quantumclient.common import command
from quantumclient.common import exceptions
from quantumclient.common import utils
def add_show_list_common_argument(parser):
parser.add_argument(
'-D', '--show_details',
help='show detailed info',
action='store_true',
default=False, )
parser.add_argument(
'-F', '--fields',
help='specify the field(s) to be returned by server,'
' can be repeated',
action='append',
default=[], )
def add_extra_argument(parser, name, _help):
parser.add_argument(
name,
nargs=argparse.REMAINDER,
help=_help + ': --key1 [type=int|bool|...] value '
'[--key2 [type=int|bool|...] value ...]')
def parse_args_to_dict(values_specs):
    '''Analyze the extra command options passed to a command.
    Besides the known options and arguments, our commands also allow the user
    to append extra options at the end of the command line. For example, in
    list_nets -- --tag x y --key1 value1, the part '-- --tag x y --key1 value1'
    consists of extra options to list_nets. This feature supports the V2.0 API's
    field selection and filters. For example, to list networks whose name is
    'test4', we can run list_nets -- --name=test4.
value spec is: --key type=int|bool|... value. Type is one of Python
built-in types. By default, type is string. The key without value is
a bool option. Key with two values will be a list option.
'''
# -- is a pseudo argument
if values_specs and values_specs[0] == '--':
del values_specs[0]
_options = {}
current_arg = None
_values_specs = []
_value_number = 0
_list_flag = False
current_item = None
for _item in values_specs:
if _item.startswith('--'):
if current_arg is not None:
if _value_number > 1 or _list_flag:
current_arg.update({'nargs': '+'})
elif _value_number == 0:
current_arg.update({'action': 'store_true'})
_temp = _item
if "=" in _item:
_item = _item.split('=')[0]
if _item in _options:
raise exceptions.CommandError(
"duplicated options %s" % ' '.join(values_specs))
else:
_options.update({_item: {}})
current_arg = _options[_item]
_item = _temp
elif _item.startswith('type='):
if current_arg is not None:
_type_str = _item.split('=', 2)[1]
current_arg.update({'type': eval(_type_str)})
if _type_str == 'bool':
current_arg.update({'type': utils.str2bool})
elif _type_str == 'dict':
current_arg.update({'type': utils.str2dict})
continue
else:
raise exceptions.CommandError(
"invalid values_specs %s" % ' '.join(values_specs))
elif _item == 'list=true':
_list_flag = True
continue
if not _item.startswith('--'):
if not current_item or '=' in current_item:
raise exceptions.CommandError(
"Invalid values_specs %s" % ' '.join(values_specs))
_value_number += 1
elif _item.startswith('--'):
current_item = _item
if '=' in current_item:
_value_number = 1
else:
_value_number = 0
_list_flag = False
_values_specs.append(_item)
if current_arg is not None:
if _value_number > 1 or _list_flag:
current_arg.update({'nargs': '+'})
elif _value_number == 0:
current_arg.update({'action': 'store_true'})
_parser = argparse.ArgumentParser(add_help=False)
for opt, optspec in _options.iteritems():
_parser.add_argument(opt, **optspec)
_args = _parser.parse_args(_values_specs)
result_dict = {}
for opt in _options.iterkeys():
_opt = opt.split('--', 2)[1]
_value = getattr(_args, _opt.replace('-', '_'))
if _value is not None:
result_dict.update({_opt: _value})
return result_dict
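# Illustrative call (hypothetical option names), following the rules above:
#   parse_args_to_dict(['--', '--name=test4', '--shared', '--tag', 'a', 'b'])
#   -> {'name': 'test4', 'shared': True, 'tag': ['a', 'b']}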
class QuantumCommand(command.OpenStackCommand):
api = 'network'
log = logging.getLogger(__name__ + '.QuantumCommand')
def get_client(self):
return self.app.client_manager.quantum
def get_parser(self, prog_name):
parser = super(QuantumCommand, self).get_parser(prog_name)
parser.add_argument(
'--request_format',
help=_('the xml or json request format'),
default='json',
choices=['json', 'xml', ], )
return parser
class CreateCommand(QuantumCommand, show.ShowOne):
"""Create a resource for a given tenant
"""
api = 'network'
resource = None
log = None
def get_parser(self, prog_name):
parser = super(CreateCommand, self).get_parser(prog_name)
parser.add_argument(
'--tenant_id', metavar='tenant_id',
help=_('the owner tenant ID'), )
self.add_known_arguments(parser)
add_extra_argument(parser, 'value_specs',
'new values for the %s' % self.resource)
return parser
def add_known_arguments(self, parser):
pass
def args2body(self, parsed_args):
return {}
def get_data(self, parsed_args):
self.log.debug('get_data(%s)' % parsed_args)
quantum_client = self.get_client()
quantum_client.format = parsed_args.request_format
body = self.args2body(parsed_args)
_extra_values = parse_args_to_dict(parsed_args.value_specs)
body[self.resource].update(_extra_values)
obj_creator = getattr(quantum_client,
"create_%s" % self.resource)
data = obj_creator(body)
# {u'network': {u'id': u'e9424a76-6db4-4c93-97b6-ec311cd51f19'}}
info = self.resource in data and data[self.resource] or None
if info:
print >>self.app.stdout, _('Created a new %s:' % self.resource)
else:
info = {'': ''}
for k, v in info.iteritems():
if isinstance(v, list):
value = ""
for _item in v:
if value:
value += "\n"
if isinstance(_item, dict):
value += utils.dumps(_item)
else:
value += str(_item)
info[k] = value
elif v is None:
info[k] = ''
return zip(*sorted(info.iteritems()))
class UpdateCommand(QuantumCommand):
"""Update resource's information
"""
api = 'network'
resource = None
log = None
def get_parser(self, prog_name):
parser = super(UpdateCommand, self).get_parser(prog_name)
parser.add_argument(
'id', metavar='%s_id' % self.resource,
help='ID of %s to update' % self.resource)
add_extra_argument(parser, 'value_specs',
'new values for the %s' % self.resource)
return parser
def run(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
quantum_client = self.get_client()
quantum_client.format = parsed_args.request_format
value_specs = parsed_args.value_specs
if not value_specs:
raise exceptions.CommandError(
"Must specify new values to update %s" % self.resource)
data = {self.resource: parse_args_to_dict(value_specs)}
obj_updator = getattr(quantum_client,
"update_%s" % self.resource)
obj_updator(parsed_args.id, data)
print >>self.app.stdout, (
_('Updated %(resource)s: %(id)s') %
{'id': parsed_args.id, 'resource': self.resource})
return
class DeleteCommand(QuantumCommand):
"""Delete a given resource
"""
api = 'network'
resource = None
log = None
def get_parser(self, prog_name):
parser = super(DeleteCommand, self).get_parser(prog_name)
parser.add_argument(
'id', metavar='%s_id' % self.resource,
help='ID of %s to delete' % self.resource)
return parser
def run(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
quantum_client = self.get_client()
quantum_client.format = parsed_args.request_format
obj_deleter = getattr(quantum_client,
"delete_%s" % self.resource)
obj_deleter(parsed_args.id)
print >>self.app.stdout, (_('Deleted %(resource)s: %(id)s')
% {'id': parsed_args.id,
'resource': self.resource})
return
class ListCommand(QuantumCommand, lister.Lister):
"""List resourcs that belong to a given tenant
"""
api = 'network'
resource = None
log = None
_formatters = None
def get_parser(self, prog_name):
parser = super(ListCommand, self).get_parser(prog_name)
add_show_list_common_argument(parser)
add_extra_argument(parser, 'filter_specs', 'filters options')
return parser
def get_data(self, parsed_args):
self.log.debug('get_data(%s)' % parsed_args)
quantum_client = self.get_client()
search_opts = parse_args_to_dict(parsed_args.filter_specs)
self.log.debug('search options: %s', search_opts)
quantum_client.format = parsed_args.request_format
fields = parsed_args.fields
extra_fields = search_opts.get('fields', [])
if extra_fields:
if isinstance(extra_fields, list):
fields.extend(extra_fields)
else:
fields.append(extra_fields)
if fields:
search_opts.update({'fields': fields})
if parsed_args.show_details:
search_opts.update({'verbose': 'True'})
obj_lister = getattr(quantum_client,
"list_%ss" % self.resource)
data = obj_lister(**search_opts)
info = []
collection = self.resource + "s"
if collection in data:
info = data[collection]
_columns = len(info) > 0 and sorted(info[0].keys()) or []
return (_columns, (utils.get_item_properties(
s, _columns, formatters=self._formatters, )
for s in info), )
class ShowCommand(QuantumCommand, show.ShowOne):
"""Show information of a given resource
"""
HEX_ELEM = '[0-9A-Fa-f]'
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
HEX_ELEM + '{4}', HEX_ELEM + '{4}',
HEX_ELEM + '{12}'])
api = 'network'
resource = None
log = None
def get_parser(self, prog_name):
parser = super(ShowCommand, self).get_parser(prog_name)
add_show_list_common_argument(parser)
parser.add_argument(
'id', metavar='%s_id' % self.resource,
help='ID or name of %s to look up' % self.resource)
return parser
def get_data(self, parsed_args):
self.log.debug('get_data(%s)' % parsed_args)
quantum_client = self.get_client()
quantum_client.format = parsed_args.request_format
params = {}
if parsed_args.show_details:
params = {'verbose': 'True'}
if parsed_args.fields:
params = {'fields': parsed_args.fields}
data = None
# Error message to be used in case both search by id and name are
# unsuccessful (if list by name fails it does not return an error)
not_found_message = "Unable to find resource:%s" % parsed_args.id
# perform search by id only if we are passing a valid UUID
match = re.match(self.UUID_PATTERN, parsed_args.id)
if match:
try:
obj_shower = getattr(quantum_client,
"show_%s" % self.resource)
data = obj_shower(parsed_args.id, **params)
except exceptions.QuantumClientException as ex:
logging.debug("Show operation failed with code:%s",
ex.status_code)
not_found_message = ex.message
if ex.status_code != 404:
logging.exception("Unable to perform show operation")
raise
# If data is empty, then we got a 404. Try to interpret Id as a name
if not data:
logging.debug("Trying to interpret %s as a %s name",
parsed_args.id,
self.resource)
# build search_opts for the name
search_opts = parse_args_to_dict(["--name=%s" % parsed_args.id])
search_opts.update(params)
obj_lister = getattr(quantum_client,
"list_%ss" % self.resource)
data = obj_lister(**search_opts)
info = []
collection = self.resource + "s"
if collection in data:
info = data[collection]
if len(info) > 1:
logging.info("Multiple occurrences found for: %s",
parsed_args.id)
_columns = ['id']
# put all ids in a single string as formatter for show
# command will print on record only
id_string = "\n".join(utils.get_item_properties(
s, _columns)[0] for s in info)
return (_columns, (id_string, ), )
elif len(info) == 0:
#Nothing was found
raise exceptions.QuantumClientException(
message=not_found_message)
else:
                data = {self.resource:
the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
"""
@property
def _obj_1d_constructor(self) -> Type["Series"]:
# GH28330 preserve subclassed Series/DataFrames
if isinstance(self.obj, DataFrame):
return self.obj._constructor_sliced
assert isinstance(self.obj, Series)
return self.obj._constructor
def _bool_agg(self, val_test, skipna):
"""
Shared func to call any / all Cython GroupBy implementations.
"""
def objs_to_bool(vals: np.ndarray) -> Tuple[np.ndarray, Type]:
if is_object_dtype(vals):
vals = np.array([bool(x) for x in vals])
else:
vals = vals.astype(np.bool)
return vals.view(np.uint8), np.bool
def result_to_bool(result: np.ndarray, inference: Type) -> np.ndarray:
return result.astype(inference, copy=False)
return self._get_cythonized_result(
"group_any_all",
aggregate=True,
cython_dtype=np.dtype(np.uint8),
needs_values=True,
needs_mask=True,
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test,
skipna=skipna,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def any(self, skipna: bool = True):
"""
Return True if any value in the group is truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("any", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def all(self, skipna: bool = True):
"""
Return True if all values in the group are truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("all", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def count(self):
"""
Compute count of group, excluding missing values.
Returns
-------
Series or DataFrame
Count of values within each group.
"""
# defined here for API doc
raise NotImplementedError
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def mean(self, numeric_only: bool = True):
"""
Compute mean of groups, excluding missing values.
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
pandas.Series or pandas.DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean()
B C
A
1 3.0 1.333333
2 4.0 1.500000
Groupby two columns and return the mean of the remaining column.
>>> df.groupby(['A', 'B']).mean()
C
A B
1 2.0 2
4.0 1
2 3.0 1
5.0 2
Groupby one column and return the mean of only particular column in
the group.
>>> df.groupby('A')['B'].mean()
A
1 3.0
2 4.0
Name: B, dtype: float64
"""
return self._cython_agg_general(
"mean",
alt=lambda x, axis: Series(x).mean(numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def median(self, numeric_only=True):
"""
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
Series or DataFrame
Median of values within each group.
"""
return self._cython_agg_general(
"median",
alt=lambda x, axis: Series(x).median(axis=axis, numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def std(self, ddof: int = 1):
"""
Compute standard deviation of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard deviation of values within each group.
"""
# TODO: implement at Cython level?
return np.sqrt(self.var(ddof=ddof))
@Substitution(name="groupby")
@Appender(_common_see_also)
def var(self, ddof: int = 1):
"""
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Variance of values within each group.
"""
if ddof == 1:
return self._cython_agg_general(
"var", alt=lambda x, axis: Series(x).var(ddof=ddof)
)
else:
func = lambda x: x.var(ddof=ddof)
with _group_selection_context(self):
return self._python_agg_general(func)
@Substitution(name="groupby")
@Appender(_common_see_also)
def sem(self, ddof: int = 1):
"""
Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard error of the mean of values within each group.
"""
return self.std(ddof=ddof) / np.sqrt(self.count())
@Substitution(name="groupby")
@Appender(_common_see_also)
def size(self):
"""
Compute group sizes.
Returns
-------
Series
Number of rows in each group.
"""
result = self.grouper.size()
# GH28330 preserve subclassed Series/DataFrames through calls
if issubclass(self.obj._constructor, Series):
result = self._obj_1d_constructor(result, name=self.obj.name)
else:
result = self._obj_1d_constructor(result)
return self._reindex_output(result, fill_value=0)
@classmethod
def _add_numeric_operations(cls):
"""
Add numeric operations to the GroupBy generically.
"""
def groupby_function(
name: str,
alias: str,
npfunc,
numeric_only: bool = True,
min_count: int = -1,
):
_local_template = """
Compute %(f)s of group values.
Parameters
----------
numeric_only : bool, default %(no)s
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
min_count : int, default %(mc)s
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computed %(f)s of values within each group.
"""
@Substitution(name="groupby", f=name, no=numeric_only, mc=min_count)
@Appender(_common_see_also)
@Appender(_local_template)
def func(self, numeric_only=numeric_only, min_count=min_count):
self._set_group_selection()
# try a cython aggregation if we can
try:
return self._cython_agg_general(
how=alias,
alt=npfunc,
numeric_only=numeric_only,
min_count=min_count,
)
except DataError:
pass
except NotImplementedError as err:
if "function is not implemented for this dtype" in str(
err
) or "category dtype not supported" in str(err):
# raised in _get_cython_function, in some cases can
# be trimmed by implementing cython funcs for more dtypes
pass
else:
raise
# apply a non-cython aggregation
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
return result
set_function_name(func, name, cls)
return func
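        # Each reduction generated by the factory above first tries the Cython
        # groupby kernel (via its "alias" name) and falls back to applying the
        # NumPy function per group when the dtype is not supported.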
def first_compat(obj: FrameOrSeries, axis: int = 0):
def first(x: Series):
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(obj, DataFrame):
return obj.apply(first, axis=axis)
elif isinstance(obj, Series):
return first(obj)
else:
raise TypeError(type(obj))
def last_compat(obj: FrameOrSeries, axis: int = 0):
def last(x: Series):
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(obj, DataFrame):
return obj.apply(last, axis=axis)
elif isinstance(obj, Series):
return last(obj)
else:
raise TypeError(type(obj))
cls.sum = groupby_function("sum", "add", np.sum, min_count=0)
cls.prod = groupby_function("prod", "prod", np.prod, min_count=0)
cls.min = groupby_function("min", "min", np.min, numeric_only=False)
cls.max = groupby_function("max", "max", np.max, numeric_only=False)
cls.first = groupby_function("first", "first", first_compat, numeric_only=False)
cls.last = groupby_function("last", "last", last_compat, numeric_only=False)
@Substitution(name="groupby")
@Appender(_common_see_also)
def ohlc(self) -> DataFrame:
"""
Compute open, high, low and close values of a group, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Returns
-------
DataFrame
Open, high, low and close values within each group.
"""
return self._apply_to_column_groupbys(lambda x: x._cython_agg_general("ohlc"))
@doc(DataFrame.describe)
def describe(self, **kwargs):
with _group_selection_context(self):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper.
Given a grouper, the function resamples it according to a string
"string" -> "frequency".
See the :ref:`frequency aliases <timeseries.offset_aliases>`
documentation for more details.
Parameters
----------
rule : str or DateOffset
The offset string or object representing target grouper conversion.
*args, **kwargs
Possible arguments are `how`, `fill_method`, `limit`, `kind` and
`on`, and other arguments of `TimeGrouper`.
Returns
-------
Grouper
Return a new grouper with our resampler appended.
See Also
--------
Grouper : Specify a frequency to resample with when
grouping by a key.
DatetimeIndex.resample : Frequency conversion and resampling of
time series.
Examples
--------
>>> idx = pd.date_range('1/1/2000', periods=4, freq='T')
>>> df = pd.DataFrame(data=4 * [range(2)],
... index=idx,
... columns=['a', 'b'])
>>> df.iloc[2, 0] = 5
>>> df
a b
2000-01-01 00:00:00 0 1
2000-01-01 00:01:00 0 1
2000-01-01 00:02:00 5 1
2000-01-01 00:03:00 0 1
Downsample the DataFrame into 3 minute bins and sum the values of
the timestamps falling into a bin.
>>> df.groupby('a').resample('3T').sum()
a b
a
0 2000-01-01 00:00:00 0 2
2000-01-01 00:03:00 0 1
5 2000-01-01 00:00:00 5 1
        Upsample the series into 30 second
(Hamiltonian): the hamiltonian you want to apply.
Examples:
>>> from mindquantum import Simulator
>>> from mindquantum import Circuit, Hamiltonian
>>> from mindquantum.core.operators import QubitOperator
>>> import scipy.sparse as sp
>>> sim = Simulator('projectq', 1)
>>> sim.apply_circuit(Circuit().h(0))
>>> sim.get_qs()
array([0.70710678+0.j, 0.70710678+0.j])
>>> ham1 = Hamiltonian(QubitOperator('Z0'))
>>> sim.apply_hamiltonian(ham1)
>>> sim.get_qs()
array([ 0.70710678+0.j, -0.70710678+0.j])
>>> sim.reset()
>>> ham2 = Hamiltonian(sp.csr_matrix([[1, 2], [3, 4]]))
>>> sim.apply_hamiltonian(ham2)
>>> sim.get_qs()
array([1.+0.j, 3.+0.j])
"""
_check_input_type('hamiltonian', Hamiltonian, hamiltonian)
_check_hamiltonian_qubits_number(hamiltonian, self.n_qubits)
self.sim.apply_hamiltonian(hamiltonian.get_cpp_obj())
def get_expectation(self, hamiltonian):
r"""
        Get expectation of the given hamiltonian. The hamiltonian may be non-Hermitian.
.. math::
E = \left<\psi\right|H\left|\psi\right>
Args:
hamiltonian (Hamiltonian): The hamiltonian you want to get expectation.
Returns:
numbers.Number, the expectation value.
Examples:
>>> from mindquantum.core.operators import QubitOperator
>>> from mindquantum import Circuit, Simulator
>>> from mindquantum import Hamiltonian
>>> sim = Simulator('projectq', 1)
>>> sim.apply_circuit(Circuit().ry(1.2, 0))
>>> ham = Hamiltonian(QubitOperator('Z0'))
>>> sim.get_expectation(ham)
(0.36235775447667357+0j)
"""
if not isinstance(hamiltonian, Hamiltonian):
raise TypeError(f"hamiltonian requires a Hamiltonian, but got {type(hamiltonian)}")
_check_hamiltonian_qubits_number(hamiltonian, self.n_qubits)
return self.sim.get_expectation(hamiltonian.get_cpp_obj())
def get_qs(self, ket=False):
"""
Get current quantum state of this simulator.
Args:
ket (bool): Whether to return the quantum state in ket format or not.
Default: False.
Returns:
numpy.ndarray, the current quantum state.
Examples:
>>> from mindquantum import qft, Simulator
>>> sim = Simulator('projectq', 2)
>>> sim.apply_circuit(qft(range(2)))
>>> sim.get_qs()
array([0.5+0.j, 0.5+0.j, 0.5+0.j, 0.5+0.j])
"""
if not isinstance(ket, bool):
raise TypeError(f"ket requires a bool, but get {type(ket)}")
state = np.array(self.sim.get_qs())
if ket:
return '\n'.join(ket_string(state))
return state
def set_qs(self, quantum_state):
"""
Set quantum state for this simulation.
Args:
quantum_state (numpy.ndarray): the quantum state that you want.
Examples:
>>> from mindquantum import Simulator
>>> import numpy as np
>>> sim = Simulator('projectq', 1)
>>> sim.get_qs()
array([1.+0.j, 0.+0.j])
>>> sim.set_qs(np.array([1, 1]))
>>> sim.get_qs()
array([0.70710678+0.j, 0.70710678+0.j])
"""
if not isinstance(quantum_state, np.ndarray):
raise TypeError(f"quantum state must be a ndarray, but get {type(quantum_state)}")
if len(quantum_state.shape) != 1:
raise ValueError(f"vec requires a 1-dimensional array, but get {quantum_state.shape}")
n_qubits = np.log2(quantum_state.shape[0])
if n_qubits % 1 != 0:
raise ValueError(f"vec size {quantum_state.shape[0]} is not power of 2")
n_qubits = int(n_qubits)
if self.n_qubits != n_qubits:
raise ValueError(f"{n_qubits} qubits vec does not match with simulation qubits ({self.n_qubits})")
self.sim.set_qs(quantum_state / np.sqrt(np.sum(np.abs(quantum_state) ** 2)))
def get_expectation_with_grad(
self,
hams,
circ_right,
circ_left=None,
simulator_left=None,
encoder_params_name=None,
ansatz_params_name=None,
parallel_worker=None,
):
r"""
Get a function that returns the forward value and gradient w.r.t. circuit parameters.
This method is designed to calculate the expectation and its gradient, as shown below.
.. math::
E = \left<\varphi\right|U_l^\dagger H U_r \left|\psi\right>
where :math:`U_l` is circ_left, :math:`U_r` is circ_right, :math:`H` is hams
and :math:`\left|\psi\right>` is the current quantum state of this simulator,
and :math:`\left|\varphi\right>` is the quantum state of `simulator_left`.
Args:
hams (Hamiltonian): The hamiltonian that need to get expectation.
circ_right (Circuit): The :math:`U_r` circuit described above.
circ_left (Circuit): The :math:`U_l` circuit described above. By default, this circuit
will be none, and in this situation, :math:`U_l` will be equals to
:math:`U_r`. Default: None.
simulator_left (Simulator): The simulator that contains :math:`\left|\varphi\right>`. If
None, then :math:`\left|\varphi\right>` is assumed to be equals to :math:`\left|\psi\right>`.
Default: None.
encoder_params_name (list[str]): Specifies which parameters belong to the encoder,
which encodes the input data into a quantum state. The encoder data
can be a batch. Default: None.
ansatz_params_name (list[str]): Specifies which parameters belong to the ansatz,
which will be trained during training. Default: None.
parallel_worker (int): The number of parallel workers. The parallel workers can handle
a batch in parallel threads. Default: None.
Returns:
GradOpsWrapper, a grad ops wrapper that contains the information needed to generate this grad ops.
Examples:
>>> import numpy as np
>>> from mindquantum import Simulator, Hamiltonian
>>> from mindquantum import Circuit
>>> from mindquantum.core.operators import QubitOperator
>>> circ = Circuit().ry('a', 0)
>>> ham = Hamiltonian(QubitOperator('Z0'))
>>> sim = Simulator('projectq', 1)
>>> grad_ops = sim.get_expectation_with_grad(ham, circ)
>>> grad_ops(np.array([1.0]))
(array([[0.54030231+0.j]]), array([[[-0.84147098+0.j]]]))
>>> sim1 = Simulator('projectq', 1)
>>> prep_circ = Circuit().h(0)
>>> ansatz = Circuit().ry('a', 0).rz('b', 0).ry('c', 0)
>>> sim1.apply_circuit(prep_circ)
>>> sim2 = Simulator('projectq', 1)
>>> ham = Hamiltonian(QubitOperator(""))
>>> grad_ops = sim2.get_expectation_with_grad(ham, ansatz, Circuit(), simulator_left=sim1)
>>> f, g = grad_ops(np.array([7.902762e-01, 2.139225e-04, 7.795934e-01]))
>>> f
array([[0.99999989-7.52279618e-05j]])
"""
if isinstance(hams, Hamiltonian):
hams = [hams]
elif not isinstance(hams, list):
raise TypeError(f"hams requires a Hamiltonian or a list of Hamiltonian, but get {type(hams)}")
for h_tmp in hams:
_check_input_type("hams's element", Hamiltonian, h_tmp)
_check_hamiltonian_qubits_number(h_tmp, self.n_qubits)
_check_input_type("circ_right", Circuit, circ_right)
if circ_right.is_noise_circuit:
raise ValueError("noise circuit not support yet.")
non_hermitian = False
if circ_left is not None:
_check_input_type("circ_left", Circuit, circ_left)
if circ_left.is_noise_circuit:
raise ValueError("noise circuit not support yet.")
non_hermitian = True
if simulator_left is not None:
_check_input_type("simulator_left", Simulator, simulator_left)
if self.backend != simulator_left.backend:
raise ValueError(
f"simulator_left should have the same backend as this simulator, \
which is {self.backend}, but get {simulator_left.backend}"
)
if self.n_qubits != simulator_left.n_qubits:
raise ValueError(
f"simulator_left should have the same n_qubits as this simulator, \
which is {self.n_qubits}, but get {simulator_left.n_qubits}"
)
non_hermitian = True
if non_hermitian and simulator_left is None:
simulator_left = self
if circ_left is None:
circ_left = circ_right
if circ_left.has_measure_gate or circ_right.has_measure_gate:
raise ValueError("circuit for variational algorithm cannot have measure gate")
if parallel_worker is not None:
_check_int_type("parallel_worker", parallel_worker)
if encoder_params_name is None and ansatz_params_name is None:
encoder_params_name = []
ansatz_params_name = list(circ_right.params_name)
for i in circ_left.params_name:
if i not in ansatz_params_name:
ansatz_params_name.append(i)
if encoder_params_name is None:
encoder_params_name = []
if ansatz_params_name is None:
ansatz_params_name = []
_check_input_type("encoder_params_name", list, encoder_params_name)
_check_input_type("ansatz_params_name", list, ansatz_params_name)
for i in encoder_params_name:
_check_input_type("Element of encoder_params_name", str, i)
for i in ansatz_params_name:
_check_input_type("Element of ansatz_params_name", str, i)
s1 = set(circ_right.params_name) | set(circ_left.params_name)
s2 = set(encoder_params_name) | set(ansatz_params_name)
if s1 - s2 or s2 - s1:
raise ValueError("encoder_params_name and ansatz_params_name are different with circuit parameters")
circ_n_qubits = max(circ_left.n_qubits, circ_right.n_qubits)
if self.n_qubits < circ_n_qubits:
raise ValueError(f"Simulator has {self.n_qubits} qubits, but circuit has {circ_n_qubits} qubits.")
version = "both"
if not ansatz_params_name:
version = "encoder"
if not encoder_params_name:
version = "ansatz"
def grad_ops(*inputs):
if version == "both" and len(inputs) != 2:
raise ValueError("Need two inputs!")
if version in ("encoder", "ansatz") and len(inputs) != 1:
raise ValueError("Need one input!")
if version == "both":
_check_encoder(inputs[0], len(encoder_params_name))
_check_ansatz(inputs[1], len(ansatz_params_name))
batch_threads, mea_threads = _thread_balance(inputs[0].shape[0], len(hams), parallel_worker)
inputs0 = inputs[0]
inputs1 = inputs[1]
if version == "encoder":
_check_encoder(inputs[0], len(encoder_params_name))
batch_threads, mea_threads = _thread_balance(inputs[0].shape[0], len(hams), parallel_worker)
inputs0 = inputs[0]
inputs1 = np.array([])
if version == "ansatz":
_check_ansatz(inputs[0], len(ansatz_params_name))
batch_threads, mea_threads = _thread_balance(1, len(hams), parallel_worker)
inputs0 = np.array([[]])
inputs1 = inputs[0]
if non_hermitian:
f_g1_g2 = self.sim.non_hermitian_measure_with_grad(
[i.get_cpp_obj() for i in hams],
[i.get_cpp_obj(hermitian=True) for i in hams],
circ_left.get_cpp_obj(),
circ_left.get_cpp_obj(hermitian=True),
circ_right.get_cpp_obj(),
circ_right.get_cpp_obj(hermitian=True),
inputs0,
inputs1,
encoder_params_name,
ansatz_params_name,
batch_threads,
mea_threads,
simulator_left.sim,
)
else:
f_g1_g2 = self.sim.hermitian_measure_with_grad(
[i.get_cpp_obj() for i in hams],
circ_right.get_cpp_obj(),
circ_right.get_cpp_obj(hermitian=True),
inputs0,
inputs1,
encoder_params_name,
ansatz_params_name,
batch_threads,
mea_threads,
)
res = np.array(f_g1_g2)
if version == 'both':
f = res[:, :, 0]
g1 = res[:, :, 1 : 1 + len(encoder_params_name)] # noqa:E203
g2 = res[:, :, 1 + len(encoder_params_name) :] # noqa:E203
return f, g1, g2
f = res[:, :, 0]
g = res[:, :, 1:]
return f, g
grad_wrapper = GradOpsWrapper(
grad_ops, hams, circ_right, circ_left, encoder_params_name, ansatz_params_name, parallel_worker
)
s = f'{self.n_qubits} qubit' + ('' if self.n_qubits == 1 else 's')
s += f' {self.backend} VQA Operator'
grad_wrapper.set_str(s)
return grad_wrapper
def _check_encoder(data, encoder_params_size):
if not isinstance(data, np.ndarray):
raise ValueError(f"encoder parameters need numpy array, but get {type(data)}")
data_shape = data.shape
if len(data_shape) != 2:
raise ValueError("encoder data requires a two dimension numpy array")
if data_shape[1] != encoder_params_size:
raise ValueError(
f"encoder parameters size do not match with encoder parameters name,\
need {encoder_params_size} but get {data_shape[1]}."
)
def _check_ansatz(data, ansatz_params_size):
"""Check ansatz."""
if not isinstance(data, np.ndarray):
raise ValueError(f"ansatz parameters need numpy array, but get {type(data)}")
data_shape = data.shape
if len(data_shape) != 1:
raise ValueError("ansatz data requires a one dimension numpy array")
if data_shape[0] != ansatz_params_size:
raise ValueError(
f"ansatz parameters size do not match with ansatz parameters name,\
need {ansatz_params_size} but get {data_shape[0]}"
)
def _thread_balance(n_prs, n_meas, parallel_worker):
"""Thread balance."""
if parallel_worker is None:
parallel_worker = n_meas * n_prs
if n_meas * n_prs <= parallel_worker:
batch_threads = n_prs
mea_threads = n_meas
else:
if n_meas < n_prs:
batch_threads = min(n_prs,
<filename>af_lenz.py
#!/usr/bin/env python
from inspect import isfunction
from autofocus import AutoFocusAPI
AutoFocusAPI.api_key = ""
from autofocus import AFSession, AFSample
from autofocus import AFServiceActivity, AFRegistryActivity, AFProcessActivity, AFApiActivity, AFJavaApiActivity, AFUserAgentFragment, AFMutexActivity, AFHttpActivity, AFDnsActivity, AFBehaviorTypeAnalysis, AFBehaviorAnalysis, AFConnectionActivity, AFFileActivity
# APK Specific
from autofocus import AFApkActivityAnalysis, AFApkIntentFilterAnalysis, AFApkReceiverAnalysis, AFApkSensorAnalysis, AFApkServiceAnalysis, AFApkEmbededUrlAnalysis, AFApkRequestedPermissionAnalysis, AFApkSensitiveApiCallAnalysis, AFApkSuspiciousApiCallAnalysis, AFApkSuspiciousFileAnalysis, AFApkSuspiciousStringAnalysis
import sys, argparse, multiprocessing, os, re
__author__ = "<NAME> [karttoon]"
__email__ = "<EMAIL>"
__version__ = "1.1.7"
__date__ = "11OCT2016"
#######################
# Check research mode #
#######################
research_mode = "False"
try:
import ConfigParser
parser = ConfigParser.ConfigParser()
conf_path = os.environ.get("PANW_CONFIG", "~/.config/panw")
parser.read(os.path.expanduser(conf_path))
research_mode = parser.get("researcher", "enabled")
except:
pass
####################
# Build structures #
####################
def build_field_list():
field_list = {
"service" : [],
"registry" : [],
"process" : [],
"japi" : [],
"misc" : [],
"user_agent" : [],
"mutex" : [],
"http" : [],
"dns" : [],
"behavior_desc" : [],
"behavior_type" : [],
"connection" : [],
"file" : [],
"apk_misc" : [],
"apk_filter" : [],
"apk_receiver" : [],
"apk_sensor" : [],
"apk_service" : [],
"apk_embedurl" : [],
"apk_permission" : [],
"apk_sensitiveapi" : [],
"apk_suspiciousapi" : [],
"apk_file" : [],
"apk_string" : [],
"digital_signer" : [],
"imphash" : [],
"default" : []
}
return field_list
def build_field_dict():
field_dict = {
"service" :{},
"registry" :{},
"process" :{},
"japi" :{},
"misc" :{},
"user_agent" :{},
"mutex" :{},
"http" :{},
"dns" :{},
"behavior_desc" :{},
"behavior_type" :{},
"connection" :{},
"file" :{},
"apk_misc" :{},
"apk_filter" :{},
"apk_receiver" :{},
"apk_sensor" :{},
"apk_service" :{},
"apk_embedurl" :{},
"apk_permission" :{},
"apk_sensitiveapi" :{},
"apk_suspiciousapi" :{},
"apk_file" :{},
"apk_string" :{},
"digital_signer" :{},
"imphash" :{},
"default" :{}
}
return field_dict
def build_session_list():
session_list = {
"email_subject" :[],
"file_name" :[],
"application" :[],
"dst_country" :[],
"industry" :[],
"email_sender" :[],
"file_url" :[],
"email_recipient" :[],
"account_name" :[]
}
return session_list
##########################
# AF QUERY SECTION BELOW #
##########################
# Af Query Function
# Takes a type of query and the query itself as input. Example: af_query("hash",<sha256 hash>)
# Returns a properly formatted autofocus query to be passed to the autofocus API
def af_query(ident,query):
# A callable to find the proper field_value for the input_type hash, based on the query_value
def map_hash_value(qv):
if len(qv) == 32:
return "sample.md5"
if len(qv) == 40:
return "sample.sha1"
if len(qv) == 64:
return "sample.sha256"
raise Exception("Unknown hash type")
# Create a map of input_type to field_value
field_map = {
"ip" : "alias.ip_address",
"dns" : "alias.domain",
"hash" : map_hash_value,
"http" : "sample.tasks.http",
"file" : "sample.tasks.file",
"process" : "sample.tasks.process",
"mutex" : "sample.tasks.mutex",
"registry" : "sample.tasks.registry",
"service" : "sample.tasks.service",
"connection" : "sample.tasks.connection",
"user_agent" : "sample.tasks.user_agent",
"tag" : "sample.tag",
"hash_list" : "sample.sha256",
"file_url" : "session.fileurl",
"file_name" : "alias.filename"
}
# Create a map of input_type to operator
operator_map = {
"hash" : "is",
"user_agent" : "is",
"tag" : "is in the list",
"hash_list" : "is in the list"
}
# Lookup the operator to use with this input type
operator_value = operator_map.get(ident, "contains")
try:
# Get the field value from the map
field_value = field_map[ident]
# Is the query value callable? Call it with the query_value to get the field value (hashes)
if isfunction(field_value):
field_value = field_value(query)
except Exception as e:
# Mimic the original catch all, if we don't know what the field is, just exit
raise e
# Everything that is a list (including hash_list and tag)
if operator_value == "is in the list":
params = [v.strip() for v in query.split(",")]
# if we have less than 100 params, we only need one query field
if len(params) <= 100:
return '{"operator":"all","children":[{"field":"%s","operator":"%s","value":[%s]}]}' % (field_value, operator_value, ",".join(['"{}"'.format(v) for v in params]))
else:
# split our params into a list of lists so as to create queries with <=100 elements each.
chunked_params = [params[index:index + 100] for index in xrange(0, len(params), 100)]
# Build multiple groups of "in the list" queries
groups = ",".join(['{"field":"%s","operator":"%s","value":[%s]}' % (field_value, operator_value, ",".join(['"{}"'.format(v) for v in chunk])) for chunk in chunked_params])
# compile them into the final query.
return '{"operator":"any","children":[%s]}' % groups
else:
return '{"operator":"all","children":[{"field":"%s","operator":"%s","value":"%s"}]}' % (field_value, operator_value, query)
###########################
# FUNCTION SECTIONS BELOW #
###########################
# Hash Library Function
# Builds the hash library which is used by every other function
# Returns data as dictionary with each key being the hash and a dictionary value with each section featuring a list {hash:{section:[value1,value2]}}
def hash_library(args):
result_data = {}
input_data = []
if not args.quiet:
print "\n[+] hashes [+]\n"
if research_mode == "True":
poll_af = AFSample.scan
else:
poll_af = AFSample.search
count = 0
if args.ident == "query":
for sample in poll_af(args.query):
if count < args.limit:
input_data.append(sample.sha256)
count += 1
else:
break
else:
for sample in poll_af(af_query(args.ident,args.query)):
if count < args.limit:
input_data.append(sample.sha256)
count += 1
else:
break
# Set the number of workers to be three times the number of cores.
# These operations are not very CPU-intensive, we can get away with a higher number of processes.
pool_size = multiprocessing.cpu_count() * 3
pool = multiprocessing.Pool(processes=pool_size)
# Since we have to pass an iterable to pool.map(), and our worker function requires args to be passed we need to build a dictionary consisting of tuples. e.g:
# [ (args, hash_1), (args, hash_2), (args, hash_n) ]
pool_output = pool.map(hash_worker,[(args,item) for item in input_data])
pool.close()
pool.join()
for item in pool_output:
# structure of item is [{'hash' : { analysis data keys/values }}]
result_data[item.keys()[0]] = item[item.keys()[0]]
return result_data
# Hash worker function
# Designed to be used for parallel processing of samples
# Takes single tuple as argument from pool.map() and transforms those arguments to be used
# in hash_lookup()
def hash_worker(args_tuple):
args,sample_hash = args_tuple
if not args.quiet:
print(sample_hash)
return { sample_hash : hash_lookup(args,sample_hash) }
# Hash Lookup Function
# Basic hash lookup for a sample
# Provides raw data for each section requested
def hash_lookup(args, query):
# Dictionary mapping the raw data for each type of sample analysis
analysis_data = build_field_list()
# Map analysis types to analysis_data keys
analysis_data_map = {
AFServiceActivity : "service",
AFRegistryActivity : "registry",
AFProcessActivity : "process",
AFJavaApiActivity : "japi",
AFApiActivity : "misc",
AFUserAgentFragment : "user_agent",
AFMutexActivity : "mutex",
AFHttpActivity : "http",
AFDnsActivity : "dns",
AFBehaviorAnalysis : "behavior_desc",
AFBehaviorTypeAnalysis : "behavior_type",
AFConnectionActivity : "connection",
AFFileActivity : "file",
AFApkActivityAnalysis : "apk_misc",
AFApkIntentFilterAnalysis : "apk_filter",
AFApkReceiverAnalysis : "apk_receiver",
AFApkSensorAnalysis : "apk_sensor",
AFApkServiceAnalysis : "apk_service",
AFApkEmbededUrlAnalysis : "apk_embedurl",
AFApkRequestedPermissionAnalysis : "apk_permission",
AFApkSensitiveApiCallAnalysis : "apk_sensitiveapi",
AFApkSuspiciousApiCallAnalysis : "apk_suspiciousapi",
AFApkSuspiciousFileAnalysis : "apk_file",
AFApkSuspiciousStringAnalysis : "apk_string"
}
# If there are no counts for the activity, ignore them for the filter
for sample in AFSample.search(af_query("hash",query)):
for analysis in sample.get_analyses():
analysis_data_section = analysis_data_map.get(type(analysis), "default")
try:
if (analysis.benign_count + analysis.grayware_count + analysis.malware_count) < args.filter:
analysis_data[analysis_data_section].append(analysis._raw_line)
except:
pass
# Handle Behaviors which have no BGM values
if type(analysis) == AFBehaviorTypeAnalysis or type(analysis) == AFBehaviorAnalysis:
analysis_data[analysis_data_section].append(analysis._raw_line)
if sample.imphash:
analysis_data["imphash"].append(sample.imphash)
if sample.digital_signer:
analysis_data["digital_signer"].append(sample.digital_signer)
return analysis_data
# Common Artifacts Function
# Identifies lines that exist, per section, in every identified sample
# Must be a 100% match, unless adjusted by the -c flag, across all samples to be reported; thus samples that are unique on every install may not have certain entries appear
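# Worked example (values assumed for illustration): with "-c 80" the commonality
# threshold is 80/100 = 0.8, so if 10 samples are analyzed an artifact line must
# appear in at least 8 of them (count/total >= 0.8) to be reported.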
def common_artifacts(args):
commonality = float(args.commonality)/float(100)
# Used for collecting all of the artifacts and counts
compare_data = build_field_dict()
# Final collection of all common artifacts
common_data = build_field_list()
count = 0
hashes = hash_library(args)
for hash in hashes.keys():
# Sample data
hash_data = build_field_dict()
for section in hashes[hash]:
for value in hashes[hash][section]:
if value in compare_data[section] and value not in hash_data[section]:
compare_data[section][value] += 1
hash_data[section][value] = 1
if value not in compare_data[section] and value not in hash_data[section]:
hash_data[section][value] = 1
compare_data[section][value] = 1
count += 1
for section in compare_data:
for value in compare_data[section]:
if float(compare_data[section][value])/float(count) >= commonality:
match_percent = int(float(compare_data[section][value])/float(count) * 100)
if "range" in args.special:
common_data[section].append("%-3s | " % (match_percent) + value)
else:
common_data[section].append(value)
common_data['count'] = count # Keep track of how many samples processed
return common_data
# Common Pieces Function
# Similar to the "common_artifacts" function, but further breaks down each line to look for commonalities
# Will have more hits but likely less accurate
def common_pieces(args):
commonality = float(args.commonality)/float(100)
# Used for collecting all of the artifacts and counts
compare_data = build_field_dict()
# Final collection of all common pieces
common_pieces = build_field_list()
count = 0
hashes = hash_library(args)
for hash in hashes.keys():
# Sample data
hash_data = build_field_dict()
| |
eating
new_food_scanned = [x, y] # get coordinate of 8 tiles x 3 around to know whether have food which not exists in list food_position previous
# check if character food which we define in util.py is have in map's pacman
if self.InitMap.food in self.map.data[y][x]:
# if 8 tiles x 3 have coordinate of food and this coordinate is not exists in list food_position (list foods pacman scanned and need to eat)
# add this coordinate to list food_position for pacman ready to eat and decrease food_count (foods pacman had seen in map)
if new_food_scanned not in self.food_position:
self.food_count -= 1
self.food_position.append([x, y])
# if foods pacman had seen in map = 0, it's mean pacman had successfully scanned all food in map
if self.food_count == 0: self.map_scanned = True
# assume we have board 20x15
# manhattan distance from node: pacman_position to goal: (4, 5)
# if manhattan distance from node: (4, 5) to goal (food position) in 8 tiles x 3 < 3 steps, add that coordinate to list tiles
if manhattanDistance(self.location, new_food_scanned) <= 2: tiles.append(new_food_scanned)
return tiles
# create some important positions for pacman go to scan around to know in pacman's area have foods, walls or monsters
def create_important_positions(self):
# assume that we have board 5x5
important_position_map_scan = [] # list store important positions which pacman need to go to scan around of map
centerLine = int(self.view / 2) # centerLine = diameter / 2 = 5 / 2 = 2
x, y = 0, 0
# x, y = 0 < 5, x, y = 0 + 2 = 2 -> (2, 2)
# x = 2 < 5, y = 0 < 5, x = 2 + 2 = 4, y = 0 + 2 = 2 -> (4, 2)
while x < self.map.width:
x = x + centerLine
# x = 0 < 5, y = 2 < 5, x = 0 + 2 = 2, y = 2 + 2 = 4 -> (2, 4)
# x = 2 < 5, y = 2 < 5, x = 2 + 2 = 4, y = 2 + 2 = 4 -> (4, 4)
y = 0
while y < self.map.height:
y = y + centerLine
# avoid position is out of bound
if x >= self.map.width or y >= self.map.height: break
important_position_map_scan.append([x, y]) # add that important position to list
return important_position_map_scan # return list important positions for pacman move to
# check if important positions collision with wall, remove it from list to make pacman avoid going that positions
# if pacman have reach to these important positions, remove its position
def check_important_positions_collision_walls(self):
# if pacman scanned and have full view of map, pacman won't need check important positions collision with walls
if self.godEye == True: return
for goal in self.goal_state:
# (2, 2), (2, 4)...(2, 18), (4, 2),...(4,18), (6, 2),...(6, 18)...
x_coordinate, y_coordinate = goal[0], goal[1] # get coordinate of important position to check if it collisions with walls
if self.InitMap.wall in self.map.data[y_coordinate][x_coordinate]:
self.goal_state.remove(goal) # if important position pacman will go to collision with wall, remove it from list important positions
# find best move to closest food from successor
def food_best_move(self):
# if pacman scanned and have full view of map or level of pacman is 1 or 2
if self.godEye == True: nearestCapsule, foodDist = self.closest_capsule(self.food_position) # get food position with min distance for pacman eating
else:
# if pacman didn't scan full view of map and still have food which pacman didn't scanned
if self.map_scanned == False or self.food_count > 0:
self.check_important_positions_collision_walls() # check important positions pacman will go collision with walls
self.remove_duplicate_important_positions() # remove important positions which pacman had gone to avoid a important position pacman scan twice
nearestFood, foodDist = self.closest_capsule(self.goal_state) # get food position with min distance if pacman scanned and finded nearest food from important positions
# if pacman scanned all food around and not exist food any more for pacman to find
# empty food_count if pacman went to important positions
elif self.food_count <= 0: nearestFood = []
nearest_food, foodDist = self.closest_capsule(self.food_position) # get food position with min distance
# if empty, copy nearest food from nearest food position previous
if len(nearest_food) == 0: nearestCapsule = deepcopy(nearestFood) # if nearest_food is fully eaten, fall back to nearestFood (important positions) to continue scanning for the nearest food
else: nearestCapsule = deepcopy(nearest_food)
bestMove = [] # list store best move for pacman eating food
# if pacman stand right of nearest food, go left
if self.location[0] > nearestCapsule[0]: bestMove.append("left")
# if pacman stand left of nearest food, go right
if self.location[0] < nearestCapsule[0]: bestMove.append("right")
# if pacman stand down of nearest food, go up
if self.location[1] > nearestCapsule[1]: bestMove.append("up")
# if pacman stand up of nearest food, go down
if self.location[1] < nearestCapsule[1]: bestMove.append("down")
return bestMove # return list best moves for eating food with min distance
# eat food when pacman scanned position of foods
def eat_food(self):
self.food -= 1
# if empty food, pacman win the game
if self.food <= 0: return util.isWin()
return None
# evaluation which food is closest for pacman to eat in 8 tiles x 3
def closest_capsule(self, caps_pos):
'''
# this evaluation function should be improved for better scanning food
min_food_distance = -1
nearestFood = []
for foods in betterFood:
newFood_x, newFood_y = foods[0], foods[1]
dist = manhattanDistance([x_pacman, y_pacman], [newFood_x, newFood_y])
if min_food_distance >= dist or min_food_distance == -1:
min_food_distance = dist
nearestFood = deepcopy(foods)
return nearestFood, min_food_distance
'''
INFINITY = 99999
x_pacman, y_pacman = self.location[0], self.location[1] # get pacman position
foodDist = INFINITY
nearestCapsule = [] # list store for nearest food for pacman eating
for caps in caps_pos:
newFood_x, newFood_y = caps[0], caps[1] # get food nearests
dist = manhattanDistance([x_pacman, y_pacman], [newFood_x, newFood_y]) # compute distance from pacman position to position of foods nearests
# get min distance (food with min distance to pacman position)
if dist < foodDist:
foodDist = dist
nearestCapsule = deepcopy(caps) # copy positions for list nearest for pacman know which food is nearest
return nearestCapsule, foodDist
# random move pacman instead of stopping at a fixed position
def random_move(self, move):
# if pacman have another possible move beside stop action, make pacman move instead of stopping at a fixed position
if len(move) > 1:
try: move.remove("stop")
except ValueError: pass
for direction in move:
x_pacman, y_pacman = self.location[0], self.location[1] # get position of pacman
new_location_pacman = [] # list store new position when pacman moving
if direction == "left": new_location_pacman = [x_pacman - 1, y_pacman] # add new position pacman after moving left to list
elif direction == "right": new_location_pacman = [x_pacman + 1, y_pacman] # add new position pacman after moving right to list
elif direction == "up": new_location_pacman = [x_pacman, y_pacman - 1] # add new position pacman after moving up to list
elif direction == "down": new_location_pacman = [x_pacman, y_pacman + 1] # add new position pacman after moving down to list
# if new_location random is come back to previous location but pacman have another direction which pacman can go
# remove that direction to make pacman go another direction to help pacman scan full view of map
if self.previous_location == new_location_pacman:
if len(move) > 1: move.remove(direction) # remove direction which make pacman can go back previous position if pacman have another direction to move
# if not exist another direction pacman can move
elif len(move) == 1:
for action in self.actions:
if action == "stop" or action == direction: continue # pacman can stop or move only that direction
move.append(action)
# check if can remove current direction
if len(move) == 1: break # only direction pacman can go, but it can make pacman go back previous position, pacman should stop
<filename>spinnaker-monitoring-daemon/spinnaker-monitoring/spectator_metric_transformer.py
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transform spectator metrics so they appear different than produced.
This is to allow them to be written into a metric store in a predictable
way if needed or desired. It also allows letting the metrics appear different
to refactor the data model without having to make global code changes.
"""
import collections
import logging
import re
import yaml
class TimestampedMetricValue(
collections.namedtuple('TimestampedMetricValue', ['timestamp', 'value'])):
"""Represents a value of a particular metric and the time for it.
This is used to facilitate aggregating when we drop tags.
"""
@staticmethod
def from_json(data):
"""Construct TimestampedMetricValue from dictionary in json response."""
return TimestampedMetricValue(data['t'], data['v'])
def aggregate_json(self, data):
"""Aggregate this value with another value from json response.
This is used when dropping tags to combine values together.
"""
return TimestampedMetricValue(max(self.timestamp, data['t']),
self.value + data['v'])
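# Minimal usage sketch (hypothetical values, not part of the original module):
#   first = TimestampedMetricValue.from_json({'t': 1000, 'v': 2.0})
#   merged = first.aggregate_json({'t': 2000, 'v': 3.0})
#   # merged == TimestampedMetricValue(timestamp=2000, value=5.0)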
class MetricInfo(object):
"""Manages the value for a specific spectator measurement.
"""
def __init__(self, value_json, sorted_tags):
self.__timestamp = value_json['t']
self.__value = value_json['v']
self.__tags = sorted_tags
def aggregate_value(self, value_json):
"""Aggregate another value into this metric."""
self.__value += value_json['v']
self.__timestamp = max(self.__timestamp, value_json['t'])
def encode_as_spectator_response(self):
"""Encode this metric info as a spectator response measurement."""
response = {'values': [{'t': self.__timestamp, 'v': self.__value}]}
if self.__tags:
response['tags'] = self.__tags
return response
class AggregatedMetricsBuilder(object):
"""Re-aggregates a collection of metrics to accumulate similar instances.
This is used to aggregate multiple similar metric samples if tags
were removed. Where there used to be multiple distinct metrics from
different tag values, there is now a single metric value for the
aggregate of what had been partitioned by the removed tag(s).
The builder will rebuild the collection of metrics based on the
unique tag combinations and each combination's aggregate value.
The timestamp for each metric will be the most recent timestamp from
the individual partitions that went into the aggregate.
"""
def __init__(self, discard_tag_values):
self.__tags_to_metric = {}
self.__discard_tag_values = discard_tag_values
def add(self, value_json, tags):
"""Add a measurement to the builder."""
def find_tag_value(tag):
"""Find value for the specified tag, or None."""
for elem in tags:
if elem['key'] == tag:
return elem['value']
return None
if tags:
for key, compiled_re in self.__discard_tag_values.items():
if compiled_re.match(str(find_tag_value(key))):
# ignore this value because it has undesirable tag value.
return
tags = sorted(tags) if tags else None
key = str(tags)
metric = self.__tags_to_metric.get(key)
if not metric:
metric = MetricInfo(value_json, tags)
self.__tags_to_metric[key] = metric
else:
metric.aggregate_value(value_json)
def build(self):
"""Encode all the measurements for the meter."""
return [info.encode_as_spectator_response()
for info in self.__tags_to_metric.values()]
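# Minimal usage sketch (hypothetical values, not part of the original module):
# two samples that share the same remaining tag are folded into one measurement,
# keeping the newest timestamp and the summed value.
#   builder = AggregatedMetricsBuilder(discard_tag_values={})
#   builder.add({'t': 1000, 'v': 1.0}, [{'key': 'success', 'value': 'true'}])
#   builder.add({'t': 2000, 'v': 2.0}, [{'key': 'success', 'value': 'true'}])
#   builder.build()
#   # -> [{'values': [{'t': 2000, 'v': 3.0}],
#   #      'tags': [{'key': 'success', 'value': 'true'}]}]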
class SpectatorMetricTransformer(object):
"""Transform Spectator measurement responses.
This transforms responses so that the metrics appear to have different
definitions than they really did. Typically this means changing
the metric name and/or tags, perhaps adding or removing some.
The transformer applies rules to encoded spectator metrics to produce
alternative encodings as if the original spectator metric was the
intended metric produced by the rule. This allows the transformer to
be easily injected into the processing pipeline from the scrape.
Rules are keyed by the spectator meter name that they apply to. Therefore,
every kept meter needs a distinct rule entry even if the rule is otherwise
the same as another.
Each entry in a rule is optional. Missing entries means "the identity".
However the omission of a rule entirely means take the default action
on the transformer which is either to discard the metric (default)
or keep it as is.
Transformation rules are as follows:
'transform_name': <transform_name_map>
'tags': <tag_list>
'transform_tags': <tag_transform_list>
'add_tags' <added_tag_bindings>
'discard_values': <discard_tag_value_list>
'per_account': <per_account>
'per_application': <per_application>
where:
* <transform_name_map> is a map of <target>: <target_name>
which allows the rule to support multiple monitoring systems,
where each is given a different name to follow that particular
systems naming conventions. The rest of the transform is the same.
The <target> is an arbitrary key but should be the system name for
readability. The target will be specified by the caller as part of
the transform request.
If the <target_name> is not present but "default" is, then "default"
will be used. If a name key is present but empty then the metric will
be ignored for that name key. For example if the "default" value is
"my-metric" and a "stackdriver" key is empty and you ask for stackdriver,
the metric would be ignored, but if you ask for "prometheus" then the
name would become the default, "my-metric".
* <tag_list> is a list of tag names to keep as is. An empty list
means none of the tag names will be kept by default. If the
'tags' is not specified at all, and no 'transform_tags' are
specified then all the tags will be kept by default.
The 'statistic' tag is implicitly in this list if present because
it is required to interpret the values.
* <tag_transform_list> is a list of <tag_transform> where <tag_transform> is:
'from': <source_tag_name>
'to': <target_tag_name_or_names>
'type': <type_name_or_names>
'compare_value': <compare_string_value>
'extract_regex': <extract_regex>
where:
* <source_tag_name> is the tag in the spectator metric for the value(s)
* <target_tag_name_or_names> is either a string or list of strings
that specify one or more tags to produce. This can be/include the
same <source_tag_name> but the value will be rewritten.
if the value is a list, then multiple tags will be produced. In this
case the <extract_regex> should have a capture group for each
element.
* <type_name_or_names> is the type for the <target_tag_name_or_names>.
This should match the structure of <target_tag_name_or_names>.
types are as follows:
STRING: the original string value
INT: convert the original string value into an integer
BOOL: true if it matches the 'compare_value' else false.
* <compare_string_value> a string value used to compare against
the tag value when converting into a BOOL.
* <extract_regex> a regular expression used to extract substrings
from the original tag value to produce the new desired tag values.
* <added_tag_bindings> is a dictionary of key/value pairs for tags
that should be added. The tag values are constants. This is intended
to consolidate multiple spectator metrics into a single one using
an additional tag to discriminate the value.
* <discard_tag_value_list> is a list of [transformed] tag values to ignore
as if they never happened. The main motivation for this is to strip out
certain "statistic" dimensions if they arent needed since these ultimately
become other metrics which might not be wanted. The list is dictionary of
<tag>: <regex>
where
<tag> is the target tag name
<regex> is a regular expression to match for undesired values.
* If <per_account> is true, then keep the 'account' tag if present.
It is intended that downstream processors on these measurements may
break off the account and use it some other way in consideration of
its potentially high cardinality.
* If <per_application> is analogous to <per_account> but for the
'application' tag if present.
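For illustration only (the meter, tag, and target names below are made up
for this example and are not part of any real rule set), a rule entry built
from the fields described above might look like:

  controller.invocations:
    transform_name:
      default: controller_invocations
    tags:
      - method
    transform_tags:
      - from: status
        to: success
        type: BOOL
        compare_value: '2xx'
    add_tags:
      source: spectator
    per_account: true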
When the rule is instantiated in the transformer, it is pre-processed
and some additional tags are entered into it for internal use. These
internal tags begin with '_'.
Rules may have additional fields in them for purposes of specifying the
target metrics. However these are ignored by the transformer. Some of these
in practice are:
* 'kind' describes the type of metric (e.g. "Timer")
* 'unit' names what is being counted -- the units for the value.
Timers are always nanoseconds so the unit is for the "count" part.
* 'description' provides documentation on what the metric captures.
* 'aggregatable' denotes whether the values can be aggregated across
replicas. This is a hint suggesting a value is global so should
| |
"""
TeamReel DS API: Endpoints to analyze a new video with ML, get user analysis
results (that user's recent interview performance), get video analysis
results (interview performance in a specific video response), or
get top video responses to a prompt. Provides the following endpoints:
For DS/ML internal use:
(1) '/analyze_new_video': This endpoint gets the new video and
its DB info --> analyzes the video using our ML functions (+ also gets
any human feedback on that video from the TeamReel DB) --> adds/updates
the analysis for that video in the 'video_feedback' table in our DB.
Triggered by AWS Lambda function when any new video is uploaded by a user.
Pipeline: TeamReel user uploads new video in front-end -> save to our S3
bucket -> S3 posts notification by adding message to our SQS queue
-> AWS Lambda function checks SQS queue for messages -> AWS Lambda
function calls this API endpoint to trigger video analysis whenever
there is any message (video waiting to be processed) in SQS).
Returns true if received.
For front-end (Web/iOS) to call for info (video analysis from TeamReel DB):
(2) '/get_user_performance': Takes in a JSON with a TeamReel user_id
(the id in our DB's 'users' table), and returns a JSON with analysis of
that user's interview performance to date.
(3) '/get_prompt_top_responses': Takes in a JSON with a TeamReel
prompt_id (the id in our DB's prompts table), and returns a JSON with
that prompt's top 3 video responses.
(4) '/get_video_analysis': Takes in a JSON with a TeamReel
video_id (the id in our DB's videos table), and returns a JSON with
the performance analysis for that video.
"""
# Import modules/libraries we will use:
# Import external/third-party libraries we will use:
from dotenv import load_dotenv
from flask import Flask, jsonify, request, render_template
import json
import numpy as np
import pandas as pd
import os
import psycopg2
# Import internal functions we need for TeamReel data infra, video and DB:
from data_infra.data_pipelines import get_next_video
from data_infra.postgresql_db_functions import get_feedback_for_user
from data_infra.postgresql_db_functions import get_feedback_for_video, get_video_info
from audio_analysis.audio_functions import get_audio_from_video, get_transcript_from_audio
from audio_analysis.audio_functions import get_audio_sentiment_analysis, get_speed_of_speech
from audio_analysis.audio_functions import get_text_sentiment, remove_files
# Import functions we need from facial_analysis package
# Import functions we need from audio_analysis.background_noise module
# ----------------------------------------------------------------------------
# SETUP:
# Initialize our flask app (API):
application = Flask(__name__)
# Get access info from .env file:
load_dotenv()
# PostgreSQL DB info:
PG_DB_HOST = os.getenv("PG_DB_HOST")
PG_DB_PORT = os.getenv("PG_DB_PORT")
PG_DB_NAME = os.getenv("PG_DB_NAME")
PG_DB_USER = os.getenv("PG_DB_USER")
PG_DB_PW = os.getenv("PG_DB_PW")
PG_DB_URI = os.getenv("PG_DB_URI")
# Open a connection to our PostgreSQL DB:
pg_conn = psycopg2.connect(
host = PG_DB_HOST,
port = PG_DB_PORT,
database = PG_DB_NAME,
user = PG_DB_USER,
password = <PASSWORD>
)
# Instantiate a cursor using this connection:
pg_cursor = pg_conn.cursor()
# ----------------------------------------------------------------------------
# fake API placeholder data:
import fake_data
FAKE_INPUT_USER_PERFORMANCE = fake_data.FAKE_INPUT_USER_PERFORMANCE
FAKE_INPUT_PROMPT_TOP_RESPONSES = fake_data.FAKE_INPUT_PROMPT_TOP_RESPONSES
FAKE_INPUT_VIDEO_ANALYSIS = fake_data.FAKE_INPUT_VIDEO_ANALYSIS
# FAKE_INPUT_VIDEO_ANALYZE = fake_data.FAKE_INPUT_VIDEO_ANALYZE
FAKE_OUTPUT_USER_PERFORMANCE = fake_data.FAKE_OUTPUT_USER_PERFORMANCE
FAKE_OUTPUT_PROMPT_TOP_RESPONSES = fake_data.FAKE_OUTPUT_PROMPT_TOP_RESPONSES
FAKE_OUTPUT_VIDEO_ANALYSIS = fake_data.FAKE_OUTPUT_VIDEO_ANALYSIS
# FAKE_OUTPUT_VIDEO_ANALYZE = fake_data.FAKE_OUTPUT_VIDEO_ANALYZE
# ----------------------------------------------------------------------------
# FLASK APPLICATION ENDPOINTS:
# Base route just so AWS doesn't show status as problematic:
@application.route('/')
def root():
title = """Welcome to the Team Reel Interview Analysis API!"""
api_welcome_string = "Endpoints:\n"
header_1="""For DS/ML Internal Use:"""
endpoint_1="""/analyze_new_video': This endpoint gets the new video and
its DB info --> analyzes the video using our ML functions (+ also gets
any human feedback on that video from the TeamReel DB) --> adds/updates
the analysis for that video in the 'video_feedback' table in our DB.
Triggered by AWS Lambda function when any new video is uploaded by a user.
Pipeline: TeamReel user uploads new video in front-end -> save to our S3
bucket -> S3 posts notification by adding message to our SQS queue
-> AWS Lambda function checks SQS queue for messages -> AWS Lambda
function calls this API endpoint to trigger video analysis whenever
there is any message (video waiting to be processed) in SQS).
Returns true if received."""
header_2="""For Front-end (Web, iOS, Android) to Get Info (Analysis):"""
endpoint_2="""/get_user_performance: Takes in a JSON with a TeamReel
user_id (the 'id' in our DB's 'users' table), and returns a JSON with
analysis of that user's interview performance to date."""
endpoint_3="""/get_prompt_top_responses: Takes in a JSON with a TeamReel
prompt_id (the id in our DB's prompts table), and returns a JSON with
that prompt's top 3 video responses."""
endpoint_4="""/get_video_analysis: Takes in a JSON with a TeamReel
video_id (the id in our DB's videos table), and returns a JSON with
the performance analysis for that video."""
return render_template("home.html",
title=title,
text=api_welcome_string,
header_1=header_1,
endpoint_1=endpoint_1,
header_2=header_2,
endpoint_2=endpoint_2,
endpoint_3=endpoint_3,
endpoint_4=endpoint_4
)
# ----------------------------------------------------------------------------
# '/analyze_new_video': This endpoint gets the new video and
# its DB info --> analyzes the video using our ML functions (+ also gets
# any human feedback on that video from the TeamReel DB) --> adds/updates
# the analysis for that video in the 'video_feedback' table in our DB.
# Triggered by AWS Lambda function when any new video is uploaded by a user.
#
# Pipeline: TeamReel user uploads new video in front-end -> save to our S3
# bucket -> S3 posts notification by adding message to our SQS queue
# -> AWS Lambda function checks SQS queue for messages -> AWS Lambda
# function calls this API endpoint to trigger video analysis whenever
# there is any message (video waiting to be processed) in SQS).
#
# Returns true if received.
@application.route("/analyze_new_video", methods=['GET'])
def analyze_new_video():
"""
'/analyze_new_video' endpoint: This endpoint gets the new video and
its DB info --> analyzes the video using our ML functions (+ also gets
any human feedback on that video from the TeamReel DB) --> adds/updates
the analysis for that video in the 'video_feedback' table in our DB.
Triggered by AWS Lambda function when a TeamReel user uploads a new video.
Pipeline: TeamReel user uploads new video in front-end -> save to our S3
bucket -> S3 posts notification by adding message to our SQS queue
-> AWS Lambda function checks SQS queue for messages -> AWS Lambda
function calls this API endpoint to trigger video analysis whenever
there is any message (video waiting to be processed) in SQS).
Returns true if received.
"""
# GET BASE MATERIALS: VIDEO, AUDIO, TRANSCRIPT:
# Get next video in line for analysis (recently uploaded by a user):
# (1) video_info dict = info about that video from our DB (video_id, etc.)
# (2) download .MP4 video file to project directory
video_info = get_next_video()
# Exception handling: If get_next_video() returns "No messages in queue."
# to indicate there are no messages in the SQS queue (no new videos
# uploaded since last analysis), then stop and return
# "No new videos uploaded since last check."
if video_info == "No messages in queue.":
return "No new videos uploaded since last check."
try:
video_filename = video_info['video']['s3_filename']
video_id = video_info['video']['video_id']
video_s3_key = video_info['video']['s3_key']
except KeyError:
db_error = "KeyError: There is no information about this video in our database."
print(db_error)
return db_error
print(f"video_id: {video_id} \nvideo_s3_key: {video_s3_key}") # [?? To do: remove this! ??]
# Get audio from the video file:
audio_filename = get_audio_from_video(video_filename=video_filename,
save_audio_as='audio.wav')
# Get transcript for the audio (which is from the video):
transcript_filename = get_transcript_from_audio(audio_filename=audio_filename,
save_transcript_as='audio_transcript.txt')
transcript_string = open(transcript_filename).read().replace("\n", " ")
# --------------------------------------------------------------------
# SENTIMENT ANALYSIS:
# VISUAL SENTIMENT:
# [?? To add: Facial centering: Call Chris Huskey's master function and get results ??]
# visual_sentiment_results = [?? To add ??]
# Values for our DB videos_feedback table:
sentiment_visual = np.random.uniform(3, 5) # [?? To do: REMOVE this ??]
sentiment_visual_details_fake = {
"emotions": {
"sad": 0.4816375482887931,
"calm": 0.8443668165181737,
"fear": 0.9012952623858596,
"angry": 0.031246441854258622,
"happy": 0.45286566659565175,
"confused": 0.163269892703233,
"disgusted": 0.9995419575080721,
"surprised": 0.7591465415994776
}
}
sentiment_visual_details = json.dumps(sentiment_visual_details_fake)
# AUDIO AND TEXT SENTIMENT:
audio_sentiment = get_audio_sentiment_analysis(audio_filename=audio_filename)
text_sentiment = get_text_sentiment(file=transcript_filename)
# Values for our DB videos_feedback table:
sentiment_audio = np.random.uniform(3, 5) # [?? To do: REMOVE this ??]
sentiment_audio_details = json.dumps(audio_sentiment)
# --------------------------------------------------------------------
# SPEAKING SPEED:
speaking_speed = get_speed_of_speech(transcript_filename=transcript_filename,
audio_filename=audio_filename)
# Speaking speed summary stats:
ss_mean = 160
ss_std_dev = 30
ss_high_normal = ss_mean + ss_std_dev
ss_low_normal = ss_mean - ss_std_dev
ss_high_extreme = ss_mean + 2*ss_std_dev
ss_low_extreme = ss_mean - 2*ss_std_dev
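# Illustration of the banding below (using the assumed stats above, mean 160 wpm,
# std dev 30): 130-190 wpm scores 5 (within one std dev); 100-130 or 190-220 wpm
# scores 4 (within two std devs); speeds outside those bands fall into the extreme
# cases handled below.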
# Score for our DB videos_feedback table:
# Translate to a score from 1-5:
if (speaking_speed > ss_low_normal) and (speaking_speed < ss_high_normal):
speaking_speed_score = 5
elif (speaking_speed > ss_low_extreme and speaking_speed <= ss_low_normal) or (speaking_speed >= ss_high_normal and speaking_speed < ss_high_extreme):
speaking_speed_score = 4
elif (speaking_speed <= ss_low_extreme) or (speaking_speed | |
"""
Economic Model for VF
Created on 30 March 2020
Author: <NAME>
Contact: <EMAIL>
"""
# ========= IMPORT LIBRARIES ======= #
import numpy as np
import math
import matplotlib.pyplot as plt
import datetime
# ========== GLOBAL VARIABLES ====== #
#Time parameters
YEARLY_TO_MONTHLY_31 = 11.77 # approx. 365 days / 31-day months
DAYS_IN_MONTH = 31
DAYS_IN_YEAR = 365
WEEKS_IN_YEAR = 52
DAYS_IN_WEEK = 7
DAYS_IN_QUARTER = 112
ROI_THRESHOLD = -5 # Below this Number indicates Bankruptcy
GROWING_AREA_RATIO_TO_TOTAL = 0.5
# =========== CREATION OF TIME SERIES
days = 3660 # Days of simulation
days_timeseries =[] # Creation of a timeseries for days
for i in range(days+1):
days_timeseries.append(i)
years = math.floor(days / 365) # Years of Simulation
years_series = [] # Creation of time series for years
for i in range(years + 1):
years_series.append(i)
# ====== USER INPUTS ======== #
yield_required = 9000 #Annual yield (kg)
harvest_weight = 0.1 # 100g of lettuce
land_area = 200
crop_price = 10 # £ per kg
crops_per_area = 20 # per sq-m of growbed
no_of_tiers = 15
# Capital Expenditure
def calc_capex(land_area):
'''
PP. 51 of Plant Factory
Initial cost including necessary facilities (15 tiers, 50cm distance between tiers)
$4000 USD per sq-m x 0.8 for £
'''
capex = 4000*0.8*land_area
return capex
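# Worked example with the user inputs above: calc_capex(200) = 4000 * 0.8 * 200
# = £640,000 of initial capital expenditure.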
# Annual Productivity
def calc_yield(land_area, GROWING_AREA_RATIO_TO_TOTAL, no_of_tiers, crops_per_area):
'''
PP. 51 of Plant Factory
3000 lettuce heads per sq-m per year (80-100g fresh weight)
20 plants per sq-m (culture bed) x 15 tiers x 0.9 ratio salable
x 0.5 effective floor ratio of tiers to total floor area
50% of floor area used for operations, walkway, seedlings, production
equipment.
12-15 days to harvest
20-22 days seed to seedling
'''
yield_potential = land_area * GROWING_AREA_RATIO_TO_TOTAL\
* crops_per_area * no_of_tiers * harvest_weight
return yield_potential
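# Worked example with the user inputs above: 200 sq-m * 0.5 growing ratio
# * 20 crops per sq-m * 15 tiers * 0.1 kg per head = 3,000 kg of annual yield potential.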
# =========== OVERALL FIXED COSTS ========== #
# Depreciation of building and facilities
# Tax or Rent of Land/Building
# Insurance
# Basic Salaries
# Basic Charges for Electricity and Municipal Water
# ========== ACTIVITIES ====== #
'''
Cost Components from PP.52 Plant Factory
Labour : 25-30%
Electricity: 25-30%
Depreciation: 25-35%
Logistics: 9.8%
Consumables: 7.6%
Seeds: 2.1%
Other: 11%
'''
# --------- PURCHASING CONSUMABLES ------- #
# CLEANING SUPPLIES - FIXED COST
# SEEDS - VARIABLE COST
def calc_seeds(land_area):
'''
Seeds typically account for 2.1% production costs
PP. 51 of Plant Factory
3000 lettuce heads per sq-m per year (80-100g fresh weight)
20 plants per sq-m (culture bed) x 15 tiers x 0.9 ratio salable
x 0.5 effective floor ratio of tiers to total floor area
50% of floor area used for operations, walkway, seedlings, production
equipment.
12-15 days to harvest
20-22 days seed to seedling
'''
qty_of_seeds = yield_potential/harvest_weight # annual qty of seeds required
seeds_cost = qty_of_seeds * 0.01
return seeds_cost
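# Worked example with the figures above: 3,000 kg / 0.1 kg per head = 30,000 seeds
# per year, costing 30,000 * £0.01 = £300.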
# PACKAGING - VARIABLE COST
"""
Consumables typically account for 7.5% production costs
"""
# SUBSTRATE - VARIABLE COST
# NUTRIENTS - VARIABLE COST
# CO2 - VARIABLE COST
# PEST MANAGEMENT - VARIABLE COST
# --------- SOWING AND PROPAGATION ------- #
# CLEANING & SYSTEM MAINTENANCE - FIXED COST
# WATER & ENERGY - FIXED COST
# DEPRECIATION - FIXED COST
# LABOUR - VARIABLE COST
# --------- GROWING ------- #
# UTILITIES -
"""
Electricity typically accounts for 21% of Production costs PP.52 Plant Factory
"""
def calc_utilities(yield_potential): # Energy and Water
water_consumption = yield_potential*1
energy_consumption = yield_potential*1
utilities_annual = water_consumption*energy_consumption
return utilities_annual
# LABOUR
def calc_labour(yield_potential):
"""
Labour Costs Formaula
Notes
------
Direct farm labour cost = Number of staff working full-time x wages x 30 hours
Generalisation if statement on farm labour required if unknown
"""
farm_hours = yield_potential*0.2
labour_cost = farm_hours * 7 # wage
return labour_cost
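# Worked example with the figures above: 3,000 kg * 0.2 = 600 farm hours per year,
# costing 600 * £7 = £4,200.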
# DEPRECIATION
'''
The economic life period for calculating the depreciation differs from country to country.
In Japan, it is 15 years for the PFAL building, 10 years for the facilities, and 5 years
for the LED lamps.
Typically accounts for 21% of Production costs
'''
# EXPECTED YIELDS
def calc_expected_yield(yield_potential):
yield_rate = 0.97 # Ratio of marketable plants produced divided by no. of seeds transplanted
expected_yield = yield_potential * yield_rate
return expected_yield
# --------- HARVESTING AND PACKAGING ------- #
# LABOUR - Variable costs
# CLEANING & SYSTEM MAINTENANCE - Variable costs
# WASTE MANAGEMENT
# --------- PACKING AND DELIVERY ------- #
# DELIVERY LABOUR / OUTSOURCING FEES
"""
Packing and Delivery Typically 6-8% of production cost when near City
12% when outside city PP.52 of Plant Factory
"""
"""
Logistics typically accounts for 9.8% PP.52 of Plant Factory
"""
# VEHICLE MAINTENANCE AND INSURANCE
# COMPLIANCE
# --------- SALES & MARKETING ------- #
# MARKETING COSTS
# OFFICE EXPENSES
# ==================== FINANCES ================ #
# OpEx Time series
'''The component costs for electricity, labor,
depreciation, and others of the PFAL using fluorescent (FL) lamps
in Japan accounted for, on average, 25% e 30%, 25% e 30%, 25% e 35%,
and 20%, respectively.
'''
def calc_cogs(yield_potential):
'''
seeds_cost + nutrients_cost + co2_cost + (labour_cost * 50) + packaging costs + media costs
'''
cogs_annual = yield_potential*2
return cogs_annual
def calc_cogs_time_series(days, cogs_annual):
"""
Cost of Goods Sold Formula
Notes
-----
Can adjust for days/weekly/monthly/annually in the future - ASSUMED: CONSUMABLES PURCHASED MONTHLY
"""
cogs_time_series = []
for i in range(days):
if i % DAYS_IN_MONTH == 0:
cogs_time_series.append(cogs_annual / YEARLY_TO_MONTHLY_31)
else:
cogs_time_series.append(0)
return cogs_time_series
def calc_opex_time_series(days, labour_cost, utilities):
"""
Can adjust for days/weekly/monthly/annually in the future - ASSUMED: CONSUMABLES PURCHASED QUARTERLY
Operations = Bill Growth Lights + Bill Environmental Control + Bill Misc Energy + Water Bill + Salary Cost + Maintenance Cost +
Distribution cost - Reduction from Renewable Energy
"""
opex_time_series = []
for i in range(days):
opex = 0
if i % DAYS_IN_MONTH == 0:
opex += (labour_cost / YEARLY_TO_MONTHLY_31) + (utilities / YEARLY_TO_MONTHLY_31)
if i % DAYS_IN_YEAR == 0:
opex += 0
opex_time_series.append(opex)
return opex_time_series
# Sales (ANNUALLY)
def calc_sales(expected_yield): # per year
sales = expected_yield*15 # £15 per kilo
return sales
def calc_revenue_time_series(days, sales):
revenue_time_series = []
for i in range(days):
revenue = 0
if i % DAYS_IN_WEEK == 0:
revenue += (sales/WEEKS_IN_YEAR) # sales across 365 days of the year
revenue_time_series.append(revenue)
return revenue_time_series
# Profit
def calc_profit_time_series(opex_time_series, cogs_time_series, revenue_time_series):
opex = np.asarray(opex_time_series)
cogs = np.asarray(cogs_time_series)
revenue = np.asarray(revenue_time_series)
profit_time_series = revenue - cogs - opex
return profit_time_series
# Loan
def calc_loan_repayment(capex, days):
# Pay back over 5 years (no interest)
monthly_loan_repayment = (capex / YEARLY_TO_MONTHLY_31) / 5 # spread the capex over five years of monthly payments
loan_time_series = []
for i in range(days):
repayment = 0
if i % DAYS_IN_MONTH == 0:
repayment = monthly_loan_repayment
loan_time_series.append(repayment)
return loan_time_series
# Tax
def calc_tax(days, profit_time_series):
"""Returns tax as daily series"""
tax_time_series = []
for i in range(days):
if i % DAYS_IN_YEAR == 0 and i > 0:
# charge 20% tax annually on the profit accumulated over the preceding year
tax_time_series.append(np.sum(profit_time_series[i - DAYS_IN_YEAR:i]) * 0.2)
else:
tax_time_series.append(0)
tax = np.asarray(tax_time_series)
return tax
# Post-tax profit
def calc_post_profit(profit_time_series, loan_repayments, tax):
post_profit_time_series = profit_time_series - loan_repayments - tax
return post_profit_time_series
def calc_post_profit_annual_series(post_profit_time_series, years):
post_profit = np.cumsum(post_profit_time_series)
profit_series = [0]
for i in range(years):
profit_series.append(post_profit[(i + 1) * DAYS_IN_YEAR] - post_profit[i * DAYS_IN_YEAR]) # profit earned during year i + 1
profit_annual_series = np.asarray(profit_series)
return profit_annual_series
# Return on Investment - Annually
def calc_roi(profit_annual_series, capex):
roi = (profit_annual_series/capex) * 100
return roi
# Probability of Bankruptcy
def calc_probability_of_bankruptcy(roi, ROI_THRESHOLD, years):
PBS = [0] # Probability of Bankruptcy series: 1 if ROI falls below the threshold in a given year, else 0
for i in range(years):
if ROI_THRESHOLD > roi[i + 1]: # skip the leading zero entry in the ROI series
PBS.append(1)
else:
PBS.append(0)
return PBS
#Script for ROI Estimation
capex = calc_capex(land_area)
yield_potential = calc_yield(land_area, GROWING_AREA_RATIO_TO_TOTAL, no_of_tiers, crops_per_area)
cogs_annual = calc_cogs(yield_potential)
cogs_time_series = calc_cogs_time_series(days, cogs_annual)
utilities = calc_utilities(yield_potential)
labour = calc_labour(yield_potential)
opex_time_series = calc_opex_time_series(days, labour, utilities)
expected_yield = calc_expected_yield(yield_potential)
sales = calc_sales(expected_yield)
revenue_time_series = calc_revenue_time_series(days, sales)
profit_time_series = calc_profit_time_series(opex_time_series, cogs_time_series, revenue_time_series)
loan_time_series = calc_loan_repayment(capex, days)
tax_time_series = calc_tax(days, profit_time_series)
post_profit_time_series = calc_post_profit(profit_time_series, loan_time_series, tax_time_series)
profit_annual_series = calc_post_profit_annual_series(post_profit_time_series, years)
roi = calc_roi(profit_annual_series, capex)
PBS = calc_probability_of_bankruptcy(roi, ROI_THRESHOLD, years)
#Plot
plt.plot(years_series, roi)
plt.xlabel('Years')
plt.ylabel('Annual ROI')
plt.show()
# Setting up Risk Assessment Plot
fig, ax = plt.subplots()
ax.plot(years_series, PBS, linewidth=1, color="g")
# Threshold Lines
'''
- Critical: 50% probability of bankruptcy within 3 years
- Substantial risk: 25% probability of bankruptcy within 5 years
- Moderate risk: 10% probability of bankruptcy within 10 years
- Safe: Less than 10% probability of bankruptcy within 10 years
'''
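# The dashed threshold lines below are straight lines through the origin whose slopes follow from
# the definitions above: critical = 0.50 / 3 years ~= 0.1666 per year, substantial = 0.25 / 5 years
# = 0.05 per year, and the safe/moderate boundary = 0.10 / 10 years = 0.01 per year.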
years_thresholds = np.asarray(years_series)
safe_threshold = 0.01*years_thresholds
substantial_threshold = 0.05*years_thresholds
critical_threshold = 0.1666*years_thresholds
safe_threshold = safe_threshold.tolist()
substantial_threshold = substantial_threshold.tolist()
critical_threshold = critical_threshold.tolist()
# Risk Assessment Graph Plot
#ax.plot([years_thresholds, years_thresholds], [safe_threshold, safe_threshold], "k--")
#ax.plot([years_thresholds, years_thresholds], [substantial_threshold, substantial_threshold], "k--")
#ax.plot([0., years], [critical_threshold, critical_threshold], "k--")
plt.suptitle('Risk Assessment')
plt.plot(years_series, PBS)
plt.plot(years_series, safe_threshold, "r--", label = "safe/moderate")
plt.plot(years_series, substantial_threshold, "r--", label = "moderate/substantial")
plt.plot(years_series, critical_threshold, "r--", label = "substantial/critical")
plt.ylim(0,1)
plt.xlim(0,years)
plt.grid(True)
plt.xlabel('Time (Years)')
plt.ylabel('Probability of Bankruptcy')
plt.show()
# Formulas for Productivity KPIs
def calc_eletricity_kpi(annual_yield, annual_energy_consumption):
elec_kpi = annual_yield / annual_energy_consumption
return elec_kpi
def calc_labour_kpi(annual_yield, annual_labour):
labour_kpi = annual_yield / annual_labour
return labour_kpi
def calc_cultivation_area_kpi(annual_yield, land_area, GROWING_AREA_RATIO_TO_TOTAL):
cultivation_kpi = annual_yield / (land_area * GROWING_AREA_RATIO_TO_TOTAL)
return cultivation_kpi
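# Hypothetical KPI usage (annual_energy_consumption in kWh and annual_labour in hours are
# placeholder figures, not outputs of the model above):
# elec_kpi = calc_eletricity_kpi(expected_yield, 250000)
# labour_kpi = calc_labour_kpi(expected_yield, 4000)
# cultivation_kpi = calc_cultivation_area_kpi(expected_yield, land_area, GROWING_AREA_RATIO_TO_TOTAL)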
print(' ERROR. Problem in loading file %s' % infile)
print(' Check to make sure filename matches an existing'
'file')
print(' If it does, there may be something wrong with the'
' fits header.')
print('')
raise IOError('Error in read_from_file')
else:
print('')
print('ERROR. File %s does not exist.' % infile)
print('')
raise IOError('Error in read_from_file')
"""
Set parameters related to image properties and return the hdulist
"""
self.infile = os.path.basename(infile)
return hdu
# -----------------------------------------------------------------------
def read_wcsinfo(self, wcshdr, verbose=True):
"""
Reads in WCS information from the header and saves it, if it's
there, in some attributes of the class
"""
""" Set some defaults """
raax = 0
decax = 1
rakey = 'naxis1'
deckey = 'naxis2'
""" Get the WCS information out of the header if it's there """
try:
wcsinfo = wcs.WCS(wcshdr)
except:
if verbose:
if self.infile is not None:
print('No WCS information in image header: %s' % self.infile)
else:
print('No WCS information in image header')
self.wcsinfo = None
raise KeyError
"""
Make sure that the WCS information is actually WCS-like and not,
for example, pixel-based
"""
imwcs = wcsinfo.wcs
rafound = False
decfound = False
for count, ct in enumerate(imwcs.ctype):
if ct[0:2] == 'RA':
rafound = True
raax = count
rakey = 'naxis%d' % (raax + 1)
if ct[0:3] == 'DEC':
decfound = True
decax = count
deckey = 'naxis%d' % (decax + 1)
if rafound is False or decfound is False:
if verbose:
print('No valid WCS information in image header')
print(' CTYPE keys are not RA/DEC')
self.wcsinfo = None
raise KeyError
""" Get the RA and Dec of the center of the image """
xcent = wcshdr[rakey] / 2.
ycent = wcshdr[deckey] / 2.
imcent = np.ones((1, wcshdr['naxis']))
imcent[0, raax] = xcent
imcent[0, decax] = ycent
imcentradec = wcsinfo.wcs_pix2world(imcent, 1)
radec = coords.radec_to_skycoord(imcentradec[0, raax],
imcentradec[0, decax])
""" Get the pixel scale and image rotation """
impa = coords.matrix_to_rot(wcsinfo.pixel_scale_matrix, raax=raax,
decax=decax)
pixscale = wcs.utils.proj_plane_pixel_scales(wcsinfo.celestial) \
* 3600.
""" Summarize the WCS information """
if verbose:
print('Pixel scale (x, y): (%7.3f, %7.3f) arcsec/pix' %
(pixscale[0], pixscale[1]))
print('Instrument FOV (arcsec): %7.1f x %7.1f' %
(pixscale[0] * wcshdr[rakey],
pixscale[1] * wcshdr[deckey]))
print('Image position angle (E of N): %+7.2f' % impa)
""" Add the information to the object """
self.wcsinfo = wcsinfo
self.raaxis = raax + 1
self.decaxis = decax + 1
self.radec = radec
self.pixscale = pixscale
self.impa = impa
self.crpix = wcsinfo.wcs.crpix
self.crval = wcsinfo.wcs.crval
# -----------------------------------------------------------------------
def __add__(self, other):
"""
Adds either a constant or another WcsHDU or other flavor of HDU to
the data in this WcsHDU object
"""
""" Get the data and header """
data = self.data.copy()
hdr = self.header.copy()
""" Do the addition """
if isinstance(other, (float, int)):
data += other
elif isinstance(other, (WcsHDU, pf.PrimaryHDU, pf.ImageHDU)):
data += other.data
else:
raise TypeError('\nAdded object must be one of: int, float, '
'WcsHDU, PrimaryHDU, or ImageHDU')
""" Return a new WcsHDU object """
return WcsHDU(data, inhdr=hdr, verbose=False, wcsverb=False)
# -----------------------------------------------------------------------
def __sub__(self, other):
"""
Subtracts either a constant or another WcsHDU or other flavor of HDU from
the data in this WcsHDU object
"""
""" Get the data and header """
data = self.data.copy()
hdr = self.header.copy()
""" Do the addition """
if isinstance(other, (float, int)):
data -= other
elif isinstance(other, (WcsHDU, pf.PrimaryHDU, pf.ImageHDU)):
data -= other.data
else:
raise TypeError('\nSubtracted object must be one of: int, float, '
'WcsHDU, PrimaryHDU, or ImageHDU')
""" Return a new WcsHDU object """
return WcsHDU(data, inhdr=hdr, verbose=False, wcsverb=False)
# -----------------------------------------------------------------------
def __mul__(self, other):
"""
Multiplies the data in this WcsHDU object by either a constant or
another WcsHDU or other flavor of HDU
"""
""" Get the data and header """
data = self.data.copy()
hdr = self.header.copy()
""" Do the addition """
if isinstance(other, (float, int)):
data *= other
elif isinstance(other, (WcsHDU, pf.PrimaryHDU, pf.ImageHDU)):
data *= other.data
else:
raise TypeError('\nMultiplied object must be one of: int, float, '
'WcsHDU, PrimaryHDU, or ImageHDU')
""" Return a new WcsHDU object """
return WcsHDU(data, inhdr=hdr, verbose=False, wcsverb=False)
# -----------------------------------------------------------------------
def __truediv__(self, other):
"""
Divides the data in this WcsHDU object by either a constant or
another WcsHDU or other flavor of HDU
"""
""" Get the data and header """
data = self.data.copy()
hdr = self.header.copy()
""" Do the addition """
if isinstance(other, (float, int)):
data = data / other  # avoid in-place true division, which fails for integer data arrays
elif isinstance(other, (WcsHDU, pf.PrimaryHDU, pf.ImageHDU)):
data = data / other.data
else:
raise TypeError('\nDivisor object must be one of: int, float, '
'WcsHDU, PrimaryHDU, or ImageHDU')
""" Return a new WcsHDU object """
return WcsHDU(data, inhdr=hdr, verbose=False, wcsverb=False)
# -----------------------------------------------------------------------
def copy(self):
"""
Returns a copy of the WcsHDU object
"""
""" Use the built-in copy methods for the data and header """
data = self.data.copy()
hdr = self.header.copy()
""" Return a new WcsHDU object """
newhdu = WcsHDU(data, inhdr=hdr, verbose=False, wcsverb=False)
newhdu.infile = self.infile
return newhdu
# -----------------------------------------------------------------------
def cross_correlate(self, other, padfrac=0.6, shift=True, datacent=None,
datasize=None, othercent=None, hext=0,
reset_fft=False):
"""
Cross correlates the image data, or a subset of them, with the
image data in another object.
Inputs:
other - Other data set with which to correlate. Can be one of the
following: a numpy array, a PrimaryHDU, an ImageHDU,
or a HDUList
Output:
xcorr - cross-correlated data, returned as a WcsHDU object
"""
""" Select the portion of the data to be used """
if datacent is not None:
if datasize is None:
raise ValueError('\nif datacent is set, then datasize must '
'also be set')
x1 = int(datacent[0] - datasize/2.)
x2 = x1 + datasize
y1 = int(datacent[1] - datasize/2.)
y2 = y1 + datasize
# NEED TO ADD CHECKS
data = self.data[y1:y2, x1:x2]
else:
data = self.data.copy()
""" Set the size of the images to correlate, including padding """
ysize, xsize = data.shape
pad = int(padfrac * max(xsize, ysize))
padsize = int(2 * pad + max(xsize, ysize))
"""
If the conjugate FFT doesn't already exist, take the FFT of the
selected data and then take its conjugate
"""
if self.fftconj is None or reset_fft:
f1 = np.zeros((padsize, padsize))
f1[pad:pad+ysize, pad:pad+xsize] = data
F1 = fft2(f1)
F1c = np.conjugate(F1)
del f1, F1
else:
F1c = self.fftconj
""" Get the data the other data set """
if isinstance(other, np.ndarray):
odata2 = other
elif isinstance(other, (pf.PrimaryHDU, pf.ImageHDU, WcsHDU)):
odata2 = other.data
else:
raise TypeError('\nother must be a numpy array, PrimaryHDU, ImageHDU, '
'or WcsHDU')
if othercent is not None:
if datasize is None:
raise ValueError('\nif othercent is set, then datasize must '
'also be set')
x1 = int(othercent[0] - datasize/2.)
x2 = x1 + datasize
y1 = int(othercent[1] - datasize/2.)
y2 = y1 + datasize
# NEED TO ADD CHECKS
data2 = odata2[y1:y2, x1:x2]
else:
data2 = odata2.copy()
""" Make the FFT of the other data set """
f2 = np.zeros((padsize, padsize))
f2[pad:pad+ysize, pad:pad+xsize] = data2
F2 = fft2(f2)
""" Do the cross correlation and return the results as a WcsHDU """
if shift:
xc = fftshift(ifft2(F1c * F2)).real
else:
xc = ifft2(F1c * F2).real
return WcsHDU(xc, verbose=False, wcsverb=False)
# -----------------------------------------------------------------------
def make_hdr_wcs(self, inhdr, wcsinfo, keeplist='all', debug=False):
"""
Creates a new header that includes (possibly updated) wcs
information to use for an output file/HDU.
Inputs:
inhdr - Input header. This could be just the header of the
HDU that was used to create this Image object, but it
could also be some modification of that header or even
a brand-new header
wcsinfo - WCS information, which may be just the information in
the input file, but may also be a modification
keeplist - If set to 'all' (the default) then keep all of the
header cards in inhdr. If not, then just keep the
header cards -- designated as strings -- in keeplist
"""
"""
Eliminate, as much as possible, the WCS header keywords from
the original header. This is done to avoid possibly conflicting
information, e.g., a CD matrix in the original header and then
a CDELT + PC matrix from the new wcsinfo.
# Copyright 2019 The resource-policy-evaluation-library Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from urllib.parse import urlparse
from .base import Resource
from rpe.exceptions import is_retryable_exception
from rpe.exceptions import UnsupportedRemediationSpec
from rpe.exceptions import InvalidRemediationSpecStep
import tenacity
from googleapiclienthelpers.discovery import build_subresource
from googleapiclienthelpers.waiter import Waiter
# client for resource manager, will be lazily created later
resource_manager_projects = None
class GoogleAPIResource(Resource):
# Names of the get and update methods. Most are the same but override in
# the Resource if necessary
resource_property = None
get_method = "get"
update_method = "update"
# If a resource is not in a ready state, we can't update it. If we retrieve
# it, and the state changes, updates will be rejected because the ETAG will
# have changed. If a resource defines readiness criteria, the get() call
# will wait until the resource is in a ready state to return
#
# Key/Value to check to see if a resource is ready
readiness_key = None
readiness_value = None
def __init__(self, resource_data, **kwargs):
full_resource_path = "{}.{}".format(
self.service_name,
self.resource_path
)
self.service = build_subresource(
full_resource_path,
self.version,
**kwargs
)
# Store the extra kwargs in case we need to create new clients
self.kwargs = kwargs
# Support original update method until we can deprecate it
self.update = self.remediate
# If we are a property of a resource, also get the resource we're
# associated with
if self.is_property():
# Build resource data for the parent
parent_data = resource_data.copy()
parent_type = resource_data['resource_type'].rsplit('.', 1)[0]
parent_data['resource_type'] = parent_type
self.parent_resource = GoogleAPIResource.factory(
parent_data,
**kwargs
)
self.resource_data = resource_data
self._ancestry = None
def is_property(self):
return self.resource_property is not None
@staticmethod
def factory(resource_data, **kargs):
resource_type_map = {
'apps.services.versions.instances': GcpAppEngineInstance,
'bigquery.datasets': GcpBigqueryDataset,
'bigtableadmin.projects.instances': GcpBigtableInstance,
'bigtableadmin.projects.instances.iam': GcpBigtableInstanceIam,
'cloudfunctions.projects.locations.functions': GcpCloudFunction,
'cloudfunctions.projects.locations.functions.iam': GcpCloudFunctionIam,
'compute.instances': GcpComputeInstance,
'compute.disks': GcpComputeDisks,
'compute.subnetworks': GcpComputeSubnetwork,
'compute.firewalls': GcpComputeFirewall,
'container.projects.locations.clusters': GcpGkeCluster,
'container.projects.locations.clusters.nodePools': GcpGkeClusterNodepool,
'cloudresourcemanager.projects': GcpProject,
'cloudresourcemanager.projects.iam': GcpProjectIam,
'dataproc.clusters': GcpDataprocCluster,
'pubsub.projects.subscriptions': GcpPubsubSubscription,
'pubsub.projects.subscriptions.iam': GcpPubsubSubscriptionIam,
'pubsub.projects.topics': GcpPubsubTopic,
'pubsub.projects.topics.iam': GcpPubsubTopicIam,
'serviceusage.services': GcpProjectService,
'sqladmin.instances': GcpSqlInstance,
'storage.buckets': GcpStorageBucket,
'storage.buckets.iam': GcpStorageBucketIamPolicy
}
resource_type = resource_data.get('resource_type')
if not resource_type:
assert 0, 'Unrecognized resource'
if resource_type not in resource_type_map:
assert 0, 'Unrecognized resource'
cls = resource_type_map.get(resource_type)
return cls(resource_data, **kargs)
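# Hypothetical usage of the factory (field values are made up for illustration; real calls
# also need valid API client credentials passed as keyword arguments):
#
#   resource = GoogleAPIResource.factory({
#       'resource_type': 'storage.buckets',
#       'project_id': 'my-project',
#       'resource_name': 'my-bucket',
#   })
#   resource.type()   # -> 'gcp.storage.buckets'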
def type(self):
type_components = ["gcp", self.service_name, self.resource_path]
# Things like IAM policy are not separate resources, but rather
# properties of a resource. We may want to evaluate policy on these
# properties, so we represent them as resources and need to distinguish
# them in the resource type.
if self.is_property():
type_components.append(self.resource_property)
return ".".join(type_components)
# Google's documentation describes what it calls a 'full resource name' for
# resources. None of the APIs seem to implement it (except Cloud Asset
# Inventory). This attempts to generate it from the discovery-based api
# client's generated http request url.
#
# If we inject it into the resource, we can use it in policy evaluation to
# simplify the structure of our policies
def full_resource_name(self):
# If this is a resource property, return the resource's frn instead
if self.is_property():
return self.parent_resource.full_resource_name()
method = getattr(self.service, self.get_method)
uri = method(**self._get_request_args()).uri
uri_parsed = urlparse(uri)
domain = uri_parsed.netloc
path_segments = uri_parsed.path[1:].split('/')
# First we need the name of the api
if domain.startswith("www."):
# we need to get the api name from the path
api_name = path_segments.pop(0)
else:
# the api name is the first segment of the domain
api_name = domain.split('.')[0]
# occasionally the compute api baseUrl is returned as
# compute.googleapis.com/compute, in which case we need to remove
# the duplicated api reference
if api_name == path_segments[0]:
path_segments.pop(0)
# Remove the version from the path
path_segments.pop(0)
# Remove method from the last path segment
if ":" in path_segments[-1]:
path_segments[-1] = path_segments[-1].split(":")[0]
# Annoying resource-specific fixes
if api_name == 'storage' and path_segments[0] == 'b':
path_segments[0] = "buckets"
resource_path = "/".join(path_segments)
return "//{}.googleapis.com/{}".format(api_name, resource_path)
def get(self):
method = getattr(self.service, self.get_method)
# If the resource has readiness criteria, wait for it
if self.readiness_key and self.readiness_value:
waiter = Waiter(method, **self._get_request_args())
asset = waiter.wait(
self.readiness_key,
self.readiness_value,
interval=10,
retries=90
)
else:
asset = method(**self._get_request_args()).execute()
asset['_full_resource_name'] = self.full_resource_name()
# if this asset is a property, inject its parent
if self.is_property():
parent = self.parent_resource.get()
asset['_resource'] = parent
return asset
# Determine what remediation steps to take, fall back to the original resource-defined update method
def remediate(self, remediation):
# Check for an update spec version, default to version 1
remediation_spec = remediation.get('_remediation_spec', "v1")
if remediation_spec == "v1":
# If no remediation_spec is listed, fall back to previous behavior
# We inject the _full_resource_name in requests, so we need to remove it
for key in list(remediation):
if key.startswith('_'):
del remediation[key]
method_name = self.update_method
params = self._update_request_args(remediation)
self._call_method(method_name, params)
elif remediation_spec == "v2beta1":
required_keys = ['method', 'params']
for step in remediation.get('steps', []):
if not all(k in step for k in required_keys):
raise InvalidRemediationSpecStep()
method_name = step.get('method')
params = step.get('params')
self._call_method(method_name, params)
else:
raise UnsupportedRemediationSpec("The specified remediation spec is not supported")
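# Illustrative remediation payloads (field names and values are assumptions, not from the source):
#
#   v1 - the remediation body is passed directly to the resource's update method:
#       {'labels': {'env': 'prod'}}
#
#   v2beta1 - an explicit list of method calls to make against the resource:
#       {'_remediation_spec': 'v2beta1',
#        'steps': [{'method': 'patch', 'params': {'name': '...', 'body': {...}}}]}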
@tenacity.retry(
retry=tenacity.retry_if_exception(is_retryable_exception),
wait=tenacity.wait_random_exponential(multiplier=5, max=20),
stop=tenacity.stop_after_attempt(15)
)
def _call_method(self, method_name, params):
''' Call the requested method on the resource '''
method = getattr(self.service, method_name)
return method(**params).execute()
@property
def ancestry(self):
if self._ancestry:
return self._ancestry
# attempt to fill in the resource's ancestry
# if the target project has the cloudresourcemanager api disabled, this will fail
try:
global resource_manager_projects
if resource_manager_projects is None:
resource_manager_projects = build_subresource(
'cloudresourcemanager.projects', 'v1', **self.kwargs
)
self._ancestry = resource_manager_projects.getAncestry(
projectId=self.resource_data['project_id']
).execute()
except Exception:
# This call is best-effort. Any failures should be caught
pass
return self._ancestry
class GcpAppEngineInstance(GoogleAPIResource):
service_name = "appengine"
resource_path = "apps.services.versions.instances"
version = "v1"
readiness_key = 'vmStatus'
readiness_value = 'RUNNING'
update_method = "debug"
cai_type = None # unknown
def _get_request_args(self):
return {
'appsId': self.resource_data['resource_name'].split('/')[1],
'servicesId': self.resource_data['resource_name'].split('/')[3],
'versionsId': self.resource_data['resource_name'].split('/')[5],
'instancesId': self.resource_data['resource_name'].split('/')[-1]
}
def _update_request_args(self, body):
return {
'appsId': self.resource_data['resource_name'].split('/')[1],
'servicesId': self.resource_data['resource_name'].split('/')[3],
'versionsId': self.resource_data['resource_name'].split('/')[5],
'instancesId': self.resource_data['resource_name'].split('/')[-1]
}
class GcpBigqueryDataset(GoogleAPIResource):
service_name = "bigquery"
resource_path = "datasets"
version = "v2"
cai_type = "bigquery.googleapis.com/Dataset"
def _get_request_args(self):
return {
'datasetId': self.resource_data['resource_name'],
'projectId': self.resource_data['project_id']
}
def _update_request_args(self, body):
return {
'datasetId': self.resource_data['resource_name'],
'projectId': self.resource_data['project_id'],
'body': body
}
class GcpBigtableInstance(GoogleAPIResource):
service_name = "bigtableadmin"
resource_path = "projects.instances"
version = "v2"
update_method = "partialUpdateInstance"
readiness_key = 'state'
readiness_value = 'READY'
cai_type = "bigtableadmin.googleapis.com/Instance"
def _get_request_args(self):
return {
'name': 'projects/{}/instances/{}'.format(
self.resource_data['project_id'],
self.resource_data['resource_name']
),
}
def _update_request_args(self, body):
return {
'name': 'projects/{}/instances/{}'.format(
self.resource_data['project_id'],
self.resource_data['resource_name']
),
'body': body,
'updateMask': 'labels,displayName,type'
}
class GcpBigtableInstanceIam(GcpBigtableInstance):
resource_property = 'iam'
get_method = "getIamPolicy"
update_method = "setIamPolicy"
readiness_key = None
readiness_value = None
cai_type = "bigtableadmin.googleapis.com/Instance"
def _get_request_args(self):
return {
'resource': 'projects/{}/instances/{}'.format(
self.resource_data['project_id'],
self.resource_data['resource_name']
),
}
def _update_request_args(self, body):
return {
'resource': 'projects/{}/instances/{}'.format(
self.resource_data['project_id'],
self.resource_data['resource_name']
),
'body': {
'policy': body
}
}
class GcpCloudFunction(GoogleAPIResource):
service_name = "cloudfunctions"
resource_path = "projects.locations.functions"
version = "v1"
update_method = "patch"
cai_type = "cloudfunctions.googleapis.com/CloudFunction" # unreleased
def _get_request_args(self):
return {
'name': 'projects/{}/locations/{}/functions/{}'.format(
self.resource_data['project_id'],
self.resource_data['resource_location'],
self.resource_data['resource_name']
),
}
def _update_request_args(self, body):
return {
'name': 'projects/{}/locations/{}/functions/{}'.format(
self.resource_data['project_id'],
self.resource_data['resource_location'],
self.resource_data['resource_name']
),
'body': body
}
class GcpCloudFunctionIam(GcpCloudFunction):
resource_property = 'iam'
get_method = "getIamPolicy"
update_method = "setIamPolicy"
cai_type = "cloudfunctions.googleapis.com/CloudFunction" # unreleased
def _get_request_args(self):
return {
'resource': 'projects/{}/locations/{}/functions/{}'.format(
self.resource_data['project_id'],
self.resource_data['resource_location'],
self.resource_data['resource_name']
),
}
def _update_request_args(self, body):
return {
'resource': 'projects/{}/locations/{}/functions/{}'.format(
self.resource_data['project_id'],
self.resource_data['resource_location'],
self.resource_data['resource_name']
),
'body': {
'policy': body
}
}
class GcpComputeInstance(GoogleAPIResource):
service_name = "compute"
resource_path = "instances"
version = "v1"
cai_type = "compute.googleapis.com/Instance"
def _get_request_args(self):
return {
'instance': self.resource_data['resource_name'],
'zone': self.resource_data['resource_location'],
'project': self.resource_data['project_id']
}
def _update_request_args(self, body):
return {
'instance': self.resource_data['resource_name'],
'zone': self.resource_data['resource_location'],
'project': self.resource_data['project_id']
}
class GcpComputeDisks(GoogleAPIResource):
service_name = "compute"
resource_path = "disks"
version = "v1"
cai_type = "compute.googleapis.com/Disk"
def _get_request_args(self):
return {
'project': self.resource_data['project_id'],
'zone': self.resource_data['resource_location'],
'disk': self.resource_data['resource_name']
}
def _update_request_args(self, body):
return {
'project': self.resource_data['project_id'],
'zone': self.resource_data['resource_location'],
'disk': self.resource_data['resource_name']
}
class GcpComputeSubnetwork(GoogleAPIResource):
service_name = "compute"
resource_path = "subnetworks"
version = "v1"
update_method | |
import os
import multiprocessing
from meaningless.bible_web_extractor import WebExtractor
from meaningless.utilities import common
from meaningless.utilities.exceptions import UnsupportedTranslationError, InvalidPassageError
class BaseDownloader:
"""
A downloader object that stores Bible passages in a local file
"""
__translations_with_omitted_passages = {
'ASV': ['Matthew 17:21', 'Matthew 18:11', 'Matthew 23:14',
'Mark 7:16', 'Mark 9:44', 'Mark 9:46', 'Mark 11:26', 'Mark 15:28',
'Luke 17:36', 'Luke 23:17',
'John 5:4',
'Acts 8:37', 'Acts 15:34', 'Acts 24:7', 'Acts 28:29',
'Romans 16:24'],
'EHV': ['Matthew 23:14',
'Mark 15:28',
'Luke 17:36',
'Acts 8:37', 'Acts 15:34', 'Acts 24:7', 'Acts 28:29',
'Romans 16:24'],
'ESV': ['Matthew 12:47', 'Matthew 17:21', 'Matthew 18:11', 'Matthew 23:14',
'Mark 7:16', 'Mark 9:44', 'Mark 9:46', 'Mark 11:26', 'Mark 15:28',
'Luke 17:36', 'Luke 23:17',
'John 5:4',
'Acts 8:37', 'Acts 15:34', 'Acts 24:7', 'Acts 28:29',
'Romans 16:24'],
'ESVUK': ['Matthew 12:47', 'Matthew 17:21', 'Matthew 18:11', 'Matthew 23:14',
'Mark 7:16', 'Mark 9:44', 'Mark 9:46', 'Mark 11:26', 'Mark 15:28',
'Luke 17:36', 'Luke 23:17',
'John 5:4',
'Acts 8:37', 'Acts 15:34', 'Acts 24:7', 'Acts 28:29',
'Romans 16:24'],
'GW': ['Matthew 17:21', 'Matthew 18:11', 'Matthew 23:14',
'Mark 7:16', 'Mark 9:44', 'Mark 9:46', 'Mark 11:26', 'Mark 15:28',
'Luke 17:36', 'Luke 23:17',
'John 5:4',
'Acts 8:37', 'Acts 15:34', 'Acts 24:7', 'Acts 28:29',
'Romans 16:24'],
'ISV': ['Mark 15:28',
'Luke 17:36',
'Acts 8:37', 'Acts 15:34', 'Acts 24:7', 'Acts 28:29'],
'LEB': ['Matthew 17:21', 'Matthew 18:11', 'Matthew 23:14',
'Mark 7:16', 'Mark 9:44', 'Mark 9:46', 'Mark 11:26', 'Mark 15:28',
'Luke 17:36', 'Luke 23:17',
'John 5:4',
'Acts 8:37', 'Acts 15:34', 'Acts 24:7', 'Acts 28:29'],
'NRSV': ['Matthew 17:21', 'Matthew 18:11', 'Matthew 23:14',
'Mark 7:16', 'Mark 9:44', 'Mark 9:46', 'Mark 11:26', 'Mark 15:28',
'Luke 17:36', 'Luke 23:17',
'John 5:4',
'Acts 8:37', 'Acts 15:34', 'Acts 24:7', 'Acts 28:29',
'Romans 16:24'],
'NLT': ['Matthew 17:21', 'Matthew 18:11', 'Matthew 23:14',
'Mark 7:16', 'Mark 9:44', 'Mark 9:46', 'Mark 11:26', 'Mark 15:28',
'Luke 17:36', 'Luke 23:17',
'John 5:4',
'Acts 8:37', 'Acts 15:34', 'Acts 24:7', 'Acts 28:29',
'Romans 16:24'],
'NASB': ['Matthew 17:21', 'Matthew 18:11', 'Matthew 23:14',
'Mark 7:16', 'Mark 9:44', 'Mark 9:46', 'Mark 15:28',
'John 5:4',
'Acts 8:37', 'Acts 15:34', 'Acts 24:7', 'Acts 28:29',
'Romans 16:24'],
'NET': ['Matthew 17:21', 'Matthew 18:11', 'Matthew 23:14',
'Mark 7:16', 'Mark 9:44', 'Mark 9:46', 'Mark 11:26', 'Mark 15:28',
'Luke 17:36', 'Luke 23:17',
'John 5:4',
'Acts 8:37', 'Acts 15:34', 'Acts 28:29',
'Romans 16:24'],
'NOG': ['Matthew 17:21', 'Matthew 18:11', 'Matthew 23:14',
'Mark 9:44', 'Mark 9:46', 'Mark 11:26', 'Mark 15:28',
'Luke 17:36', 'Luke 23:17',
'John 5:4',
'Acts 8:37', 'Acts 15:34', 'Acts 24:7', 'Acts 28:29',
'Romans 16:24'],
}
def __init__(self, file_writing_function, translation='NIV', show_passage_numbers=True,
default_directory=os.getcwd(), strip_excess_whitespace=False, enable_multiprocessing=True,
use_ascii_punctuation=False, file_extension='', write_key_as_string=False):
"""
:param file_writing_function: Function definition used to specify how to write to a given file.
The function should only take 2 arguments, which are the file path to write to
and the in-memory object being sourced (in that order).
:type file_writing_function: callable[[str, dict], int]
:param translation: Translation code for the particular passage. For example, 'NIV', 'ESV', 'NLT'
:type translation: str
:param show_passage_numbers: If True, any present passage numbers are preserved. Defaults to True.
:type show_passage_numbers: bool
:param default_directory: Directory containing the downloaded file.
Defaults to the current working directory.
:type default_directory: str
:param strip_excess_whitespace: If True, passages don't retain leading & trailing whitespaces as well as
newline characters. Defaults to False.
:type strip_excess_whitespace: bool
:param enable_multiprocessing: If True, downloads are performed using multiple daemon processes, resulting in
lower download times by splitting computations among multiple CPU cores.
Defaults to True.
:type enable_multiprocessing: bool
:param use_ascii_punctuation: When True, converts all Unicode punctuation characters into their ASCII
counterparts. Defaults to False.
:type use_ascii_punctuation: bool
:param file_extension: File extension used for the default output file when file_path is not provided
:type file_extension: str
:param write_key_as_string: If True, specifies that all keys in the downloaded file are converted to strings.
Defaults to False.
:type write_key_as_string: bool
"""
self.translation = translation
self.show_passage_numbers = show_passage_numbers
self.default_directory = default_directory
self.strip_excess_whitespace = strip_excess_whitespace
self.enable_multiprocessing = enable_multiprocessing
self.use_ascii_punctuation = use_ascii_punctuation
self.file_extension = file_extension
self.file_writing_function = file_writing_function
self.write_key_as_string = write_key_as_string
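# Sketch of a file_writing_function that could be passed in (assumes the in-memory document
# is JSON-serialisable; write_json and its behaviour are illustrative, not part of the library):
#
#   import json
#
#   def write_json(file_path, document):
#       with open(file_path, 'w') as out_file:
#           return out_file.write(json.dumps(document))
#
#   downloader = BaseDownloader(write_json, translation='NIV', file_extension='.json')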
def download_passage(self, book, chapter, passage, file_path=''):
"""
Downloads a single passage as a file
:param book: Name of the book
:type book: str
:param chapter: Chapter number
:type chapter: int
:param passage: Passage number
:type passage: int
:param file_path: When specified, saves the file to this location with a custom filename and extension.
Using this parameter will take priority over the default_directory class property.
Defaults to the default_directory path with the book as the file name with a default
extension.
:type file_path: str
:return: 1 if the download was successful. 0 if an error occurred.
:rtype: int
"""
return self.download_passage_range(book, chapter, passage, chapter, passage, file_path)
def download_passages(self, book, chapter, passage_from, passage_to, file_path=''):
"""
Downloads a range of passages of the same chapter as a file
:param book: Name of the book
:type book: str
:param chapter: Chapter number
:type chapter: int
:param passage_from: First passage number to get
:type passage_from: int
:param passage_to: Last passage number to get
:type passage_to: int
:param file_path: When specified, saves the file to this location with a custom filename and extension.
Using this parameter will take priority over the default_directory class property.
Defaults to the default_directory path with the book as the file name with a default
extension.
:type file_path: str
:return: 1 if the download was successful. 0 if an error occurred.
:rtype: int
"""
return self.download_passage_range(book, chapter, passage_from, chapter, passage_to, file_path)
def download_chapter(self, book, chapter, file_path=''):
"""
Downloads a single chapter as a file
:param book: Name of the book
:type book: str
:param chapter: Chapter number
:type chapter: int
:param file_path: When specified, saves the file to this location with a custom filename and extension.
Using this parameter will take priority over the default_directory class property.
Defaults to the default_directory path with the book as the file name with a default
extension.
:type file_path: str
:return: 1 if the download was successful. 0 if an error occurred.
:rtype: int
"""
return self.download_passage_range(book, chapter, 1, chapter, common.get_end_of_chapter(), file_path)
def download_chapters(self, book, chapter_from, chapter_to, file_path=''):
"""
Downloads a range of chapters as a file
:param book: Name of the book
:type book: str
:param chapter_from: First chapter number to get
:type chapter_from: int
:param chapter_to: Last chapter number to get
:type chapter_to: int
:param file_path: When specified, saves the file to this location with a custom filename and extension.
Using this parameter will take priority over the default_directory class property.
Defaults to the default_directory path with the book as the file name with a default
extension.
:type file_path: str
:return: 1 if the download was successful. 0 if an error occurred.
:rtype: int
"""
return self.download_passage_range(book, chapter_from, 1, chapter_to, common.get_end_of_chapter(), file_path)
def download_book(self, book, file_path=''):
"""
Downloads a specific book of the Bible and saves it as a file
:param book: Name of the book
:type book: str
:param file_path: When specified, saves the file to this location with a custom filename and extension.
Using this parameter will take priority over the default_directory class property.
Defaults to the default_directory path with the book as the file name with a default
extension.
:type file_path: str
:return: 1 if the download was successful. 0 if an error occurred.
:rtype: int
"""
return self.download_passage_range(book, 1, 1, common.get_chapter_count(book, self.translation),
common.get_end_of_chapter(), file_path)
def download_passage_range(self, book, chapter_from, passage_from, chapter_to, passage_to, file_path=''):
"""
Downloads a range of passages from one specific passage to another passage as a file
:param book: Name of the book
:type book: str
:param chapter_from: First chapter number to get
:type chapter_from: int
:param passage_from: First passage number to get in the first chapter
:type passage_from: int
:param chapter_to: Last chapter number to get
:type chapter_to: int
:param passage_to: Last passage number to get in the last chapter
:type passage_to: int
:param file_path: When specified, saves the file to this location with a custom filename and extension.
Using this parameter will take priority over the default_directory class property.
Defaults to the default_directory path with the book | |
# AutoDDU_CLI.py
Version_of_AutoDDU_CLI = "0.0.7"
import json
import os
import platform
import shutil
import subprocess
import sys
import time
import traceback
import urllib.request
# import wexpect
import zipfile
from datetime import datetime, timezone, date
from subprocess import CREATE_NEW_CONSOLE
#import ntplib
import requests
import wmi
from win32com.shell import shell, shellcon
import winreg
import ctypes
from win32event import CreateMutex
from win32api import CloseHandle, GetLastError
from winerror import ERROR_ALREADY_EXISTS
import webbrowser
import psutil
import urllib.error
import posixpath
import codecs
advanced_options_dict_global = {"disablewindowsupdatecheck": 0, "bypassgpureq": 0, "provideowngpuurl": [],
"disabletimecheck": 0, "disableinternetturnoff": 0, "donotdisableoverclocks": 0,
"disabledadapters": [], "avoidspacecheck": 0, "amdenterprise" : 0,
"nvidiastudio" : 0, "startedinsafemode" : 0, "inteldriverassistant" : 0} # ONLY USE FOR INITIALIZATION IF PERSISTENTFILE IS TO 0. NEVER FOR CHECKING IF IT HAS CHANGED.
clear = lambda: os.system('cls')
Appdata = shell.SHGetFolderPath(0, shellcon.CSIDL_COMMON_APPDATA, 0, 0)
Appdata_AutoDDU_CLI = os.path.join(Appdata, "AutoDDU_CLI")
Persistent_File_location = os.path.join(Appdata, "AutoDDU_CLI", "PersistentDDU_Log.txt")
root_for_ddu_assembly = os.path.join(Appdata, "AutoDDU_CLI", "DDU_Parser")
ddu_AssemblyInfo = os.path.join(Appdata, "AutoDDU_CLI", "DDU_Parser\\", "AssemblyInfo.vb")
ddu_zip_path = os.path.join(Appdata, "AutoDDU_CLI", "DDU_Parser\\", "DDU.exe")
ddu_extracted_path = os.path.join(Appdata, "AutoDDU_CLI", "DDU_Extracted")
Users_directory = os.path.dirname(shell.SHGetFolderPath(0, shellcon.CSIDL_PROFILE, 0, 0))
exe_location = os.path.join(Appdata_AutoDDU_CLI, "AutoDDU_CLI.exe")
Script_Location_For_startup = os.path.join(shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0), 'Microsoft',
'Windows', 'Start Menu', 'Programs', 'Startup', 'AutoDDUStartup.vbs')
log_file_location = os.path.join(Appdata_AutoDDU_CLI, "AutoDDU_LOG.txt")
PROGRAM_FILESX86 = shell.SHGetFolderPath(0, shellcon.CSIDL_PROGRAM_FILESX86, 0, 0)
# Only Fermi professional (NVS, Quadro, Tesla) is supported, and only till the end of 2022.
FERMI_NVIDIA = ['GF100', 'GF100M', 'GF100G', 'GF100GL', 'GF100GLM', 'GF106', 'GF108', 'GF104', 'GF116', 'GF106M', 'GF106GL', 'GF106GLM', 'GF108M', 'GF108GL', 'GF108GLM', 'GF119', 'GF110', 'GF114', 'GF104M', 'GF104GLM', 'GF11', 'GF119M', 'GF110GL', 'GF117M', 'GF114M', 'GF116M']
EOL_NVIDIA = ['NV1', 'NV3', 'NV4', 'NV5', 'MCP04', 'NV40', 'NV40GL', 'CK804', 'nForce2', 'nForce', 'MCP2A', 'MCP2S', 'G70', 'G70M', 'G70GL', 'NV0A',
'NV41', 'NV41M', 'NV41GLM', 'NV42GL', 'NV41GL', 'nForce3', 'CK8S', 'NV43', 'G70/G71', 'NV45GL', 'NV39', 'NV35', 'NV37GL', 'NV38GL', 'NV19',
'NV10', 'NV10GL', 'NV11', 'NV11M', 'NV11GL', 'NV43M', 'NV43GL', 'NV15', 'NV15GL', 'NV44', 'NV44M', 'NV17', 'NV17M', 'NV17GL', 'NV18', 'NV18M',
'NV18GL', 'G80', 'G80GL', 'G72', 'G7', 'G72M', 'G72GLM', 'G72GL', 'NV1F', 'NV20', 'NV20GL', 'NV48', 'NV44A', 'C51PV', 'C51', 'C51G', 'NV25',
'NV25GL', 'MCP51', 'NV28', 'NV28M', 'NV28GL', 'NV28GLM', 'G71', 'G71M', 'G71GLM', 'G71GL', 'NV2A', 'MCPX', 'G73', 'NV30', 'NV30GL', 'NV31',
'NV31G', 'NV31M', 'NV31GLM', 'NV34', 'NV34M', 'NV34GL', 'NV38', 'NV35GL', 'NV36', 'NV36M', 'NV36GL', 'MCP55', 'G73M', 'G73GLM', 'G73GL', 'C55',
'C61', 'MCP61', 'G84', 'G84M', 'G84GL', 'G84GLM', 'G92', 'G86', 'G86M', 'G86GLM', 'MCP65', 'C67', 'C68', 'MCP67', 'MCP78S', 'MCP73', 'NF200',
'GT200b', 'GT200', 'GT200GL', 'G92M', 'G92GL', 'G92GLM', 'G94', 'G94M', 'G94GL', 'G94GLM', 'G96C', 'G96', 'G96CM', 'G96M', 'G96GL', 'G96CGL',
'G96GLM', 'G98', 'G98M', 'G98GL', 'G98GLM', 'MCP77', 'MCP72XE/MCP72P/MCP78U/MCP78S', 'C73', 'C77', 'C78', 'C79', 'MCP7A', 'MCP79',
'MCP89', 'GT216', 'GT216M', 'GT216GL', 'GT216GLM', 'GT218', 'GT218M', 'GT218GL', 'GT218GLM', 'GT215', 'GT215M', 'GT215GLM',
'Xavier', 'MCP78U', 'MCP72P' , 'MCP72XE']
KEPLER_NVIDIA = ['GK104', 'GK106', 'GK208', 'GK110', 'GK107', 'GK107M', 'GK107GL', 'GK107GLM', 'GK110B',
'GK110GL', 'GK110BGL', 'GK180GL', 'GK210GL', 'GK104GL', 'GK104M', 'GK104GLM', 'GK106M',
'GK106GL', 'GK106GLM', 'GK208B', 'GK208M', 'GK208BM', 'GK20', 'GK208GLM']
Professional_NVIDIA_GPU = ["Quadro", "NVS", "RTX A"]
Datacenter_NVIDIA_GPU = ["Tesla", "HGX", "M", "T"]
Exceptions_laptops = ["710A", "745A", "760A", "805A", "810A", "810A", "730A",
"740A"] # Kepler laptops GPUs with no M in the name.
EOL_AMD = ['Kaver', 'Kaveri', 'Wrestler', 'Arie', 'Cyan', 'Kryptos', 'Garfiel', 'Arlen', 'Pook', 'Anubi',
'Fenghuang', 'Arde', 'Renoir', 'VanGogh', 'Rembrandt', 'Rembrand', 'BeaverCreek', 'RV380/M24', 'RV380', 'RV370/M22', 'RS100', 'RS200', 'R300', 'R350', 'RV350', 'RV360',
'68800AX', 'R200', 'RS200M', 'SB200', 'Crayola', '215CT', 'Mach64', 'SB300', 'IXP', 'SB600', 'SBx00', 'SB7x0/SB8x0/SB9x0', 'SB8x0/SB9x0', 'SB7x0', 'SB700/SB800/SB900',
'SB900', 'RS250', '210888ET', 'XENOS', 'Rage', '3D', 'Xilleon', 'RV250', 'R420', 'RV420/M18', 'R481', 'M1', '264LT', 'RV200/M7', 'RV100/M6', 'RV250/M9', 'Theater', 'R360',
'RV350/M10', 'RV360/M12', 'All-In-Wonder', 'R100', 'RV200', 'RV100', 'ES100', 'Mach', 'RV380/M24C', 'R423', 'R480', 'R430', 'RV410/M26', '264VT3', 'RV410', 'RS300', 'RS300M',
'RS480', 'RV280', 'RS480/RS482/RS485', 'RX480/RX482', 'RD580', 'RS480M', 'RD790', 'RX780/RX790', 'RD780', 'RS482/RS485', 'RS482M', 'RX780/RD790', 'RD890', 'RD890S/SR5650',
'RD9x0/RX980', 'RD890/RD9x0/RX980', 'RD890/RD9x0', 'RD890/RD990', 'RD890S/RD990', 'RC410', 'RS400', 'RS4xx', 'RC4xx/RS4xx', 'RS400M', 'RC410M', 'RV370', 'RV280/M9+', 'R423/M28',
'Mars', 'Opal', 'Oland', 'Olan', 'Saturn', 'Bonaire', 'Bonair', 'Tobago', 'Sun', 'Jet', 'Cayman', 'Antilles', 'Blackcomb', 'Barts', 'Whistler', 'Turks', 'Onega', 'Seymour',
'Caico', 'Caicos', 'Tahiti', 'Malta', 'Tahit', 'Hawaii', 'Hawai', 'Vesuvius', 'Ellesmere', 'Baffin', 'Wimbledon', 'Neptune', 'Wimbledo', 'Neptun', 'Pitcairn', 'Curacao', 'Pitcair',
'Venus', 'Heathrow', 'Chelsea', 'Cape', 'Thames', 'Lexington', 'Cypress', 'Hemlock', 'Broadway', 'Granville', 'Juniper', 'Madison', 'Pinewood', 'Redwood', 'Redwoo', 'Park', 'Robson',
'Ceda', 'Cedar', 'Topaz', 'Meso', 'Amethyst', 'Tonga', 'Lexa', 'RS200/RS250', 'R520', 'R520/M58', 'RV515', 'RV505', 'RV515/M54', 'RV515/M52', 'RV516', 'RV516/M64',
'RV516/M64-S', 'RV516/M62', 'RV516/M62-CSP64', 'RV516/M64-CSP128', 'RV516/M62-S', 'RV530', 'RV535', 'RV530/M56', 'RV530/M56-P', 'RV530LE', 'RV530/M66', 'RV530/M66-P', 'RV530/M66-XT',
'RV550/M71', 'R580+', 'R580', 'RV570', 'RV560', 'Fiji', 'Arcturus', 'Aldebara', 'RS350', 'RS350M', 'RS690', 'RS690/RS740', 'RS690M', 'RS600', 'RS600M', 'RS740', 'R600', 'RV770', 'R700',
'RV770/M98L', 'RV770/M98', 'RV770/M98-XT', 'RV790', 'RV730/M96', 'RV730/M96-XT', 'RV730', 'RV730/M96-CSP', 'RV740/M97', 'RV740/M97-XT', 'RV740', 'RV610', 'RV610/M74', 'RV610/M72-S', 'RV670',
'RV670/M88', 'RV670/M88-XT', 'R680', 'RV710', 'RV710/M92', 'RV711/M93', 'RV630', 'RV630/M76', 'RV635/M86', 'RV635', 'RV620', 'RV620/M82', 'RS780', 'RS780C', 'RS780M', 'RS780MC', 'RS780D', 'RS780E',
'RS780L', 'Sumo', 'SuperSumo', 'Sum', 'RS880', 'RS880M', 'Kabini', 'Temash', 'Mullins', 'Mullin', 'Wani', 'Amu', 'Nola', 'Stoney', 'Trinity', 'Richland', 'Bishop', 'Liverpool', 'Starsh', 'Starsha2',
'Gladiu', 'Kingston/Clayton/Jupiter/Gladius/Montego', 'Jupite', 'RV670/680', 'RV710/730', 'Cayman/Antilles', 'Oland/Hainan/Cape', 'Tiran', 'TV']
unrecoverable_error_print = (r"""
An unrecoverable error has occurred in this totally bug free
software.
Chika is disappointed, but at least this error shows what went wrong.
Please share the stacktrace below to Evernow so he can fix it.
In addition, above the stacktrace are the directories where
DDU and your drivers were downloaded, if they were downloaded.
{ddu_extracted_path}
{Appdata}
""".format(ddu_extracted_path=ddu_extracted_path, Appdata=Appdata_AutoDDU_CLI))
login_or_not = """
You should be logged in automatically to a
user profile we created, if it doesn't then login
yourself manually.
"""
AutoDDU_CLI_Settings = os.path.join(Appdata_AutoDDU_CLI, "AutoDDU_CLI_Settings.json")
def serialize_req(obj):
return json.dumps(obj, separators=(',', ':'))
def getDispDrvrByDevid(query_obj, timeout=10):
ENDPOINT = 'https://gfwsl.geforce.com/nvidia_web_services/' \
'controller.gfeclientcontent.NG.php/' \
'com.nvidia.services.GFEClientContent_NG.getDispDrvrByDevid'
url = posixpath.join(ENDPOINT, serialize_req(query_obj))
http_req = urllib.request.Request(
url,
data=None,
headers={
'User-Agent': 'NvBackend/36.0.0.0'
}
)
with urllib.request.urlopen(http_req, None, timeout) as resp:
coding = resp.headers.get_content_charset()
coding = coding if coding is not None else 'utf-8-sig'
decoder = codecs.getreader(coding)(resp)
res = json.load(decoder)
return res
def get_latest_geforce_driver(dev_id):
notebook=False
x86_64=True
os_version="10.0"
os_build="19044"
language=1033
beta=False
dch=True
crd=False
timeout=10
query_obj = {
"dIDa": dev_id, # Device PCI IDs:
# ["DEVID_VENID_DEVID_VENID"]
"osC": os_version, # OS version (Windows 10)
"osB": os_build, # OS build
"is6": "1" if x86_64 else "0", # 0 - 32bit, 1 - 64bit
"lg": str(language), # Language code
"iLp": "1" if notebook else "0", # System Is Laptop
"prvMd": "0", # Private Model?
"gcV": "172.16.31.10", # GeForce Experience client version
"gIsB": "1" if beta else "0", # Beta?
"dch": "1" if dch else "0", # 0 - Standard Driver, 1 - DCH Driver
"upCRD": "1" if crd else "0", # Searched driver: 0 - GameReady Driver, 1 - CreatorReady Driver
"isCRD": "1" if crd else "0", # Installed driver: 0 - GameReady Driver, 1 - CreatorReady Driver
}
print(query_obj)
try:
res = getDispDrvrByDevid(query_obj, timeout)
except urllib.error.HTTPError as e:
print(e)
if e.code == 404:
res = None
else:
raise e
return res
def FindOutOfBranchDriver(vendorid_deviceid):
# '1BE0_10DE'
drv = get_latest_geforce_driver([vendorid_deviceid])
if drv is None:
return None
else:
return drv['DriverAttributes']['DownloadURLAdmin']
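# Hypothetical usage: the argument is a 'DEVICEID_VENDORID' hex pair as in the comment above,
# e.g. '1BE0_10DE' (10DE being NVIDIA's PCI vendor ID); returns an admin download URL or None.
# driver_url = FindOutOfBranchDriver('1BE0_10DE')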
def checkifvaliddownload(url):
logger("Checking if custom URL {} is valid".format(str(url)))
try:
my_referer = "https://www.amd.com/en/support/graphics/amd-radeon-6000-series/amd-radeon-6700-series/amd-radeon-rx-6700-xt"
file = urllib.request.Request(url)
file.add_header('Referer', my_referer)
file.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0')
file = urllib.request.urlopen(file, timeout=5)
logger("Got size of custom URL to be {}".format(str(file.length)))
if file.length < 6000000: # 6MB, which is size of intel driver assistant
return False
else:
return True
except:
logger("Failed valid check with error " +str(traceback.format_exc()) )
return False
def checkBatteryLevel():
try:
if psutil.sensors_battery() != None and int(psutil.sensors_battery().percent) < 40 and psutil.sensors_battery().power_plugged == False:
print("Your battery is less than 40%")
print("Please connect your laptop to power, then continue with instructions below.")
HandleOtherLanguages()
except:
logger("Failed to check battery level with")
logger(str(traceback.format_exc()))
def cleanupAutoLogin():
try:
Winlogon_key = winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE, 'Software\\Microsoft\\Windows NT\\CurrentVersion\\Winlogon')
if winreg.QueryValueEx(Winlogon_key, 'DefaultUserName')[0] == obtainsetting("ProfileUsed"):
failed = 0
try:
winreg.DeleteValue(Winlogon_key, 'AutoAdminLogon')
except:
failed = 1
logger("Failed in cleanupAutoLogin 1")
logger(str(traceback.format_exc()))
try:
winreg.DeleteValue(Winlogon_key, 'DefaultUserName')
except:
failed = 1
logger("Failed in cleanupAutoLogin 2")
logger(str(traceback.format_exc()))
try:
winreg.DeleteValue(Winlogon_key, 'DefaultPassword')
except:
failed = 1
logger("Failed in cleanupAutoLogin 3")
logger(str(traceback.format_exc()))
try:
winreg.DeleteValue(Winlogon_key, 'AutoLogonCount')
except:
# I think this is normal to occur so...
logger("EXPECTED failure in cleanupAutoLogin 4")
logger(str(traceback.format_exc()))
else:
failed = 1
logger("Did not log because DefaultUserName did not match ProfileUsed, used is {used} and defaultusername is {default}".format(default=winreg.QueryValueEx(Winlogon_key, 'DefaultUserName')[0], user=obtainsetting("ProfileUsed")))
winreg.CloseKey(Winlogon_key)
print("Finished AutoLogin cleanup")
logger("Finished cleanupAutoLogin successfully")
except:
failed = 1
logger("Failed in cleanupAutoLogin 5")
logger(str(traceback.format_exc()))
if failed == 1:
print("WARNING: Something MAY have gone wrong in some cleanup")
print("DDU finished just fine, just that when we log you out,")
print("you MAY be logged back into this DDU profile, if you are")
print("please log out then then restart, you may have to do this FIVE times for it to stop.")
print("We'll continue | |
"""
Minter definition transaction class. This transaction type
allows the current coin creators to redefine who has the ability to create coins.
"""
def __init__(self):
self._mint_fulfillment = None
self._mint_condition = None
self._minerfees = []
self._data = bytearray()
self._version = bytearray([128])
self._id = None
self._nonce = token_bytes(nbytes=8)
self._specifier = bytearray(b'minter defin tx\0')
@property
def version(self):
return 128
@property
def id(self):
"""
Get transaction id
"""
return self._id
@id.setter
def id(self, txn_id):
"""
Set transaction id
"""
self._id = txn_id
@property
def coin_inputs(self):
"""
Retrieves the coin inputs
"""
# TODO: make this static of some Base (Abstract) Tx class
return []
@property
def coin_outputs(self):
"""
Retrieves the coin outputs
"""
# TODO: make this static of some Base (Abstract) Tx class
return []
@property
def data(self):
"""
Retrieves the data
"""
# TODO: make this static of some Base (Abstract) Tx class
return bytearray()
@property
def mint_condition(self):
"""
Retrieve the new mint condition which will be set
"""
return self._mint_condition
@property
def mint_fulfillment(self):
"""
Retrieve the current mint fulfillment
"""
return self._mint_fulfillment
@property
def json(self):
"""
Returns a json representation of the transaction
"""
result = {
'version': self.version,
'data': {
'nonce': base64.b64encode(self._nonce).decode('utf-8'),
'mintfulfillment': self._mint_fulfillment.json if self._mint_fulfillment else '{}',
'mintcondition': self._mint_condition.json if self._mint_condition else '{}',
'minerfees': [str(fee) for fee in self._minerfees]
}
}
if self._data:
result['data']['arbitrarydata'] = base64.b64encode(self._data).decode('utf-8')
return result
def add_data(self, data):
"""
Add data to the transaction
"""
self._data.extend(data)
def set_singlesig_mint_condition(self, minter_address, locktime=None):
"""
Set the mint condition to a singlesig condition.
@param minter_address: The address of the singlesig condition to set as new mint condition
"""
unlockhash = UnlockHash.from_string(minter_address)
condition = UnlockHashCondition(unlockhash=unlockhash)
if locktime is not None:
condition = LockTimeCondition(condition=condition, locktime=locktime)
self._mint_condition = condition
def set_multisig_mint_condition(self, unlockhashes, min_nr_sig, locktime=None):
"""
Set the mint condition to a multisig condition
@param unlockhashes: The unlockhashes which can sign the multisig condition
@param min_nr_sig: The minimum amount of signatures in order to fulfill the condition
@param locktime: An optional time until which the condition cannot be fulfilled
"""
condition = MultiSignatureCondition(unlockhashes=unlockhashes, min_nr_sig=min_nr_sig)
if locktime is not None:
condition = LockTimeCondition(condition=condition, locktime=locktime)
self._mint_condition = condition
def set_condition(self, condition):
"""
Set a new premade minter condition
"""
self._mint_condition = condition
def add_minerfee(self, minerfee):
"""
Adds a minerfee to the transaction
"""
self._minerfees.append(minerfee)
def get_input_signature_hash(self, extra_objects=None):
"""
Builds a signature hash for an input
"""
if extra_objects is None:
extra_objects = []
buffer = bytearray()
# encode transaction version
buffer.extend(self._version)
# encode the specifier
buffer.extend(self._specifier)
# encode nonce
buffer.extend(self._nonce)
# extra objects if any
for extra_object in extra_objects:
buffer.extend(binary.encode(extra_object))
# encode new mintcondition
buffer.extend(binary.encode(self._mint_condition))
# minerfee length
buffer.extend(binary.encode(len(self._minerfees)))
# actual minerfees
for miner_fee in self._minerfees:
buffer.extend(binary.encode(miner_fee, type_='currency'))
# arb data
buffer.extend(binary.encode(self._data, type_='slice'))
return hash(data=buffer)
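# Minimal usage sketch (all values are placeholders, not real chain data):
#
#   tx = TransactionV128()
#   tx.set_singlesig_mint_condition('<new minter unlockhash>')
#   tx.add_minerfee(100000000)                  # miner fee in the smallest coin unit
#   sig_hash = tx.get_input_signature_hash()    # hash to be signed against the current mint condition
#   payload = tx.json                           # JSON structure ready for submission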
class TransactionV129:
"""
Coin creation transaction class. This transaction type allows the current
coin creators to create new coins and spend them.
"""
def __init__(self):
self._mint_fulfillment = None
self._nonce = token_bytes(nbytes=8)
self._version = bytearray([129])
self._id = None
self._minerfees = []
self._data = bytearray()
self._coin_outputs = []
self._specifier = bytearray(b'coin mint tx')
self._specifier.extend([0,0,0,0])
@property
def version(self):
return 129
@property
def id(self):
"""
Get the transaction id
"""
return self._id
@id.setter
def id(self, tx_id):
"""
Set the transaction id
"""
self._id = tx_id
@property
def coin_inputs(self):
"""
Retrieves the coin inputs
"""
# TODO: make this static of some Base (Abstract) Tx class
return []
@property
def coin_outputs(self):
"""
Retrieves the coin outputs
"""
return self._coin_outputs or []
@property
def data(self):
"""
Retrieves the data
"""
return self._data
@property
def mint_fulfillment(self):
"""
Retrieve the current mint fulfillment
"""
return self._mint_fulfillment
@property
def json(self):
"""
Returns a json version of the TransactionV129 object
"""
result = {
'version': self.version,
'data': {
'nonce': base64.b64encode(self._nonce).decode('utf-8'),
'mintfulfillment': self._mint_fulfillment.json if self._mint_fulfillment else '{}',
'coinoutputs': [output.json for output in self._coin_outputs],
'minerfees': [str(fee) for fee in self._minerfees]
}
}
if self._data:
result['data']['arbitrarydata'] = base64.b64encode(self._data).decode('utf-8')
return result
def add_data(self, data):
"""
Add data to the transaction
"""
self._data.extend(data)
def add_coin_output(self, value, recipient, locktime=None):
"""
Add a new coin output to the transaction
@param value: Amount of coins
@param recipient: The recipient address
@param locktime: If provided then a locktimecondition will be created for this output
"""
unlockhash = UnlockHash.from_string(recipient)
condition = UnlockHashCondition(unlockhash=unlockhash)
if locktime is not None:
condition = LockTimeCondition(condition=condition, locktime=locktime)
self._coin_outputs.append(CoinOutput(value=value, condition=condition))
def add_multisig_output(self, value, unlockhashes, min_nr_sig, locktime=None):
"""
Add a new MultiSignature output to the transaction
@param value: Value of the output in hastings
@param unlockhashes: List of all unlockhashes which are authorised to spend this input
@param min_nr_sig: The amount of signatures required to spend this output
@param locktime: If provided then a locktimecondition will be created for this output
"""
condition = MultiSignatureCondition(unlockhashes=unlockhashes, min_nr_sig=min_nr_sig)
if locktime is not None:
condition = LockTimeCondition(condition=condition, locktime=locktime)
coin_output = CoinOutput(value=value, condition=condition)
self._coin_outputs.append(coin_output)
def add_output(self, value, condition):
"""
Add a new output from a premade condition
"""
self._coin_outputs.append(CoinOutput(value=value, condition=condition))
def add_minerfee(self, minerfee):
"""
Adds a miner fee to the transaction
"""
self._minerfees.append(minerfee)
def get_input_signature_hash(self, extra_objects=None):
"""
Builds a signature hash for an input
"""
if extra_objects is None:
extra_objects = []
buffer = bytearray()
# encode the transaction version
buffer.extend(self._version)
# specifier
buffer.extend(self._specifier)
# nonce
buffer.extend(self._nonce)
# arbitrary objects if any
for extra_object in extra_objects:
buffer.extend(binary.encode(extra_object))
# new coin outputs
buffer.extend(binary.encode(self._coin_outputs, type_='slice'))
# miner fees
buffer.extend(binary.encode(len(self._minerfees)))
for miner_fee in self._minerfees:
buffer.extend(binary.encode(miner_fee, type_='currency'))
# finally custom data
buffer.extend(binary.encode(self._data, type_='slice'))
return hash(data=buffer)
class TransactionV144:
"""
Bot Registration transaction class. This transaction type allows a
new 3Bot to be registered.
"""
def __init__(self):
self._specifier = bytearray(b'bot register tx\0')
self._id = None
self._addresses = []
self._names = []
self._number_of_months = 0
self._transaction_fee = None
self._coin_inputs = []
self._refund_coin_output = None
self._identification = TfchainPublicKeySignaturePair(None, None)
@property
def version(self):
return BOT_REGISTRATION_TRANSACTION_VERSION
@property
def id(self):
"""
Get the transaction id
"""
return self._id
@id.setter
def id(self, tx_id):
"""
Set the transaction id
"""
self._id = tx_id
@property
def identification(self):
"""
Get the 3Bot identification of this transaction.
"""
return self._identification
@property
def required_bot_fees(self):
# a static registration fee has to be paid
fees = tfconst.BOT_REGISTRATION_FEE_MULTIPLIER * HASTINGS_TFT_VALUE
# the desired number of months also has to be paid for
fees += _compute_monthly_bot_fees(self._number_of_months)
# if more than one name is defined, each additional name also has to be paid for
lnames = len(self._names)
if lnames > 1:
fees += HASTINGS_TFT_VALUE * (lnames-1) * tfconst.BOT_FEE_PER_ADDITIONAL_NAME_MULTIPLIER
# no fee has to be paid for the used network addresses during registration
# return the total fees
return fees
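# Worked example (added; the multiplier values live in tfconst and are not
# restated here): with 3 names registered, only the 2 additional names are
# billed, so the name part of the fee is
#   HASTINGS_TFT_VALUE * 2 * tfconst.BOT_FEE_PER_ADDITIONAL_NAME_MULTIPLIER,
# on top of the flat registration fee and the monthly fees computed above.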
@property
def coin_inputs(self):
"""
Retrieves coin inputs
"""
return self._coin_inputs
@property
def coin_outputs(self):
"""
Retrieves coin outputs
"""
# TODO: support 3Bot Fee Payout as well
if self._refund_coin_output:
return [self._refund_coin_output]
return []
@property
def data(self):
"""
Retrieves the data
"""
# TODO: make this a static member of some Base (Abstract) Tx class
return bytearray()
@property
def json(self):
"""
Returns a json version of the TransactionV144 object
"""
result = {
'version': self.version,
'data': {
'nrofmonths': self._number_of_months,
'txfee': str(self._transaction_fee),
'coininputs': [ci.json for ci in self._coin_inputs],
'identification': self._identification.json,
}
}
if self._addresses:
result['data']['addresses'] = [addr.json for addr in self._addresses]
if self._names:
result['data']['names'] = self._names.copy()
if self._refund_coin_output:
result['data']['refundcoinoutput'] = self._refund_coin_output.json
return result
def from_dict(self, data):
"""
Populates this TransactionV144 object from a data (JSON-decoded) dictionary
"""
if 'nrofmonths' in data:
self._number_of_months = data['nrofmonths']
else:
self._number_of_months = 0
if 'txfee' in data:
self._transaction_fee = int(data['txfee'])
else:
self._transaction_fee = None
if 'coininputs' in data:
for ci_info in data['coininputs']:
ci = CoinInput.from_dict(ci_info)
self._coin_inputs.append(ci)
else:
self._coin_inputs = []
if 'identification' in data:
self._identification = TfchainPublicKeySignaturePair.from_dict(data['identification'])
else:
self._identification = TfchainPublicKeySignaturePair(None, None)
if 'addresses' in data:
for addr_str in data['addresses']:
addr = tftnet.NetworkAddress.from_string(addr_str)
self._addresses.append(addr)
else:
self._addresses = []
if 'names' in data:
self._names = data['names'].copy()
else:
self._names = []
if 'refundcoinoutput' in data:
co = CoinOutput.from_dict(data['refundcoinoutput'])
self._refund_coin_output = co
else:
self._refund_coin_output = None
def add_address(self, addr_str):
addr = tftnet.NetworkAddress.from_string(addr_str)
self._addresses.append(addr)
def add_name(self, name):
self._names.append(name)
def set_transaction_fee(self, txfee):
self._transaction_fee = txfee
def set_number_of_months(self, n):
if n <
# Optional words that make the transition between day and month
extra_between_day_and_month = "(jour du mois de |jour du mois d'|de |d')?"
## Date regexes for complete dates (with day) or partial dates (month and year only), in numeric form.
full_date_regex = "\d\d?'*(er|me|e|deg)? "+extra_between_day_and_month+month_and_year_group_regex
partial_date_regex = month_and_year_group_regex
## Possible spelled-out numbers
# All day numbers can be turned into ordinals, such as "seizieme"
toutes_lettres_chiffres = ["deux","trois","quatre","cinq","six","sept","huit","neuf"]
toutes_lettres_chiffres_purs = ["un"] + toutes_lettres_chiffres
toutes_lettres_chiffres_jours = ["premier"] + toutes_lettres_chiffres
toutes_lettres_nombres_dix = ["onze","douze","treize","quatorze","quinze","seize"]
toutes_lettres_dizaines = ["dix","vingt","trente","quarante","cinquante","soixante","septante","soixante dix",
"huitante","quatre vingt","nonante","quatre vingt dix"]
toutes_lettres_dizaines_jours = ["dix","vingt","trente"]
toutes_lettres_millenaires = ["mille","mil"]
toutes_lettres_centaines = ["cent","cens"]
toutes_lettres_annee = ["l'an de grace","de l'an"]
## Formulas announcing a date
formules_preliminaires = ["Ainsi adoptee", "Donnee", "Donne", "Fait"]
chambres_pour_formules_preliminaires = ["Grand Conseil","Conseil d'Etat"]
## Places
vrais_lieux = ["sion", "lucerne", "lausanne", "berne", "gernsbach", "st-gall", "brigue", "zurich", "bex",
"fribourg", "sierre", "genes", "st-maurice", "paris", "turin", "palais des tuileries",
"palais de saint cloud", "bechenkowiski", "witepsk", "smolensk", "slawkovo", "wiazma", "ghjat",
"mojaisk", "moscou", "borowsk", "roilskoe", "vereia", "molodetschno", "palais de l'elysee",
"lutzen", "francfort", "loerrach", "bale", "thonon", "troyes", "geneve", "lons-le-saunier",
"yverdon", "monthey", "bramois", "viege", "bonneville", "illarsaz", "chamoson", "martigny",
"aigle", "verolliez", "aoste", "rome", "aarau", "posieux", "massongex", "ratisbonne", "vaumarcus",
"copenhague", "neuchatel", "coire", "riddes", "<NAME>", "vernayaz", "glarus"]
coquilles_lieux = {"coirele":"coire", "paixberne":"berne", "precieufesion":"sion", "cirsion":"sion",
"vconsion":"sion"}
uniformize_lieux = {"st maurice":"st-maurice", "st gall":"st-gall", "massongez":"massongex",
"arau":"aarau"}
lieux = vrais_lieux + list(coquilles_lieux.keys()) + list(uniformize_lieux.keys())
correct_lieux_dict = dict()
correct_lieux_dict.update(coquilles_lieux)
correct_lieux_dict.update(uniformize_lieux)
## Determiner introducing the date
prefix_date = ["le ","ce "]
## Regexes for dates with places
lieu_full_date_regex = make_re_group(lieux) + " " + make_re_group(prefix_date)+"?" + full_date_regex
lieu_partial_date_regex = make_re_group(lieux) + " " + "(en )?" + partial_date_regex
# (en )? MIGHT BE MORE RELEVANT ELSEWHERE (in prefix_date, for example?)
## Regexes for the formulas introducing dates, complete (participle + governing body) or partial (participle only)
full_formule_regex = make_re_group(formules_preliminaires) + " en " + make_re_group(
chambres_pour_formules_preliminaires) + " a " + make_re_group(lieux) + " " + make_re_group(
prefix_date)
partial_formule_regex = make_re_group(formules_preliminaires) + " a " + make_re_group(
lieux) + " " + make_re_group(prefix_date)
# Conversion of month names to their numeric values
normalize_month_dict = {"janvier":1,"fevrier":2,"mars":3,"avril":4,"mai":5,"juin":6,"juillet":7,"aout":8,
"septembre":9,"octobre":10,"novembre":11,"decembre":12,
"vendemiaire":1,"brumaire":2,"frimaire":3,"nivose":4,"pluviose":5,"ventose":6,
"germinal":7,"floreal":8,"prairial":9,"messidor":10,"thermidor":11,"fructidor":12,
"may":5,"aoust":8,"decembr":12,
"jer":1,"7bre":9,"8bre":10,"9bre":11,"10bre":12,"xbre":12
}
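# Illustrative lookups (added): normalize_month_dict["brumaire"] -> 2 (republican
# calendar) and normalize_month_dict["9bre"] -> 11 ("9bre" is an old abbreviation
# of "novembre").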
# In[19]:
# Function that loads a file's text and processes it for the date search (\n -> space)
# Date search function
def find_most_probable_dates(text):
"""
Find in the text the patterns that are the most likely to be the date of the document and return them as
a list of match objects, along with the code that characterizes the date.
text : the text to be analyzed
Return : (match_list, code)
match_list : the list of match objects that contain dates
code : the code of the level of probability of the given date.
"""
# Complexity level 1 (formula with past participle + governmental entity + place + date)
match_list = find_pattern(full_formule_regex + full_date_regex, text)
if len(match_list) > 0:
return match_list, "formule_grande+date_complete"
match_list = find_pattern(full_formule_regex + partial_date_regex, text)
if len(match_list) > 0:
return match_list, "formule_grande+date_partielle"
# Complexity level 2 (formula with past participle + place + date)
match_list = find_pattern(partial_formule_regex + full_date_regex, text)
if len(match_list) > 0:
return match_list, "formule_petite+date_complete"
match_list = find_pattern(partial_formule_regex + partial_date_regex, text)
if len(match_list) > 0:
return match_list, "formule_petite+date_partielle"
# Complexity level 3 (place + date)
match_list = find_pattern("a " + lieu_full_date_regex, text)
if len(match_list) > 0:
return match_list, "a_lieu+date_complete"
match_list = find_pattern(lieu_full_date_regex, text)
if len(match_list) > 0:
return match_list, "lieu+date_complete"
match_list = find_pattern("a "+lieu_partial_date_regex, text)
if len(match_list) > 0:
return match_list, "a_lieu+date_partielle"
match_list = find_pattern(lieu_partial_date_regex, text)
if len(match_list) > 0:
return match_list, "lieu+date_partielle"
# Complexity level 4 (prefix + date)
match_list = find_pattern(make_re_group(prefix_date) + full_date_regex, text)
if len(match_list) > 0:
return match_list, "le+date_complete"
match_list = find_pattern(make_re_group(prefix_date) + partial_date_regex, text)
if len(match_list) > 0:
return match_list, "le+date partielle"
# Complexity level 5 (complete date fully spelled out)
# Currently not implemented, because it would take time for a limited number of dates,
# and would be hard to implement given the large number of possible variations.
# Idea! For spelled-out dates, could we replace every number name in the text
# by its digit equivalent? (See the replace_spelled_out_days sketch after this function.)
# Some subtleties would remain, but at least it would be simpler.
# Better yet: start by finding the month, then look for the digits around the month.
#
# Additional note: while spelled-out years are rare,
# all documents related to the Simplon department have spelled-out days.
# It could be worthwhile to integrate them.
# Complexity level 6 (complete date)
match_list = find_pattern(full_date_regex, text)
if len(match_list) > 0:
return match_list, "date_complete"
# Complexity level 7 (special formulas)
match_list = find_pattern("pour l'annee \d\d\d\d|pour l'exercice( de)? \d\d\d\d", text)
if len(match_list) > 0:
return match_list, "pour_annee+annee"
# Add a level 8 for partial dates standing on their own line
# (move the text conversion for the date search into this function to have access to
# both texts?)
# Complexity level 9 (unique partial date)
match_list = find_pattern(partial_date_regex, text)
if (len(match_list) == 1) or (len(set(map(lambda mobj : mobj.group(), match_list))) == 1):
return match_list, "date_partielle_unique"
else:
return list(), "no_match"
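# A minimal sketch (added; not part of the original notebook) of the idea mentioned
# in the level-5 comment above: replace simple spelled-out French day numbers by
# digits before running the date regexes. The mapping below only covers simple
# cases and is an illustration, not a full implementation. It assumes the `re`
# module is already imported, as elsewhere in this notebook.
def replace_spelled_out_days(text):
    """Replace simple spelled-out French day numbers with digits (sketch)."""
    simple_days = {
        "premier": "1", "deux": "2", "trois": "3", "quatre": "4", "cinq": "5",
        "six": "6", "sept": "7", "huit": "8", "neuf": "9", "dix": "10",
        "onze": "11", "douze": "12", "treize": "13", "quatorze": "14",
        "quinze": "15", "seize": "16", "vingt": "20", "trente": "30",
    }
    # Replace whole words only, longest names first to limit partial overlaps.
    for word in sorted(simple_days, key=len, reverse=True):
        text = re.sub(r"\b%s\b" % word, simple_days[word], text)
    return text
# Example: replace_spelled_out_days("le seize janvier 1813") -> "le 16 janvier 1813"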
# Date selection function (decide_extrem, so no alias is needed outside compilations.)
# As for compilations, we will have to handle the case where all elements appear at once.
# Date conversion function
# This function extracts the date and the place from the matched string and converts them.
# Or rather a dedicated function for the place?
def normalize_date(date_text):
"""
Extract and convert a date from a date text that contains the wanted date.
The day and the year must be in a numerical format.
"""
republican_flag = False
date_text = date_text.lower()
# Extract the specific date components
full_date = search_for_pattern(full_date_regex, date_text)
if full_date is not None:
full_date_str = full_date.group()
day = retrieve_unique_pattern("^\d\d?",full_date_str, allow_part_of_word = True)
month = retrieve_unique_pattern(month_group_regex,full_date_str)
if month in revolutionary_month_list:
republican_flag = True
year = retrieve_unique_pattern("\d+$",full_date_str)
else:
year = retrieve_unique_pattern("\d\d\d\d$",full_date_str)
else:
partial_date = search_for_pattern(partial_date_regex, date_text)
if partial_date is not None:
partial_date_str = partial_date.group()
day = None
month = retrieve_unique_pattern(month_group_regex,partial_date_str)
if month in revolutionary_month_list:
republican_flag = True
year = retrieve_unique_pattern("\d+$",partial_date_str)
else:
year = retrieve_unique_pattern("\d\d\d\d$",partial_date_str)
else:
day = None
month = None
year = retrieve_unique_pattern("\d\d\d\d$",date_text)
# Convert these components
## Conversion to numbers
if month is not None:
month_num = normalize_month_dict[month]
else:
month_num = None
day_num = int(day) if day is not None else None
year_num = int(year)
## Convert republican dates to Gregorian dates
if republican_flag:
if day is not None:
year_num, month_num, day_num = convertdate.french_republican.to_gregorian(year_num, month_num, day_num)
else: # There is no exact correspondence between republican and Gregorian months, so we have to approximate
year_num, month_num, _ = convertdate.french_republican.to_gregorian(year_num, month_num, 15)
return day_num, month_num, year_num
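# Illustrative check (added): convertdate.french_republican.to_gregorian(8, 2, 18)
# should return (1799, 11, 9), i.e. 18 Brumaire an VIII is 9 November 1799, which
# is the conversion normalize_date relies on for republican dates.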
# In[20]:
find_most_probable_dates("fait a Sion le 9 janvier 1815")
# ## Supplementing the dates
# For the dates that were not found in the documents, we will use, when possible, the dates from the documents' original titles, in order to get maximal coverage (admittedly, some of these dates are wrong, but overall it is still better than nothing)
# In[21]:
#TODO
# Consider should_be_name, not Z_name, to take possible re-filings into account
# In the date-range search on the website, do not forget that some dates only have
# month+year, or even only a year. Include them systematically if the year/month falls within the range,
# or else use a range by year only?
# In[22]:
def find_and_normalize_date_in_filename(cote):
"""
Using the original log_df, find the date of a given cote written in the filename of the first page of the cote
"""
should_be_name = log_df[log_df.cote == cote]["should_be_name"].iloc[0]
end_date = -8 # "_XXX.jpg" has length 8
begin_date = re.search(cote,should_be_name).span()[1]
date = should_be_name[begin_date:end_date]
date_match = re.search("\d\d\d\d-\d\d-\d\d", date)
if date_match is not None:
year, month, day = date_match.group().split("-")
day_num, month_num, year_num = int(day), int(month), int(year)
return day_num, month_num, year_num, "filename_date"
date_match = re.search("\d\d\d\d-\d\d", date)
if date_match is not None:
"""Returns mapping of sites from input to this object
Pymatgen's molecule_matcher unfortunately does not work here, as it
requires a reasonably physical molecule.
Instead, a graph is constructed by connecting nearest neighbors,
isomorphism matching is used to find candidate mappings, and the Kabsch
algorithm then confirms each match. NetworkX is used for portability.
Parameters
----------
env : dictionary that contains information on the local environment of a
site in the datum. See the _GetSiteEnvironments definition in the class
SiteEnvironments for what this variable should contain.
Returns
-------
dict : atom mapping. Raises ValueError if no acceptable mapping is found.
"""
# construct graph
G = self._ConstructGraph(env['pos'],env['sitetypes'])
if len(self.G.nodes) != len(G.nodes):
s = 'Number of nodes is not equal.\n'
raise ValueError(s)
elif len(self.G.edges) != len(G.edges):
print(len(self.G.edges),len(G.edges))
s = 'Number of edges is not equal.\n'
s += "- Is the data point's cell a redefined lattice of primitive cell?\n"
s += '- If relaxed structure is used, you may want to check structure or increase Gatol\n'
if path:
s += path
raise ValueError(s)
GM = iso.GraphMatcher(self.G,G,self._nm,self._em)
######################## Most Time Consuming Part #####################
ams = list(GM.isomorphisms_iter())
# Perhaps parallelize it?
######################## Most Time Consuming Part #####################
if not ams:
s = 'No isomorphism found.\n'
s += "- Is the data point's cell a redefined lattice of primitive cell?\n"
s += '- If relaxed structure is used, you may want to check structure or increase rtol\n'
if path:
s += path
raise ValueError(s)
rmsd = []
for am in ams: #Loop over isomorphism
# reconstruct graph after aligning point order
xyz = np.zeros((len(self.pos),3))
for i in am:
xyz[i,:] = env['pos'][am[i],:]
R = self._kabsch(self.pos,xyz)
#RMSD
rmsd.append(np.sqrt(np.mean(np.linalg.norm(np.dot(self.pos,R)-xyz,axis=1)**2)))
mini = np.argmin(rmsd)
minrmsd = rmsd[mini]
if minrmsd < self.tol:
return ams[mini]
else:
s = 'No isomorphism found.\n'
s += '-Consider increasing neighbor finding tolerance'
raise ValueError(s)
def _kabsch(self, P, Q):
"""Returns rotation matrix to align coordinates using
Kabsch algorithm.
"""
C = np.dot(np.transpose(P), Q)
V, S, W = np.linalg.svd(C)
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = -S[-1]
V[:, -1] = -V[:, -1]
R = np.dot(V, W)
return R
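# A small standalone sanity check of the Kabsch step above (added; not part of
# the original module). It rebuilds the same SVD-based rotation estimate with
# plain numpy (the module's `np` import) and verifies that a known rotation
# applied to a point set is recovered, i.e. P @ R ~= Q.
def _kabsch_sanity_check():
    rng = np.random.RandomState(0)
    P = rng.rand(5, 3)
    P = P - P.mean(axis=0)
    theta = 0.3
    R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                       [np.sin(theta),  np.cos(theta), 0.0],
                       [0.0,            0.0,           1.0]])
    Q = np.dot(P, R_true)
    # Same rotation estimate as SiteEnvironment._kabsch
    C = np.dot(P.T, Q)
    V, S, W = np.linalg.svd(C)
    if (np.linalg.det(V) * np.linalg.det(W)) < 0.0:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]
    R = np.dot(V, W)
    assert np.allclose(np.dot(P, R), Q)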
class SiteEnvironments(object):
def __init__(self,site_envs,ns,na,aos,eigen_tol,pbc,cutoff, dnames= None):
"""Initialize
Use Load to initialize this class.
Parameters
----------
site_envs : list of SiteEnvironment object
ns : int. number of spectator sites types
na : int. number of active sites types
aos : list of string. available occupational states for active sites
string should be the name of the occupancy. (consistent with the input data)
eigen_tol : tolerance for eigenanalysis of point group analysis in
pymatgen.
pbc : periodic boundary condition.
cutoff : float. Cutoff radius in angstrom for pooling sites to
construct local environment
"""
self.site_envs = site_envs
self.unique_site_types = [env.sitetypes[0] for env in self.site_envs]
self.ns = ns
self.na = na
self.aos = aos
self.eigen_tol = eigen_tol
self.pbc = pbc
self.cutoff = cutoff
self.dnames = dnames
def __repr__(self):
s = '<%i active sites'%(self.na)+'|%i spectator sites'%(self.ns) +'>'
return s
def __getitem__(self, el):
"""Returns a site environment
"""
return self.site_envs[el]
def ReadDatum(self,path,cutoff_factor = 1.1):
"""Load structure data and return neighbor information
Parameters
----------
path : path of the structure
cutoff_factor : float. this is extra buffer factor multiplied
to cutoff to ensure pooling all relevant sites.
Return
------
Y : property value
XSites : one hot encoding of the site. See DataLoader in Data.py
for detailed instruction.
neighborlist : s x n x p x i. s is the site-type index,
n is the site index, p is the permutation index,
and i is the neighbor site index (0 being the site itself).
See DataLoader in Data.py for detailed instruction.
"""
Y, cell, coord, st, oss = InputReader(path)
# Construct one hot encoding
XSites = np.zeros((len(oss),len(self.aos)))
for i,o in enumerate(oss):
XSites[i,self.aos.index(o)] = 1
# get mapping between all site index to active site index
alltoactive = {}
n = 0
for i,s in enumerate(st):
if 'A' in s:
alltoactive[i] = n
n+=1
# Get Neighbors
## Read Data
site_envs = self._GetSiteEnvironments(coord,cell,st,self.cutoff*cutoff_factor,
self.pbc,get_permutations=False,eigen_tol=self.eigen_tol)
XNSs = [[] for _ in range(len(self.site_envs))]
for env in site_envs:
i = self.unique_site_types.index(env['sitetypes'][0])
env = self._truncate(self.site_envs[i],env)
# get map between two environment
mapping = self.site_envs[i].GetMapping(env,path)
# align input to the primitive cell (reference)
aligned_idx = [env['env2config'][mapping[i]] for i in range(len(env['env2config']))]
# apply permutations
nni_perm = np.take(aligned_idx,self.site_envs[i].permutations)
# remove spectators
nni_perm = nni_perm[:,self.site_envs[i].activesiteidx]
# map it to active sites
nni_perm = np.vectorize(alltoactive.__getitem__)(nni_perm)
XNSs[i].append(nni_perm.tolist())
return Y, XSites.tolist(), XNSs
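# Illustrative note (added): with self.aos == ['empty', 'CO'] (hypothetical
# occupancy names) and oss == ['CO', 'empty', 'CO'], XSites above would be
# [[0, 1], [1, 0], [0, 1]] before being returned as a list.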
@classmethod
def _truncate(cls,env_ref,env):
"""When cutoff_factor is used, it will pool more site than cutoff factor specifies.
This will rule out nonrelevant sites by distance.
"""
# Extract the right number of sites by distance
dists = defaultdict(list)
for i,s in enumerate(env['sitetypes']):
dists[s].append([i,env['dist'][i]])
for s in dists:
dists[s] = sorted(dists[s], key= lambda x:x[1])
siteidx = []
for s in dists:
siteidx += [i[0] for i in dists[s][:env_ref.formula[s]]]
siteidx = sorted(siteidx)
env['pos']=[env['pos'][i] for i in range(len(env['pos'])) if i in siteidx]
env['pos']=np.subtract(env['pos'],np.mean(env['pos'],0))
env['sitetypes'] = [env['sitetypes'][i] for i in range(len(env['sitetypes'])) if i in siteidx]
env['env2config'] = [env['env2config'][i] for i in siteidx]
del env['dist']
return env
@classmethod
def Load(cls,path,cutoff,eigen_tol=1e-5):
"""Load Primitive cell and return SiteEnvironments
Parameters
----------
path : input file path
cutoff : float. cutoff distance in angstrom for collecting local
environment.
eigen_tol : tolerance for eigenanalysis of point group analysis in
pymatgen.
"""
cell, pbc, coord, st, ns, na, aos, dnames = InputReader(path)
site_envs = cls._GetSiteEnvironments(coord,cell,st,cutoff,pbc,True,eigen_tol=eigen_tol)
site_envs = [SiteEnvironment(e['pos'],e['sitetypes'],e['env2config'],
e['permutations'],cutoff) for e in site_envs]
ust = [env.sitetypes[0] for env in site_envs]
usi = np.unique(ust,return_index=True)[1]
site_envs = [site_envs[i] for i in usi]
return cls(site_envs,ns,na,aos,eigen_tol,pbc,cutoff, dnames)
@classmethod
def _GetSiteEnvironments(cls,coord,cell,SiteTypes,cutoff,pbc,get_permutations=True,eigen_tol=1e-5):
"""Extract local environments from primitive cell
Parameters
----------
coord : n x 3 list or numpy array of scaled positions. n is the number
of atom.
cell : 3 x 3 list or numpy array
SiteTypes : n list of string. String must be S or A followed by a
number. S indicates a spectator site and A indicates an active
site.
cutoff : float. cutoff distance in angstrom for collecting local
environment.
pbc : list of boolean. Periodic boundary condition
get_permutations : boolean. Whether to find the permuted neighbor list or not.
eigen_tol : tolerance for eigenanalysis of point group analysis in
pymatgen.
Returns
------
list of local_env : list of local_env class
"""
#%% Check error
assert isinstance(coord,(list,np.ndarray))
assert isinstance(cell,(list,np.ndarray))
assert len(coord) == len(SiteTypes)
#%% Initialize
# TODO: Technically, user doesn't even have to supply site index, because
# pymatgen can be used to automatically categorize sites..
coord = np.mod(coord,1)
pbc = np.array(pbc)
#%% Map sites to other elements..
# TODO: Available pymatgen functions are very limited when DummySpecie is
#       involved. This may perhaps be fixed in the future. Until then, we
#       simply bypass this by mapping each site to an element
# Find available atomic number to map site to it
availableAN = [i+1 for i in reversed(range(0,118))]
# Organize Symbols and record mapping
symbols = []
site_idxs = []
SiteSymMap = {} # mapping
SymSiteMap = {}
for i,SiteType in enumerate(SiteTypes):
if SiteType not in SiteSymMap:
symbol = Element.from_Z(availableAN.pop())
SiteSymMap[SiteType] = symbol
SymSiteMap[symbol] = SiteType
else:
symbol = SiteSymMap[SiteType]
symbols.append(symbol)
if 'A' in SiteType:
site_idxs.append(i)
#%% Get local environments of each site
# Find neighbors and permutations using pymatgen
lattice = Lattice(cell)
structure = Structure(lattice, symbols,coord)
neighbors = structure.get_all_neighbors(cutoff,include_index=True)
site_envs = []
for site_idx in site_idxs:
local_env_sym = [symbols[site_idx]]
local_env_xyz = [structure[site_idx].coords]
local_env_dist = [0.0]
local_env_sitemap = [site_idx]
for n in neighbors[site_idx]:
# if PBC condition is fulfilled..
c = np.around(n[0].frac_coords,10)
withinPBC = np.logical_and(0<=c,c<1)
if np.all(withinPBC[~pbc]):
local_env_xyz.append(n[0].coords)
local_env_sym.append(n[0].specie)
local_env_dist.append(n[1])
local_env_sitemap.append(n[2])
local_env_xyz = np.subtract(local_env_xyz,np.mean(local_env_xyz,0))
perm = []
if get_permutations:
finder = PointGroupAnalyzer(Molecule(local_env_sym,local_env_xyz),eigen_tolerance=eigen_tol)
pg
dset_name="train", query_bert_path_or_handler="", sub_feat_path_or_handler="",
vid_feat_path_or_handler="", normalize_vfeat=True, normalize_tfeat=True,
avg_pooling=False, annotation_root=ANNOTATION_PACKAGE_ROOT, feature_root=FEATURE_PACKAGE_ROOT):
assert dset_name in ['train', 'valid', 'test'], "dset_name should be in 'train' 'valid' and 'test'"
self.dset_name = dset_name
if dset_name == 'train':
self.data = load_jsonl(os.path.join(annotation_root, 'trainset.jsonl'))
elif dset_name == 'valid':
self.data = load_jsonl(os.path.join(annotation_root, 'validset.jsonl'))
elif dset_name == 'test':
self.data = load_jsonl(os.path.join(annotation_root, 'testset.jsonl'))
self.query_bert_path_or_handler = query_bert_path_or_handler
self.sub_feat_path_or_handler = sub_feat_path_or_handler
self.vid_feat_path_or_handler = vid_feat_path_or_handler
self.normalize_vfeat = normalize_vfeat
self.normalize_tfeat = normalize_tfeat
if avg_pooling:
self.pooling = 'avg_pooling'
else:
self.pooling = 'max_pooling'
# Should be loaded from h5py file
with h5py.File(os.path.join(feature_root, 'feature.hdf5'), 'r') as f:
self.query_text_feat = load_from_feature_package(f['query_text_feature'])
self.query_img_feat = load_from_feature_package(f['query_grid_feature'])
self.sub_text_feat = load_from_feature_package(f['subtitle_text_feature'])
self.video_vis_feat = load_from_feature_package(f['frame_grid_feature'])
# Generate query type list
self.query_type = dict(
text=[],
video=[],
text_video=[]
)
for item in self.data:
q_type = item['query_type']
if q_type == 'Text Only':
self.query_type['text'].append(item['query_id'])
elif q_type == 'Video Only':
self.query_type['video'].append(item['query_id'])
else:
self.query_type['text_video'].append(item['query_id'])
# generate list that does not overlap with train set
if dset_name == 'valid' or dset_name == 'test':
self.not_in_train = []
for item in self.data:
if item['not_in_train']:
self.not_in_train.append(item['query_id'])
def __len__(self):
return len(self.data)
def __getitem__(self, index):
item = self.data[index]
sample_seg_idx = random.sample(range(len(item['answer_segment_id'])), 1)[0]
meta = edict(
query_id=item['query_id'],
query_name=item['query_name'],
text_query=item['text_query'],
original_query=item['original_query'],
query_img_path=item['query_img_path'],
vid_name=item['vid_name'],
answer_segment_name=item['answer_segment_name'],
answer_segment_id=item['answer_segment_id'],
answer_segment_info=item['answer_segment_info'],
sample_seg_id_for_training=item['answer_segment_id'][sample_seg_idx],
sample_seg_name_for_training=item['answer_segment_name'][sample_seg_idx]
)
query_text_feat = self.query_text_feat[item['vid_name']][item['query_name']]['feature'][0]
img_2_text_alignment = self.query_text_feat[item['vid_name']][item['query_name']]['img_alignment']
query_vis_feat = self.query_img_feat[item['vid_name']][item['query_img_path'].split('/')[-1]]
ctx_vis_feat = self.video_vis_feat[item['vid_name']][item['answer_segment_name'][sample_seg_idx]][self.pooling]
ctx_text_feat = self.sub_text_feat[item['vid_name']][item['answer_segment_name'][sample_seg_idx]][self.pooling]
if self.normalize_tfeat:
query_text_feat = l2_normalize_np_array(query_text_feat)
ctx_text_feat = l2_normalize_np_array(ctx_text_feat)
if self.normalize_vfeat:
query_vis_feat = l2_normalize_np_array(query_vis_feat)
ctx_vis_feat = l2_normalize_np_array(ctx_vis_feat)
return edict(
meta=meta,
query_text_feat=torch.from_numpy(query_text_feat),
query_vis_feat=torch.from_numpy(query_vis_feat),
image_2_text_alignment=img_2_text_alignment,
ctx_vis_feat=torch.from_numpy(ctx_vis_feat),
ctx_text_feat=torch.from_numpy(ctx_text_feat)
)
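# Hedged usage sketch (added; assumes the dataset class defined above is named
# AQVSR and that the default annotation/feature roots are available):
#
#   dset = AQVSR(dset_name='train')
#   sample = dset[0]
#   sample['query_text_feat'].shape   # (L, D_q) query token features
#   sample['meta']['query_id']        # query id
#
# Batching with a torch DataLoader would need a custom collate_fn, since
# image_2_text_alignment is a ragged python list.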
class AQVSR_segment(Dataset):
def __init__(self, dset_name="train", normalize_vfeat=True, normalize_tfeat=True,
avg_pooling=False, annotation_root=ANNOTATION_PACKAGE_ROOT, feature_root=FEATURE_PACKAGE_ROOT):
assert dset_name in ['train', 'valid', 'test'], "dset_name should be in 'train' 'valid' and 'test'"
self.dset_name = dset_name
if dset_name == 'train':
self.data = load_jsonl(os.path.join(annotation_root, 'trainset.jsonl'))
elif dset_name == 'valid':
self.data = load_jsonl(os.path.join(annotation_root, 'validset.jsonl'))
elif dset_name == 'test':
self.data = load_jsonl(os.path.join(annotation_root, 'testset.jsonl'))
self.normalize_vfeat = normalize_vfeat
self.normalize_tfeat = normalize_tfeat
if avg_pooling:
self.pooling = 'avg_pooling'
else:
self.pooling = 'max_pooling'
# Generate iterable segment list
self.segment_list = []
vid_set = set()
for query in self.data:
vid = query['query_name'][:11]
vid_set.add(vid)
seg2id = load_json(os.path.join(ID_FILE_ROOT, 'id.json'))['seg2id']
for seg_name, seg_id in seg2id.items():
vid = seg_name[:11]
if vid in vid_set:
self.segment_list.append([seg_id, seg_name, vid])
# Should be loaded from h5py file
with h5py.File(os.path.join(feature_root, 'feature.hdf5'), 'r') as f:
self.sub_text_feat = load_from_feature_package(f['subtitle_text_feature'])
self.video_vis_feat = load_from_feature_package(f['frame_grid_feature'])
def __len__(self):
return len(self.segment_list)
def __getitem__(self, index):
seg = self.segment_list[index]
seg_id = seg[0]
seg_name = seg[1]
vid = seg[2]
ctx_vis_feat = self.video_vis_feat[vid][seg_name][self.pooling]
ctx_text_feat = self.sub_text_feat[vid][seg_name][self.pooling]
if self.normalize_tfeat:
ctx_text_feat = l2_normalize_np_array(ctx_text_feat)
if self.normalize_vfeat:
ctx_vis_feat = l2_normalize_np_array(ctx_vis_feat)
return edict(
seg_id=seg_id,
seg_name=seg_name,
vid_name=vid,
ctx_vis_feat=torch.from_numpy(ctx_vis_feat),
ctx_text_feat=torch.from_numpy(ctx_text_feat)
)
# Return format according to ranking loss
# pos, intra-neg, inter-neg
class AQVSR_Ranking(Dataset):
"""
Args:
avg_pooling, boolean, default = False, True for avg_pooling, False for max_pooling
Return:
a dict: {
"meta": {
"query_id": int,
"text_query": str, # purely text query
"original_query": str,
"query_image_path": str,
"vid_name": str, # youtube_id (11)
"answer_segment_name": list[str], # name of segments: ["xtuiYd45q1W_segment1",...]
"answer_segment_id": list[segment_id], # unique_segment_id
"answer_segment_info": list[[st,ed], ... [st,ed]], # start_time, end_time of coresponding segment
# modified in v2:
"pos_seg_id_for_training": int, # sample one ground truth segment for training
"pos_seg_name_for_training": str,
"intra_neg_seg_id_for_training": int, # sample one intra wrong segment for training
"intra_neg_seg_name_for_training": str,
"inter_neg_seg_id_for_training": int, # sample one inter wrong segment for training
"inter_neg_seg_name_for_training": str,
}
"query_text_feat": torch.tensor, (L, D_q) # query feature
"query_vis_feat": torch.tensor, (n_region, 2048) # image feature®ion feature
"image_2_text_alignment": list[list] # image to token alignment
# modified in v2: # n_sample sub/video features, including the ground truth
"pos_text_feat": torch.tensor, (n_clip_in_segment, dim_sub)
"intra_neg_text_feat": torch.tensor, (n_clip_in_segment, dim_sub)
"inter_neg_text_feat": torch.tensor, (n_clip_in_segment, dim_sub)
"pos_vis_feat": torch.tensor, (n_sample, n_clip_in_segment, dim_video)
"intra_neg_vis_feat": torch.tensor, (n_clip_in_segment, dim_video)
"inter_neg_vis_feat": torch.tensor, (n_clip_in_segment, dim_video)
}
"""
def __init__(self, dset_name='train', normalize_vfeat=True, normalize_tfeat=True,
avg_pooling=False, annotation_root=ANNOTATION_PACKAGE_ROOT, feature_root=FEATURE_PACKAGE_ROOT):
assert dset_name in ['train', 'valid', 'test'], "dset_name should be in 'train' 'valid' and 'test'"
self.dset_name = dset_name
if dset_name == 'train':
self.data = load_jsonl(os.path.join(annotation_root, 'trainset.jsonl'))
elif dset_name == 'valid':
self.data = load_jsonl(os.path.join(annotation_root, 'validset.jsonl'))
elif dset_name == 'test':
self.data = load_jsonl(os.path.join(annotation_root, 'testset.jsonl'))
# the return dict should also be modified if the number of negatives changes
self.n_pos = 1
self.n_neg_intra = 1
self.n_neg_inter = 1
self.normalize_vfeat = normalize_vfeat
self.normalize_tfeat = normalize_tfeat
if avg_pooling:
self.pooling = 'avg_pooling'
else:
self.pooling = 'max_pooling'
# Generate iterable segment list, split segment to train/test set
self.segment_list = []
vid_set = set()
for query in self.data:
vid = query['query_name'][:11]
vid_set.add(vid)
seg2id = load_json(os.path.join(ID_FILE_ROOT, 'id.json'))['seg2id']
for seg_name, seg_id in seg2id.items():
vid = seg_name[:11]
if vid in vid_set:
self.segment_list.append([seg_id, seg_name, vid])
# Should be loaded from h5py file
with h5py.File(os.path.join(feature_root, 'feature.hdf5'), 'r') as f:
self.query_text_feat = load_from_feature_package(f['query_text_feature'])
self.query_img_feat = load_from_feature_package(f['query_grid_feature'])
self.sub_text_feat = load_from_feature_package(f['subtitle_text_feature'])
self.video_vis_feat = load_from_feature_package(f['frame_grid_feature'])
# Add negative list
for item_idx in range(len(self.data)):
item = self.data[item_idx]
negative_seg_id_intra = []
negative_seg_id_inter = []
negative_seg_name_intra = []
negative_seg_name_inter = []
for [seg_id, seg_name, vid] in self.segment_list:
if seg_name in item['answer_segment_name']:
continue
else:
if vid == item['vid_name']:
negative_seg_id_intra.append(seg_id)
negative_seg_name_intra.append(seg_name)
else:
negative_seg_id_inter.append(seg_id)
negative_seg_name_inter.append(seg_name)
self.data[item_idx]['intra_negative_segment_name'] = negative_seg_name_intra
self.data[item_idx]['intra_negative_segment_id'] = negative_seg_id_intra
self.data[item_idx]['inter_negative_segment_name'] = negative_seg_name_inter
self.data[item_idx]['inter_negative_segment_id'] = negative_seg_id_inter
# Generate query type list
self.query_type = dict(
text=[],
video=[],
text_video=[]
)
for item in self.data:
q_type = item['query_type']
if q_type == 'Text Only':
self.query_type['text'].append(item['query_id'])
elif q_type == 'Video Only':
self.query_type['video'].append(item['query_id'])
else:
self.query_type['text_video'].append(item['query_id'])
# generate list that does not overlap with train set
if dset_name == 'valid' or dset_name == 'test':
self.not_in_train = []
for item in self.data:
if item['not_in_train']:
self.not_in_train.append(item['query_id'])
def __len__(self):
return len(self.data)
def __getitem__(self, index):
item = self.data[index]
# sample positive and negative segment
positive_seg_id = item['answer_segment_id']
positive_seg_name = item['answer_segment_name']
negative_seg_name_intra = item['intra_negative_segment_name']
negative_seg_name_inter = item['inter_negative_segment_name']
negative_seg_id_intra = item['intra_negative_segment_id']
negative_seg_id_inter = item['inter_negative_segment_id']
positive_idx = random.sample(range(len(positive_seg_name)), self.n_pos)
negative_idx_intra = random.sample(range(len(negative_seg_name_intra)), self.n_neg_intra)
negative_idx_inter = random.sample(range(len(negative_seg_name_inter)), self.n_neg_inter)
positive_seg_id_sampled = [positive_seg_id[idx] for idx in positive_idx]
negative_seg_id_intra_sampled = [negative_seg_id_intra[idx] for idx in negative_idx_intra]
negative_seg_id_inter_sampled = [negative_seg_id_inter[idx] for idx in negative_idx_inter]
positive_seg_name_sampled = [positive_seg_name[idx] for idx in positive_idx]
negative_seg_name_intra_sampled = [negative_seg_name_intra[idx] for idx in negative_idx_intra]
negative_seg_name_inter_sampled = [negative_seg_name_inter[idx] for idx in negative_idx_inter]
meta = edict(
query_id=item['query_id'],
query_name=item['query_name'],
text_query=item['text_query'],
original_query=item['original_query'],
query_img_path=item['query_img_path'],
vid_name=item['vid_name'],
answer_segment_name=item['answer_segment_name'],
answer_segment_id=item['answer_segment_id'],
answer_segment_info=item['answer_segment_info'],
pos_seg_id=positive_seg_id_sampled[0], # note that this [0] requires n_pos / n_neg_intra / n_neg_inter to all be 1
pos_seg_name=positive_seg_name_sampled[0],
intra_neg_seg_id=negative_seg_id_intra_sampled[0],
intra_neg_seg_name=negative_seg_name_intra_sampled[0],
inter_neg_seg_id=negative_seg_id_inter_sampled[0],
inter_neg_seg_name=negative_seg_name_inter_sampled[0]
)
query_text_feat = self.query_text_feat[item['vid_name']][item['query_name']]['feature'][0]
img_2_text_alignment = self.query_text_feat[item['vid_name']][item['query_name']]['img_alignment']
query_vis_feat = self.query_img_feat[item['vid_name']][item['query_img_path'].split('/')[-1]]
ctx_vis_feat = [self.video_vis_feat[seg_name[:11]][seg_name][self.pooling] for seg_name in
positive_seg_name_sampled + negative_seg_name_intra_sampled + negative_seg_name_inter_sampled]
ctx_text_feat = [self.sub_text_feat[seg_name[:11]][seg_name][self.pooling] for seg_name in
positive_seg_name_sampled + negative_seg_name_intra_sampled + negative_seg_name_inter_sampled]
if self.normalize_tfeat:
query_text_feat = l2_normalize_np_array(query_text_feat)
for i in range(len(ctx_text_feat)):
ctx_text_feat[i] = torch.from_numpy(l2_normalize_np_array(ctx_text_feat[i]))
if self.normalize_vfeat:
query_vis_feat = l2_normalize_np_array(query_vis_feat)
for i in range(len(ctx_vis_feat)):
ctx_vis_feat[i] = torch.from_numpy(l2_normalize_np_array(ctx_vis_feat[i]))
return edict(
meta=meta,
query_text_feat=torch.from_numpy(query_text_feat),
query_vis_feat=torch.from_numpy(query_vis_feat),
image_2_text_alignment=img_2_text_alignment,
pos_ctx_vis_feat=ctx_vis_feat[0],
intra_neg_ctx_vis_feat=ctx_vis_feat[1],
inter_neg_ctx_vis_feat=ctx_vis_feat[2],
pos_ctx_text_feat=ctx_text_feat[0],
intra_neg_ctx_text_feat=ctx_text_feat[1],
inter_neg_ctx_text_feat=ctx_text_feat[2],
)
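# Hedged sketch (added): the training loss is not part of this file; one
# conventional choice for the (pos, intra-neg, inter-neg) triplets returned
# above is a margin-based ranking loss over similarity scores, e.g.:
def example_margin_ranking_loss(pos_score, neg_score, margin=0.1):
    """Hinge on (pos - neg): zero once pos_score exceeds neg_score by `margin`."""
    return torch.clamp(margin + neg_score - pos_score, min=0).mean()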
class AQVSR_Ranking_enum(Dataset):
"""
Args:
avg_pooling, boolean, default = False, True for avg_pooling, False for max_pooling
Return:
a dict: {
"meta": {
"query_id": int,
"text_query": str, # purely text query
"original_query": str,
"query_image_path": str,
"vid_name": str, # youtube_id (11)
"answer_segment_name": list[str], # name of segments: ["xtuiYd45q1W_segment1",...]
"answer_segment_id": list[segment_id], # unique_segment_id
"answer_segment_info": list[[st,ed], ... [st,ed]], # start_time, end_time of coresponding segment
# modified in v2:
"seg_id_for_ranking": int, #
"seg_name_for_ranking": str,
}
"query_text_feat": torch.tensor, (L, D_q) # query feature
"query_vis_feat": torch.tensor, (n_region, 2048) # image feature®ion feature
"image_2_text_alignment": list[list] # image to token alignment
# modified in v2:
"ctx_text_feat": torch.tensor, (n_clip_in_segment, dim_sub) # sampled sub/video feature
"ctx_vis_feat": torch.tensor, (n_sample, n_clip_in_segment, dim_video)
}
"""
def __init__(self, dset_name='test', normalize_vfeat=True, normalize_tfeat=True,
avg_pooling=False, annotation_root=ANNOTATION_PACKAGE_ROOT, feature_root=FEATURE_PACKAGE_ROOT):
assert dset_name in ['train', 'valid', 'test'], "dset_name should be in 'train' 'valid' and 'test'"
self.dset_name = dset_name
if dset_name == 'train':
self.data = load_jsonl(os.path.join(annotation_root, 'trainset.jsonl'))
elif dset_name == 'valid':
self.data = load_jsonl(os.path.join(annotation_root, 'validset.jsonl'))
elif dset_name == 'test':
self.data = load_jsonl(os.path.join(annotation_root, 'testset.jsonl'))
self.normalize_vfeat = normalize_vfeat
self.normalize_tfeat = normalize_tfeat
if avg_pooling:
self.pooling = 'avg_pooling'
else:
self.pooling = 'max_pooling'
# Generate iterable segment list, split segment to train/test set
self.pairlist = []
vid_set = set()
for query in self.data:
vid = query['query_name'][:11]
vid_set.add(vid)
seg2id = load_json(os.path.join(ID_FILE_ROOT, 'id.json'))['seg2id']
# collect query and seg
self.query_ids = [self.data[i]['query_id'] for i in range(len(self.data))]
self.seg_ids = [v for k, v in seg2id.items() if k[:11] in vid_set]
self.n_query = len(self.query_ids)
self.n_seg = len(self.seg_ids)
# print(self.n_query, self.n_seg)
for query in self.data:
for seg_name, seg_id in seg2id.items():
vid = seg_name[:11]
if vid in vid_set:
self.pairlist.append(dict(
query_item=query,
seg_name=seg_name,
seg_id=seg_id,
vid=vid
))
# Should be loaded from h5py file
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check the local folder and upload new files to a remote folder."""
import datetime
import logging
import os
import re
import string
import urllib2
import gflags
import httplib2
from makani.lib.python import gsutil
from makani.lib.python.batch_sim import gcloud_util
import psutil
gflags.DEFINE_integer('max_cpu_percent',
60,
'Do not upload if the cpu utilization goes above it.')
gflags.DEFINE_integer('max_mem_percent',
90,
'Do not upload if the memory utilization goes above it.')
gflags.DEFINE_list('exclusive_binaries',
['sim', 'recorder', 'vis'],
'Do not upload if any of the listed binaries is running.')
gflags.DEFINE_boolean('preserve_local',
True,
'True if the local logs should remain after they '
'are uploaded.')
gflags.DEFINE_boolean('clean_uploaded',
False,
'True if a local log should be removed if the scan '
'finds it is already uploaded.')
FLAGS = gflags.FLAGS
class BadTimeToUploadError(Exception):
"""Error raised to signal that this is not a good time to upload logs."""
class _UploadFailureError(Exception):
"""Error raised to signal that log upload has failed."""
def IterFiles(local_root, regex_pattern):
"""Check the local folder and iterate through files to be uploaded.
Local files can exist in sub-directories of local_root. All uploaded files
reside directly under dest_dir. If a file already exists in dest_dir, it
will not be uploaded.
Args:
local_root: The local directory in which to scan for things to upload.
regex_pattern: Regular expression used to identify what files to upload.
It is used to match the relative file path within `local_root`.
Yields:
A tuple of (file name, directory path) to the file to be uploaded.
"""
regex = re.compile(regex_pattern)
for dirpath, directories, files in os.walk(local_root):
# Files in upper level directories are uploaded first; we assume files in
# subdirectories take lower priority / are less interesting.
# TODO: If it is preferred to treat files/subdirectories in the same
# ordered stream, then it is better to collect all paths first, then sort
# and upload them in order. This applies to CheckAndUploadDirectories too.
# In one directory, files tagged with larger timestamps are uploaded first.
files.sort(reverse=True)
for filename in files:
base_relpath = os.path.join(dirpath, filename)
rel_path = os.path.relpath(base_relpath, local_root)
if regex and not regex.match(rel_path):
continue
yield filename, dirpath
# Traverse directories with larger timestamps first.
directories.sort(reverse=True)
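# Hedged usage sketch (added; the directory and pattern below are hypothetical):
#
#   for filename, dirpath in IterFiles('/var/logs', r'.*\.h5$'):
#       print(os.path.join(dirpath, filename))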
def IterDirectories(local_root, regex_dir_pattern):
"""Iterate matching directories.
Local directories must have a relative path matching a particular pattern.
All uploaded directories reside directly under dest_dir. If a directory
already exists in dest_dir, it will not be uploaded.
Args:
local_root: The local directory containing directories to upload.
regex_dir_pattern: Regular expression to identify what directories to
upload.
Yields:
A tuple of (directory name, parent directory) of the files to upload.
"""
regex_dir = re.compile(regex_dir_pattern) if regex_dir_pattern else None
for dirpath, directories, _ in os.walk(local_root):
# Upload directories tagged with larger timestamps first.
for directory in sorted(directories, reverse=True):
base_relpath = os.path.join(dirpath, directory)
rel_path = os.path.relpath(base_relpath, local_root)
if regex_dir and not regex_dir.match(rel_path):
continue
directories.remove(directory)
yield directory, dirpath
def PrepareToUpload(local_root, dest_dir):
"""Check necessary conditions required for uploading.
Args:
local_root: The local directory containing directories to upload.
dest_dir: The remote directory to upload to.
Returns:
existing_dest_paths: A set of filenames for existing files in the
destination directory.
gs_api: The gsutil.GsutilApi object.
Raises:
BadTimeToUploadError: Internet is not available.
ValueError: local_root is not a valid path.
"""
if not os.path.isdir(local_root):
raise ValueError('Cannot find local directory %s.' % local_root)
if not HasInternet():
raise BadTimeToUploadError('No internet connection detected.')
gs_api = gsutil.GsutilApi()
try:
existing_dest_paths = set(gs_api.List(dest_dir))
except httplib2.ServerNotFoundError:
raise BadTimeToUploadError('Internet has become unavailable.')
return existing_dest_paths, gs_api
def TryUploadDirectory(directory, parent_relpath, dest_dir, source_file_regex,
rename_template, gs_api, preserve_local, check_timing,
clean_uploaded, uploaded_files):
"""Attempt to upload a directory.
Args:
directory: The name of the directory.
parent_relpath: The local directory where the directory resides.
dest_dir: The remote directory to upload to.
source_file_regex: The precompiled regular expression to test whether a file
should be uploaded. If None, all files are uploaded.
The regex is used to match the subpath within `directory`.
rename_template: The template used to rename the file at the destination.
If None, the original file name is preserved.
gs_api: The gsutil.GsutilApi object.
preserve_local: If True, the source files will remain after uploading.
check_timing: If True, the upload will begin only if preconditions are met.
clean_uploaded: True if a local log should be removed if the scan
finds it is already uploaded.
uploaded_files: A list of tuples, each has the form of (local_filename,
uploaded_filename).
Raises:
BadTimeToUploadError: Raised if it is not the right time to upload.
"""
base_relpath = os.path.join(parent_relpath, directory)
renamed_directory = _RenameFile(directory, rename_template, parent_relpath)
full_cloud_path = gcloud_util.GcsPath(dest_dir, renamed_directory)
# Test if there exists any file with such prefix.
# TODO: Could be made more efficient if there is an "Exist" call.
is_new_path = not bool(gs_api.List(full_cloud_path))
# Upload all files (except symbolic links) within the directory.
# Do not rename any files within the directory.
rename_template = None
for sub_directory, sub_directories, files in os.walk(base_relpath):
rel_path = os.path.relpath(sub_directory, base_relpath)
if rel_path == '.':
sub_cloud_directory = full_cloud_path
else:
sub_cloud_directory = gcloud_util.GcsPath(full_cloud_path, rel_path)
if is_new_path:
existing_dest_paths = set()
else:
try:
existing_dest_paths = set(gs_api.List(sub_cloud_directory))
except httplib2.ServerNotFoundError:
# Internet becomes unavailable.
return
# Files in upper level directories are uploaded first; we assume files in
# subdirectories take lower priority / are less interesting.
# In one directory, files tagged with larger timestamps are uploaded first.
files.sort(reverse=True)
for filename in files:
file_path = os.path.join(sub_directory, filename)
rel_path = os.path.relpath(file_path, base_relpath)
if source_file_regex and not source_file_regex.match(rel_path):
continue
try:
result = TryUploadFile(filename, sub_directory, sub_cloud_directory,
existing_dest_paths, rename_template, gs_api,
preserve_local, check_timing, clean_uploaded)
except BadTimeToUploadError:
return
else:
if result:
uploaded_files.append(result)
# Traverse directories with larger timestamps first.
sub_directories.sort(reverse=True)
def TryUploadFile(filename, root, dest_dir, existing_dest_paths,
rename_template, gs_api, preserve_local, check_timing,
clean_uploaded):
"""Attempt to upload a file.
Args:
filename: The name of the file.
root: The local directory where the file resides.
dest_dir: The remote directory to upload to.
existing_dest_paths: A set of filenames for existing files in the
destination directory.
rename_template: The template used to rename the file at the destination.
If None, the original file name is preserved.
gs_api: The gsutil.GsutilApi object.
preserve_local: If True, the source files will remain in local_root after
uploading.
check_timing: If True, the upload will begin only if preconditions are met.
clean_uploaded: True if a local log should be removed if the scan
finds it is already uploaded.
Returns:
None if upload failed.
(full_local_path, full_cloud_path) if upload succeeded.
full_local_path is the full local path of the file to be uploaded.
full_cloud_path is the destination path to upload the file.
Raises:
BadTimeToUploadError: Raised if it is not the right time to upload.
_UploadFailureError: Raised if the upload is unsuccessful.
"""
# Check whether the uploading conditions are met.
is_valid, full_local_path = _CheckIsUploadable(filename, root)
if not is_valid:
return None
full_cloud_path = _GetRemoteFilename(root, filename, dest_dir,
rename_template=rename_template)
is_already_uploaded = _CheckAndCleanUploaded(
full_local_path, full_cloud_path, existing_dest_paths,
gs_api, clean_uploaded=clean_uploaded)
if is_already_uploaded:
return None
# Terminate early if the system is no longer idle.
if check_timing and not _TimeForUpload():
logging.error('Bad time to upload files.')
raise BadTimeToUploadError()
# Upload the file and remove local file if desired.
is_uploaded = _UploadFile(full_local_path, full_cloud_path, gs_api)
if is_uploaded:
if not preserve_local:
_DeleteLocalFile(full_local_path)
existing_dest_paths.add(full_cloud_path)
return (full_local_path, full_cloud_path)
elif check_timing and not HasInternet():
logging.error('Internet is not available.')
raise BadTimeToUploadError()
else:
return None
def HasInternet():
"""Test whether internet is available or not.
The function is based on the assumption that Google NEVER goes down!
Returns:
True if internet is available.
"""
try:
urllib2.urlopen('http://gstatic.com/generate_204', timeout=1)
return True
except urllib2.URLError:
return False
def _UploadFile(full_local_path, full_cloud_path, gs_api):
"""Upload the file to the cloud.
Args:
full_local_path: The full local path of the file to be uploaded.
full_cloud_path: The destination path of the uploaded file.
gs_api: The gsutil.GsutilApi object.
Returns:
True if the file has been uploaded successfully.
"""
logging.info('Uploading %s
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
from mongokit_ng import *
from bson.objectid import ObjectId
class AutoRefTestCase(unittest.TestCase):
"""Tests AutoRef case"""
def setUp(self):
self.connection = Connection()
self.col = self.connection.test.mongokit
def tearDown(self):
self.connection.drop_database('test')
self.connection.drop_database('test2')
def test_simple_autoref(self):
class DocA(Document):
structure = {
"a":{'foo':int},
}
self.connection.register([DocA])
doca = self.col.DocA()
doca['_id'] = 'doca'
doca['a']['foo'] = 3
doca.save()
class DocB(Document):
structure = {
"b":{"doc_a":DocA},
}
use_autorefs = True
self.connection.register([DocB])
docb = self.col.DocB()
# the structure is automatically filled by the corresponding structure
assert docb == {'b': {'doc_a':None}}, docb
#docb.validate()
docb['_id'] = 'docb'
docb['b']['doc_a'] = 4
self.assertRaises(SchemaTypeError, docb.validate)
docb['b']['doc_a'] = doca
assert docb == {'b': {'doc_a': {'a': {'foo': 3}, '_id': 'doca'}}, '_id': 'docb'}
docb.save()
saved_docb = self.col.find_one({'_id':'docb'})
_docb = self.col.DocB.get_from_id('docb')
assert saved_docb['b']['doc_a'] == DBRef(database='test', collection='mongokit', id='doca'), saved_docb['b']['doc_a']
docb_list = list(self.col.DocB.fetch())
assert len(docb_list) == 1
new_docb = docb_list[0]
assert isinstance(new_docb['b']['doc_a'], DocA), new_docb['b']['doc_a'].__class__
assert docb == {'b': {'doc_a': {'a': {'foo': 3}, '_id': 'doca'}}, '_id': 'docb'}, docb
assert docb['b']['doc_a']['a']['foo'] == 3
docb['b']['doc_a']['a']['foo'] = 4
docb.save()
assert docb['b']['doc_a']['a']['foo'] == 4, docb
assert self.col.DocA.fetch().next()['a']['foo'] == 4
assert doca['a']['foo'] == 4, doca['a']['foo']
saved_docb = self.col.DocB.collection.find_one({'_id':'docb'})
assert saved_docb['b']['doc_a'] == DBRef(database='test', collection='mongokit', id='doca'), saved_docb['b']['doc_a']
assert self.col.DocB.fetch_one() == docb
assert self.col.DocB.find_one({'_id':'docb'}) == docb
def test_simple_autoref2(self):
class Embed(Document):
structure = {
'foo': dict,
'bar': int,
}
class Doc(Document):
structure = {
'embed':Embed,
'eggs': str,
}
use_autorefs = True
self.connection.register([Embed, Doc])
embed = self.col.Embed()
embed['foo'] = {'hello':'monde'}
embed['bar'] = 3
embed.save()
doc = self.col.Doc()
doc['embed'] = embed
doc['eggs'] = 'arf'
doc.save()
assert doc == {'embed': {'_id': embed['_id'], 'bar': 3, 'foo': {'hello': 'monde'}}, '_id': doc['_id'], 'eggs': 'arf'}, doc
doc = self.col.Doc.fetch_one()
doc['embed']['foo']['hello'] = 'World'
doc.save()
assert doc == {'embed': {'_id': embed['_id'], 'bar': 3, 'foo': {'hello': 'World'}}, '_id': doc['_id'], 'eggs': 'arf'}, doc
assert self.col.Embed.fetch_one() == {'_id': embed['_id'], 'bar': 3, 'foo': {'hello': 'World'}}
def test_autoref_with_default_values(self):
class DocA(Document):
structure = {
"a":{'foo':int},
"abis":{'bar':int},
}
self.connection.register([DocA])
doca = self.col.DocA()
doca['_id'] = 'doca'
doca['a']['foo'] = 2
doca.save()
class DocB(Document):
structure = {
"b":{"doc_a":DocA},
}
use_autorefs = True
default_values = {'b.doc_a':doca}
self.connection.register([DocB])
docb = self.col.DocB()
assert docb == {'b': {'doc_a': {'a': {'foo': 2}, 'abis': {'bar': None}, '_id': 'doca'}}}, docb
docb.save()
def test_autoref_with_required_fields(self):
class DocA(Document):
structure = {
"a":{'foo':int},
"abis":{'bar':int},
}
required_fields = ['a.foo']
self.connection.register([DocA])
doca = self.col.DocA()
doca['_id'] = 'doca'
doca['a']['foo'] = 2
doca.save()
class DocB(Document):
db_name = "test"
collection_name = "mongokit"
structure = {
"b":{"doc_a":DocA},
}
use_autorefs = True
self.connection.register([DocB])
docb = self.col.DocB()
docb['b']['doc_a'] = doca
assert docb == {'b': {'doc_a': {'a': {'foo': 2}, 'abis': {'bar': None}, '_id': 'doca'}}}, docb
docb['_id'] = 'docb'
docb['b']['doc_a']['a']['foo'] = None
self.assertRaises(RequireFieldError, docb.validate)
docb['b']['doc_a']['a']['foo'] = 4
docb.save()
docb['b']['doc_a'] = None
docb.save()
def test_badautoref(self):
"""Test autoref enabled, but embed the wrong kind of document.
Assert that it tells us it's a bad embed.
"""
class EmbedDoc(Document):
structure = {
"spam": str
}
self.connection.register([EmbedDoc])
embed = self.col.EmbedDoc()
embed["spam"] = "eggs"
embed.save()
assert embed
class EmbedOtherDoc(Document):
structure = {
"ham": str
}
self.connection.register([EmbedOtherDoc])
embedOther = self.connection.test.embed_other.EmbedOtherDoc()
embedOther["ham"] = "eggs"
embedOther.save()
assert embedOther
class MyDoc(Document):
use_autorefs = True
structure = {
"bla":{
"foo":str,
"bar":int,
},
"spam": EmbedDoc,
}
use_autorefs = True
self.connection.register([MyDoc])
mydoc = self.connection.test.autoref.MyDoc()
mydoc["bla"]["foo"] = "bar"
mydoc["bla"]["bar"] = 42
mydoc["spam"] = embedOther
self.assertRaises(SchemaTypeError, mydoc.save)
def test_badautoref_not_enabled(self):
# Test that, if autoref is disabled,
# adding a Document to the structure acts
# like a regular dict
class EmbedDoc(Document):
structure = {
"spam": str
}
self.connection.register([EmbedDoc])
embed = self.connection.test['autoref.embed'].EmbedDoc()
embed["spam"] = "eggs"
embed.save()
assert embed
class MyDoc(Document):
structure = {
"bla":{
"foo":str,
"bar":int,
},
"spam": EmbedDoc,
}
self.connection.register([MyDoc])
doc = self.col.MyDoc()
self.assertEqual(doc, {'bla': {'foo': None, 'bar': None}, 'spam': None})
def test_subclass(self):
# Test autoref enabled, but embed a subclass.
# e.g. if we say EmbedDoc, a subclass of EmbedDoc
# is also valid.
class EmbedDoc(Document):
structure = {
"spam": str
}
self.connection.register([EmbedDoc])
embed = self.connection.test['autoref.embed'].EmbedDoc()
embed["spam"] = "eggs"
embed.save()
class EmbedOtherDoc(EmbedDoc):
structure = {
"ham": str
}
self.connection.register([EmbedOtherDoc])
embedOther = self.connection.test['autoref.embed_other'].EmbedOtherDoc()
embedOther["ham"] = "eggs"
embedOther.save()
assert embedOther
class MyDoc(Document):
use_autorefs = True
structure = {
"bla":{
"foo":str,
"bar":int,
},
"spam": EmbedDoc,
}
self.connection.register([MyDoc])
mydoc = self.connection.test.autoref.MyDoc()
mydoc["bla"]["foo"] = "bar"
mydoc["bla"]["bar"] = 42
mydoc["spam"] = embedOther
mydoc.save()
assert mydoc['spam'].collection.name == "autoref.embed_other"
assert mydoc['spam'] == embedOther
def test_autoref_in_list(self):
class DocA(Document):
structure = {
"a":{'foo':int},
}
self.connection.register([DocA])
doca = self.col.DocA()
doca['_id'] = 'doca'
doca['a']['foo'] = 3
doca.save()
doca2 = self.col.DocA()
doca2['_id'] = 'doca2'
doca2['a']['foo'] = 5
doca2.save()
class DocB(Document):
structure = {
"b":{"doc_a":[DocA]},
}
use_autorefs = True
self.connection.register([DocB])
docb = self.col.DocB()
# the document is automatically filled with the defaults from the corresponding structure
assert docb == {'b': {'doc_a':[]}}, docb
docb.validate()
docb['_id'] = 'docb'
docb['b']['doc_a'].append('bla')
self.assertRaises(SchemaTypeError, docb.validate)
docb['b']['doc_a'] = []
docb['b']['doc_a'].append(doca)
assert docb == {'b': {'doc_a': [{'a': {'foo': 3}, '_id': 'doca'}]}, '_id': 'docb'}
docb.save()
assert isinstance(docb.collection.find_one({'_id':'docb'})['b']['doc_a'][0], DBRef), type(docb.collection.find_one({'_id':'docb'})['b']['doc_a'][0])
assert docb == {'b': {'doc_a': [{'a': {'foo': 3}, '_id': 'doca'}]}, '_id': 'docb'}
assert docb['b']['doc_a'][0]['a']['foo'] == 3
docb['b']['doc_a'][0]['a']['foo'] = 4
docb.save()
assert docb['b']['doc_a'][0]['a']['foo'] == 4, docb['b']['doc_a'][0]['a']['foo']
assert doca['a']['foo'] == 4, doca['a']['foo']
docb['b']['doc_a'].append(doca2)
assert docb == {'b': {'doc_a': [{'a': {'foo': 4}, '_id': 'doca'}, {'a': {'foo': 5}, '_id': 'doca2'}]}, '_id': 'docb'}
docb.validate()
def test_autoref_retrieval(self):
class DocA(Document):
structure = {
"a":{'foo':int},
}
self.connection.register([DocA])
doca = self.col.DocA()
doca['_id'] = 'doca'
doca['a']['foo'] = 3
doca.save()
class DocB(Document):
structure = {
"b":{
"doc_a":DocA,
"deep": {"doc_a_deep":DocA},
"deeper": {"doc_a_deeper":DocA,
"inner":{"doc_a_deepest":DocA}}
},
}
use_autorefs = True
self.connection.register([DocB])
docb = self.col.DocB()
# the document is automatically filled with the defaults from the corresponding structure
docb['_id'] = 'docb'
docb['b']['doc_a'] = doca
# create a few deeper docas
deep = self.col.DocA()
#deep['_id'] = 'deep'
deep['a']['foo'] = 5
deep.save()
docb['b']['deep']['doc_a_deep'] = deep
deeper = self.col.DocA()
deeper['_id'] = 'deeper'
deeper['a']['foo'] = 8
deeper.save()
docb['b']['deeper']['doc_a_deeper'] = deeper
deepest = self.col.DocA()
deepest['_id'] = 'deepest'
#deepest['_id'] = 'deeper'
deepest['a']['foo'] = 18
deepest.save()
docb['b']['deeper']['inner']['doc_a_deepest'] = deepest
docb.save()
# now, does retrieval function as expected?
test_doc = self.col.DocB.get_from_id(docb['_id'])
assert isinstance(test_doc['b']['doc_a'], DocA), type(test_doc['b']['doc_a'])
assert test_doc['b']['doc_a']['a']['foo'] == 3
assert isinstance(test_doc['b']['deep']['doc_a_deep'], DocA)
assert test_doc['b']['deep']['doc_a_deep']['a']['foo'] == 5
assert isinstance(test_doc['b']['deeper']['doc_a_deeper'], DocA)
assert test_doc['b']['deeper']['doc_a_deeper']['a']['foo'] == 8, test_doc
assert isinstance(test_doc['b']['deeper']['inner']['doc_a_deepest'], DocA)
assert test_doc['b']['deeper']['inner']['doc_a_deepest']['a']['foo'] == 18
def test_autoref_with_same_embed_id(self):
class DocA(Document):
structure = {
"a":{'foo':int},
}
self.connection.register([DocA])
doca = self.col.DocA()
doca['_id'] = 'doca'
doca['a']['foo'] = 3
doca.save()
class DocB(Document):
structure = {
"b":{
"doc_a":DocA,
"deep": {"doc_a_deep":DocA},
},
}
use_autorefs = True
self.connection.register([DocB])
docb = self.col.DocB()
docb['_id'] = 'docb'
docb['b']['doc_a'] = doca
# create a few deeper docas
deep = self.col.DocA()
deep['_id'] = 'doca' # XXX same id as doca; this record will be overwritten when doca is saved again
deep['a']['foo'] = 5
deep.save()
docb['b']['deep']['doc_a_deep'] = deep
docb.save()
doca.save()
test_doc = self.col.DocB.get_from_id(docb['_id'])
assert test_doc['b']['doc_a']['a']['foo'] == 3, test_doc['b']['doc_a']['a']['foo']
assert test_doc['b']['deep']['doc_a_deep']['a']['foo'] == 3, test_doc['b']['deep']['doc_a_deep']['a']['foo']
def test_autorefs_embed_in_list_with_bad_reference(self):
class User(Document):
logger.info('Setting osd noout flag')
ct_pod.exec_ceph_cmd('ceph osd set noout')
logger.info(f"Put object into {pool_name}")
pool_object = 'test_object'
ct_pod.exec_ceph_cmd(f"rados -p {pool_name} put {pool_object} /etc/passwd")
logger.info(f"Looking for Placement Group with {pool_object} object")
pg = ct_pod.exec_ceph_cmd(f"ceph osd map {pool_name} {pool_object}")['pgid']
logger.info(f"Found Placement Group: {pg}")
dummy_deployment, dummy_pod = helpers.create_dummy_osd(osd_deployment)
def corrupt_pg():
"""
Corrupt PG on one OSD in Ceph pool for 12 minutes and measure it.
There should be only a CephPGRepairTakingTooLong Pending alert, as
it takes 2 hours for it to become Firing.
This alert configuration can be observed in ceph-mixins, which
is used in the project:
https://github.com/ceph/ceph-mixins/blob/d22afe8c0da34490cb77e52a202eefcf4f62a869/config.libsonnet#L23
There should also be a CephClusterErrorState alert that takes 10
minutes to start firing.
Returns:
str: Name of corrupted deployment
"""
# run_time of operation
run_time = 60 * 12
nonlocal oc
nonlocal pool_name
nonlocal pool_object
nonlocal dummy_pod
nonlocal pg
nonlocal osd_deployment
nonlocal dummy_deployment
logger.info(f"Corrupting {pg} PG on {osd_deployment}")
dummy_pod.exec_bash_cmd_on_pod(
f"ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-"
f"{osd_deployment.split('-')[-1]} --pgid {pg} {pool_object} "
f"set-bytes /etc/shadow --no-mon-config"
)
logger.info('Unsetting osd noout flag')
ct_pod.exec_ceph_cmd('ceph osd unset noout')
ct_pod.exec_ceph_cmd(f"ceph pg deep-scrub {pg}")
oc.exec_oc_cmd(f"scale --replicas=0 deployment/{dummy_deployment}")
oc.exec_oc_cmd(f"scale --replicas=1 deployment/{osd_deployment}")
logger.info(f"Waiting for {run_time} seconds")
time.sleep(run_time)
return osd_deployment
test_file = os.path.join(measurement_dir, 'measure_corrupt_pg.json')
measured_op = measure_operation(corrupt_pg, test_file)
logger.info(f"Deleting pool {pool_name}")
ct_pod.exec_ceph_cmd(
f"ceph osd pool delete {pool_name} {pool_name} "
f"--yes-i-really-really-mean-it"
)
logger.info(f"Checking that pool {pool_name} is deleted")
logger.info(f"Deleting deployment {dummy_deployment}")
oc.delete(resource_name=dummy_deployment)
return measured_op
#
# IO Workloads
#
@pytest.fixture
def fio_pvc_dict():
"""
PVC template for fio workloads.
Note that all 'None' values need to be defined before usage.
"""
template = textwrap.dedent("""
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: fio-target
spec:
storageClassName: None
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: None
""")
pvc_dict = yaml.safe_load(template)
return pvc_dict
@pytest.fixture
def fio_configmap_dict():
"""
ConfigMap template for fio workloads.
Note that you need to add the actual configuration to the workload.fio file.
"""
template = textwrap.dedent("""
kind: ConfigMap
apiVersion: v1
metadata:
name: fio-config
data:
workload.fio: |
# here comes workload configuration
""")
cm_dict = yaml.safe_load(template)
return cm_dict
@pytest.fixture
def fio_job_dict():
"""
Job template for fio workloads.
"""
template = textwrap.dedent("""
apiVersion: batch/v1
kind: Job
metadata:
name: fio
spec:
template:
metadata:
name: fio
spec:
containers:
- name: fio
image: quay.io/johnstrunk/fs-performance:latest
command:
- "/usr/bin/fio"
- "--output-format=json"
- "/etc/fio/workload.fio"
volumeMounts:
- name: fio-target
mountPath: /mnt/target
- name: fio-config-volume
mountPath: /etc/fio
restartPolicy: Never
volumes:
- name: fio-target
persistentVolumeClaim:
claimName: fio-target
- name: fio-config-volume
configMap:
name: fio-config
""")
job_dict = yaml.safe_load(template)
return job_dict
def get_storageutilization_size(target_percentage, ceph_pool_name):
"""
For the purpose of the workload storage utilization fixtures, get expected
pvc_size based on STORED and MAX AVAIL values (as reported by `ceph df`)
for a given ceph pool and target utilization percentage.
This is only approximate, and it won't work, e.g., if each pool has a
different replication configuration.
Returns:
int: pvc_size for storage utilization job (in GiB, rounded)
"""
# get STORED and MAX AVAIL of given ceph pool ...
ct_pod = pod.get_ceph_tools_pod()
ceph_df_dict = ct_pod.exec_ceph_cmd(ceph_cmd="ceph df")
ceph_pool = None
ceph_total_stored = 0
for pool in ceph_df_dict["pools"]:
ceph_total_stored += pool["stats"]["stored"]
if pool["name"] == ceph_pool_name:
ceph_pool = pool
if ceph_pool is None:
logger.error((
f"pool {ceph_pool_name} was not found "
f"in output of `ceph df`: {ceph_df_dict}"))
# If the following assert fail, the problem is either:
# - the name of the pool has changed (when this happens before GA, it's
#   likely an ocs-ci bug; after the release it's a product bug),
# - pool is missing (likely a product bug)
# either way, the fixture can't continue ...
assert ceph_pool is not None, f"pool {ceph_pool_name} should exist"
# ... to compute PVC size (values in bytes)
total = ceph_pool["stats"]["max_avail"] + ceph_total_stored
max_avail_gi = ceph_pool['stats']['max_avail'] / 2**30
logger.info(f"MAX AVAIL of {ceph_pool_name} is {max_avail_gi} Gi")
target = total * target_percentage
to_utilize = target - ceph_total_stored
pvc_size = round(to_utilize / 2**30) # GiB
logger.info((
f"fixture is going to request {pvc_size} Gi volume "
f"to reach {target/2**30} Gi of total cluster utilization, which "
f"is {target_percentage*100}% of the total capacity"))
return pvc_size
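# Hedged worked example (numbers are made up, not from the original code):
# suppose `ceph df` reports max_avail of 60 GiB (in bytes) for the target
# pool, 20 GiB stored across all pools, and target_percentage is 0.75.
# The arithmetic above then gives:
#   total      = 60 GiB + 20 GiB = 80 GiB
#   target     = 80 GiB * 0.75   = 60 GiB
#   to_utilize = 60 GiB - 20 GiB = 40 GiB
#   pvc_size   = round(to_utilize / 2**30) = 40
# so the calling fixture would request a 40 Gi PVC.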
def fio_to_dict(fio_output):
""""
Parse fio output and provide parsed dict it as a result.
"""
fio_output_lines = fio_output.splitlines()
for line_num, line in enumerate(fio_output_lines):
if line == "{":
break
else:
logger.info(line)
fio_parseable_output = "\n".join(fio_output_lines[line_num:])
fio_report = yaml.safe_load(fio_parseable_output)
return fio_report
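# Illustrative sketch (the sample output below is hypothetical, not taken
# from a real fio run): fio prefixes its JSON report with free-form log
# lines, which fio_to_dict() logs and skips before parsing, e.g.
#
#   raw = 'fio-3.7\nstarting 1 process\n{\n  "fio version": "fio-3.7"\n}'
#   fio_to_dict(raw)   # -> {'fio version': 'fio-3.7'}
#
# everything from the first line equal to "{" onwards is treated as the
# JSON body and parsed with yaml.safe_load (JSON being a YAML subset).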
def workload_fio_storageutilization(
fixture_name,
target_percentage,
project,
fio_pvc_dict,
fio_job_dict,
fio_configmap_dict,
measurement_dir,
tmp_path,
):
"""
This function implements core functionality of fio storage utilization
workload fixture. This is necessary because we can't parametrize a single
general fixture over multiple parameters (it would mess with test case id
and polarion test case tracking).
"""
if fixture_name.endswith("rbd"):
storage_class_name = "ocs-storagecluster-ceph-rbd"
ceph_pool_name = "ocs-storagecluster-cephblockpool"
elif fixture_name.endswith("cephfs"):
storage_class_name = "ocs-storagecluster-cephfs"
ceph_pool_name = "ocs-storagecluster-cephfilesystem-data0"
else:
raise UnexpectedVolumeType(
"unexpected volume type, ocs-ci code is wrong")
# make sure we communicate what is going to happen
logger.info((
f"starting {fixture_name} fixture, "
f"using {storage_class_name} storage class "
f"backed by {ceph_pool_name} ceph pool"))
pvc_size = get_storageutilization_size(target_percentage, ceph_pool_name)
# For cephfs we can't use fill_fs because of BZ 1763808 (the process
# will get *Disk quota exceeded* error instead of *No space left on
# device* error).
# On the other hand, we can't use size={pvc_size} for rbd, as we can't
# write pvc_size bytes to a filesystem on a block device of {pvc_size}
# size (obviously, some space is used by filesystem metadata).
if fixture_name.endswith("rbd"):
fio_conf = textwrap.dedent("""
[simple-write]
readwrite=write
buffered=1
blocksize=4k
ioengine=libaio
directory=/mnt/target
fill_fs=1
""")
else:
fio_conf = textwrap.dedent(f"""
[simple-write]
readwrite=write
buffered=1
blocksize=4k
ioengine=libaio
directory=/mnt/target
size={pvc_size}G
""")
# put the dicts together into yaml file of the Job
fio_configmap_dict["data"]["workload.fio"] = fio_conf
fio_pvc_dict["spec"]["storageClassName"] = storage_class_name
fio_pvc_dict["spec"]["resources"]["requests"]["storage"] = f"{pvc_size}Gi"
fio_objs = [fio_pvc_dict, fio_configmap_dict, fio_job_dict]
fio_job_file = ObjectConfFile(fixture_name, fio_objs, project, tmp_path)
# how long we let the job run while writing data to the volume
# TODO: increase this value or make it configurable
write_timeout = pvc_size * 30 # seconds
logger.info((
f"fixture will wait {write_timeout} seconds for the Job "
f"to write {pvc_size} Gi data on OCS backed volume"))
def write_data():
"""
Write data via fio Job (specified in ``tf`` tmp file) to reach desired
utilization level, and keep this level for ``minimal_time`` seconds.
"""
# deploy the fio Job to the cluster
fio_job_file.create()
# This is a WORKAROUND of particular ocsci design choices: I just wait
# for one pod in the namespace, and then ask for the pod again to get
# its name (but it would be much better to just wait for the job to
# finish instead, then ask for a name of the successful pod and use it
# to get logs ...)
ocp_pod = ocp.OCP(kind="Pod", namespace=project.namespace)
ocp_pod.wait_for_resource(
resource_count=1,
condition=constants.STATUS_COMPLETED,
timeout=write_timeout,
sleep=30)
pod_data = ocp_pod.get()
# explicit list of assumptions, if these assumptions are not met, the
# code won't work and it either means that something went terribly
# wrong or that the code needs to be changed
assert pod_data['kind'] == "List"
pod_dict = pod_data['items'][0]
assert pod_dict['kind'] == "Pod"
pod_name = pod_dict['metadata']['name']
logger.info(f"Identified pod name of the finished fio Job: {pod_name}")
fio_output = ocp_pod.exec_oc_cmd(
f"logs {pod_name}", out_yaml_format=False)
# parse fio output
fio_report = fio_to_dict(fio_output)
logger.info(fio_report)
# data which will be available to the test via:
# fixture_name['result']
result = {
'fio': fio_report,
'pvc_size': pvc_size,
'target_p': target_percentage,
'namespace': project.namespace}
return result
test_file = os.path.join(measurement_dir, f"{fixture_name}.json")
measured_op = measure_operation(
write_data, test_file, measure_after=True, minimal_time=300)
# we don't need to delete anything if this fixture has already been
# executed
if measured_op['first_run']:
# make sure we communicate what is going to happen
logger.info(f"going to delete {fixture_name} Job")
fio_job_file.delete()
return measured_op
# Percentages used in fixtures below are based on needs of:
# - alerting tests, which need to cover alerts for breaching 75% and 85%
# utilization (see KNIP-635 and document attached there).
# - metrics tests (KNIP-634) which would like to check lower utilizations as
# well
@pytest.fixture
def workload_storageutilization_50p_rbd(
project,
fio_pvc_dict,
fio_job_dict,
fio_configmap_dict,
measurement_dir,
tmp_path):
target_percentage = 0.5
fixture_name = "workload_storageutilization_50p_rbd"
measured_op = workload_fio_storageutilization(
fixture_name,
target_percentage,
project,
fio_pvc_dict,
fio_job_dict,
fio_configmap_dict,
measurement_dir,
tmp_path)
return measured_op
@pytest.fixture
def workload_storageutilization_75p_rbd(
project,
fio_pvc_dict,
fio_job_dict,
fio_configmap_dict,
measurement_dir,
tmp_path):
target_percentage = 0.75
fixture_name = "workload_storageutilization_75p_rbd"
measured_op = workload_fio_storageutilization(
fixture_name,
target_percentage,
project,
fio_pvc_dict,
fio_job_dict,
fio_configmap_dict,
measurement_dir,
tmp_path)
return measured_op
@pytest.fixture
def
# Repository: toebes/onshape-clients
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
# python 2 and python 3 compatibility library
import six
from onshape_client.oas.api_client import ApiClient
from onshape_client.oas.exceptions import ApiTypeError, ApiValueError
from onshape_client.oas.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
int,
none_type,
str,
validate_and_convert_types,
)
from onshape_client.oas.models import bt_workflowable_test_object_info
from onshape_client.oas.models import bt_app_element_basic_info
from onshape_client.oas.models import bt_app_associative_data_info_array
from onshape_client.oas.models import bt_list_response_bt_insertable_info
from onshape_client.oas.models import bt_metadata_schema_info
from onshape_client.oas.models import bt_list_response_bt_metadata_property_summary_info
from onshape_client.oas.models import bt_metadata_property_info
from onshape_client.oas.models import bt_bounding_box_info
from onshape_client.oas.models import bt_update_workflowable_test_object_params
class DefaultApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __create_workflowable_test_object(self, wfid, **kwargs):
"""create_workflowable_test_object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_workflowable_test_object(wfid, async_req=True)
>>> result = thread.get()
Args:
wfid (str):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
bt_workflowable_test_object_info.BTWorkflowableTestObjectInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index", 0)
kwargs["wfid"] = wfid
return self.call_with_http_info(**kwargs)
self.create_workflowable_test_object = Endpoint(
settings={
"response_type": (
bt_workflowable_test_object_info.BTWorkflowableTestObjectInfo,
),
"auth": ["OAuth2"],
"endpoint_path": "/api/workflowabletestobject/testobject/{wfid}",
"operation_id": "create_workflowable_test_object",
"http_method": "POST",
"servers": [],
},
params_map={
"all": ["wfid",],
"required": ["wfid",],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {"wfid": (str,),},
"attribute_map": {"wfid": "wfid",},
"location_map": {"wfid": "path",},
"collection_format_map": {},
},
headers_map={
"accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
"content_type": [],
},
api_client=api_client,
callable=__create_workflowable_test_object,
)
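# Hedged usage sketch (the workflow id below is a placeholder): once the
# client is constructed, the endpoint defined above is called like a plain
# method, mirroring the docstring example:
#
#   api = DefaultApi()                                  # builds a default ApiClient
#   info = api.create_workflowable_test_object("wfid")  # synchronous call
#
# returning a BTWorkflowableTestObjectInfo, or a thread (whose .get() yields
# the same value) when async_req=True is passed.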
def __delete_associative_data(self, did, eid, wvm, wvmid, **kwargs):
"""delete_associative_data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_associative_data(did, eid, wvm, wvmid, async_req=True)
>>> result = thread.get()
Args:
did (str):
eid (str):
wvm (str):
wvmid (str):
Keyword Args:
transaction_id (str): [optional] if omitted the server will use the default value of ''
parent_change_id (str): [optional] if omitted the server will use the default value of ''
associative_data_id ([str]): [optional]
element_id (str): [optional] if omitted the server will use the default value of ''
view_id (str): [optional] if omitted the server will use the default value of ''
microversion_id (str): [optional] if omitted the server will use the default value of ''
document_microversion (str): [optional] if omitted the server will use the default value of ''
deterministic_id (str): [optional] if omitted the server will use the default value of ''
feature_id (str): [optional] if omitted the server will use the default value of ''
entity_id (str): [optional] if omitted the server will use the default value of ''
occurrence_id (str): [optional] if omitted the server will use the default value of ''
_return_http_data_only (bool): return the response data only, without
the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
bt_app_element_basic_info.BTAppElementBasicInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index", 0)
kwargs["did"] = did
kwargs["eid"] = eid
kwargs["wvm"] = wvm
kwargs["wvmid"] = wvmid
return self.call_with_http_info(**kwargs)
self.delete_associative_data = Endpoint(
settings={
"response_type": (bt_app_element_basic_info.BTAppElementBasicInfo,),
"auth": ["OAuth2"],
"endpoint_path": "/api/appelements/d/{did}/{wvm}/{wvmid}/e/{eid}/associativedata",
"operation_id": "delete_associative_data",
"http_method": "DELETE",
"servers": [],
},
params_map={
"all": [
"did",
"eid",
"wvm",
"wvmid",
"transaction_id",
"parent_change_id",
"associative_data_id",
"element_id",
"view_id",
"microversion_id",
"document_microversion",
"deterministic_id",
"feature_id",
"entity_id",
"occurrence_id",
],
"required": ["did", "eid", "wvm", "wvmid",],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"did": (str,),
"eid": (str,),
"wvm": (str,),
"wvmid": (str,),
"transaction_id": (str,),
"parent_change_id": (str,),
"associative_data_id": ([str],),
"element_id": (str,),
"view_id": (str,),
"microversion_id": (str,),
"document_microversion": (str,),
"deterministic_id": (str,),
"feature_id": (str,),
"entity_id": (str,),
"occurrence_id": (str,),
},
"attribute_map": {
"did": "did",
"eid": "eid",
"wvm": "wvm",
"wvmid": "wvmid",
"transaction_id": "transactionId",
"parent_change_id": "parentChangeId",
"associative_data_id": "associativeDataId",
"element_id": "elementId",
"view_id": "viewId",
"microversion_id": "microversionId",
"document_microversion": "documentMicroversion",
"deterministic_id": "deterministicId",
"feature_id": "featureId",
"entity_id": "entityId",
"occurrence_id": "occurrenceId",
},
"location_map": {
"did": "path",
"eid": "path",
"wvm": "path",
"wvmid": "path",
"transaction_id": "query",
"parent_change_id": "query",
"associative_data_id": "query",
"element_id": "query",
"view_id": "query",
"microversion_id": "query",
"document_microversion": "query",
"deterministic_id": "query",
"feature_id": "query",
"entity_id": "query",
"occurrence_id": "query",
},
"collection_format_map": {"associative_data_id": "multi",},
},
headers_map={
"accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
"content_type": [],
},
api_client=api_client,
callable=__delete_associative_data,
)
def __get_associative_data(self, did, wvm, wvmid, eid, **kwargs):
"""get_associative_data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_associative_data(did, wvm, wvmid, eid, async_req=True)
>>> result = thread.get()
Args:
did (str):
wvm (str):
wvmid (str):
eid (str):
Keyword Args:
transaction_id (str): [optional] if omitted the server will use the default value of ''
change_id (str): [optional] if omitted the server will use the default value of ''
associative_data_id ([str]): [optional]
element_id (str): [optional] if omitted the server will use the default value of ''
view_id (str): [optional] if omitted the server will use the default value of ''
microversion_id (str): [optional] if omitted the server will use the default value of ''
document_microversion (str): [optional] if omitted the server will use the default value of ''
deterministic_id (str): [optional] if omitted the server will use the default value of ''
feature_id (str): [optional] if omitted the server will use the default value of ''
entity_id (str): [optional] if omitted the server will use the default value of ''
occurrence_id (str): [optional] if omitted the server will use the default value of ''
return_id_tags (bool): [optional] if omitted the server will use the default value of False
_return_http_data_only (bool): return the response data only, without
the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
bt_app_associative_data_info_array.BTAppAssociativeDataInfoArray
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# <EMAIL>. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
import imp
import operator
import os
import sys
import unittest
from iptest import IronPythonTestCase, is_cli, is_posix, path_modifier
def get_builtins_dict():
if type(__builtins__) is type(sys):
return __builtins__.__dict__
return __builtins__
class meta_loader(object):
def __init__(self, value):
self.value = value
def load_module(self, fullname):
if type(self.value) is Exception: raise self.value
return self.value
class meta_importer(object):
def __init__(self, s):
self.s = s
def find_module(self, fullname, path=None):
self.s.assertIsNone(path)
if fullname == 'does_not_exist_throw': raise Exception('hello')
elif fullname == 'does_not_exist_abc': return meta_loader('abc')
elif fullname == 'does_not_exist_loader_throw': return meta_loader(Exception('loader'))
elif fullname == 'does_not_exist_None': return meta_loader(None)
elif fullname == 'does_not_exist_X':
class X(object):
abc = 3
return meta_loader(X)
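# Hedged sketch (not part of the original test module): meta_importer and
# meta_loader implement the classic finder/loader protocol, so a test can
# hook imports by putting an instance on sys.meta_path, e.g.
#
#   importer = meta_importer(self)
#   sys.meta_path.append(importer)
#   try:
#       import does_not_exist_abc       # find_module() -> meta_loader('abc')
#   finally:
#       sys.meta_path.remove(importer)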
class ImpTest(IronPythonTestCase):
def setUp(self):
super(ImpTest, self).setUp()
self._testdir = "ImpTest"
self._imptestdir = os.path.join(self.test_dir, self._testdir)
self._f_init = os.path.join(self._imptestdir, "__init__.py")
self._f_module = os.path.join(self._imptestdir, "imptestmod.py")
self.temp_name = ["os",
"os.P_WAIT",
"os.chmod",
"sys.path",
"xxxx"
]
def test_imp_new_module(self):
x = imp.new_module('abc')
sys.modules['abc'] = x
x.foo = 'bar'
import abc
self.assertEqual(abc.foo, 'bar')
y = imp.new_module('\r\n')
sys.modules['xyz'] = y
y.foo = 'foo'
import xyz
self.assertEqual(xyz.foo, 'foo')
def test_imp_in_exec(self):
_imfp = 'impmodfrmpkg'
_f_imfp_init = os.path.join(self.test_dir, _imfp, "__init__.py")
_f_imfp_mod = os.path.join(self.test_dir, _imfp, "mod.py")
_f_imfp_start = os.path.join(self.test_dir, "imfpstart.tpy")
self.write_to_file(_f_imfp_init, "")
self.write_to_file(_f_imfp_mod, "")
self.write_to_file(_f_imfp_start, """
try:
from impmodfrmpkg.mod import mod
except ImportError, e:
pass
else:
raise AssertionError("Import of mod from pkg.mod unexpectedly succeeded")
""")
# import a package
import impmodfrmpkg
# create a dictionary like that package
glb = {'__name__' : impmodfrmpkg.__name__, '__path__' : impmodfrmpkg.__path__}
loc = {}
exec 'import mod' in glb, loc
self.assertTrue('mod' in loc)
glb = {'__name__' : impmodfrmpkg.__name__, '__path__' : impmodfrmpkg.__path__}
loc = {}
exec 'from mod import *' in glb, loc
#self.assertTrue('value' in loc) # TODO: Fix me
if is_cli:
loc = {}
exec 'from System import *' in globals(), loc
self.assertTrue('Int32' in loc)
self.assertTrue('Int32' not in globals())
exec 'from System import *'
self.assertTrue('Int32' in dir())
self.delete_files(_f_imfp_start)
self.clean_directory(os.path.join(self.test_dir, _imfp), remove=True)
def test_imp_basic(self):
magic = imp.get_magic()
suffixes = imp.get_suffixes()
self.assertTrue(isinstance(suffixes, list))
for suffix in suffixes:
self.assertTrue(isinstance(suffix, tuple))
self.assertEqual(len(suffix), 3)
self.assertTrue((".py", "U", 1) in suffixes)
def test_imp_package(self):
self.write_to_file(self._f_init, "my_name = 'imp package test'")
pf, pp, (px, pm, pt) = imp.find_module(self._testdir, [self.test_dir])
self.assertEqual(pt, imp.PKG_DIRECTORY)
self.assertEqual(pf, None)
self.assertEqual(px, "")
self.assertEqual(pm, "")
module = imp.load_module(self._testdir, pf, pp, (px, pm, pt))
self.assertTrue(self._testdir in sys.modules)
self.assertEqual(module.my_name, 'imp package test')
with path_modifier(self.test_dir):
fm = imp.find_module(self._testdir)
# unpack the result obtained above
pf, pp, (px, pm, pt) = fm
self.assertEqual(pt, imp.PKG_DIRECTORY)
self.assertEqual(pf, None)
self.assertEqual(px, "")
self.assertEqual(pm, "")
module = imp.load_module(self._testdir, pf, pp, (px, pm, pt))
self.assertEqual(module.my_name, 'imp package test')
def test_imp_module(self):
self.write_to_file(self._f_module, "value = 'imp test module'")
pf, pp, (px, pm, pt) = imp.find_module("imptestmod", [self._imptestdir])
self.assertEqual(pt, imp.PY_SOURCE)
self.assertTrue(pf != None)
self.assertTrue(isinstance(pf, file))
module = imp.load_module("imptestmod", pf, pp, (px, pm, pt))
self.assertEqual(module.value, 'imp test module')
pf.close()
with path_modifier(self._imptestdir) as p:
fm = imp.find_module("imptestmod")
# unpack the result obtained above
pf, pp, (px, pm, pt) = fm
self.assertEqual(pt, imp.PY_SOURCE)
self.assertTrue(pf != None)
self.assertTrue(isinstance(pf, file))
self.assertEqual(px, ".py")
self.assertEqual(pm, "U")
module = imp.load_module("imptestmod", pf, pp, (px, pm, pt))
self.assertEqual(module.value, 'imp test module')
pf.close()
def test_direct_module_creation(self):
import math
for baseMod in math, sys:
module = type(baseMod)
x = module.__new__(module)
self.assertEqual(repr(x), "<module '?' (built-in)>")
#self.assertEqual(x.__dict__, None)
x.__init__('abc', 'def')
self.assertEqual(repr(x), "<module 'abc' (built-in)>")
self.assertEqual(x.__doc__, 'def')
x.__init__('aaa', 'zzz')
self.assertEqual(repr(x), "<module 'aaa' (built-in)>")
self.assertEqual(x.__doc__, 'zzz')
# can't assign to module __dict__
try:
x.__dict__ = {}
except TypeError: pass
else: self.fail('Should not reach this point')
# can't delete __dict__
try:
del(x.__dict__)
except TypeError: pass
else: self.fail('Should not reach this point')
# init doesn't clobber dict, it just re-initializes values
x.__dict__['foo'] = 'xyz'
x.__init__('xyz', 'nnn')
self.assertEqual(x.foo, 'xyz')
# dict is lazily created on set
x = module.__new__(module)
x.foo = 23
self.assertEqual(x.__dict__, {'foo':23})
self.assertEqual(repr(x), "<module '?' (built-in)>")
# can't pass wrong sub-type to new
try:
module.__new__(str)
except TypeError: pass
else: self.fail('Should not reach this point')
# reload() of a module whose name is not in sys.modules raises ImportError
x = module.__new__(module)
x.__name__ = 'module_does_not_exist_in_sys_dot_modules'
self.assertRaises(ImportError, reload, x)
def test_redefine_import(self):
# redefining global __import__ shouldn't change import semantics
global __import__
global called
called = False
def __import__(*args):
global called
called = True
self.assertEqual(called, False)
del __import__
called = False
self.assertEqual(called, False)
def test_module_dict(self):
currentModule = sys.modules[__name__]
self.assertEqual(operator.isMappingType(currentModule.__dict__), True)
self.assertEqual(type({}), type(currentModule.__dict__))
self.assertEqual(isinstance(currentModule.__dict__, dict), True)
def test_lock(self):
i=0
while i<5:
i+=1
if not imp.lock_held():
self.assertRaises(RuntimeError,imp.release_lock)
imp.acquire_lock()
else:
imp.release_lock()
def test_is_frozen(self):
for name in self.temp_name:
f = imp.is_frozen(name)
self.assertFalse(f)
def test_init_frozen(self):
for name in self.temp_name:
f = imp.init_frozen(name)
self.assertIsNone(f)
def test_is_builtin(self):
self.assertEqual(imp.is_builtin("xxx"),0)
self.assertEqual(imp.is_builtin("12324"),0)
self.assertEqual(imp.is_builtin("&*^^"),0)
self.assertEqual(imp.is_builtin("dir"),0)
self.assertEqual(imp.is_builtin("__doc__"),0)
self.assertEqual(imp.is_builtin("__name__"),0)
self.assertEqual(imp.is_builtin("_locle"),0)
self.assertEqual(imp.is_builtin("cPickle"),1)
self.assertEqual(imp.is_builtin("_random"),1)
# nt module disabled in Silverlight
if is_posix:
self.assertEqual(imp.is_builtin("posix"),1)
else:
self.assertEqual(imp.is_builtin("nt"),1)
self.assertEqual(imp.is_builtin("thread"),1)
# there are several differences between ironpython and cpython
if is_cli:
self.assertEqual(imp.is_builtin("copy_reg"),1)
else:
self.assertEqual(imp.is_builtin("copy_reg"),0)
# supposedly you can't re-init these
self.assertEqual(imp.is_builtin("sys"), -1)
self.assertEqual(imp.is_builtin("__builtin__"), -1)
self.assertEqual(imp.is_builtin("exceptions"), -1)
imp.init_builtin("sys")
imp.init_builtin("__builtin__")
imp.init_builtin("exceptions")
@unittest.skipUnless(is_cli, 'IronPython specific test')
def test_sys_path_none_builtins(self):
prevPath = sys.path
#import some builtin modules not previously imported
try:
sys.path = prevPath + [None]
if not imp.is_builtin('copy_reg'):
self.assertTrue('copy_reg' not in sys.modules.keys())
import datetime
import copy_reg
self.assertTrue('datetime' in sys.modules.keys())
self.assertTrue('copy_reg' in sys.modules.keys())
sys.path = [None]
if not imp.is_builtin('binascii'):
self.assertTrue('binascii' not in sys.modules.keys())
import datetime
import copy_reg
import binascii
self.assertTrue('datetime' in sys.modules.keys())
self.assertTrue('copy_reg' in sys.modules.keys())
self.assertTrue('binascii' in sys.modules.keys())
finally:
sys.path = prevPath
def test_sys_path_none_userpy(self):
prevPath = sys.path
#import a *.py file
temp_syspath_none = os.path.join(self.test_dir, "temp_syspath_none.py")
self.write_to_file(temp_syspath_none, "stuff = 3.14")
try:
sys.path = [None] + prevPath
import temp_syspath_none
self.assertEqual(temp_syspath_none.stuff, 3.14)
finally:
sys.path = prevPath
self.delete_files(os.path.join(self.test_dir, "temp_syspath_none.py"))
def test_sys_path_none_negative(self):
prevPath = sys.path
test_paths = [ [None] + prevPath,
prevPath + [None],
[None],
]
try:
for temp_path in test_paths:
sys.path = temp_path
try:
import does_not_exist
self.fail('Should not reach this point')
except ImportError:
pass
finally:
sys.path = prevPath
def test_init_builtin(self):
r = imp.init_builtin("c_Pickle")
self.assertEqual(r,None)
r = imp.init_builtin("2345")
self.assertEqual(r,None)
r = imp.init_builtin("xxxx")
self.assertEqual(r,None)
r = imp.init_builtin("^$%$#@")
self.assertEqual(r,None)
r = imp.init_builtin("_locale")
self.assertTrue(r!=None)
def test_flags(self):
self.assertEqual(imp.SEARCH_ERROR,0)
self.assertEqual(imp.PY_SOURCE,1)
self.assertEqual(imp.PY_COMPILED,2)
self.assertEqual(imp.C_EXTENSION,3)
self.assertEqual(imp.PY_RESOURCE,4)
self.assertEqual(imp.PKG_DIRECTORY,5)
self.assertEqual(imp.C_BUILTIN,6)
self.assertEqual(imp.PY_FROZEN,7)
self.assertEqual(imp.PY_CODERESOURCE,8)
def test_user_defined_modules(self):
"""test the importer using user-defined module types"""
class MockModule(object):
def __init__(self, name): self.__name__ = name
def __repr__(self): return 'MockModule("' + self.__name__ + '")'
TopModule = MockModule("TopModule")
sys.modules["TopModule"] = TopModule
SubModule = MockModule("SubModule")
theObj = object()
SubModule.Object = theObj
TopModule.SubModule = SubModule
sys.modules["TopModule.SubModule"] = SubModule
# clear the existing names from our namespace...
x, y = TopModule, SubModule
del TopModule, SubModule
# verify we can import TopModule w/ TopModule.SubModule name
import TopModule.SubModule
self.assertEqual(TopModule, x)
self.assertTrue('SubModule' not in dir())
# verify we can import Object from TopModule.SubModule
from TopModule.SubModule import Object
self.assertEqual(Object, theObj)
# verify we short-circuit the lookup in TopModule if
# we have a sys.modules entry...
SubModule2 = MockModule("SubModule2")
SubModule2.Object2 = theObj
sys.modules["TopModule.SubModule"] = SubModule2
from TopModule.SubModule import Object2
self.assertEqual(Object2, theObj)
del sys.modules['TopModule']
del sys.modules['TopModule.SubModule']
def test_constructed_module(self):
"""verify that we don't load arbitrary modules from modules, only truly nested modules"""
ModuleType = type(sys)
TopModule = ModuleType("TopModule")
sys.modules["TopModule"] = TopModule
SubModule = ModuleType("SubModule")
SubModule.Object = object()
TopModule.SubModule = SubModule
try:
import TopModule.SubModule
self.fail('Should not reach this point')
except ImportError:
pass
del sys.modules['TopModule']
#TODO: @skip("multiple_execute")
def test_import_from_custom(self):
import __builtin__
try:
class foo(object):
b = 'abc'
def __import__(name, globals, locals, fromlist):
global received
received = name, fromlist
return foo()
saved = __builtin__.__import__
__builtin__.__import__ = __import__
from a import b
self.assertEqual(received, ('a', ('b', )))
finally:
__builtin__.__import__ = saved
def test_module_name(self):
import imp
m = imp.new_module('foo')
self.assertEqual(m.__str__(), "<module 'foo' (built-in)>")
m.__name__ = 'bar'
self.assertEqual(m.__str__(), "<module 'bar' (built-in)>")
m.__name__ = None
self.assertEqual(m.__str__(), "<module '?' (built-in)>")
m.__name__ = []
self.assertEqual(m.__str__(), "<module '?' (built-in)>")
m.__file__ =
"""
Base Request Handlers
"""
import asyncio
import datetime
import logging
import typing
import uuid
from email import utils
import jsonpatch
import problemdetails
import sprockets_postgres as postgres
from openapi_core.deserializing.exceptions import DeserializeError
from openapi_core.schema.media_types.exceptions import InvalidContentType
from openapi_core.templating.paths.exceptions import \
OperationNotFound, PathNotFound
from openapi_core.unmarshalling.schemas.exceptions import ValidateError
from openapi_core.validation.exceptions import InvalidSecurity
from sprockets.http import mixins
from sprockets.mixins import mediatype
from tornado import httputil, web
from imbi import session, user, version
LOGGER = logging.getLogger(__name__)
def require_permission(permission):
"""Decorator function for requiring a permission string for an endpoint
:param str permission: The permission string to require
:raises: problemdetails.Problem
"""
def _require_permission(f):
def wrapped(self, *args, **kwargs):
"""Inner-wrapping of the decorator that performs the logic"""
if not self._current_user or \
not self._current_user.has_permission(permission):
if self._respond_with_html:
return self.render(
'index.html',
javascript_url=self.application.settings.get(
'javascript_url'))
LOGGER.info('%r does not have the "%s" permission',
self._current_user, permission)
raise problemdetails.Problem(
status_code=403, title='Unauthorized')
return f(self, *args, **kwargs)
return wrapped
return _require_permission
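# Hedged usage sketch (handler and permission names are made up, not part of
# imbi): the decorator wraps an endpoint method and raises a 403 problem
# document unless the authenticated user has the named permission:
#
#   class WidgetCollectionHandler(AuthenticatedRequestHandler):
#
#       @require_permission('admin')
#       async def post(self, *args, **kwargs):
#           ...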
class RequestHandler(postgres.RequestHandlerMixin,
mixins.ErrorLogger,
problemdetails.ErrorWriter,
mediatype.ContentMixin,
web.RequestHandler):
"""Base RequestHandler class used for recipients and subscribers."""
APPLICATION_JSON = 'application/json'
TEXT_HTML = 'text/html'
NAME = 'Base'
ITEM_NAME = ''
def __init__(self,
application,
request: httputil.HTTPServerRequest,
**kwargs):
super().__init__(application, request, **kwargs)
self.logger = logging.getLogger(f'imbi.endpoints.{self.NAME}')
self.session: typing.Optional[session.Session] = None
self._current_user: typing.Optional[user.User] = None
self._links = {}
async def prepare(self) -> None:
"""Prepare the request handler for the request. If the application
is not ready return a ``503`` error.
Checks for a session cookie and if present, loads the session into
the current user and authenticates it. If authentication fails,
the current user and cookie is cleared.
"""
if self.application.settings.get('javascript_url'):
self.set_header('Access-Control-Allow-Origin', '*')
if not self.application.ready_to_serve:
return self.send_error(503, reason='Application not ready')
self.session = session.Session(self)
await self.session.initialize()
self._current_user = await self.get_current_user()
future = super().prepare()
if asyncio.isfuture(future):
await future
def on_finish(self) -> None:
"""Invoked after a request has completed"""
super().on_finish()
metric_id = '{}.{}'.format(self.NAME, self.request.method)
self.application.loop.add_callback(
self.application.stats.incr,
'response.{}.{}'.format(metric_id, self.get_status()))
self.application.loop.add_callback(
self.application.stats.add_duration,
'request.{}.{}'.format(metric_id, self.get_status()),
self.request.request_time())
def compute_etag(self) -> None:
"""Override Tornado's built-in ETag generation"""
return None
async def get_current_user(self) -> typing.Optional[user.User]:
"""Used by the system to manage authentication behaviors"""
if self.session and self.session.user:
return self.session.user
token = self.request.headers.get('Private-Token', None)
if token:
current_user = user.User(self.application, token=token)
if await current_user.authenticate():
return current_user
def get_template_namespace(self) -> dict:
"""Returns a dictionary to be used as the default template namespace.
The results of this method will be combined with additional defaults
in the :mod:`tornado.template` module and keyword arguments to
:meth:`~tornado.web.RequestHandler.render`
or :meth:`~tornado.web.RequestHandler.render_string`.
"""
namespace = super(RequestHandler, self).get_template_namespace()
namespace.update({'version': version})
return namespace
def send_response(self, value: typing.Union[dict, list]) -> None:
"""Send the response to the client"""
if 'self' not in self._links:
self._add_self_link(self.request.path)
self._add_link_header()
if hasattr(self, 'TTL') and \
not self.request.headers.get('Pragma') == 'no-cache':
self._add_response_caching_headers(self.TTL)
super().send_response(value)
def set_default_headers(self) -> None:
"""Override the default headers, setting the Server response header"""
super().set_default_headers()
self.set_header('Server', self.settings['server_header'])
def write_error(self, status_code, **kwargs):
if self._respond_with_html:
return self.render(
'error.html',
javascript_url=self.application.settings.get('javascript_url'),
status_code=status_code, **kwargs)
super().write_error(status_code, **kwargs)
def _add_last_modified_header(self, value: datetime.datetime) -> None:
"""Add a RFC-822 formatted timestamp for the Last-Modified HTTP
response header.
"""
if not value:
return
self.set_header('Last-Modified', self._rfc822_date(value))
def _add_link_header(self) -> None:
"""Takes the accumulated links and creates a link header value"""
links = []
for rel, path in self._links.items():
links.append('<{}://{}{}>; rel="{}"'.format(
self.request.protocol, self.request.host, path, rel))
if links:
self.add_header('Link', ','.join(links))
def _add_self_link(self, path: str) -> None:
"""Adds the self Link response header"""
self._links['self'] = path
def _add_response_caching_headers(self, ttl: int) -> None:
"""Adds the cache response headers for the object being returned."""
self.add_header('Cache-Control', 'public, max-age={}'.format(ttl))
def _on_postgres_timing(self,
metric_name: str,
duration: float) -> None:
"""Invoked by sprockets-postgres after each query"""
self.application.loop.add_callback(
self.application.stats.add_duration,
'postgres.{}'.format(metric_name), duration)
@property
def _respond_with_html(self) -> bool:
"""Returns True if the current response should respond with HTML"""
return self.get_response_content_type().startswith(self.TEXT_HTML)
@staticmethod
def _rfc822_date(value: datetime.datetime) -> str:
"""Return an RFC-822 formatted timestamp for the given value"""
return utils.format_datetime(value)
class AuthenticatedRequestHandler(RequestHandler):
"""RequestHandler base class for authenticated requests"""
async def prepare(self) -> None:
await super().prepare()
if not self._current_user:
if self._respond_with_html:
return await self.render(
'index.html',
javascript_url=self.application.settings.get(
'javascript_url'))
self.set_status(401)
await self.finish()
class ValidatingRequestHandler(AuthenticatedRequestHandler):
"""Validates the request against the OpenAPI spec"""
async def prepare(self) -> None:
await super().prepare()
try:
self.application.validate_request(self.request)
except DeserializeError as err:
self.logger.warning('Request failed to deserialize: %s', err)
raise problemdetails.Problem(
status_code=400, title='Bad Request', detail=str(err))
except InvalidSecurity as err:
self.logger.debug('Invalid OpenAPI spec security: %s', err)
raise problemdetails.Problem(
status_code=500, title='OpenAPI security error',
detail=str(err))
except OperationNotFound as err:
raise problemdetails.Problem(
status_code=405, title='Method Not Allowed', detail=str(err))
except InvalidContentType as err:
raise problemdetails.Problem(
status_code=415, title='Unsupported Media Type',
detail=str(err))
except PathNotFound as err:
self.logger.error('OpenAPI Spec Error: %s', err)
raise problemdetails.Problem(
status_code=500, title='OpenAPI Spec Error', detail=str(err))
except ValidateError as err:
self.logger.warning('Request failed to validate: %s', err)
raise problemdetails.Problem(
status_code=400, title='Bad Request',
detail='The request did not validate',
errors=[str(e).split('\n')[0] for e in err.schema_errors])
class CRUDRequestHandler(ValidatingRequestHandler):
"""CRUD request handler to reduce large amounts of duplicated code"""
NAME = 'default'
DEFAULTS = {}
ID_KEY: typing.Union[str, list] = 'id'
IS_COLLECTION = False
FIELDS = None
GET_NAME = None # Used to create link headers for POST requests
TTL = 300
DELETE_SQL: typing.Optional[str] = None
GET_SQL: typing.Optional[str] = None
PATCH_SQL: typing.Optional[str] = None
POST_SQL: typing.Optional[str] = None
async def delete(self, *args, **kwargs):
if self.DELETE_SQL is None:
self.logger.debug('DELETE_SQL not defined')
raise problemdetails.Problem(
status_code=405, title='Not Implemented')
await self._delete(kwargs)
async def get(self, *args, **kwargs):
if self.GET_SQL is None:
self.logger.debug('GET_SQL not defined')
raise problemdetails.Problem(
status_code=405, title='Not Implemented')
if self._respond_with_html:
return self.render(
'index.html',
javascript_url=self.application.settings.get('javascript_url'))
await self._get(kwargs)
async def patch(self, *args, **kwargs):
if self.PATCH_SQL is None:
self.logger.debug('PATCH_SQL not defined')
raise problemdetails.Problem(
status_code=405, title='Not Implemented')
await self._patch(kwargs)
async def post(self, *args, **kwargs):
if self.POST_SQL is None:
self.logger.debug('POST_SQL not defined')
raise problemdetails.Problem(
status_code=405, title='Not Implemented')
await self._post(kwargs)
def send_response(self, value: typing.Union[dict, list]) -> None:
"""Send the response to the client"""
if isinstance(value, list):
return super().send_response(value)
if not (self.request.method == 'GET' and self.IS_COLLECTION):
self._add_last_modified_header(
value.get('last_modified_at', value.get('created_at')))
for key in {'created_at', 'last_modified_at'}:
if key in value:
del value[key]
if self.ID_KEY:
if isinstance(self.ID_KEY, list):
args = [str(value[k]) for k in self.ID_KEY]
else:
args = [str(value[self.ID_KEY])]
try:
self._add_self_link(
self.reverse_url(self.ITEM_NAME or self.NAME, *args))
except (AssertionError, KeyError):
self.logger.debug('Failed to reverse URL for %s %r',
self.NAME, args)
self._add_link_header()
super().send_response(value)
async def _delete(self, kwargs):
result = await self.postgres_execute(
self.DELETE_SQL, self._get_query_kwargs(kwargs),
'delete-{}'.format(self.NAME))
if not result.row_count:
raise problemdetails.Problem(
status_code=404, title='Item not found')
self.set_status(204, reason='Item Deleted')
async def _get(self, kwargs):
result = await self.postgres_execute(
self.GET_SQL, self._get_query_kwargs(kwargs),
'get-{}'.format(self.NAME))
if not result.row_count or not result.row:
raise problemdetails.Problem(
status_code=404, title='Item not found')
for key, value in result.row.items():
if isinstance(value, uuid.UUID):
result.row[key] = str(value)
self.send_response(result.row)
def _get_query_kwargs(self, kwargs) -> dict:
if isinstance(self.ID_KEY, list):
return {k: kwargs[k] for k in self.ID_KEY}
return {self.ID_KEY: kwargs[self.ID_KEY]}
async def _patch(self, kwargs):
patch_value = self.get_request_body()
result = await self.postgres_execute(
self.GET_SQL, self._get_query_kwargs(kwargs),
'get-{}'.format(self.NAME))
if not result.row_count:
raise problemdetails.Problem(
status_code=404, title='Item not found')
original = dict(result.row)
for key in {'created_at', 'created_by',
'last_modified_at', 'last_modified_by'}:
del original[key]
for key, value in original.items():
if isinstance(value, uuid.UUID):
original[key] = str(value)
# Apply the patch to the current value
patch = jsonpatch.JsonPatch(patch_value)
updated = patch.apply(original)
# Bail early if there are no changes
if not {k: original[k] for k in original
if k in updated and original[k] != updated[k]}:
self._add_self_link(self.request.path)
self._add_link_header()
return self.set_status(304)
if isinstance(self.ID_KEY, list):
for key in self.ID_KEY:
updated['current_{}'.format(key)] = kwargs[key]
else:
updated['current_{}'.format(self.ID_KEY)] = kwargs[self.ID_KEY]
updated['username'] = self._current_user.username
result = await self.postgres_execute(
self.PATCH_SQL, updated,
'patch-{}'.format(self.NAME))
if not result.row_count:
raise problemdetails.Problem(
status_code=500, title='Failed to update record')
# Send the new record as a response
await self._get(self._get_query_kwargs(updated))
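# Hedged illustration (record and patch below are hypothetical): the PATCH
# body is an RFC 6902 JSON Patch applied to the stored row, e.g. for a row
# {'id': 1, 'name': 'old'} the request body
#
#   [{"op": "replace", "path": "/name", "value": "new"}]
#
# produces {'id': 1, 'name': 'new'}; when the patch causes no effective
# change, the code above returns 304 without touching the database.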
async def _post(self, kwargs) -> None:
values = self.get_request_body()
# Handle compound keys for child object CRUD
if isinstance(self.ID_KEY, list):
for key in self.ID_KEY:
if key not in values and key in kwargs:
values[key] = kwargs[key]
elif self.ID_KEY not in values and self.ID_KEY in kwargs:
values[self.ID_KEY] = kwargs[self.ID_KEY]
# Set defaults of None for all fields in insert
for name in self.FIELDS:
if name not in values:
values[name] = self.DEFAULTS.get(name)
values['username'] = self._current_user.username
result = await self.postgres_execute(
self.POST_SQL, values, 'post-{}'.format(self.NAME))
if not result.row_count:
self.logger.debug('No rows returned')
raise problemdetails.Problem(
status_code=500, title='Failed to create record')
# Return the record as if it were a GET
await self._get(self._get_query_kwargs(result.row))
class CollectionRequestHandler(CRUDRequestHandler):
DEFAULTS = {}
ID_KEY: typing.Union[str, list] = 'id'
IS_COLLECTION = True
FIELDS = None
GET_NAME = None # Used to create link headers for POST requests
COLLECTION_SQL = """SELECT * FROM pg_tables WHERE schemaname = 'v1';"""
TTL = 300
async def get(self, *args, **kwargs):
# Repository: basilevh/spatialaudiogen
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib import layers
from tensorflow.contrib.layers import l2_regularizer
from tensorflow.contrib.layers import batch_norm as _bn
import numpy as np
def add_bias(x, n_units, biases_initializer, dtype, trainable):
# Initializer
biases_shape = [n_units]
if biases_initializer is None:
biases_initializer = tf.constant_initializer(0.0, dtype=tf.float32)
elif isinstance(biases_initializer, np.ndarray):
if biases_initializer.ndim != 1 or biases_initializer.shape[0] != biases_shape[0]:
raise ValueError('Shape of constant initializer ('+str(biases_initializer.shape)+') does not match expected shape ('+str(biases_shape)+'). ')
biases_shape = None # Shape is inferred from initializer
# Create variable for bias
biases = variables.model_variable('biases',
shape=biases_shape,
dtype=dtype,
initializer=biases_initializer,
trainable=trainable)
# Add bias
return tf.nn.bias_add(x, biases)
def var_initializer(shape, initializer=None):
if initializer is None:
# initializer = tf.truncated_normal_initializer(stddev=0.001)
initializer = tf.contrib.layers.xavier_initializer()
elif isinstance(initializer, np.ndarray):
if any([s is None for s in shape]): raise ValueError('All kernel dimensions must be known.')
if initializer.ndim != len(shape) or not all([s1==s2 for s1, s2 in zip(initializer.shape, shape)]):
raise ValueError('Shape of constant initializer ('+str(initializer.shape)+') does not match expected shape ('+str(shape)+'). ')
shape = None # Shape is inferred from initializer
return shape, initializer
def fully_connected(x, n_units,
use_bias=True,
use_batch_norm=False,
activation_fn=tf.nn.relu,
weight_decay=0.0005,
trainable=True,
reuse=False,
is_training=None,
weights_initializer=None,
biases_initializer=None,
name='fc'):
"""Wrapper for fully connected layer."""
with tf.variable_scope(name, 'fully_connected', [x], reuse=reuse) as sc:
dtype = x.dtype.base_dtype
input_rank = x.get_shape().ndims
if input_rank is None: raise ValueError('Rank of inputs must be known')
if input_rank < 2: raise ValueError('Rank of inputs is %d, which is < 2' % input_rank)
inputs_shape = x.get_shape()
inp_size = utils.last_dimension(inputs_shape, min_rank=2)
static_shape = inputs_shape.as_list()
weights_shape, weights_initializer = var_initializer([inp_size, n_units], weights_initializer)
weights_regularizer = l2_regularizer(weight_decay) if weight_decay > 0 and trainable else None
weights = variables.model_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable)
if len(static_shape) > 2:
# Reshape inputs
x = tf.reshape(x, [-1, inp_size])
x = tf.matmul(x, weights)
if use_batch_norm:
x = _bn(x, decay=0.99, scale=True, is_training=is_training, trainable=trainable, reuse=reuse, scope='bn')
elif use_bias:
x = add_bias(x, n_units, biases_initializer, dtype, trainable)
if activation_fn is not None:
x = activation_fn(x)
if len(static_shape) > 2:
# Reshape back outputs
x = tf.reshape(x, static_shape[:-1]+[-1,])
# x.set_shape(static_shape)
return utils.collect_named_outputs(tf.GraphKeys.ACTIVATIONS, sc.original_name_scope, x)
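# Illustrative usage sketch (not part of the original module): a small MLP head
# built from the wrapper above; `features` (a rank-2 float tensor) and
# `is_training` (a bool or boolean tensor) are hypothetical caller-supplied values.
def _example_fc_head(features, is_training):
    h = fully_connected(features, 256, use_batch_norm=True,
                        is_training=is_training, name='fc1')
    return fully_connected(h, 10, activation_fn=None, name='logits')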
def deconv_2d(x, n_units, kernel_size,
stride=1,
use_bias=True,
padding="SAME",
activation_fn=tf.nn.relu,
weight_decay=0.0005,
trainable=True,
reuse=None,
weights_initializer=None,
biases_initializer=None,
name='deconv2d'):
"""Deconvolution wrapper."""
with tf.variable_scope(name, 'Deconv2D', [x], reuse=reuse) as sc:
dtype = x.dtype.base_dtype
input_rank = x.get_shape().ndims
if input_rank is None: raise ValueError('Rank of inputs must be known')
if input_rank < 3: raise ValueError('Rank of inputs is %d, which is < 3' % input_rank)
if input_rank == 3:
x = tf.expand_dims(x, 3)
kernel_size = utils.n_positive_integers(2, kernel_size)
w_shape = list(kernel_size) + [n_units, x.get_shape().as_list()[-1]]
if len(w_shape) < input_rank:
w_shape = [1] * (input_rank - len(w_shape)) + w_shape
# print w_shape
# Create variable for kernel
w_shape, weights_initializer = var_initializer(w_shape, weights_initializer)
weights_regularizer = l2_regularizer(weight_decay) if weight_decay > 0 and trainable else None
weights = variables.model_variable('weights',
shape=w_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable)
# print weights
# print ' * {:15s} | {:20s} | {:10s}'.format(name+' W', str(weights.get_shape()), str(weights.dtype))
# Deconvolution
sz = x.get_shape().as_list()
        stride = utils.n_positive_integers(2, stride)
output_shape = (sz[0], sz[1]*stride[0]+kernel_size[0]-stride[0], sz[2]*stride[1]+kernel_size[1]-stride[1], n_units)
x = tf.nn.conv2d_transpose(x, weights, output_shape, strides=[1, stride[0], stride[1], 1], padding=padding)
# print x
# Bias
if use_bias:
x = add_bias(x, n_units, biases_initializer, dtype, trainable)
# print x
# Activation
if activation_fn is not None:
x = activation_fn(x)
# print x
return utils.collect_named_outputs(tf.GraphKeys.ACTIVATIONS, sc.original_name_scope, x)
def conv_2d(x, n_units, kernel_size,
stride=1,
dilation=None,
padding="SAME",
use_bias=True,
use_batch_norm=False,
activation_fn=tf.nn.relu,
weight_decay=0.0005,
trainable=True,
reuse=None,
is_training=None,
weights_initializer=None,
biases_initializer=None,
bn_initializer=None,
name='conv2d'):
"""Convolution wrapper."""
with tf.variable_scope(name, 'Conv2D', [x], reuse=reuse) as sc:
dtype = x.dtype.base_dtype
input_rank = x.get_shape().ndims
if input_rank is None: raise ValueError('Rank of inputs must be known')
if input_rank < 3: raise ValueError('Rank of inputs is %d, which is < 3' % input_rank)
if input_rank == 3:
x = tf.expand_dims(x, 3)
# Kernel dimensions
kernel_size = utils.n_positive_integers(2, kernel_size)
w_shape = list(kernel_size) + [x.get_shape().as_list()[-1], n_units]
if len(w_shape) < input_rank:
w_shape = [1]*(input_rank-len(w_shape)) + w_shape
# Create variable for kernel
w_shape, weights_initializer = var_initializer(w_shape, weights_initializer)
weights_regularizer = l2_regularizer(weight_decay) if weight_decay > 0 and trainable else None
weights = variables.model_variable('weights',
shape=w_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable)
# Convolution
stride = utils.n_positive_integers(2, stride)
if len(stride) < input_rank-2:
stride = (1,)*(input_rank-len(stride)-2) + stride
if dilation is not None:
dilation = utils.n_positive_integers(2, dilation)
if len(dilation) < input_rank-2:
dilation = (1,)*(input_rank-len(dilation)-2) + dilation
x = tf.nn.convolution(input=x, filter=weights, strides=stride, dilation_rate=dilation, padding=padding)
# Batch normalization
if use_batch_norm:
x = _bn(x, decay=0.99, scale=True, param_initializers=bn_initializer, is_training=is_training, trainable=trainable, reuse=reuse, scope='bn')
# Bias
elif use_bias:
x = add_bias(x, n_units, biases_initializer, dtype, trainable)
# Activation
if activation_fn is not None:
x = activation_fn(x)
return utils.collect_named_outputs(tf.GraphKeys.ACTIVATIONS, sc.original_name_scope, x)
def conv_1d(x, n_units, kernel_size,
stride=1,
dilation=None,
padding="SAME",
use_bias=True,
use_batch_norm=False,
activation_fn=None,
weight_decay=0.0005,
trainable=True,
reuse=None,
is_training=None,
weights_initializer=None,
biases_initializer=None,
name='conv1d'):
"""Wrapper for 1d convolutional layer."""
with tf.variable_scope(name, 'Conv1D', [x], reuse=reuse) as sc:
input_rank = x.get_shape().ndims
if input_rank is None: raise ValueError('Rank of inputs must be known')
if input_rank not in [2, 3]: raise ValueError('Rank of inputs is %d, which is not 2 or 3' % input_rank)
if input_rank == 2:
x = tf.expand_dims(x, 2)
if dilation is not None:
dilation = [1, dilation]
x = tf.expand_dims(x, axis=1)
x = conv_2d(x, n_units,
kernel_size=[1, kernel_size],
stride=[1, stride],
dilation=dilation,
padding=padding,
use_bias=use_bias,
use_batch_norm=use_batch_norm,
activation_fn=activation_fn,
weight_decay=weight_decay,
trainable=trainable,
reuse=reuse,
is_training=is_training,
weights_initializer=weights_initializer,
biases_initializer=biases_initializer)
x = tf.squeeze(x, axis=1, name=name)
return utils.collect_named_outputs(tf.GraphKeys.ACTIVATIONS, sc.original_name_scope, x)
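# Shape walk-through for conv_1d (illustrative, not in the original): a
# [batch, time, channels] input is expanded to [batch, 1, time, channels],
# convolved by conv_2d with a [1, kernel_size] kernel and [1, stride] stride,
# then squeezed back to [batch, time', n_units].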
def causal_conv1d(x, n_units, kernel_size,
axis=1,
stride=1,
dilation=1,
use_bias=True,
use_batch_norm=False,
activation_fn=None,
weight_decay=0.0005,
trainable=True,
reuse=None,
is_training=None,
weights_initializer=None,
biases_initializer=None,
bn_initializer=None,
name='CausalConv1D'):
with tf.variable_scope(name, 'CausalConv1D', [x], reuse=reuse) as sc:
dtype = x.dtype.base_dtype
input_rank = x.get_shape().ndims
if input_rank is None: raise ValueError('Rank of inputs must be known')
if input_rank < 2: raise ValueError('Rank of inputs is %d, which is < 2' % input_rank)
if input_rank == 2:
x = tf.expand_dims(x, 2)
input_rank = x.get_shape().ndims
n_inp_channels = x.get_shape().as_list()[-1]
n_inp_steps = x.get_shape().as_list()[axis]
# Kernel dimensions
w_shape = [kernel_size] + [1]*(input_rank-3+1-axis) + [n_inp_channels, n_units]
# Create variable for kernel
weights_shape, weights_initializer = var_initializer(w_shape, weights_initializer)
weights_regularizer = l2_regularizer(weight_decay) if weight_decay > 0 and trainable else None
weights = variables.model_variable('weights',
shape=w_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable)
# Convolution
if dilation > 1:
dilation_rate = [1 for _ in range(x.get_shape().ndims-2)]
dilation_rate[axis-1] = dilation
out = tf.nn.convolution(x, weights, padding='VALID', dilation_rate=dilation_rate)
else:
strides = [1 for _ in range(input_rank-2)]
strides[axis-1] = stride
out = tf.nn.convolution(x, weights, padding='VALID', strides=strides)
# Remove excess elements at the end.
        out_width = (n_inp_steps - (kernel_size - 1) * dilation) // stride  # integer division so tf.slice gets an int size
x = tf.slice(out, [0]*input_rank, [-1, out_width] + [-1]*(input_rank-2))
# Batch normalization
if use_batch_norm:
x = _bn(x, decay=0.99, scale=True, param_initializers=bn_initializer, is_training=is_training, trainable=trainable, reuse=reuse, scope='bn')
# Bias
elif use_bias:
x = add_bias(x, n_units, biases_initializer, dtype, trainable)
# Activation
if activation_fn is not None:
x = activation_fn(x)
return utils.collect_named_outputs(tf.GraphKeys.ACTIVATIONS, sc.original_name_scope, x)
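# Worked example of the causal output-length arithmetic above (illustrative):
# with n_inp_steps=16, kernel_size=3, dilation=2 and stride=1, the VALID
# convolution leaves (16 - (3 - 1) * 2) // 1 = 12 time steps, which is exactly
# what the tf.slice call keeps.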
def max_pool2d(x, window, stride=1, padding='SAME', name='MaxPool'):
input_rank = x.get_shape().ndims
if input_rank is None: raise ValueError('Rank of inputs must be known')
if input_rank < 3: raise ValueError('Rank of inputs is %d, which is < 3' % input_rank)
if input_rank == 3:
x = tf.expand_dims(x, 3)
window = utils.n_positive_integers(2, window)
if len(window) < input_rank-2:
window = (1,)*(input_rank-len(window)-2) + window
stride = utils.n_positive_integers(2, stride)
if len(stride) < input_rank-2:
stride = (1,)*(input_rank-len(stride)-2) + stride
out = tf.nn.pool(x, window,'MAX', padding, strides=stride, name=name)
return utils.collect_named_outputs(tf.GraphKeys.ACTIVATIONS, name, out)
def max_pool1d(x, kernel_size, stride=1, padding='SAME', name='MaxPool'):
with tf.variable_scope(name, 'MaxPool1D', [x]) as sc:
input_rank = x.get_shape().ndims
if input_rank is None: raise ValueError('Rank of inputs must be known')
if input_rank not in [2, 3]: raise ValueError('Rank of inputs is %d, which is not 2 or 3' % input_rank)
if input_rank == 2:
x = tf.expand_dims(x, 2)
x = tf.expand_dims(x, axis=1)
x = max_pool2d(x, [1, kernel_size], [1, stride], padding=padding, name=name)
x = tf.squeeze(x, axis=1, name=name)
return x
def avg_pool2d(x, kernel_size, stride=1, padding='SAME', name='AvgPool'):
return layers.avg_pool2d(x, kernel_size, stride, padding=padding, outputs_collections=tf.GraphKeys.ACTIVATIONS, scope=name)
def avg_pool1d(x, kernel_size, stride=1, padding='SAME', name='AvgPool'):
with tf.variable_scope(name, 'AvgPool1D', [x]) as sc:
input_rank = x.get_shape().ndims
if input_rank is None: raise ValueError('Rank of inputs must be known')
if input_rank not in [2, 3]: raise ValueError('Rank of inputs is %d, which is not 2 or 3' % input_rank)
if input_rank == 2:
x = tf.expand_dims(x, 2)
x = tf.expand_dims(x, axis=1)
x = avg_pool2d(x, [1, kernel_size], [1, stride], padding=padding, name=name)
x = tf.squeeze(x, axis=1, name=name)
return x
def dropout(x,
keep_prob=0.5,
is_training=False,
name='drop'):
    with tf.variable_scope(name, 'Dropout', [x]) as sc:
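        # The original body is truncated at this point; the lines below are a
        # minimal, assumed reconstruction following the other wrappers above.
        x = layers.dropout(x, keep_prob=keep_prob, is_training=is_training)
        return utils.collect_named_outputs(tf.GraphKeys.ACTIVATIONS, sc.original_name_scope, x)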
wdim_bins = (self.bins * wg_min),
integrate = self.kernels.csr_sigma_clip4(self.queue, wdim_bins, (wg_min,), *kw_int.values())
events.append(EventDescription("csr_sigma_clip4", integrate))
# now perform the calc_from_1d on the device and count the number of pixels
memset2 = self.program.memset_int(self.queue, (1,), (1,), self.cl_mem["counter"], numpy.int32(0), numpy.int32(1))
events.append(EventDescription("memset counter", memset2))
# Prepare picking
kw_proj = self.cl_kernel_args["find_peaks"]
kw_proj["cutoff"] = numpy.float32(cutoff_pick)
kw_proj["noise"] = numpy.float32(noise)
if radial_range is not None:
kw_proj["radius_min"] = numpy.float32(min(radial_range))
kw_proj["radius_max"] = numpy.float32(max(radial_range) * EPS32)
else:
kw_proj["radius_min"] = numpy.float32(0.0)
kw_proj["radius_max"] = numpy.float32(numpy.finfo(numpy.float32).max)
wg = max(self.workgroup_size["find_peaks"])
wdim_data = (self.size + wg - 1) & ~(wg - 1),
peak_search = self.program.find_peaks(self.queue, wdim_data, (wg,), *list(kw_proj.values()))
events.append(EventDescription("peak_search", peak_search))
# Return the number of peaks
cnt = numpy.empty(1, dtype=numpy.int32)
ev = pyopencl.enqueue_copy(self.queue, cnt, self.cl_mem["counter"])
events.append(EventDescription("copy D->H counter", ev))
if self.profile:
self.events += events
return cnt[0]
def count(self, data, dark=None, dummy=None, delta_dummy=None,
variance=None, dark_variance=None,
flat=None, solidangle=None, polarization=None, absorption=None,
dark_checksum=None, flat_checksum=None, solidangle_checksum=None,
polarization_checksum=None, absorption_checksum=None, dark_variance_checksum=None,
safe=True, error_model=None,
normalization_factor=1.0,
cutoff_clip=5.0, cycle=5, noise=1.0, cutoff_pick=3.0,
radial_range=None):
"""
Count the number of peaks by:
* sigma_clipping within a radial bin to measure the mean and the deviation of the background
* reconstruct the background in 2D
* count the number of peaks above mean + cutoff*sigma
:param data: 2D array with the signal
:param dark: array of same shape as data for pre-processing
:param dummy: value for invalid data
        :param delta_dummy: precision for dummy assessment
:param variance: array of same shape as data for pre-processing
:param dark_variance: array of same shape as data for pre-processing
:param flat: array of same shape as data for pre-processing
:param solidangle: array of same shape as data for pre-processing
:param polarization: array of same shape as data for pre-processing
:param dark_checksum: CRC32 checksum of the given array
:param flat_checksum: CRC32 checksum of the given array
:param solidangle_checksum: CRC32 checksum of the given array
:param polarization_checksum: CRC32 checksum of the given array
        :param safe: if True (default), compare arrays on the GPU according to their checksum; otherwise the buffer location is used
        :param preprocess_only: if True, return the dark-subtracted, flat-field, solid-angle and polarization corrected image
:param normalization_factor: divide raw signal by this value
:param cutoff_clip: discard all points with `|value - avg| > cutoff * sigma` during sigma_clipping. 4-5 is quite common
:param cycle: perform at maximum this number of cycles. 5 is common.
:param noise: minimum meaningful signal. Fixed threshold for picking
        :param cutoff_pick: pick points with `value > background + cutoff * sigma`; a value of 3-4 is quite common
:param radial_range: 2-tuple with the minimum and maximum radius values for picking points. Reduces the region of search.
:return: number of pixel of high intensity found
"""
if isinstance(error_model, str):
error_model = error_model.lower()
else:
if variance is None:
logger.warning("Nor variance not error-model is provided ...")
error_model = ""
with self.sem:
count = self._count(data, dark, dummy, delta_dummy, variance, dark_variance, flat, solidangle, polarization, absorption,
dark_checksum, flat_checksum, solidangle_checksum, polarization_checksum, absorption_checksum, dark_variance_checksum,
safe, error_model, normalization_factor, cutoff_clip, cycle, noise, cutoff_pick, radial_range)
return count
def _peak_finder(self, data, dark=None, dummy=None, delta_dummy=None,
variance=None, dark_variance=None,
flat=None, solidangle=None, polarization=None, absorption=None,
dark_checksum=None, flat_checksum=None, solidangle_checksum=None,
polarization_checksum=None, absorption_checksum=None, dark_variance_checksum=None,
safe=True, error_model=None,
normalization_factor=1.0,
cutoff_clip=5.0, cycle=5, noise=1.0, cutoff_pick=3.0,
radial_range=None):
"""
Unlocked version of sparsify
"""
cnt = self._count(data, dark, dummy, delta_dummy, variance, dark_variance, flat, solidangle, polarization, absorption,
dark_checksum, flat_checksum, solidangle_checksum, polarization_checksum, absorption_checksum, dark_variance_checksum,
safe, error_model, normalization_factor, cutoff_clip, cycle, noise, cutoff_pick, radial_range)
indexes = numpy.empty(cnt, dtype=numpy.int32)
dtype = data.dtype
if dtype.kind == 'f':
dtype = numpy.float32
kernel = self.program.copy_peak
elif dtype.kind in "iu":
if dtype.itemsize > 4:
dtype = numpy.dtype("uint32") if dtype.kind == "u" else numpy.dtype("int32")
kernel = self.program.__getattr__("copy_peak_" + dtype.name)
signal = numpy.empty(cnt, dtype)
if cnt > 0:
# Call kernel to copy intensities
kw = self.cl_kernel_args["copy_peak"]
size = (cnt + self.BLOCK_SIZE - 1) & ~(self.BLOCK_SIZE - 1)
ev0 = kernel(self.queue, (size,), (self.BLOCK_SIZE,),
*list(kw.values()))
ev1 = pyopencl.enqueue_copy(self.queue, indexes, self.cl_mem["peak_position"])
ev2 = pyopencl.enqueue_copy(self.queue, signal, self.cl_mem["peak_intensity"])
if self.profile:
self.events += [EventDescription("copy D->D + cast %s intenity" % dtype.name, ev0),
EventDescription("copy D->H peak_position", ev1),
EventDescription("copy D->H peak_intensty", ev2)]
return indexes, signal
def sparsify(self, data, dark=None, dummy=None, delta_dummy=None,
variance=None, dark_variance=None,
flat=None, solidangle=None, polarization=None, absorption=None,
dark_checksum=None, flat_checksum=None, solidangle_checksum=None,
polarization_checksum=None, absorption_checksum=None, dark_variance_checksum=None,
safe=True, error_model=None,
normalization_factor=1.0,
cutoff_clip=5.0, cycle=5, noise=1.0, cutoff_pick=3.0,
radial_range=None):
"""
        Perform an iterative sigma-clipping filter within each row (radial bin);
        see the documentation of scipy.stats.sigmaclip for a description of the method.
        If the error model is "azimuthal", the variance is the variance within a bin,
        which is refined at each iteration and can be costly!
Else, the error is propagated according to:
.. math::
signal = (raw - dark)
variance = variance + dark_variance
            normalization = normalization_factor*(flat * solidangle * polarization * absorption)
count = number of pixel contributing
Integration is performed using the CSR representation of the look-up table on all
arrays: signal, variance, normalization and count
        :param data: 2D array with the signal
        :param dark: array of same shape as data for pre-processing
:param dummy: value for invalid data
        :param delta_dummy: precision for dummy assessment
:param variance: array of same shape as data for pre-processing
:param dark_variance: array of same shape as data for pre-processing
:param flat: array of same shape as data for pre-processing
:param solidangle: array of same shape as data for pre-processing
:param polarization: array of same shape as data for pre-processing
:param dark_checksum: CRC32 checksum of the given array
:param flat_checksum: CRC32 checksum of the given array
:param solidangle_checksum: CRC32 checksum of the given array
:param polarization_checksum: CRC32 checksum of the given array
        :param safe: if True (default), compare arrays on the GPU according to their checksum; otherwise the buffer location is used
        :param preprocess_only: if True, return the dark-subtracted, flat-field, solid-angle and polarization corrected image
:param normalization_factor: divide raw signal by this value
:param cutoff_clip: discard all points with `|value - avg| > cutoff * sigma` during sigma_clipping. 4-5 is quite common
:param cycle: perform at maximum this number of cycles. 5 is common.
:param noise: minimum meaningful signal. Fixed threshold for picking
        :param cutoff_pick: pick points with `value > background + cutoff * sigma`; a value of 3-4 is quite common
:param radial_range: 2-tuple with the minimum and maximum radius values for picking points. Reduces the region of search.
:return: SparseFrame object, see `intensity`, `x` and `y` properties
"""
if isinstance(error_model, str):
error_model = error_model.lower()
else:
if variance is None:
logger.warning("Nor variance not error-model is provided ...")
error_model = ""
with self.sem:
indexes, values = self._peak_finder(data, dark, dummy, delta_dummy, variance, dark_variance, flat, solidangle, polarization, absorption,
dark_checksum, flat_checksum, solidangle_checksum, polarization_checksum, absorption_checksum, dark_variance_checksum,
safe, error_model, normalization_factor, cutoff_clip, cycle, noise, cutoff_pick, radial_range)
background_avg = numpy.zeros(self.bins, dtype=numpy.float32)
background_std = numpy.zeros(self.bins, dtype=numpy.float32)
ev1 = pyopencl.enqueue_copy(self.queue, background_avg, self.cl_mem["averint"])
ev2 = pyopencl.enqueue_copy(self.queue, background_std, self.cl_mem["stderr"])
if self.profile:
self.events += [EventDescription("copy D->H background_avg", ev1),
EventDescription("copy D->H background_std", ev2)]
result = SparseFrame(indexes, values)
result._shape = data.shape
result._dtype = data.dtype
result._compute_engine = self.__class__.__name__
result._mask = self.radius2d
result._cutoff = cutoff_pick
result._noise = noise
result._radius = self.bin_centers
result._background_avg = background_avg
result._background_std = background_std
result._unit = self.unit
result._has_dark_correction = dark is not None
result._has_flat_correction = flat is not None
result._normalization_factor = normalization_factor
result._has_polarization_correction = polarization is not None
result._has_solidangle_correction = solidangle is not None
result._has_absorption_correction = absorption is not None
result._metadata = None
result._method = "sparsify"
result._method_called = None
result._background_cycle = cycle
result._radial_range = radial_range
result._dummy = dummy
# result.delta_dummy = delta_dummy
return result
# Name of the default "process" method
__call__ = sparsify
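    # Illustrative usage sketch (not part of the original; `engine` and `frame`
    # are hypothetical names for an initialised instance of this class and a
    # 2D detector image):
    #
    #     sparse = engine.sparsify(frame, error_model="poisson",
    #                              cutoff_pick=3.0, noise=1.0)
    #     print(sparse.intensity.size, "pixels kept above the background")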
#===============================================================================
# Simple variant
#===============================================================================
class OCL_SimplePeakFinder(OpenclProcessing):
BLOCK_SIZE = 1024 # works with 32x32 patches (1024 threads)
kernel_files = ["pyfai:openCL/simple_peak_picker.cl"]
buffers = [BufferDescription("image", 1, numpy.float32, mf.READ_WRITE),
BufferDescription("image_raw", 1, numpy.float32, mf.READ_ONLY),
BufferDescription("mask", 1, numpy.int8, mf.READ_ONLY),
BufferDescription("output", 1, numpy.int32, mf.READ_WRITE),
BufferDescription("peak_intensity", 1, numpy.float32, mf.WRITE_ONLY)
]
mapping = {numpy.int8: "s8_to_float",
numpy.uint8: "u8_to_float",
numpy.int16: "s16_to_float",
numpy.uint16: "u16_to_float",
numpy.uint32: "u32_to_float",
numpy.int32: "s32_to_float",
numpy.float32: "f32_to_float"
}
def __init__(self, image_shape=None, mask=None,
ctx=None, devicetype="all", platformid=None, deviceid=None,
block_size=None, profile=False):
"""
:param image_shape: 2-tuple with the size of the image
:param mask: array with invalid pixel flagged.
:param ctx: actual working context, left to None for automatic
initialization from device type | |
            bbox_x, bbox_y = island.bounding_box.xy
for x in stops_x:
if x + bbox_x > cage_size.x:
continue
for y in stops_y:
if y + bbox_y > cage_size.y or (x, y) in occupied_cache:
continue
for i, obstacle in enumerate(page_islands):
# if this obstacle overlaps with the island, try another stop
if (x + bbox_x > obstacle.pos.x and
obstacle.pos.x + obstacle.bounding_box.x > x and
y + bbox_y > obstacle.pos.y and
obstacle.pos.y + obstacle.bounding_box.y > y):
if x >= obstacle.pos.x and y >= obstacle.pos.y:
occupied_cache.add((x, y))
# just a stupid heuristic to make subsequent searches faster
if i > 0:
page_islands[1:i+1] = page_islands[:i]
page_islands[0] = obstacle
break
else:
# if no obstacle called break, this position is okay
island.pos.xy = x, y
page_islands.append(island)
stops_x.append(x + bbox_x)
stops_y.append(y + bbox_y)
return True
return False
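        # Illustrative note (not in the original): try_emplace scans candidate
        # bottom-left corners formed by the far edges of already placed islands
        # (the "stops"); the first corner where the island's bounding box stays
        # inside the cage and clears every placed island wins, and its own far
        # edges then become new stops.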
def drop_portion(stops, border, divisor):
stops.sort()
# distance from left neighbor to the right one, excluding the first stop
distances = [right - left for left, right in zip(stops, chain(stops[2:], [border]))]
quantile = sorted(distances)[len(distances) // divisor]
return [stop for stop, distance in zip(stops, chain([quantile], distances)) if distance >= quantile]
if any(island.bounding_box.x > cage_size.x or island.bounding_box.y > cage_size.y for island in self.islands):
raise UnfoldError(
"An island is too big to fit onto page of the given size. "
"Either downscale the model or find and split that island manually.\n"
"Export failed, sorry.")
# sort islands by their diagonal... just a guess
remaining_islands = sorted(self.islands, reverse=True, key=lambda island: island.bounding_box.length_squared)
page_num = 1 # TODO delete me
while remaining_islands:
# create a new page and try to fit as many islands onto it as possible
page = Page(page_num)
page_num += 1
occupied_cache = set()
stops_x, stops_y = [0], [0]
for island in remaining_islands:
try_emplace(island, page.islands, stops_x, stops_y, occupied_cache)
# if overwhelmed with stops, drop a quarter of them
if len(stops_x)**2 > 4 * len(self.islands) + 100:
stops_x = drop_portion(stops_x, cage_size.x, 4)
stops_y = drop_portion(stops_y, cage_size.y, 4)
remaining_islands = [island for island in remaining_islands if island not in page.islands]
self.pages.append(page)
def save_uv(self, cage_size=M.Vector((1, 1)), separate_image=False):
if separate_image:
for island in self.islands:
island.save_uv_separate(self.looptex)
else:
for island in self.islands:
island.save_uv(self.looptex, cage_size)
def save_image(self, page_size_pixels: M.Vector, filename):
for page in self.pages:
image = create_blank_image("Page {}".format(page.name), page_size_pixels, alpha=1)
image.filepath_raw = page.image_path = "{}_{}.png".format(filename, page.name)
faces = [face for island in page.islands for face in island.faces]
self.bake(faces, image)
image.save()
image.user_clear()
bpy.data.images.remove(image)
def save_separate_images(self, scale, filepath, embed=None):
for i, island in enumerate(self.islands):
image_name = "Island {}".format(i)
image = create_blank_image(image_name, island.bounding_box * scale, alpha=0)
self.bake(island.faces.keys(), image)
if embed:
island.embedded_image = embed(image)
else:
from os import makedirs
image_dir = filepath
makedirs(image_dir, exist_ok=True)
image_path = os_path.join(image_dir, "island{}.png".format(i))
image.filepath_raw = image_path
image.save()
island.image_path = image_path
image.user_clear()
bpy.data.images.remove(image)
def bake(self, faces, image):
if not self.looptex:
raise UnfoldError("The mesh has no UV Map slots left. Either delete a UV Map or export the net without textures.")
ob = bpy.context.active_object
me = ob.data
# in Cycles, the image for baking is defined by the active Image Node
temp_nodes = dict()
for mat in me.materials:
mat.use_nodes = True
img = mat.node_tree.nodes.new('ShaderNodeTexImage')
img.image = image
temp_nodes[mat] = img
mat.node_tree.nodes.active = img
# move all excess faces to negative numbers (that is the only way to disable them)
ignored_uvs = [loop[self.looptex].uv for f in self.data.faces if f not in faces for loop in f.loops]
for uv in ignored_uvs:
uv *= -1
bake_type = bpy.context.scene.cycles.bake_type
sta = bpy.context.scene.render.bake.use_selected_to_active
try:
ob.update_from_editmode()
me.uv_layers.active = me.uv_layers[self.looptex.name]
bpy.ops.object.bake(type=bake_type, margin=1, use_selected_to_active=sta, cage_extrusion=100, use_clear=False)
except RuntimeError as e:
raise UnfoldError(*e.args)
finally:
for mat, node in temp_nodes.items():
mat.node_tree.nodes.remove(node)
for uv in ignored_uvs:
uv *= -1
class Edge:
"""Wrapper for BPy Edge"""
__slots__ = ('data', 'va', 'vb', 'main_faces', 'uvedges',
'vector', 'angle',
'is_main_cut', 'force_cut', 'priority', 'freestyle')
def __init__(self, edge):
self.data = edge
self.va, self.vb = edge.verts
self.vector = self.vb.co - self.va.co
# if self.main_faces is set, then self.uvedges[:2] must correspond to self.main_faces, in their order
# this constraint is assured at the time of finishing mesh.generate_cuts
self.uvedges = list()
self.force_cut = edge.seam # such edges will always be cut
self.main_faces = None # two faces that may be connected in the island
# is_main_cut defines whether the two main faces are connected
# all the others will be assumed to be cut
self.is_main_cut = True
self.priority = None
self.angle = None
self.freestyle = False
def choose_main_faces(self):
"""Choose two main faces that might get connected in an island"""
from itertools import combinations
loops = self.data.link_loops
def score(pair):
return abs(pair[0].face.normal.dot(pair[1].face.normal))
if len(loops) == 2:
self.main_faces = list(loops)
elif len(loops) > 2:
# find (with brute force) the pair of indices whose loops have the most similar normals
self.main_faces = max(combinations(loops, 2), key=score)
if self.main_faces and self.main_faces[1].vert == self.va:
self.main_faces = self.main_faces[::-1]
def calculate_angle(self):
"""Calculate the angle between the main faces"""
loop_a, loop_b = self.main_faces
normal_a, normal_b = (l.face.normal for l in self.main_faces)
if not normal_a or not normal_b:
self.angle = -3 # just a very sharp angle
else:
s = normal_a.cross(normal_b).dot(self.vector.normalized())
s = max(min(s, 1.0), -1.0) # deal with rounding errors
self.angle = asin(s)
if loop_a.link_loop_next.vert != loop_b.vert or loop_b.link_loop_next.vert != loop_a.vert:
self.angle = abs(self.angle)
def generate_priority(self, priority_effect, average_length):
"""Calculate the priority value for cutting"""
angle = self.angle
if angle > 0:
self.priority = priority_effect['CONVEX'] * angle / pi
else:
self.priority = priority_effect['CONCAVE'] * (-angle) / pi
self.priority += (self.vector.length / average_length) * priority_effect['LENGTH']
def is_cut(self, face):
"""Return False if this edge will the given face to another one in the resulting net
(useful for edges with more than two faces connected)"""
# Return whether there is a cut between the two main faces
if self.main_faces and face in {loop.face for loop in self.main_faces}:
return self.is_main_cut
# All other faces (third and more) are automatically treated as cut
else:
return True
def other_uvedge(self, this):
"""Get an uvedge of this edge that is not the given one
causes an IndexError if case of less than two adjacent edges"""
return self.uvedges[1] if this is self.uvedges[0] else self.uvedges[0]
class Island:
"""Part of the net to be exported"""
__slots__ = ('mesh', 'faces', 'edges', 'vertices', 'fake_vertices', 'boundary', 'markers',
'pos', 'bounding_box',
'image_path', 'embedded_image',
'number', 'label', 'abbreviation', 'title',
'has_safe_geometry', 'is_inside_out',
'sticker_numbering')
def __init__(self, mesh, face, matrix, normal_matrix):
"""Create an Island from a single Face"""
self.mesh = mesh
self.faces = dict() # face -> uvface
self.edges = dict() # loop -> uvedge
self.vertices = dict() # loop -> uvvertex
self.fake_vertices = list()
self.markers = list()
self.label = None
self.abbreviation = None
self.title = None
self.pos = M.Vector((0, 0))
self.image_path = None
self.embedded_image = None
self.is_inside_out = False # swaps concave <-> convex edges
self.has_safe_geometry = True
self.sticker_numbering = 0
uvface = UVFace(face, self, matrix, normal_matrix)
self.vertices.update(uvface.vertices)
self.edges.update(uvface.edges)
self.faces[face] = uvface
# UVEdges on the boundary
self.boundary = list(self.edges.values())
def add_marker(self, marker):
self.fake_vertices.extend(marker.bounds)
self.markers.append(marker)
def generate_label(self, label=None, abbreviation=None):
"""Assign a name to this island automatically"""
abbr = abbreviation or self.abbreviation or str(self.number)
# TODO: dots should be added in the last instant when outputting any text
if is_upsidedown_wrong(abbr):
abbr += "."
self.label = label or self.label or "Island {}".format(self.number)
self.abbreviation = abbr
def save_uv(self, tex, cage_size):
"""Save UV Coordinates of all UVFaces to a given UV texture
tex: UV Texture layer to use (BMLayerItem)
page_size: size of the page in pixels (vector)"""
scale_x, scale_y = 1 / cage_size.x, 1 / cage_size.y
for loop, uvvertex in self.vertices.items():
uv = uvvertex.co + self.pos
loop[tex].uv = uv.x * scale_x, uv.y * scale_y
def save_uv_separate(self, tex):
"""Save UV Coordinates of all UVFaces to a given UV texture, spanning from 0 to 1
tex: UV Texture layer to use (BMLayerItem)
page_size: size of the page in pixels (vector)"""
scale_x, scale_y = 1 / self.bounding_box.x, 1 / self.bounding_box.y
for loop, uvvertex in self.vertices.items():
loop[tex].uv = uvvertex.co.x * scale_x, uvvertex.co.y * scale_y
def join(uvedge_a, uvedge_b, size_limit=None, epsilon=1e-6):
"""
Try to join other island on given edge
Returns False if they would overlap
"""
class Intersection(Exception):
pass
class GeometryError(Exception):
pass
def is_below(self, other, correct_geometry=True):
            if self
#!/usr/bin/env python
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
import webapp2
import time
from google.appengine.ext import ndb
# category database model
class Category(ndb.Model):
category = ndb.StringProperty(indexed=True, required=True)
# Movie model in detailed view page
class MovieClip(ndb.Model):
id = ndb.IntegerProperty(indexed=True, required=True)
category = ndb.StringProperty(indexed=True, required=True)
description = ndb.StringProperty(indexed=True, required=True)
source = ndb.StringProperty(indexed=False, required=True)
card = ndb.StringProperty(indexed=False, required=True)
background = ndb.StringProperty(indexed=False, required=True)
title = ndb.StringProperty(indexed=False, required=True)
studio = ndb.StringProperty(indexed=False, required=True)
rented = ndb.BooleanProperty(indexed=False, required=True)
# movie model in overview page
class MovieOverview(ndb.Model):
id = ndb.IntegerProperty(indexed=True, required=True)
category = ndb.StringProperty(indexed=True, required=True)
source = ndb.StringProperty(indexed=False, required=True)
card = ndb.StringProperty(indexed=False, required=True)
background = ndb.StringProperty(indexed=False, required=True)
title = ndb.StringProperty(indexed=False, required=True)
studio = ndb.StringProperty(indexed=False, required=True)
# global variable to track movie's id
# for same video, the MovieClip and MovieOverview will share the same id
MOVIE_ID = 0
# All movies' meta information
MOVIES = """
[{
"category": "Google+",
"videos": [{
"description": "Jon introduces Instant Upload with a few thoughts on how we remember the things that matter. Check out some ways we've been rethinking real-life sharing for the web at plus.google.com",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Instant%20Upload.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Instant%20Upload/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Instant%20Upload/bg.jpg",
"title": "Instant Upload",
"studio": "Google+"
}, {
"description": "With Google+ Instant Upload, every picture you take on your phone is instantly backed up to a private Google+ album. It's a simple way to make sure you never lose another memory.",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20New%20Dad.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20New%20Dad/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20New%20Dad/bg.jpg",
"title": "New Dad",
"studio": "Google+"
}, {
"description": "Laugh, share news, celebrate, learn something new or stay in touch with Hangouts. And with Hangouts on your phone, you can drop in from wherever you are.",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Say%20more%20with%20Hangouts.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Say%20more%20with%20Hangouts/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Say%20more%20with%20Hangouts/bg.jpg",
"title": "Say more with Hangouts",
"studio": "Google+"
}, {
"description": "Search on Google+ helps you get advice from the people you know -- sometimes when you least expect it. Check out some ways we've been rethinking real-life sharing for the web at plus.google.com.",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Search.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Search/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Search/bg.jpg",
"title": "Google+ Search",
"studio": "Google+"
}, {
"description": "New ways of sharing the right things with the right people. Join at http://google.com/+",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Sharing%20but%20like%20real%20life.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Sharing%20but%20like%20real%20life/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Sharing%20but%20like%20real%20life/bg.jpg",
"title": "Sharing but like real life",
"studio": "Google+"
}, {
"description": "Jed introduces Circles with a few thoughts on the nature of friendship. Check out some ways we've been rethinking real-life sharing for the web at plus.google.com.",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Circles.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Circles/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Circles/bg.jpg",
"title": "Google+ Circles",
"studio": "Google+"
}, {
"description": "Aimee introduces Hangouts with a few thoughts on the spontaneous get-together. Check out some ways we've been rethinking real-life sharing for the web at plus.google.com.",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Hangouts.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Hangouts/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Google%2B/Google%2B_%20Hangouts/bg.jpg",
"title": "Google+ Hangouts",
"studio": "Google+"
}]
}, {
"category": "Demo Slam",
"videos": [{
"description": "Fusce id nisi turpis. Praesent viverra bibendum semper. Donec tristique, orci sed semper lacinia, quam erat rhoncus massa, non congue tellus est quis tellus. Sed mollis orci venenatis quam scelerisque accumsan. Curabitur a massa sit amet mi accumsan mollis sed et magna. Vivamus sed aliquam risus. Nulla eget dolor in elit facilisis mattis. Ut aliquet luctus lacus. Phasellus nec commodo erat. Praesent tempus id lectus ac scelerisque. Maecenas pretium cursus lectus id volutpat.",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%2020ft%20Search.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%2020ft%20Search/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%2020ft%20Search/bg.jpg",
"title": "20ft Search",
"studio": "Google Demo Slam"
}, {
"description": "Fusce id nisi turpis. Praesent viverra bibendum semper. Donec tristique, orci sed semper lacinia, quam erat rhoncus massa, non congue tellus est quis tellus. Sed mollis orci venenatis quam scelerisque accumsan. Curabitur a massa sit amet mi accumsan mollis sed et magna. Vivamus sed aliquam risus. Nulla eget dolor in elit facilisis mattis. Ut aliquet luctus lacus. Phasellus nec commodo erat. Praesent tempus id lectus ac scelerisque. Maecenas pretium cursus lectus id volutpat.",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Balcony%20Toss.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Balcony%20Toss/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Balcony%20Toss/bg.jpg",
"title": "<NAME>",
"studio": "Google Demo Slam"
}, {
"description": "Fusce id nisi turpis. Praesent viverra bibendum semper. Donec tristique, orci sed semper lacinia, quam erat rhoncus massa, non congue tellus est quis tellus. Sed mollis orci venenatis quam scelerisque accumsan. Curabitur a massa sit amet mi accumsan mollis sed et magna. Vivamus sed aliquam risus. Nulla eget dolor in elit facilisis mattis. Ut aliquet luctus lacus. Phasellus nec commodo erat. Praesent tempus id lectus ac scelerisque. Maecenas pretium cursus lectus id volutpat.",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Dance%20Search.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Dance%20Search/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Dance%20Search/bg.jpg",
"title": "Dance Search",
"studio": "Google Demo Slam"
}, {
"description": "Fusce id nisi turpis. Praesent viverra bibendum semper. Donec tristique, orci sed semper lacinia, quam erat rhoncus massa, non congue tellus est quis tellus. Sed mollis orci venenatis quam scelerisque accumsan. Curabitur a massa sit amet mi accumsan mollis sed et magna. Vivamus sed aliquam risus. Nulla eget dolor in elit facilisis mattis. Ut aliquet luctus lacus. Phasellus nec commodo erat. Praesent tempus id lectus ac scelerisque. Maecenas pretium cursus lectus id volutpat.",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Epic%20Docs%20Animation.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Epic%20Docs%20Animation/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Epic%20Docs%20Animation/bg.jpg",
"title": "Epic Docs Animation",
"studio": "Google Demo Slam"
}, {
"description": "Fusce id nisi turpis. Praesent viverra bibendum semper. Donec tristique, orci sed semper lacinia, quam erat rhoncus massa, non congue tellus est quis tellus. Sed mollis orci venenatis quam scelerisque accumsan. Curabitur a massa sit amet mi accumsan mollis sed et magna. Vivamus sed aliquam risus. Nulla eget dolor in elit facilisis mattis. Ut aliquet luctus lacus. Phasellus nec commodo erat. Praesent tempus id lectus ac scelerisque. Maecenas pretium cursus lectus id volutpat.",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Extra%20Spicy.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Extra%20Spicy/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Extra%20Spicy/bg.jpg",
"title": "Extra Spicy",
"studio": "Google Demo Slam"
}, {
"description": "Fusce id nisi turpis. Praesent viverra bibendum semper. Donec tristique, orci sed semper lacinia, quam erat rhoncus massa, non congue tellus est quis tellus. Sed mollis orci venenatis quam scelerisque accumsan. Curabitur a massa sit amet mi accumsan mollis sed et magna. Vivamus sed aliquam risus. Nulla eget dolor in elit facilisis mattis. Ut aliquet luctus lacus. Phasellus nec commodo erat. Praesent tempus id lectus ac scelerisque. Maecenas pretium cursus lectus id volutpat.",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Get%20Your%20Money's%20Worth.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Get%20Your%20Money's%20Worth/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Get%20Your%20Money's%20Worth/bg.jpg",
"title": "Get Your Money's Worth",
"studio": "Google Demo Slam"
}, {
"description": "Fusce id nisi turpis. Praesent viverra bibendum semper. Donec tristique, orci sed semper lacinia, quam erat rhoncus massa, non congue tellus est quis tellus. Sed mollis orci venenatis quam scelerisque accumsan. Curabitur a massa sit amet mi accumsan mollis sed et magna. Vivamus sed aliquam risus. Nulla eget dolor in elit facilisis mattis. Ut aliquet luctus lacus. Phasellus nec commodo erat. Praesent tempus id lectus ac scelerisque. Maecenas pretium cursus lectus id volutpat.",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Guitar%20Search.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Guitar%20Search/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Guitar%20Search/bg.jpg",
"title": "Guitar Search",
"studio": "Google Demo Slam"
}, {
"description": "Fusce id nisi turpis. Praesent viverra bibendum semper. Donec tristique, orci sed semper lacinia, quam erat rhoncus massa, non congue tellus est quis tellus. Sed mollis orci venenatis quam scelerisque accumsan. Curabitur a massa sit amet mi accumsan mollis sed et magna. Vivamus sed aliquam risus. Nulla eget dolor in elit facilisis mattis. Ut aliquet luctus lacus. Phasellus nec commodo erat. Praesent tempus id lectus ac scelerisque. Maecenas pretium cursus lectus id volutpat.",
"sources": ["https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Hangin'%20with%20the%20Google%20Search%20Bar.mp4"],
"card": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Hangin'%20with%20the%20Google%20Search%20Bar/card.jpg",
"background": "https://storage.googleapis.com/android-tv/Sample%20videos/Demo%20Slam/Google%20Demo%20Slam_%20Hangin'%20with%20the%20Google%20Search%20Bar/bg.jpg",
"title": "Hangin' with the Google Search Bar",
"studio": "Google Demo Slam"
}, {
"description": "Fusce id nisi turpis. Praesent viverra bibendum semper. Donec tristique, orci sed semper lacinia, quam erat rhoncus massa, non congue tellus est quis tellus. Sed mollis orci venenatis quam scelerisque accumsan. Curabitur a massa sit amet mi accumsan mollis sed et magna. Vivamus sed aliquam risus. Nulla eget dolor in elit facilisis mattis. Ut aliquet luctus lacus. | |
    # return 0 when x is outside the range of xarr
if (np.max(x) <= np.min(xarr)) or (np.min(x) >= np.max(xarr)):
return 0
else:
f=interpolate.interp1d(xarr, yarr)
return f(x)
def make_grid_2d(grid0, flux):
qauxarr=np.unique(grid0['LOGQ'])
zauxarr=np.unique(grid0['LOGZ'])
nlines0=len(grid0['ID'][0])
fflux=np.zeros( (len(qauxarr),len(zauxarr), nlines0))
for j in range(nlines0):
for iq in range(len(qauxarr)):
for iz in range(len(zauxarr)):
w = (grid0['LOGZ']==zauxarr[iz]) & (grid0['LOGQ']==qauxarr[iq])
fflux[iq,iz,j]= flux[w,j]
return fflux
def make_img_2d(grid0, flux):
qauxarr=np.unique(grid0['LOGQ'])
zauxarr=np.unique(grid0['LOGZ'])
fflux=np.zeros( (len(qauxarr),len(zauxarr)))
for iq in range(len(qauxarr)):
for iz in range(len(zauxarr)):
w = (grid0['LOGZ']==zauxarr[iz]) & (grid0['LOGQ']==qauxarr[iq])
fflux[iq,iz]= flux[w]
return fflux
#%%
class grid(object):
def __init__(self, gridfile, templ_dir=None,logohsun=None):
self.gridfile=gridfile
self.logohsun=logohsun
self.templ_dir=templ_dir
if self.templ_dir == None:
self.templ_dir='/grids/'
print('looking for grids in the izi/grids directory...')
# READ GRID
try:
grid0=table.Table.read(self.templ_dir+self.gridfile+'.fits')
grid0.convert_bytestring_to_unicode()
except IOError:
raise IOError('no grid file found in '+self.templ_dir+self.gridfile+'.fits')
# get rid of the empty spaces around line names
grid0id= [num.strip() for num in grid0['ID'][0]]
# number of lines
nlines0=len(grid0['ID'][0])
# number of steps in log(Z) * number of steps in log(q)
ngrid0=len(grid0['LOGZ'])
# rename grid0["ID"] to get rid of the empty spaces
grid0['ID']=[grid0id]*ngrid0
self.grid0=grid0
# TAKE SOLAR OXYGEN ABUNDANCE FROM MODEL GRID IF NOT PROVIDED
if self.logohsun ==None:
self.logohsun = grid0[0]['LOGOHSUN']
def apply_limits(self, logzlimits=None, logqlimits=None):
# CUT GRID TO LOGZLIMITS AND LOGQLIMITS
self.logzlimits=logzlimits
self.logqlimits=logqlimits
if self.logzlimits==None:
self.logzlimits=[np.min(self.grid0['LOGZ']+self.logohsun),
np.max(self.grid0['LOGZ']+self.logohsun)]
if self.logqlimits==None:
self.logqlimits=[np.min(self.grid0['LOGQ']),
np.max(self.grid0['LOGQ'])]
self.grid0=self.grid0[ (self.grid0['LOGZ']+self.logohsun >= self.logzlimits[0]) &
(self.grid0['LOGZ']+self.logohsun <= self.logzlimits[1]) &
(self.grid0['LOGQ'] >= self.logqlimits[0]) &
(self.grid0['LOGQ'] <= self.logqlimits[1]) ]
return self
def interpolate_grid(self, nz=50, nq=50):
self.nz=nz
self.nq=nq
zarr=np.linspace(np.min(self.grid0['LOGZ']), np.max(self.grid0['LOGZ']), self.nz)
qarr=np.linspace(np.min(self.grid0['LOGQ']), np.max(self.grid0['LOGQ']), self.nq)
nlines0=len(self.grid0['ID'][0])
fluxarr=np.zeros((self.nz, self.nq, nlines0))
grid_x, grid_y = np.meshgrid(zarr, qarr)
intergrid=self.nz*self.nq
# define the new interpolated grid as a table
intergrid=table.Table()
intergrid['LOGQ']=grid_y.flatten()
intergrid['LOGZ']=grid_x.flatten()
intergrid['LOGOHSUN']=[self.grid0['LOGOHSUN'][0]]*self.nq*self.nz
intergrid['ID']=[self.grid0['ID'][0]]*self.nq*self.nz
flux=np.array(self.grid0['FLUX'])
# qauxarr=np.unique(self.grid0['LOGQ'])
# zauxarr=np.unique(self.grid0['LOGZ'])
logzin=np.array(self.grid0['LOGZ'])
logqin=np.array(self.grid0['LOGQ'])
for i in range(nlines0):
fluxarr[:,:,i]=interpolate.griddata( (logzin,logqin),
flux[:,i], (grid_x, grid_y), method='cubic')
# ALTERNATIVE INTERPOLATION SCHEME
# f= interpolate.interp2d(zauxarr,qauxarr, fflux[:,:,i], kind='cubic')
# fluxarr2[:,:,i]=f(zarr, qarr)
# GOING FROM A 2D grid to a 1D grid
intergrid['FLUX']= self.make_grid_1d(intergrid, grid_x, grid_y, fluxarr)
self.intergrid=intergrid
return self
@staticmethod
def make_grid_1d(intergrid, grid_x, grid_y, fluxarr):
nintergrid=len(intergrid['LOGZ'])
nlines0=len(intergrid['ID'][0])
intergrid_flux= np.zeros((nintergrid, nlines0))
for j in range(nlines0):
for i in range(nintergrid):
ww= (grid_x == intergrid['LOGZ'][i]) & (grid_y == intergrid['LOGQ'][i])
flux2d=fluxarr[:,:,j]
intergrid_flux[i,j]=flux2d[ww]
return intergrid_flux
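    # Illustrative usage sketch (not part of the original; the grid name is the
    # default used by izi below and the directory is hypothetical):
    #
    #     g = grid('l09_high_csf_n1e2_6.0Myr', templ_dir='./grids/')
    #     g.apply_limits(logzlimits=[7.5, 9.3]).interpolate_grid(nz=50, nq=50)
    #     print(len(g.intergrid))   # nz * nq rows, one per (logZ, logQ) model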
class izi(object):
def __init__(self, flux, error, id, gridfile=None, templ_dir=None,
logzlimits=None, logqlimits=None,
epsilon=0.15, nz=50, nq=50,
intergridfile=None, integrid=None, outgridfile=False,
logzprior=None, logqprior=None, nonorm=0,
quiet=False, plot=True):
#DECLARE INPUT TO SELF
self.flux = flux # flux array
self.error = error # error flux array
self.id = id # IDs of different emission lines
self.gridfile = gridfile
self.templ_dir = templ_dir
self.logzlimits = logzlimits
self.logqlimits = logqlimits
self.intergridfile=intergridfile
self.intergrid=integrid
self.nonorm=nonorm
self.outgridfile=outgridfile
self.logzprior=logzprior
self.logqprior=logqprior
self.plot=plot
self.nz=nz
self.nq=nq
self.quiet=quiet
self.epsilon=epsilon
nlines_in=len(self.flux)
assert len(self.error) == nlines_in and len(self.id) == nlines_in, \
'ERROR Flux, Error, and ID arrays do not have the same number of elements'
# INPUT FILES CHECKING
# IF NOT SPECIFIED BY USER USE DEFAULT Levesque models with density
# 10^2 cm-3, composite SF, and 6Myr age
if self.gridfile == None:
self.gridfile='l09_high_csf_n1e2_6.0Myr'
# self.gridfile='d13_kappa20'
else:
self.gridfile=gridfile
if self.templ_dir==None:
self.templ_dir = path.dirname(path.realpath(__file__))[:-4]+'/grids/'
if self.intergridfile == None:
# PREPARE ORIGINAL GRID
# READ GRID using the grid class
grid0=grid(self.gridfile, templ_dir=self.templ_dir)
# APPLY LIMITS to grid
grid0.apply_limits(logzlimits=self.logzlimits, logqlimits=self.logqlimits)
#
self.logohsun=grid0.logohsun
nlines0=len(grid0.grid0['ID'][0])
# number of steps in log(Z) * number of steps in log(q)
# ngrid0=len(grid0['LOGZ'])
#INTERPOLATE GRID
# pdb.set_trace()
grid0.interpolate_grid(nz=self.nz, nq=self.nq)
self.intergrid=grid0.intergrid
#DEFINE PARAMTERS OF GRID
zarr=np.linspace(np.min(self.intergrid['LOGZ']), np.max(self.intergrid['LOGZ']), self.nz)
qarr=np.linspace(np.min(self.intergrid['LOGQ']), np.max(self.intergrid['LOGQ']), self.nq)
nintergrid=len(self.intergrid['ID'])
# WRITE INTERPOLATED GRID IF USER WANTS TO
if self.outgridfile ==True:
a=self.intergrid
a.write(self.templ_dir+'/interpolgrid_'+str(self.nz)+'_'+\
str(self.nq)+self.gridfile+'.fits', overwrite=True)
else:
# READ GRID using the grid class
grid0=grid(self.intergridfile, templ_dir=self.templ_dir)
self.intergrid=grid0.grid0
nintergrid=len(self.intergrid['ID'])
nlines0=len(self.intergrid['ID'][0])
self.nz=len(np.unique(self.intergrid['LOGZ']))
self.nq=len(np.unique(self.intergrid['LOGQ']))
zarr=np.linspace(np.min(self.intergrid['LOGZ']), np.max(self.intergrid['LOGZ']), self.nz)
qarr=np.linspace(np.min(self.intergrid['LOGQ']), np.max(self.intergrid['LOGQ']), self.nq)
self.logohsun=grid0.logohsun
# Check for summed sets of lines in input ID array and sum fluxes in grid
# All fluxes are summed to the first line and ID is set to that line
for i in range(nlines_in):
idsum=self.id[i].split(';')
if len(idsum) >1:
for j in range(len(idsum)-1):
w0= (self.intergrid['ID'][0] == idsum[0])
wj= (self.intergrid['ID'][0] == idsum[j+1])
self.intergrid['FLUX'][:,w0]=self.intergrid['FLUX'][:,w0] +\
self.intergrid['FLUX'][:,wj]
self.id[i]=idsum[0]
# INCLUDE SYSTEMATIC UNCERTAINTY IN THE PHOTO-IONIZATION MODELS
# default is 0.15 dex systematic uncertainty
epsilon2=epsilon*np.log(10)
#; CREATE DATA STRUCTURE CONTAINING LINE FLUXES AND ESTIMATED PARAMETERS
        # note that calling this "d" messes up the python debugger :(
dd={'id':self.intergrid['ID'][0], # line id
'flux':np.zeros(nlines0)-999, # line flux
'error':np.zeros(nlines0)-999}
#FILL STRUCTURE WITH LINE FLUXES
#
for i in range(nlines_in):
auxind=(dd['id'] == self.id[i])
nmatch=auxind.sum()
            assert nmatch == 1, 'ERROR: ===== Line ID '+self.id[i]+' not recognized ====='
dd['flux'][auxind]=self.flux[i]
dd['error'][auxind]=self.error[i]
# INDEX OF LINES WITH MEASUREMENTS
good=(dd['error'] != -999)
ngood=good.sum()
measured=(dd['flux'] != -999)
# nmeasured=measured.sum()
upperlim=(dd['error'] != -999) & (dd['flux'] == -666)
# nupper=upperlim.sum()
flag0=np.zeros(nlines0)
flag0[measured]=1 #measured flux
flag0[upperlim]=2 #upper limit on flux
# this array has length ngood, which is the number of lines with
# given error measurements. If error is given but no flux this is treated
# as an upper limit
flag=flag0[good]
# ; NORMALIZE LINE FLUXES TO H-BETA OR
# ; IF ABSENT NORMALIZE TO BRIGHTEST LINE
# pdb.set_trace()
if self.nonorm ==0: #; use nonorm for line ratio fitting
idnorm='hbeta'
in_idnorm= (dd['id']==idnorm)
if (dd['flux'][in_idnorm] ==-999):
a=dd['id'][measured]
idnorm=a[[np.argmax(dd['flux'][measured])]]
in_idnorm= (dd['id']==idnorm)
norm=dd['flux'][in_idnorm]
# print 'flux of', idnorm, 'is', norm
# NORMALISE INPUT FLUXES
dd['flux'][measured]=dd['flux'][measured]/norm[0]
dd['error'][good]=dd['error'][good]/norm[0]
# NORMALISE GRID
norm=self.intergrid['FLUX'][:,self.intergrid['ID'][0] == idnorm ]
self.intergrid['FLUX']=self.intergrid['FLUX']/norm
fff=np.array(self.intergrid['FLUX'])
# CALCULATE LIKELIHOOD AND POSTERIOR
# if using EMCEE THIS PART NEEDS TO BE REPLACED BY THE SAMPLER
like=np.zeros(nintergrid)+1.0
post=np.zeros(nintergrid)+1.0
zrange=[np.min(self.intergrid['LOGZ']), np.max(self.intergrid['LOGZ'])]
qrange=[np.min(self.intergrid['LOGQ']), np.max(self.intergrid['LOGQ'])]
#
for i in range(nintergrid):
for j in range(ngood):
if (flag[j] == 1):
e2=dd['error'][good][j]**2.0 + (epsilon2*fff[i, good][j] )**2.0
fdf2= (dd['flux'][good][j]- fff[i, good][j])**2.0
like[i]=like[i]/np.sqrt(2*np.pi)*np.exp(-0.5*fdf2/e2)/np.sqrt(e2)
if (flag[j] == 2):
edf= (dd['error'][good][j]- fff[i, good][j])
e2=dd['error'][good][j]**2.0 + (epsilon2*fff[i, good][j] )**2.0
like[i]=like[i]*0.5*(1+special.erf(edf/np.sqrt(e2*2)))
# print 'upper limit'
#CALCULATE POSTERIOR BY INCLUDING PRIORS AND NORMALIZING
# USE of custom priors has not been tested
# CHANGE LOGZPRIOR TO SOLAR UNITS
if self.logzprior !=None:
                self.logzprior[:,0]=self.logzprior[:,0]-self.logohsun
if (self.logzprior == None) and (self.logqprior == None):
post[i]=uprior(zrange)*uprior(qrange)*like[i]
if (self.logzprior != None) and (self.logqprior == None):
post[i]=userprior(self.intergrid['LOGZ'][i], self.logzprior[:,0], logzprior[:,1])*\
uprior(qrange)*like[i]
if (self.logzprior == None) and (self.logqprior != None):
post[i]=uprior(zrange)*\
userprior(self.intergrid['LOGQ'][i], self.logqprior[:,0], logqprior[:,1])*like[i]
if (self.logzprior != None) and (self.logqprior != None):
post[i]=userprior(self.intergrid['LOGZ'][i], self.logzprior[:,0], logzprior[:,1])*\
userprior(self.intergrid['LOGQ'][i], self.logqprior[:,0], logqprior[:,1])*like[i]
# Nobody likes undefined and infinite likelihoods or posteriors
#
        like[~np.isfinite(like)] = 0
        post[~np.isfinite(post)] = 0
like=np.array(like)
post=np.array(post)
#; WRITE JOINT PDFS
dd['zarr']=zarr
dd['qarr']=qarr
dd['post']=post
#
if np.sum(post)!=0:
conf_int=[0.683, 0.955, 0.997]
# SORT LIKELIHOOD AND POSTERIOR FOR GETTING BEST-FIT VALUES AND CONFIDENCE INTERVALS
# sort likelihood and posterior in descending order (highest likelihood first)
sortlike= np.sort(like)[::-1]
sortpost= np.sort(post)[::-1]
# generate arrays of sorted LogZ and LogQ according to the sorted posterior
inds = post.argsort()
sortz=self.intergrid['LOGZ'][inds][::-1]
sortq=self.intergrid['LOGQ'][inds][::-1]
sumlike=np.zeros(len(sortlike))
sumpost=np.zeros(len(sortlike))
for i in range(len(sortlike)):
sumlike[i]=np.sum(sortlike[0:i+1])/np.sum(sortlike)
sumpost[i]=np.sum(sortpost[0:i+1])/np.sum(sortpost)
# CALCULATE BEST FIT METALLICITY, IONIZATION PARAMETER from JOINT POSTERIORS
# errors are given by the joint PDF confidence intervals
# THESE error definitions implement the shortest interval method
# eg. sec 2.5.2 Andrae 2010
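            # i.e. walk down the posterior-sorted grid until the enclosed probability
            # reaches 68.3%, then quote the min/max parameter values visited; this is
            # the smallest region containing that probability mass.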
short_int_jz=sortz[sumpost <= conf_int[0]]
if len(short_int_jz) > 0:
min_short_int_jz=np.min(short_int_jz)
max_short_int_jz=np.max(short_int_jz)
else:
min_short_int_jz=0.
max_short_int_jz=0.
short_int_jq=sortq[sumpost <= conf_int[0]]
if len(short_int_jq) > 0:
min_short_int_jq=np.min(short_int_jq)
max_short_int_jq=np.max(short_int_jq)
else:
min_short_int_jq=0.
max_short_int_jq=0.
dd['Z_joint']=sortz[0]+self.logohsun
dd['err_down_Z_joint']=sortz[0]-min_short_int_jz
dd['err_up_Z_joint']=max_short_int_jz-sortz[0]
dd['q_joint']=sortq[0]
dd['err_down_q_joint']=sortq[0]-min_short_int_jq
dd['err_up_q_joint']=max_short_int_jq-sortq[0]
# COMPUTE chi2
bestgrid= (self.intergrid['LOGZ'] == sortz[0]) & (self.intergrid['LOGQ'] == sortq[0])
fobs=dd['flux'][ dd['flux'] > -666]
eobs=dd['error'][ dd['flux'] > -666]
fmod=np.squeeze(self.intergrid[bestgrid]['FLUX'])[dd['flux'] > -666]
emod=epsilon2*fmod
chi2=np.sum((fobs-fmod)**2/(eobs**2+emod**2))/len(fobs)
dd['chi2']=chi2
aa={'id':self.id, 'fobs':np.array(self.flux)+np.nan, 'fmod':np.array(self.flux)+np.nan, \
'chi2_line':np.array(self.flux)+np.nan}
for i in range(nlines_in):
auxind=( dd['id']==self.id[i])
aa['fmod'][i]=np.squeeze(self.intergrid[bestgrid]['FLUX'])[auxind]
aa['fobs'][i]=dd['flux'][auxind]
aa['chi2_line'][i]=(aa['fobs'][i]-aa['fmod'][i])**2/ \
(dd['error'][auxind]**2+(epsilon2*aa['fmod'][i])**2)
# posterior for Z, marginalizing over q
postz=np.zeros(self.nz)
for j in range(self.nz):
qq=self.intergrid['LOGQ']
zz=self.intergrid['LOGZ']
# pdb.set_trace()
# integrated over q at fixed z (zz==zarr[j])
postz[j]=simps(post[zz==zarr[j]], qq[zz==zarr[j]] )
# normalize
postz=postz/np.sum(postz)
sort_post_z=np.sort(postz)[::-1]
inds = postz.argsort()
sortz_z=zarr[inds][::-1]
# WRITE MARGINALISED PDF for Z
dd['z_pdf']=postz
sumpost_sort_z=np.zeros(len(sort_post_z))
# cumulative posterior, sorted
for i in range(self.nz):
sumpost_sort_z[i]=np.sum(sort_post_z[0:i+1])
dd['Z_max']=zarr[postz == np.max(postz)]+self.logohsun # max of PDF
            dd['Z_mean']=np.sum(zarr*postz)/np.sum(postz)+self.logohsun # first moment of PDF
# These errors are NOT the same as the ones quoted in Blanc 2014
# turn_on_github_auth()
main_user = os.getenv('GITHUB_MAIN_USER')
main_pass = os.getenv('GITHUB_MAIN_PASS')
user1 = os.getenv('GITHUB_USER_1')
client_id = CLIENT_ID
client_secret = CLIENT_SECRET
main_token = GITHUB_ADMIN_TOKEN
cookies = dict(token=main_token)
ids = [
{
'name': main_user,
'type': 'user'
},
{
'name': user1,
'type': 'user'
}
]
identities = get_github_identites(ids)
auth_url = cattle_url()[:-7] + 'v1-auth/config'
data = get_github_config_data(username=None,
client_id=client_id,
secret_key=client_secret,
allowed_identities=identities,
enabled=True,
access_mode="required")
r = requests.post(auth_url, data=json.dumps(data), cookies=cookies)
assert r.ok
# Test with valid user
user1_token = GITHUB_USER1_TOKEN
cookies = dict(token=user1_token)
schemas = requests.get(cattle_url() + "schemas", cookies=cookies)
assert schemas.status_code == 200
main_client = create_github_client(username=main_user,
                                       password=main_pass,
token=main_token)
# Deactivate the user1 account
user1_account = main_client.list_account(name=user1)[0]
account = main_client.by_id("account", user1_account.id)
account.deactivate()
main_client.wait_success(account)
cookies = dict(token=user1_token)
bad_auth = requests.get(cattle_url() + "schemas", cookies=cookies)
assert bad_auth.status_code == 401
# Active the user1 account
account = main_client.by_id("account", user1_account.id)
account.activate()
main_client.wait_success(account)
cookies = dict(token=user1_token)
good_auth = requests.get(cattle_url() + "schemas", cookies=cookies)
assert good_auth.status_code == 200
# 19
@if_test_github
def test_github_purge_account(admin_client):
main_user = os.getenv('GITHUB_MAIN_USER')
main_pass = os.getenv('GITHUB_MAIN_PASS')
user2 = os.getenv('GITHUB_USER_2')
client_id = CLIENT_ID
client_secret = CLIENT_SECRET
main_token = GITHUB_ADMIN_TOKEN
cookies = dict(token=main_token)
ids = [
{
'name': main_user,
'type': 'user'
},
{
'name': user2,
'type': 'user'
}
]
identities = get_github_identites(ids)
auth_url = cattle_url()[:-7] + 'v1-auth/config'
data = get_github_config_data(username=None,
client_id=client_id,
secret_key=client_secret,
allowed_identities=identities,
enabled=True,
access_mode="required")
r = requests.post(auth_url, data=json.dumps(data), cookies=cookies)
assert r.ok
main_client = create_github_client(username=main_user,
                                       password=main_pass,
token=main_token)
# Purge user2 account
user2_account = main_client.list_account(name=user2)[0]
account = main_client.by_id("account", user2_account.id)
account.deactivate()
main_client.wait_success(account)
main_client.delete(account)
account = main_client.wait_success(account)
account.purge()
main_client.wait_success(account)
assert account.removed is not None
# 23,24,25,26,27
@if_test_github
def test_github_member_permissions(admin_client):
# turn_on_github_auth()
main_user = os.getenv('GITHUB_MAIN_USER')
main_pass = os.getenv('GITHUB_MAIN_PASS')
user1 = os.getenv('GITHUB_USER_1')
    pass1 = os.getenv('GITHUB_PASS_1')
client_id = CLIENT_ID
client_secret = CLIENT_SECRET
main_token = GITHUB_ADMIN_TOKEN
cookies = dict(token=main_token)
ids = [
{
'name': main_user,
'type': 'user'
},
{
'name': user1,
'type': 'user'
}
]
identities = get_github_identites(ids)
auth_url = cattle_url()[:-7] + 'v1-auth/config'
data = get_github_config_data(username=None,
client_id=client_id,
secret_key=client_secret,
allowed_identities=identities,
enabled=True,
access_mode="required")
r = requests.post(auth_url, data=json.dumps(data), cookies=cookies)
assert r.ok
main_client = create_github_client(username=main_user,
                                       password=main_pass,
token=main_token)
main_identity = None
for obj in main_client.list_identity():
if obj.externalIdType == 'github_user':
main_identity = obj
break
user1_token = GITHUB_USER1_TOKEN
user1_client = create_github_client(username=user1,
                                        password=pass1,
token=user1_token)
user1_identity = None
for obj in user1_client.list_identity():
if obj.externalIdType == 'github_user':
user1_identity = obj
break
# test creation of new env
project = main_client.create_project(members=[
idToMember(main_identity, 'owner'),
idToMember(user1_identity, 'member')
])
GITHUB_CLIENT.wait_success(project)
assert main_client.by_id('project', project.id) is not None
assert user1_client.by_id('project', project.id) is not None
# user1 can not change, remove, or add users
new_members = [
idToMember(main_identity, 'member'),
idToMember(user1_identity, 'owner')
]
member_project = user1_client.by_id('project', project.id)
    with pytest.raises(Exception):
        member_project.setmembers(members=new_members)
# user1 can't deactivate or remove environment
    with pytest.raises(Exception):
        dec_project = user1_client.by_id('project', project.id)
        dec_project.deactivate()
        dec_project = user1_client.by_id('project', project.id)
        assert dec_project['state'] == 'inactive'
        user1_client.delete(dec_project)
        time.sleep(5)
        project = user1_client.by_id('project', project.id)
        assert project.state == 'purged' or project.state == 'removed'
# 28
@if_test_github
def test_github_change_user_to_admin(admin_client):
main_user = os.getenv('GITHUB_MAIN_USER')
main_pass = os.getenv('GITHUB_MAIN_PASS')
user1 = os.getenv('GITHUB_USER_1')
client_id = CLIENT_ID
client_secret = CLIENT_SECRET
main_token = GITHUB_ADMIN_TOKEN
cookies = dict(token=main_token)
ids = [
{
'name': main_user,
'type': 'user'
},
{
'name': user1,
'type': 'user'
}
]
identities = get_github_identites(ids)
auth_url = cattle_url()[:-7] + 'v1-auth/config'
data = get_github_config_data(username=None,
client_id=client_id,
secret_key=client_secret,
allowed_identities=identities,
enabled=True,
access_mode="required")
r = requests.post(auth_url, data=json.dumps(data), cookies=cookies)
assert r.ok
# Test with valid user
user1_token = GITHUB_USER1_TOKEN
cookies = dict(token=user1_token)
no_admin = requests.get(cattle_url()[:-7] + '/admin/processes',
cookies=cookies)
assert no_admin.ok
main_client = create_github_client(username=main_user,
                                       password=main_pass,
token=main_token)
# change account from user to admin
user1_account = main_client.list_account(name=user1)[0]
account = main_client.by_id("account", user1_account.id)
main_client.wait_success(account)
main_client.update_by_id_account(account.id, kind='admin')
cookies = dict(token=user1_token)
admin = requests.get(cattle_url()[:-7] + '/admin/processes',
cookies=cookies)
assert admin.ok
# 29
@if_test_github
def test_github_admin_list_all_env(admin_client):
main_user = os.getenv('GITHUB_MAIN_USER')
main_pass = os.getenv('GITHUB_MAIN_PASS')
user1 = os.getenv('GITHUB_USER_1')
client_id = CLIENT_ID
client_secret = CLIENT_SECRET
main_token = GITHUB_ADMIN_TOKEN
cookies = dict(token=main_token)
ids = [
{
'name': main_user,
'type': 'user'
},
{
'name': user1,
'type': 'user'
}
]
identities = get_github_identites(ids)
auth_url = cattle_url()[:-7] + 'v1-auth/config'
data = get_github_config_data(username=None,
client_id=client_id,
secret_key=client_secret,
allowed_identities=identities,
enabled=True,
access_mode="required")
r = requests.post(auth_url, data=json.dumps(data), cookies=cookies)
assert r.ok
# Test with valid user
user1_token = GITHUB_USER1_TOKEN
cookies = dict(token=user1_token)
no_admin = requests.get(cattle_url()[:-7] + '/admin/processes',
cookies=cookies)
assert no_admin.ok
main_client = create_github_client(username=main_user,
                                       password=main_pass,
token=main_token)
# List all projects
projects = main_client.list_project()
# Create new project
project = main_client.create_project()
# change account from user to admin
user1_account = main_client.list_account(name=user1)[0]
account = main_client.by_id("account", user1_account.id)
main_client.wait_success(account)
main_client.update_by_id_account(account.id, kind='admin')
cookies = dict(token=user1_token)
admin = requests.get(cattle_url()[:-7] + '/admin/processes',
cookies=cookies)
assert admin.ok
for project in projects:
project_url = cattle_url() \
+ "/projects/" + project.id + "/projectmembers"
cookies = dict(token=user1_token)
access = requests.get(project_url, cookies=cookies)
assert access.ok
# change account from admin to user
user1_account = main_client.list_account(name=user1)[0]
account = main_client.by_id("account", user1_account.id)
main_client.wait_success(account)
main_client.update_by_id_account(account.id, kind='user')
# 30
@if_test_github
@if_do_key
def test_github_member_add_host(admin_client):
main_user = os.getenv('GITHUB_MAIN_USER')
main_pass = os.getenv('GITHUB_MAIN_PASS')
user1 = os.getenv('GITHUB_USER_1')
pass1 = os.getenv('GITHUB_PASS_1')
client_id = CLIENT_ID
client_secret = CLIENT_SECRET
main_token = GITHUB_ADMIN_TOKEN
cookies = dict(token=main_token)
ids = [
{
'name': main_user,
'type': 'user'
},
{
'name': user1,
'type': 'user'
}
]
identities = get_github_identites(ids)
auth_url = cattle_url()[:-7] + 'v1-auth/config'
data = get_github_config_data(username=None,
client_id=client_id,
secret_key=client_secret,
allowed_identities=identities,
enabled=True,
access_mode="required")
r = requests.post(auth_url, data=json.dumps(data), cookies=cookies)
assert r.ok
main_client = create_github_client(username=main_user,
                                       password=main_pass,
token=main_token)
main_identity = None
for obj in main_client.list_identity():
if obj.externalIdType == 'github_user':
main_identity = obj
break
user1_token = GITHUB_USER1_TOKEN
user1_client = create_github_client(username=user1,
                                        password=pass1,
token=user1_token)
user1_identity = None
for obj in user1_client.list_identity():
if obj.externalIdType == 'github_user':
user1_identity = obj
break
# test creation of new env
project = main_client.create_project(members=[
idToMember(main_identity, 'owner'),
idToMember(user1_identity, 'member')
])
GITHUB_CLIENT.wait_success(project)
assert main_client.by_id('project', project.id) is not None
assert user1_client.by_id('project', project.id) is not None
user1_client = create_github_client(username=user1,
                                        password=pass1,
                                        token=user1_token,
project_id=project.id)
# Add new host
host_list = \
add_digital_ocean_hosts(
user1_client, 1)
assert len(host_list) == 1
# Remove host
host = host_list[0]
deactivated_host = host.deactivate()
user1_client.wait_success(deactivated_host)
deactivated_host.remove()
all_hosts = user1_client.list_host()
for h in all_hosts:
if h.hostname == host.hostname:
assert False
# 31
@if_test_github
def test_github_create_new_env_with_restricted_member(admin_client):
main_user = os.getenv('GITHUB_MAIN_USER')
main_pass = os.getenv('GITHUB_MAIN_PASS')
user1 = os.getenv('GITHUB_USER_1')
pass1 = os.getenv('GITHUB_PASS_1')
client_id = CLIENT_ID
client_secret = CLIENT_SECRET
main_token = GITHUB_ADMIN_TOKEN
cookies = dict(token=main_token)
ids = [
{
'name': main_user,
'type': 'user'
},
{
'name': user1,
'type': 'user'
}
]
identities = get_github_identites(ids)
auth_url = cattle_url()[:-7] + 'v1-auth/config'
data = get_github_config_data(username=None,
client_id=client_id,
secret_key=client_secret,
allowed_identities=identities,
enabled=True,
access_mode="required")
r = requests.post(auth_url, data=json.dumps(data), cookies=cookies)
assert r.ok
main_client = create_github_client(username=main_user,
                                       password=main_pass,
token=main_token)
main_identity = None
for obj in main_client.list_identity():
if obj.externalIdType == 'github_user':
main_identity = obj
break
user1_token = GITHUB_USER1_TOKEN
user1_client = create_github_client(username=user1,
                                        password=pass1,
token=user1_token)
user1_identity = None
for obj in user1_client.list_identity():
if obj.externalIdType == 'github_user':
user1_identity = obj
break
# test creation of new env
default_prj_id = main_client.list_project(name=main_user+'-Default')[0].id
default_project = main_client.by_id('project', default_prj_id)
default_project.setmembers(members=[
idToMember(main_identity, 'owner'),
idToMember(user1_identity, 'restricted')
])
user1_client = create_github_client(username=user1,
                                        password=pass1,
token=user1_token,
project_id=default_prj_id)
# Add new host
with pytest.raises(AttributeError) as excinfo:
host_list = \
add_digital_ocean_hosts(
user1_client, 1)
assert len(host_list) == 1
assert "object has no attribute" in str(excinfo.value)
# 32
@if_test_github
def test_github_create_service_with_restricted_member(admin_client):
main_user = os.getenv('GITHUB_MAIN_USER')
main_pass = os.getenv('GITHUB_MAIN_PASS')
user1 = os.getenv('GITHUB_USER_1')
pass1 = os.getenv('GITHUB_PASS_1')
client_id = CLIENT_ID
client_secret = CLIENT_SECRET
main_token = GITHUB_ADMIN_TOKEN
cookies = dict(token=main_token)
ids = [
{
'name': main_user,
'type': 'user'
},
{
'name': user1,
'type': 'user'
}
]
identities = get_github_identites(ids)
auth_url = cattle_url()[:-7] + 'v1-auth/config'
data = get_github_config_data(username=None,
client_id=client_id,
secret_key=client_secret,
allowed_identities=identities,
enabled=True,
access_mode="required")
r = requests.post(auth_url, data=json.dumps(data), cookies=cookies)
assert r.ok
main_client = create_github_client(username=main_user,
                                       password=main_pass,
token=main_token)
main_identity = None
for obj in main_client.list_identity():
if obj.externalIdType == 'github_user':
main_identity = obj
break
user1_token = GITHUB_USER1_TOKEN
user1_client = create_github_client(username=user1,
                                        password=pass1,
token=user1_token)
user1_identity = None
for obj in user1_client.list_identity():
if obj.externalIdType == 'github_user':
user1_identity = obj
break
# test creation of new env
default_prj_id = main_client.list_project(name=main_user+'-Default')[0].id
default_project = main_client.by_id('project', default_prj_id)
default_project.setmembers(members=[
idToMember(main_identity, 'owner'),
idToMember(user1_identity, 'restricted')
])
main_client = create_github_client(username=main_user,
                                       password=main_pass,
token=main_token,
project_id=default_prj_id)
user1_client = create_github_client(username=user1,
                                        password=pass1,
token=user1_token,
project_id=default_prj_id)
# Add new host
hosts = user1_client.list_host(
kind='docker', removed_null=True, state="active")
if len(hosts) == 0:
host_list = \
add_digital_ocean_hosts(
main_client, 1)
assert len(host_list) == 1
launch_config = {"image": TEST_IMAGE_UUID}
scale = 1
create_env_and_svc(user1_client, launch_config, scale)
# 33,34
@if_test_github
def test_github_create_new_env_with_readonly_member(admin_client):
main_user = os.getenv('GITHUB_MAIN_USER')
main_pass = os.getenv('GITHUB_MAIN_PASS')
user1 = os.getenv('GITHUB_USER_1')
pass1 = os.getenv('GITHUB_PASS_1')
client_id = CLIENT_ID
client_secret = CLIENT_SECRET
main_token = GITHUB_ADMIN_TOKEN
cookies = dict(token=main_token)
ids = [
{
'name': main_user,
'type': 'user'
},
{
'name': user1,
'type': 'user'
}
]
identities = get_github_identites(ids)
auth_url = cattle_url()[:-7] + 'v1-auth/config'
data = get_github_config_data(username=None,
| |
import numpy as np
import cv2, serial, time, os, sys
from scipy import stats
def getSerialPort(intASCII): # intASCII is the integer which can be recognized as a correct signal (e.g. laser level) by our Arduino UNO
for portNum in range(1, 11):
try:
portName = 'COM' + str(portNum)
ser = serial.Serial(portName, 9600)
time.sleep(2) # Always wait for a couple seconds to make sure the serial is connected, otherwise following codes may get stuck
ser.write(chr(intASCII))
timezero = time.time()
elapsedtime = 0.0
while ser.inWaiting() == 0 and elapsedtime < 0.1: # Wait until the Arduino gets back a serial signal and also the timeout is 0.1 second
time.sleep(0.001) # Somehow attenuating the loop speed prevents stuck
elapsedtime = time.time() - timezero
if elapsedtime < 0.1: # This condition is critical. If the elapsed time > 0.1, then the opened serial port is not Arduino and the following ser.read() will get stuck
if ser.read() == chr(0): # If a 0 (ASCII form) is sent back from the port, then it's really our Arduino UNO
ser.close()
return portName
except:
pass
return None
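# Example usage (a sketch only; the handshake byte and baud rate must match the
# firmware actually flashed on the Arduino):
#
#     port = getSerialPort(1)
#     if port is not None:
#         ser = serial.Serial(port, 9600)
#         time.sleep(2)                  # wait for the board to finish resetting
#         level = serialSignal(ser, 1)   # send the byte and read back the reply
#         ser.close()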
def detectCamOperable(camIndexR, camIndexL, imgLongSide, imgShortSide):
# Detect if camIndexR and camIndexL are operable cameras
# Two criteria:
# (1) camIndexR and camIndexL can both be opened by OpenCV
# (2) camIndexR and camIndexL can both be set to the correct image size
# (1) and (2)
indices = [camIndexR, camIndexL]
for i in range(2):
cam = cv2.VideoCapture(indices[i])
if cam.isOpened():
cam.set(3, imgLongSide) # Set width
cam.set(4, imgShortSide) # Set height
img = cam.read()[1]
cam.release()
if img.shape[0] != imgShortSide or img.shape[1] != imgLongSide:
return False
else:
return False
return True
def serialSignal(serObj, intASCII):
serObj.write(chr(intASCII)) # Convert int to ASCII character and write to Arduino
while serObj.inWaiting() == 0: # Wait until the Arduino gets back a serial signal
pass
return ord(serObj.read()) # Clean up the serial signal in the buffer, and convert from ASCII character to int
def configCam(camObj, width, height, brightness, contrast, saturation, hue, gain, exposure, whiteBalance, focus):
# Logitech C525 camera
camObj.set(3, width)
camObj.set(4, height)
camObj.set(10, brightness)
camObj.set(11, contrast)
camObj.set(12, saturation)
camObj.set(13, hue)
camObj.set(14, gain)
camObj.set(15, exposure)
camObj.set(17, whiteBalance)
camObj.set(28, focus)
def combineImg(img1, img2):
if img1.shape == img2.shape:
height, width, channels = img1.shape
imgShow = np.zeros((height, width*2, channels), dtype=np.uint8) + 255
imgShow[:, 0:width , :] = img1
imgShow[:, width:(width*2), :] = img2
return imgShow
def laserPosition(img, laserHalfWidth):
height, width = img.shape
hw = laserHalfWidth
imgD = np.zeros((height, width), np.float)
imgD[:,:] = img[:,:]
z_array = np.zeros(height, np.float) # create an array of center points (that is z position) of the laser along the y axis
for y in range(height):
z_max = imgD[y,:].argmax() # max point of horizontal line y
pos = np.arange(z_max-hw, z_max+hw, dtype = np.float) # the position array around the max point
B = imgD[y, (z_max-hw):(z_max+hw)] # the intensity array around the max point, which could be a null object
if not B.shape[0] == 0 and pos.shape == B.shape:
B = B - B.min() # maximize the difference between the high and low intensity points
PMF = B / B.sum() # convert the intensity array to probability mass function (PMF)
z_array[y] = (pos * PMF).sum() # center point = expectance = x_0*PMF(x_0) + x_1*PMF(x_1) + ... + x_n*PMF(x_n)
else:
z_array[y] = z_max # if expectance cannot be calculated, use max as the center point
return z_array
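# Worked example of the sub-pixel estimate above: if the intensities around a row's
# maximum are B = [2, 8, 2] at columns pos = [4, 5, 6], then B - min(B) = [0, 6, 0],
# the PMF is [0, 1, 0], and the expected center is 4*0 + 5*1 + 6*0 = 5.0.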
def laserBrightness(img):
height, width = img.shape
imgD = np.zeros((height, width), np.float)
imgD[:,:] = img[:,:]
bright_array = np.zeros(height, np.float) # create an array of brightness of the laser along the y axis
for y in range(height):
bright_array[y] = imgD[y,:].max()
return bright_array
def thresholdTopograph(topoMat, brightMat, laserThres):
if topoMat.shape != brightMat.shape:
return
rowN, colN = topoMat.shape
newMat = np.zeros((rowN, colN), np.float)
newMat[:,:] = topoMat[:,:]
for iRow in range(rowN):
for iCol in range(colN):
if brightMat[iRow, iCol] < laserThres:
newMat[iRow, iCol] = 0
return newMat
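# Note: the element-wise loop above is equivalent to the vectorized form
#     newMat = np.where(brightMat < laserThres, 0.0, topoMat)
# which may be preferable for large grids.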
def removeHalfLaser(mat, laserHalfwidth, isRightCam):
hw = laserHalfwidth
rowN, colN = mat.shape
newMat = np.zeros((rowN, colN), np.float)
newMat[:,:] = mat[:,:]
if not isRightCam:
newMat = newMat[:,::-1]
for r in range(rowN):
c = hw
while c < colN - hw:
if newMat[r, c] == 0 and newMat[r, c-1] > 0:
newMat[r, (c-hw):c] = 0 # from (c-hw) to (c-1)
c += 1
elif newMat[r, c] == 0 and newMat[r, c+1] > 0:
newMat[r, (c+1):(c+hw)] = 0 # from (c+1) to (c+hw-1)
c = c + hw
else:
c += 1
if not isRightCam:
newMat = newMat[:,::-1]
return(newMat)
def mergeTopographs(ZR, ZL):
if ZR.shape != ZL.shape:
return
rows, cols = ZR.shape
outputZ = np.zeros((rows, cols), np.float)
for r in range(rows):
for c in range(cols):
if ZR[r, c] != 0 and ZL[r, c] != 0:
outputZ[r, c] = (ZR[r, c] + ZL[r, c])/2
elif ZR[r, c] != 0 and ZL[r, c] == 0:
outputZ[r, c] = ZR[r, c]
elif ZR[r, c] == 0 and ZL[r, c] != 0:
outputZ[r, c] = ZL[r, c]
return outputZ
def generateHeatMap(mat):
return( (mat/mat.max()*255).astype(np.uint8) )
def fillEmptyWell(mat, edgeMin):
rowN, colN = mat.shape
newMat = np.zeros((rowN, colN), np.float)
newMat[:,:] = mat[:,:]
newMat[:,0] = edgeMin
newMat[:,colN-1] = edgeMin
newMat[0,:] = edgeMin
newMat[rowN-1,:] = edgeMin
for iRow in range(rowN):
for iCol in range(colN):
if newMat[iRow, iCol] == 0:
preRow = iRow
while newMat[preRow, iCol] == 0 and preRow > 0:
preRow -= 1
postRow = iRow
while newMat[postRow, iCol] == 0 and postRow < rowN-1:
postRow += 1
preCol = iCol
while newMat[iRow, preCol] == 0 and preCol > 0:
preCol -= 1
postCol = iCol
while newMat[iRow, postCol] == 0 and postCol < colN-1:
postCol += 1
interpolateV = newMat[preRow, iCol] + (newMat[postRow, iCol] - newMat[preRow, iCol])*(iRow-preRow)/(postRow-preRow)
interpolateH = newMat[iRow, preCol] + (newMat[iRow, postCol] - newMat[iRow, preCol])*(iCol-preCol)/(postCol-preCol)
newMat[iRow, iCol] = interpolateH # Use horizontal interpolation but not the vertical one
return(newMat)
def removeOutlier(mat, windowRange, meanThres, noiseThres):
w = windowRange
rowN, colN = mat.shape
newMat = np.zeros((rowN, colN), np.float)
newMat[:,:] = mat[:,:]
diffMat = np.zeros((rowN, colN), np.float)
for iRow in range(1, rowN):
for iCol in range(1, colN):
diffMat[iRow, iCol] = (mat[iRow, iCol] - mat[iRow-1, iCol]) + (mat[iRow, iCol] - mat[iRow, iCol-1])
for iRow in range(w, rowN-w):
for iCol in range(w, colN-w):
meanDeviace = mat[iRow, iCol] - np.average(mat[(iRow-w):(iRow+w), (iCol-w):(iCol+w)])
avgAbsDiff = np.average( np.absolute( diffMat[(iRow-w):(iRow+w), (iCol-w):(iCol+w)] ) )
absAvgDiff = abs( np.average( diffMat[(iRow-w):(iRow+w), (iCol-w):(iCol+w)] ) )
noiseLevel = avgAbsDiff - absAvgDiff
if abs(meanDeviace) > meanThres and noiseLevel > noiseThres:
newMat[iRow, iCol] = 0
return(newMat)
def generateSTL(filename, topoMat):
rowN, colN, dimensions = topoMat.shape
M = np.zeros((rowN, colN, dimensions), np.float)
M[:,:,:] = topoMat[:,:,:]
M[0,:,2] = 0
M[:,0,2] = 0
M[rowN-1,:,2] = 0
M[:,colN-1,2] = 0
with open(filename, 'w') as STLfile:
STLfile.write('solid name')
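        # ASCII STL describes each triangle as
        #   facet normal nx ny nz / outer loop / vertex x y z (x3) / endloop / endfacet
        # and the loop below emits two such triangles per grid cell.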
for r in range(rowN-1):
for c in range(colN-1):
STL = ''
STL = STL + '\nfacet normal 0 0 0'
STL = STL + '\nouter loop'
STL = STL + '\nvertex ' + str(M[r,c,0]) + ' ' + str(M[r,c,1]) + ' ' + str(M[r,c,2])
STL = STL + '\nvertex ' + str(M[r+1,c+1,0]) + ' ' + str(M[r+1,c+1,1]) + ' ' + str(M[r+1,c+1,2])
STL = STL + '\nvertex ' + str(M[r,c+1,0]) + ' ' + str(M[r,c+1,1]) + ' ' + str(M[r,c+1,2])
STL = STL + '\nendloop\nendfacet'
STL = STL + '\nfacet normal 0 0 0'
STL = STL + '\nouter loop'
STL = STL + '\nvertex ' + str(M[r,c,0]) + ' ' + str(M[r,c,1]) + ' ' + str(M[r,c,2])
STL = STL + '\nvertex ' + str(M[r,c,0]) + ' ' + str(M[r+1,c,1]) + ' ' + str(M[r+1,c,2])
STL = STL + '\nvertex ' + str(M[r+1,c+1,0]) + ' ' + str(M[r+1,c+1,1]) + ' ' + str(M[r+1,c+1,2])
STL = STL + '\nendloop\nendfacet'
STLfile.write(STL)
r += 1
c += 1
STL = ''
STL = STL + '\nfacet normal 0 0 0'
STL = STL + '\nouter loop'
STL = STL + '\nvertex ' + str(M[0,0,0]) + ' ' + str(M[0,0,1]) + ' ' + str(M[0,0,2])
STL = STL + '\nvertex ' + str(M[r,c,0]) + ' ' + str(M[r,c,1]) + ' ' + str(M[0,0,2])
STL = STL + '\nvertex ' + | |
plotdir = None, freecolor=False, photometry_db = __default_photometry_db__, specification = {}, cuts = stdCalibrationCuts):
filter='%s-%s' % (filterPrefix, stdfilter)
filterInfo = filter_info[stdfilter]
sdss_names = SDSSNames(filterInfo)
mag_name = 'SEx_MAG_AUTO'
magerr_name ='SEx_MAGERR_AUTO'
goodObjs = cat.filter(cuts(cat, cat[mag_name], sdss_names, filterInfo['color1cut']))
print 'goodobjs = ', len(goodObjs)
if freecolor:
calibrationData = CalibrationData(mag = goodObjs[mag_name] - __3sec_zp__,
mag_err = goodObjs[magerr_name],
refmag = goodObjs[sdss_names.sdss_mag],
refmag_err = goodObjs[sdss_names.sdss_magerr],
color = goodObjs[sdss_names.sdss_color],
colorerr = goodObjs[sdss_names.sdss_color_err])
else:
if filter not in colorterms:
sys.stderr.write('Unknown Filter, Skipping: %s\n' % filter)
return
colorterm = colorterms[filter]
calibrationData = CalibrationData(mag = goodObjs[mag_name] - __3sec_zp__,
mag_err = goodObjs[magerr_name],
refmag = goodObjs[sdss_names.sdss_mag],
refmag_err = goodObjs[sdss_names.sdss_magerr],
color = goodObjs[sdss_names.sdss_color],
colorerr = goodObjs[sdss_names.sdss_color_err],
colorterm = colorterm[0])
fitresults = calibrationData.calibrate()
if fitresults is None:
        sys.stderr.write('Error in Calibration of %s %s\n' % (cluster, filter))
return
aperture_filter = '%s_3sec' % filter
print '%s %s: %s' % (cluster, aperture_filter, str(fitresults))
if photometry_db:
saveCalibration(cluster, filter=aperture_filter, fitResults =fitresults, photometry_db = photometry_db, specification = specification)
if plotdir is not None:
if not os.path.exists(plotdir):
os.mkdir(plotdir)
if freecolor:
            title = 'Calibration with Free Color'
else:
title = 'Calibration with Fixed Pickles Color'
plotCalibrationResiduals(calibrationData, fitresults,
title = title,
color_label=sdss_names.sdss_color,
residual_label='%s - %s - %3.2f - %3.3f*%s' % (sdss_names.sdss_mag,
aperture_filter,
fitresults.zp,
fitresults.colorterm,
sdss_names.sdss_color))
pylab.show()
pylab.savefig('%s/%s.ps' % (plotdir, aperture_filter))
pylab.clf()
plotCalibrationPull(calibrationData, fitresults,
title = title,
color_label=sdss_names.sdss_color,
residual_label='%s - %s - %3.2f - %3.3f*%s' % (sdss_names.sdss_mag,
aperture_filter,
fitresults.zp,
fitresults.colorterm,
sdss_names.sdss_color))
pylab.show()
pylab.savefig('%s/%s_%s.ps' % (plotdir, aperture_filter,'pull'))
pylab.clf()
plotCalibrationMag(calibrationData, fitresults,
title = 'Sloan - Subaru vs. Sloan mag',
color_label=sdss_names.sdss_mag,
residual_label='%s - %s - %3.2f' % (sdss_names.sdss_mag,
aperture_filter,
fitresults.zp))
pylab.show()
pylab.savefig('%s/%s_%s.ps' % (plotdir, aperture_filter,'mag'))
pylab.clf()
makeCutPlots(cat, fitresults, sdss_names, mag_name, magerr_name, filterInfo['color1cut'],colorterm )
pylab.savefig('%s/%s_%s.ps' % (plotdir, aperture_filter,'cuts'))
pylab.clf()
xcat=goodObjs['SEx_Xpos']
ycat=goodObjs['SEx_Ypos']
plotMagPosition(calibrationData, fitresults, xcat, ycat,
title = 'Sloan - Subaru vs. Sloan mag',
color_label=sdss_names.sdss_mag)
pylab.savefig('%s/%s_%s.ps' % (plotdir, aperture_filter,'position'))
pylab.clf()
return fitresults
##########################
def specialCalibration(maindir, cluster, filter, photometry_db = __default_photometry_db__, specification = {}):
instrum, config, chipid, stdfilter = utilities.parseFilter(filter)
imagefile = '%(maindir)s/%(cluster)s/%(filter)s/SCIENCE/coadd_%(cluster)s_all/coadd.fits' % { \
'maindir' : maindir,
'cluster' : cluster,
'filter' : stdfilter}
zp = pyfits.getval(imagefile, 'MAGZP')
print zp
if photometry_db:
calib = photometry_db.registerSpecialFiltersCalibration(cluster = cluster, filter = filter, file=imagefile, zp=zp, **specification)
photometry_db.updateCalibration(cluster, filter = filter, calibration = calib, **specification)
return zp
###################################################
### MAIN
###################################################
def main(argv = sys.argv,
standardCalibration = standardCalibration,
threeSecondCalibration = threeSecondCalibration,
specialCalibration = specialCalibration,
photometry_db = __default_photometry_db__):
###
def parse_spec(option, opt, value, parser):
key, val = value.split('=')
if not hasattr(parser.values, 'specification'):
setattr(parser.values, 'specification', {})
parser.values.specification[key] = val
###
parser = optparse.OptionParser()
parser.add_option('-c', '--cluster', dest='cluster', help='Cluster name')
parser.add_option('-i', '--inputcat',
dest='catfile',
help='catalog for use in calibration')
parser.add_option('-f', '--filtername',
dest='filter',
help='Filter to calibrate')
parser.add_option('-p', '--plotdir',
dest='plotdir',
help='Directory to save plots')
parser.add_option('-t', '--chipid',
dest='chipid',
help='Chip id used in measurement')
parser.add_option('-3', '--threesec',
dest='threesec',
action='store_true',
help='Treat as a 3second exposure',
default=False)
parser.add_option('-s', '--special',
dest='special',
action='store_true',
help='Treat as a special exposure',
default=False)
parser.add_option('-m', '--maindir',
dest='maindir',
help='subaru directory')
parser.add_option('--free-color',
dest='freecolor',
action='store_true',
help='Allow color term to be free!',
default=False)
parser.add_option('--no-save',
dest='saveCalib',
action='store_false',
help='Do not save fits to database',
default = True)
parser.add_option('--spec', dest='specification',
action='callback',
type= 'string',
help='key=val set determines the uniqueness of this calibration',
default = {},
callback = parse_spec)
parser.add_option('-n', '--fluxtype',
dest='fluxtype',
help='Type of flux/mag to calibrate, ie. FLUX_(XXXX)',
default=__default_fluxtype__)
options, args = parser.parse_args(argv)
print "Called with:"
print options
if not options.special and options.catfile is None:
parser.error('Need to specify catalog file!')
if options.cluster is None:
parser.error('Need to specify cluster!')
if options.filter is None:
parser.error('Need to specify filter')
if options.threesec and options.special:
parser.error('Cannot treat as 3sec and special')
if options.threesec and not options.chipid:
parser.error('Need a config type for this obs')
if options.special and options.maindir is None:
parser.error('Need to specify main directory')
if not options.saveCalib:
photometry_db = None
if options.special:
specialCalibration(options.maindir,
options.cluster, options.filter,
photometry_db = photometry_db, specification = options.specification)
else:
cat = ldac.openObjectFile(options.catfile, 'PSSC')
if options.threesec:
threeSecondCalibration(options.cluster,
options.filter,
options.chipid,
cat,
plotdir = options.plotdir,
freecolor=options.freecolor,
photometry_db = photometry_db,
specification = options.specification)
else:
standardCalibration(options.cluster, options.filter, cat,
fluxtype = options.fluxtype,
plotdir=options.plotdir,freecolor=options.freecolor,
photometry_db = photometry_db,
specification = options.specification,
cuts = basicCalCuts)
############
# Plotting
###########
def plotCalibrationResiduals(calibrationData, fitResults,
color_label = None,
residual_label = None,
title = None):
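    # The assumed calibration model is mag - refmag ~ zp + colorterm*color; the plotted
    # residual subtracts the fitted zp (and the color term, unless it was held fixed in
    # the fit, in which case calibrationData.vals() is assumed to account for it already).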
color = calibrationData.color
if fitResults.fixedcolor:
residuals = calibrationData.vals() - fitResults.zp
else:
residuals = calibrationData.vals() - fitResults.colorterm*calibrationData.color - fitResults.zp
errs = calibrationData.errs()
pylab.errorbar(color, residuals, errs, fmt='b.')
#pylab.axis([0,10000,-1.,1.])
pylab.axhline(0, color='r')
if color_label:
pylab.xlabel(color_label)
if residual_label:
pylab.ylabel(residual_label)
if title:
pylab.title(title)
#######################
def plotCalibrationPull(calibrationData, fitResults,
color_label = None,
residual_label = None,
title = None):
color = calibrationData.color
if fitResults.fixedcolor:
residuals = calibrationData.vals() - fitResults.zp
else:
residuals = calibrationData.vals() - fitResults.colorterm*calibrationData.color - fitResults.zp
errs = calibrationData.errs()
pulls = residuals / errs
pylab.hist(pulls,bins=100,range=(-8,8))
# pylab.errorbar(color, residuals, errs, fmt='b.')
# pylab.axhline(0, color='r')
    if residual_label:
        pylab.xlabel(residual_label)
if title:
pylab.title(title)
print 'made pull plot'
pylab.show()
########################
def plotCalibrationMag(calibrationData, fitResults,
color_label = None,
residual_label = None,
title = None):
if fitResults.fixedcolor:
residuals = calibrationData.vals() - fitResults.zp
else:
residuals = calibrationData.vals() - fitResults.colorterm*calibrationData.color - fitResults.zp
errs = calibrationData.errs()
smag = calibrationData.refmag
# print sub_m_sln
# print 'smag = '
# print smag
pylab.errorbar(smag, residuals, errs, fmt='b.')
# pylab.axis([0,10000,-1.,1.])
pylab.axhline(0, color='r')
if color_label:
pylab.xlabel(color_label)
if residual_label:
pylab.ylabel(residual_label)
if title:
pylab.title(title)
print 'made mag plot'
pylab.show()
########################
def plotMagPosition(calibrationData, fitResults,xcat,ycat,
color_label = None,
residual_label = None,
title = None):
if fitResults.fixedcolor:
residuals = calibrationData.vals() - fitResults.zp
else:
residuals = calibrationData.vals() - fitResults.colorterm*calibrationData.color - fitResults.zp
errs = calibrationData.errs()
pylab.subplot(2,1,1)
pylab.errorbar(xcat, residuals, errs, fmt='b.')
pylab.axis([0,10000,-0.4,0.4])
pylab.axhline(0, color='r')
if color_label:
pylab.xlabel('x position')
if residual_label:
pylab.ylabel('')
if title:
pylab.title('')
pylab.subplot(2,1,2)
pylab.errorbar(ycat, residuals, errs, fmt='b.')
pylab.axis([0,10000,-0.8,0.8])
pylab.axhline(0, color='r')
if color_label:
pylab.xlabel('y position')
if residual_label:
pylab.ylabel(residual_label)
if title:
pylab.title('')
print 'made mag plot'
pylab.show()
########################
def makeCutPlots(cat, results, names, mag_name, magerr_name ,color_function , colorterm, iaper = -1):
cuts=[]
pylab.figure(1)
cuts.append( (numpy.logical_not(color_function(cat[names.sdss_color])))) # color cut
peakvals = cat['SEx_BackGr'] + cat['SEx_MaxVal']
cuts.append(numpy.logical_not(peakvals < 20000)) # Saturation Cut
cuts.append(numpy.logical_not(cat['SEx_Flag']==0)) # Flag
cuts.append(numpy.logical_not(cat['Clean'] == 1)) # Clean
titles=[]
titles.append('colorcut')
titles.append('Saturation Cut')
titles.append('Flag')
titles.append('Clean')
for i in range(len(cuts)):
print 'iaper is', iaper
if iaper>=0:
theseobjs = cat.filter(numpy.logical_and(cuts[i],numpy.abs(cat[mag_name][:,iaper])<80))
cutData = CalibrationData(mag = theseobjs[mag_name][:,iaper],
mag_err = theseobjs[magerr_name][:,iaper],
refmag = theseobjs[names.sdss_mag],
refmag_err = theseobjs[names.sdss_magerr],
color = theseobjs[names.sdss_color],
colorerr = theseobjs[names.sdss_color_err],
colorterm = colorterm[0])
else:
theseobjs = cat.filter(numpy.logical_and(cuts[i],numpy.abs(cat[mag_name])<80))
cutData = CalibrationData(mag = theseobjs[mag_name] - __3sec_zp__,
mag_err = theseobjs[magerr_name],
refmag = theseobjs[names.sdss_mag],
refmag_err = theseobjs[names.sdss_magerr],
color = theseobjs[names.sdss_color],
colorerr = theseobjs[names.sdss_color_err],
colorterm = colorterm[0])
smag = cutData.refmag
sub_m_sln = cutData.mag - (cutData.refmag -results.zp )
errs = cutData.errs()
# print titles[i]
print 'smag = ',smag
print 'sub_m_sln = ', sub_m_sln
print 'err = ', errs
smag2=[]
sub2=[]
err2=[]
for j in range(len(smag)):
if smag[j]>0:
smag2.append(smag[j])
sub2.append(sub_m_sln[j])
err2.append(errs[j])
smag=smag2
sub_m_sln=sub2
errs = err2
if len(smag):
pylab.subplot(2,3,i+1)
pylab.errorbar(smag, sub_m_sln, errs, fmt='b.')
pylab.axhline(0, color='r')
pylab.xlabel(names.sdss_mag,fontsize='small')
pylab.ylabel(titles[i])
pylab.show()
################################
### TESTING
################################
class TestingDBEntry(object):
def __init__(self, id, **fields):
self.id = id
self.fields = fields
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError
###
class TestingDatabase(object):
def __init__(self):
self.reset()
def reset(self):
self.photoentries = []
self.calibrations = []
self.specialentries = []
def registerPhotometricCalibration(self, cluster, fitresults, **specification):
self.photoentries.append(TestingDBEntry(len(self.photoentries), cluster = cluster, fitresults = fitresults, **specification))
return self.photoentries[-1]
def registerSpecialFiltersCalibration(self, cluster, file, zp, **specification):
self.specialentries.append(TestingDBEntry(len(self.specialentries), cluster = cluster, file = file, zp = zp, **specification))
return self.specialentries[-1]
def updateCalibration(self, cluster, calibration, **specification):
self.calibrations.append(TestingDBEntry(len(self.calibrations), cluster = cluster, calibration = calibration, **specification))
####################
class TestRegularCalib(unittest.TestCase):
def setUp(self):
self.db = TestingDatabase()
####
def testStdZP(self):
filterName = 'SUBARU-10_2-1-W-J-V'
pickles = ldac.openObjectFile('Pickles.cat', 'PICKLES')
pickles_sdss = ldac.openObjectFile('Pickles.cat', 'SDSS')
sample = numpy.random.randint(0, len(pickles), 100)
targetZP = 27.15
seqnr = pyfits.Column(name = 'SeqNr', format = 'K', array = numpy.arange(100))
mags = pyfits.Column(name = 'SEx_MAG_APER1-%s' % filterName,
format = 'E',
array = pickles[filterName][sample] + targetZP)
magerrs = pyfits.Column(name = 'SEx_MAGERR_APER1-%s' % filterName,
format = 'E',
array = 0.05 * numpy.ones(100))
sdss = pyfits.Column(name = 'gmag', format = 'E', array = pickles_sdss['gp'][sample])
sdsserr = pyfits.Column(name = 'gerr', format = 'E', array = 0.1 * numpy.ones(100))
sdsscolor = pyfits.Column(name = 'gmr', format = 'E', array = pickles_sdss['gp'][sample] - pickles_sdss['rp'][sample])
sdsscolorerr = pyfits.Column(name | |
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Swiss army tool for convolutions."""
import functools
import tensorflow.compat.v1 as tf
from tensorflow_compression.python.layers import parameterizers
from tensorflow_compression.python.ops import padding_ops
__all__ = [
"SignalConv1D",
"SignalConv2D",
"SignalConv3D",
]
class _SignalConv(tf.keras.layers.Layer):
"""{rank}D convolution layer.
This layer creates a filter kernel that is convolved or cross correlated with
the layer input to produce an output tensor. The main difference of this class
to `tf.layers.Conv{rank}D` is how padding, up- and downsampling, and alignment
is handled. It supports much more flexible options for structuring the linear
transform.
In general, the outputs are equivalent to a composition of:
1. an upsampling step (if `strides_up > 1`)
2. a convolution or cross correlation
3. a downsampling step (if `strides_down > 1`)
4. addition of a bias vector (if `use_bias == True`)
5. a pointwise nonlinearity (if `activation is not None`)
For more information on what the difference between convolution and cross
correlation is, see [this](https://en.wikipedia.org/wiki/Convolution) and
[this](https://en.wikipedia.org/wiki/Cross-correlation) Wikipedia article,
respectively. Note that the distinction between convolution and cross
correlation is occasionally blurred (one may use convolution as an umbrella
term for both). For a discussion of up-/downsampling, refer to the articles
about [upsampling](https://en.wikipedia.org/wiki/Upsampling) and
[decimation](https://en.wikipedia.org/wiki/Decimation_(signal_processing)). A
more in-depth treatment of all of these operations can be found in:
> "Discrete-Time Signal Processing"<br />
  > Oppenheim, Schafer (Prentice Hall)
For purposes of this class, the center position of a kernel is always
considered to be at `K // 2`, where `K` is the support length of the kernel.
This implies that in the `'same_*'` padding modes, all of the following
operations will produce the same result if applied to the same inputs, which
is not generally true for convolution operations as implemented by
`tf.nn.convolution` or `tf.layers.Conv?D` (numbers represent kernel
coefficient values):
- convolve with `[1, 2, 3]`
- convolve with `[0, 1, 2, 3, 0]`
- convolve with `[0, 1, 2, 3]`
- correlate with `[3, 2, 1]`
- correlate with `[0, 3, 2, 1, 0]`
- correlate with `[0, 3, 2, 1]`
Available padding (boundary handling) modes:
- `'valid'`: This always yields the maximum number of output samples that can
be computed without making any assumptions about the values outside of the
support of the input tensor. The padding semantics are always applied to the
inputs. In contrast, even though `tf.nn.conv2d_transpose` implements
upsampling, in `'VALID'` mode it will produce an output tensor with *larger*
support than the input tensor (because it is the transpose of a `'VALID'`
downsampled convolution).
Examples (numbers represent indexes into the respective tensors, periods
represent skipped spatial positions):
`kernel_support = 5` and `strides_down = 2`:
```
inputs: |0 1 2 3 4 5 6 7 8|
outputs: | 0 . 1 . 2 |
```
```
inputs: |0 1 2 3 4 5 6 7|
outputs: | 0 . 1 . |
```
`kernel_support = 3`, `strides_up = 2`, and `extra_pad_end = True`:
```
inputs: |0 . 1 . 2 . 3 . 4 .|
outputs: | 0 1 2 3 4 5 6 7 |
```
`kernel_support = 3`, `strides_up = 2`, and `extra_pad_end = False`:
```
inputs: |0 . 1 . 2 . 3 . 4|
outputs: | 0 1 2 3 4 5 6 |
```
- `'same_zeros'`: Values outside of the input tensor support are assumed to be
zero. Similar to `'SAME'` in `tf.nn.convolution`, but with different
padding. In `'SAME'`, the spatial alignment of the output depends on the
input shape. Here, the output alignment depends only on the kernel support
and the strides, making alignment more predictable. The first sample in the
output is always spatially aligned with the first sample in the input.
Examples (numbers represent indexes into the respective tensors, periods
represent skipped spatial positions):
`kernel_support = 5` and `strides_down = 2`:
```
inputs: |0 1 2 3 4 5 6 7 8|
outputs: |0 . 1 . 2 . 3 . 4|
```
```
inputs: |0 1 2 3 4 5 6 7|
outputs: |0 . 1 . 2 . 3 .|
```
`kernel_support = 3`, `strides_up = 2`, and `extra_pad_end = True`:
```
inputs: |0 . 1 . 2 . 3 . 4 .|
outputs: |0 1 2 3 4 5 6 7 8 9|
```
`kernel_support = 3`, `strides_up = 2`, and `extra_pad_end = False`:
```
inputs: |0 . 1 . 2 . 3 . 4|
outputs: |0 1 2 3 4 5 6 7 8|
```
- `'same_reflect'`: Values outside of the input tensor support are assumed to
be reflections of the samples inside. Note that this is the same padding as
implemented by `tf.pad` in the `'REFLECT'` mode (i.e. with the symmetry axis
on the samples rather than between). The output alignment is identical to
the `'same_zeros'` mode.
Examples: see `'same_zeros'`.
When applying several convolutions with down- or upsampling in a sequence,
it can be helpful to keep the axis of symmetry for the reflections
consistent. To do this, set `extra_pad_end = False` and make sure that the
input has length `M`, such that `M % S == 1`, where `S` is the product of
stride lengths of all subsequent convolutions. Example for subsequent
downsampling (here, `M = 9`, `S = 4`, and `^` indicate the symmetry axes
for reflection):
```
inputs: |0 1 2 3 4 5 6 7 8|
intermediate: |0 . 1 . 2 . 3 . 4|
outputs: |0 . . . 1 . . . 2|
^ ^
```
Note that due to limitations of the underlying operations, not all
combinations of arguments are currently implemented. In this case, this class
will throw a `NotImplementedError` exception.
Speed tips:
- Prefer combining correlations with downsampling, and convolutions with
upsampling, as the underlying ops implement these combinations directly.
- If that isn't desirable, prefer using odd-length kernel supports, since
odd-length kernels can be flipped if necessary, to use the fastest
implementation available.
- Combining upsampling and downsampling (for rational resampling ratios)
is relatively slow, because no underlying ops exist for that use case.
Downsampling in this case is implemented by discarding computed output
values.
- Note that `channel_separable` is only implemented for 1D and 2D. Also,
upsampled channel-separable convolutions are currently only implemented for
`filters == 1`. When using `channel_separable`, prefer using identical
strides in all dimensions to maximize performance.
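  Usage sketch (illustrative only; it assumes the concrete subclasses defined
  below are exposed publicly, e.g. as `tensorflow_compression.SignalConv2D`):
  ```
  import tensorflow.compat.v1 as tf
  import tensorflow_compression as tfc

  # 2D cross correlation with 2x downsampling and zero padding.
  layer = tfc.SignalConv2D(
      32, (5, 5), corr=True, strides_down=2,
      padding="same_zeros", use_bias=True, activation=tf.nn.relu)
  x = tf.random.uniform((8, 64, 64, 3))  # batch of 64x64, 3-channel inputs
  y = layer(x)                           # spatial size halved: (8, 32, 32, 32)
  ```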
"""
def __init__(self, filters, kernel_support,
corr=False, strides_down=1, strides_up=1, padding="valid",
extra_pad_end=True, channel_separable=False,
data_format="channels_last",
activation=None, use_bias=False, use_explicit=True,
kernel_initializer=tf.initializers.variance_scaling(),
bias_initializer=tf.initializers.zeros(),
kernel_regularizer=None, bias_regularizer=None,
kernel_parameterizer=parameterizers.RDFTParameterizer(),
bias_parameterizer=None,
**kwargs):
"""Initializer.
Args:
filters: Integer. If `not channel_separable`, specifies the total number
of filters, which is equal to the number of output channels. Otherwise,
specifies the number of filters per channel, which makes the number of
output channels equal to `filters` times the number of input channels.
kernel_support: An integer or iterable of {rank} integers, specifying the
length of the convolution/correlation window in each dimension.
corr: Boolean. If True, compute cross correlation. If False, convolution.
strides_down: An integer or iterable of {rank} integers, specifying an
optional downsampling stride after the convolution/correlation.
strides_up: An integer or iterable of {rank} integers, specifying an
optional upsampling stride before the convolution/correlation.
padding: String. One of the supported padding modes (see above).
extra_pad_end: Boolean. When upsampling, use extra skipped samples at the
end of each dimension (default). For examples, refer to the discussion
of padding modes above.
channel_separable: Boolean. If `False` (default), each output | |
# -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=invalid-name
# pylint: disable=arguments-differ
"""Contains a (slow) python simulator.
It simulates a qasm quantum circuit (an experiment) that has been compiled
to run on the simulator. It is exponential in the number of qubits.
The simulator is run using
.. code-block:: python
QasmSimulatorPy().run(qobj)
Where the input is a Qobj object and the output is a BasicAerJob object, which can
later be queried for the Result object. The result will contain a 'memory' data
field, which is a result of measurements for each shot.
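For example (a minimal sketch; ``qobj`` is assumed to have been assembled
elsewhere from a circuit that ends in measurements):

.. code-block:: python

    job = QasmSimulatorPy().run(qobj)
    result = job.result()           # blocks until the simulation finishes
    counts = result.get_counts()    # aggregated bitstring counts
    memory = result.get_memory()    # per-shot bitstrings (when memory=True)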
"""
import uuid
import time
import logging
from math import log2
from collections import Counter
import numpy as np
from qiskit._util import local_hardware_info
from qiskit.providers.models import BackendConfiguration
from qiskit.result import Result
from qiskit.providers import BaseBackend
from qiskit.providers.basicaer.basicaerjob import BasicAerJob
from .exceptions import BasicAerError
from ._basicaertools import single_gate_matrix
from ._basicaertools import cx_gate_matrix
from ._basicaertools import einsum_vecmul_index
logger = logging.getLogger(__name__)
class QasmSimulatorPy(BaseBackend):
"""Python implementation of a qasm simulator."""
MAX_QUBITS_MEMORY = int(log2(local_hardware_info()['memory'] * (1024 ** 3) / 16))
DEFAULT_CONFIGURATION = {
'backend_name': 'qasm_simulator',
'backend_version': '2.0.0',
'n_qubits': min(24, MAX_QUBITS_MEMORY),
'url': 'https://github.com/Qiskit/qiskit-terra',
'simulator': True,
'local': True,
'conditional': True,
'open_pulse': False,
'memory': True,
'max_shots': 65536,
'description': 'A python simulator for qasm experiments',
'basis_gates': ['u1', 'u2', 'u3', 'cx', 'id', 'unitary'],
'gates': [
{
'name': 'u1',
'parameters': ['lambda'],
'qasm_def': 'gate u1(lambda) q { U(0,0,lambda) q; }'
},
{
'name': 'u2',
'parameters': ['phi', 'lambda'],
'qasm_def': 'gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }'
},
{
'name': 'u3',
'parameters': ['theta', 'phi', 'lambda'],
'qasm_def': 'gate u3(theta,phi,lambda) q { U(theta,phi,lambda) q; }'
},
{
'name': 'cx',
'parameters': ['c', 't'],
'qasm_def': 'gate cx c,t { CX c,t; }'
},
{
'name': 'id',
'parameters': ['a'],
'qasm_def': 'gate id a { U(0,0,0) a; }'
},
{
'name': 'unitary',
'parameters': ['matrix'],
'qasm_def': 'unitary(matrix) q1, q2,...'
}
]
}
DEFAULT_OPTIONS = {
"initial_statevector": None,
"chop_threshold": 1e-15
}
# Class level variable to return the final state at the end of simulation
# This should be set to True for the statevector simulator
SHOW_FINAL_STATE = False
def __init__(self, configuration=None, provider=None):
super().__init__(configuration=(configuration or
BackendConfiguration.from_dict(self.DEFAULT_CONFIGURATION)),
provider=provider)
# Define attributes in __init__.
self._local_random = np.random.RandomState()
self._classical_memory = 0
self._classical_register = 0
self._statevector = 0
self._number_of_cmembits = 0
self._number_of_qubits = 0
self._shots = 0
self._memory = False
self._initial_statevector = self.DEFAULT_OPTIONS["initial_statevector"]
self._chop_threshold = self.DEFAULT_OPTIONS["chop_threshold"]
self._qobj_config = None
# TEMP
self._sample_measure = False
def _add_unitary_single(self, gate, qubit):
"""Apply an arbitrary 1-qubit unitary matrix.
Args:
gate (matrix_like): a single qubit gate matrix
qubit (int): the qubit to apply gate to
"""
# Compute einsum index string for 1-qubit matrix multiplication
indexes = einsum_vecmul_index([qubit], self._number_of_qubits)
# Convert to complex rank-2 tensor
gate_tensor = np.array(gate, dtype=complex)
# Apply matrix multiplication
self._statevector = np.einsum(indexes, gate_tensor,
self._statevector,
dtype=complex,
casting='no')
def _add_unitary_two(self, gate, qubit0, qubit1):
"""Apply a two-qubit unitary matrix.
Args:
            gate (matrix_like): the two-qubit gate matrix
qubit0 (int): gate qubit-0
qubit1 (int): gate qubit-1
"""
# Compute einsum index string for 1-qubit matrix multiplication
indexes = einsum_vecmul_index([qubit0, qubit1], self._number_of_qubits)
# Convert to complex rank-4 tensor
gate_tensor = np.reshape(np.array(gate, dtype=complex), 4 * [2])
# Apply matrix multiplication
self._statevector = np.einsum(indexes, gate_tensor,
self._statevector,
dtype=complex,
casting='no')
def _get_measure_outcome(self, qubit):
"""Simulate the outcome of measurement of a qubit.
Args:
qubit (int): the qubit to measure
Return:
tuple: pair (outcome, probability) where outcome is '0' or '1' and
probability is the probability of the returned outcome.
"""
# Axis for numpy.sum to compute probabilities
axis = list(range(self._number_of_qubits))
axis.remove(self._number_of_qubits - 1 - qubit)
probabilities = np.sum(np.abs(self._statevector) ** 2, axis=tuple(axis))
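        # probabilities is now the length-2 vector [p(0), p(1)] for this qubit,
        # obtained by marginalizing |statevector|^2 over every other qubit axis.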
        # Sample the measurement outcome from the marginal probabilities
random_number = self._local_random.rand()
if random_number < probabilities[0]:
return '0', probabilities[0]
# Else outcome was '1'
return '1', probabilities[1]
def _add_sample_measure(self, measure_params, num_samples):
"""Generate memory samples from current statevector.
Args:
measure_params (list): List of (qubit, cmembit) values for
measure instructions to sample.
num_samples (int): The number of memory samples to generate.
Returns:
list: A list of memory values in hex format.
"""
# Get unique qubits that are actually measured
measured_qubits = list({qubit for qubit, cmembit in measure_params})
num_measured = len(measured_qubits)
# Axis for numpy.sum to compute probabilities
axis = list(range(self._number_of_qubits))
for qubit in reversed(measured_qubits):
# Remove from largest qubit to smallest so list position is correct
# with respect to position from end of the list
axis.remove(self._number_of_qubits - 1 - qubit)
probabilities = np.reshape(np.sum(np.abs(self._statevector) ** 2,
axis=tuple(axis)),
2 ** num_measured)
# Generate samples on measured qubits
samples = self._local_random.choice(range(2 ** num_measured),
num_samples, p=probabilities)
# Convert to bit-strings
memory = []
for sample in samples:
classical_memory = self._classical_memory
for count, (qubit, cmembit) in enumerate(sorted(measure_params)):
qubit_outcome = int((sample & (1 << count)) >> count)
membit = 1 << cmembit
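                # Clear the target classical-memory bit, then set it to the sampled outcome.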
classical_memory = (classical_memory & (~membit)) | (qubit_outcome << cmembit)
value = bin(classical_memory)[2:]
memory.append(hex(int(value, 2)))
return memory
def _add_qasm_measure(self, qubit, cmembit, cregbit=None):
"""Apply a measure instruction to a qubit.
Args:
qubit (int): qubit is the qubit measured.
cmembit (int): is the classical memory bit to store outcome in.
cregbit (int, optional): is the classical register bit to store outcome in.
"""
# get measure outcome
outcome, probability = self._get_measure_outcome(qubit)
# update classical state
membit = 1 << cmembit
self._classical_memory = (self._classical_memory & (~membit)) | (int(outcome) << cmembit)
if cregbit is not None:
regbit = 1 << cregbit
self._classical_register = \
(self._classical_register & (~regbit)) | (int(outcome) << cregbit)
# update quantum state
if outcome == '0':
update_diag = [[1 / np.sqrt(probability), 0], [0, 0]]
else:
update_diag = [[0, 0], [0, 1 / np.sqrt(probability)]]
# update classical state
self._add_unitary_single(update_diag, qubit)
def _add_qasm_reset(self, qubit):
"""Apply a reset instruction to a qubit.
Args:
            qubit (int): the qubit being reset
        This is done by simulating a measurement outcome and projecting onto
        the outcome state while renormalizing.
"""
# get measure outcome
outcome, probability = self._get_measure_outcome(qubit)
# update quantum state
if outcome == '0':
update = [[1 / np.sqrt(probability), 0], [0, 0]]
self._add_unitary_single(update, qubit)
else:
update = [[0, 1 / np.sqrt(probability)], [0, 0]]
self._add_unitary_single(update, qubit)
def _validate_initial_statevector(self):
"""Validate an initial statevector"""
# If initial statevector isn't set we don't need to validate
if self._initial_statevector is None:
return
# Check statevector is correct length for number of qubits
length = len(self._initial_statevector)
required_dim = 2 ** self._number_of_qubits
if length != required_dim:
raise BasicAerError('initial statevector is incorrect length: ' +
'{} != {}'.format(length, required_dim))
def _set_options(self, qobj_config=None, backend_options=None):
"""Set the backend options for all experiments in a qobj"""
# Reset default options
self._initial_statevector = self.DEFAULT_OPTIONS["initial_statevector"]
self._chop_threshold = self.DEFAULT_OPTIONS["chop_threshold"]
if backend_options is None:
backend_options = {}
# Check for custom initial statevector in backend_options first,
# then config second
if 'initial_statevector' in backend_options:
self._initial_statevector = np.array(backend_options['initial_statevector'],
dtype=complex)
elif hasattr(qobj_config, 'initial_statevector'):
self._initial_statevector = np.array(qobj_config.initial_statevector,
dtype=complex)
if self._initial_statevector is not None:
# Check the initial statevector is normalized
norm = np.linalg.norm(self._initial_statevector)
if round(norm, 12) != 1:
raise BasicAerError('initial statevector is not normalized: ' +
'norm {} != 1'.format(norm))
# Check for custom chop threshold
# Replace with custom options
if 'chop_threshold' in backend_options:
self._chop_threshold = backend_options['chop_threshold']
elif hasattr(qobj_config, 'chop_threshold'):
self._chop_threshold = qobj_config.chop_threshold
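    # Example (a sketch; the exact invocation depends on how the backend is
    # run) of the two custom options handled above:
    #
    #   backend_options = {
    #       "initial_statevector": np.array([1, 0, 0, 1j]) / np.sqrt(2),
    #       "chop_threshold": 1e-15,
    #   }
    #
    # The statevector must have length 2 ** num_qubits and unit norm, or the
    # validation above raises a BasicAerError.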
def _initialize_statevector(self):
"""Set the initial statevector for simulation"""
if self._initial_statevector is None:
# Set to default state of all qubits in |0>
self._statevector = np.zeros(2 ** self._number_of_qubits,
dtype=complex)
self._statevector[0] = 1
else:
self._statevector = self._initial_statevector.copy()
# Reshape to rank-N tensor
self._statevector = np.reshape(self._statevector,
self._number_of_qubits * [2])
def _get_statevector(self):
"""Return the current statevector in JSON Result spec format"""
vec = np.reshape(self._statevector, 2 ** self._number_of_qubits)
# Expand complex numbers
vec = np.stack([vec.real, vec.imag], axis=1)
# Truncate small values
vec[abs(vec) < self._chop_threshold] = 0.0
return vec
def _validate_measure_sampling(self, experiment):
"""Determine if measure sampling is allowed for an experiment
Args:
experiment (QobjExperiment): a qobj experiment.
"""
# Check for config flag
if hasattr(experiment.config, 'allows_measure_sampling'):
self._sample_measure = experiment.config.allows_measure_sampling
# If flag isn't found do a simple test to see if a circuit contains
        # no reset instructions, and no gate instructions after
# the first measure.
else:
measure_flag = False
for instruction in experiment.instructions:
# src/cabinetry/model_utils.py
"""Provides utilities for pyhf models."""
import json
import logging
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import awkward as ak
import numpy as np
import pyhf
from cabinetry.fit.results_containers import FitResults
log = logging.getLogger(__name__)
# cache holding results from yield uncertainty calculations
_YIELD_STDEV_CACHE: Dict[Any, Tuple[List[List[float]], List[float]]] = {}
class ModelPrediction(NamedTuple):
"""Model prediction with yields and total uncertainties per bin and channel.
Args:
        model (pyhf.pdf.Model): model to which the prediction corresponds
model_yields (List[List[List[float]]]): yields per sample, channel and bin,
indices: channel, sample, bin
total_stdev_model_bins (List[List[float]]): total yield uncertainty per channel
and per bin, indices: channel, bin
total_stdev_model_channels (List[float]): total yield uncertainty per channel,
index: channel
label (str): label for the prediction, e.g. "pre-fit" or "post-fit"
"""
model: pyhf.pdf.Model
model_yields: List[List[List[float]]]
total_stdev_model_bins: List[List[float]]
total_stdev_model_channels: List[float]
label: str
def model_and_data(
spec: Dict[str, Any], *, asimov: bool = False, include_auxdata: bool = True
) -> Tuple[pyhf.pdf.Model, List[float]]:
"""Returns model and data for a ``pyhf`` workspace specification.
Args:
spec (Dict[str, Any]): a ``pyhf`` workspace specification
asimov (bool, optional): whether to return the Asimov dataset, defaults to False
include_auxdata (bool, optional): whether to also return auxdata, defaults to
True
Returns:
Tuple[pyhf.pdf.Model, List[float]]:
- a HistFactory-style model in ``pyhf`` format
- the data (plus auxdata if requested) for the model
"""
workspace = pyhf.Workspace(spec)
model = workspace.model(
modifier_settings={
"normsys": {"interpcode": "code4"},
"histosys": {"interpcode": "code4p"},
}
) # use HistFactory InterpCode=4 (default in pyhf since v0.6.0)
if not asimov:
data = workspace.data(model, include_auxdata=include_auxdata)
else:
data = asimov_data(model, include_auxdata=include_auxdata)
return model, data
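# Example usage (a sketch; "workspace.json" is a hypothetical pyhf workspace file):
#
#   import json
#   with open("workspace.json") as f:
#       spec = json.load(f)
#   model, data = model_and_data(spec)                  # observed data
#   model, asimov = model_and_data(spec, asimov=True)   # Asimov dataset instead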
def asimov_data(model: pyhf.Model, *, include_auxdata: bool = True) -> List[float]:
"""Returns the Asimov dataset (optionally with auxdata) for a model.
Initial parameter settings for normalization factors in the workspace are treated as
the default settings for that parameter. Fitting the Asimov dataset will recover
these initial settings as the maximum likelihood estimate for normalization factors.
Initial settings for other modifiers are ignored.
Args:
model (pyhf.Model): the model from which to construct the dataset
include_auxdata (bool, optional): whether to also return auxdata, defaults to
True
Returns:
List[float]: the Asimov dataset
"""
asimov_data = pyhf.tensorlib.tolist(
model.expected_data(asimov_parameters(model), include_auxdata=include_auxdata)
)
return asimov_data
def asimov_parameters(model: pyhf.pdf.Model) -> np.ndarray:
"""Returns a list of Asimov parameter values for a model.
For normfactors and shapefactors, initial parameter settings (specified in the
workspace) are treated as nominal settings. This ignores custom auxiliary data set
in the measurement configuration in the workspace.
Args:
model (pyhf.pdf.Model): model for which to extract the parameters
Returns:
np.ndarray: the Asimov parameters, in the same order as
``model.config.suggested_init()``
"""
# create a list of Asimov parameters (constrained parameters at best-fit value from
# the aux measurement, unconstrained parameters at init specified in the workspace)
asimov_parameters = []
for parameter in model.config.par_order:
if not model.config.param_set(parameter).constrained:
# unconstrained parameter: use suggested inits (for normfactor/shapefactor)
inits = model.config.param_set(parameter).suggested_init
elif dict(model.config.modifiers)[parameter] in ["histosys", "normsys"]:
# histosys/normsys: Gaussian constraint, nominal value 0
inits = [0.0] * model.config.param_set(parameter).n_parameters
else:
# remaining modifiers are staterror/lumi with Gaussian constraint, and
# shapesys with Poisson constraint, all have nominal value of 1
inits = [1.0] * model.config.param_set(parameter).n_parameters
asimov_parameters += inits
return np.asarray(asimov_parameters)
def prefit_uncertainties(model: pyhf.pdf.Model) -> np.ndarray:
"""Returns a list of pre-fit parameter uncertainties for a model.
For unconstrained parameters the uncertainty is set to 0. It is also set to 0 for
fixed parameters (similarly to how the post-fit uncertainties are defined to be 0).
Args:
model (pyhf.pdf.Model): model for which to extract the parameters
Returns:
np.ndarray: pre-fit uncertainties for the parameters, in the same order as
``model.config.suggested_init()``
"""
pre_fit_unc = [] # pre-fit uncertainties for parameters
for parameter in model.config.par_order:
# obtain pre-fit uncertainty for constrained, non-fixed parameters
if (
model.config.param_set(parameter).constrained
and not model.config.param_set(parameter).suggested_fixed
):
pre_fit_unc += model.config.param_set(parameter).width()
else:
if model.config.param_set(parameter).n_parameters == 1:
# unconstrained normfactor or fixed parameter, uncertainty is 0
pre_fit_unc.append(0.0)
else:
# shapefactor
pre_fit_unc += [0.0] * model.config.param_set(parameter).n_parameters
return np.asarray(pre_fit_unc)
def _hashable_model_key(
model: pyhf.pdf.Model,
) -> Tuple[str, Tuple[Tuple[str, str], ...]]:
"""Compute a hashable representation of the values that uniquely identify a model.
The ``pyhf.pdf.Model`` type is already hashable, but it uses the ``__hash__``
inherited from ``object``, so a copy of a model has a distinct hash. The key
returned by this function instead will hash to the same value for copies, but differ
when the model represents a different likelihood.
Note: The key returned here considers only the spec and interpolation codes. All
other model configuration options leave it unchanged (e.g. ``poi_name``, overriding
parameter bounds, etc.).
Args:
model (pyhf.model.Model): model to generate a key for
Returns:
Tuple[str, Tuple[Tuple[str, str], ...]]: a key that identifies the model
by its spec and interpcodes
"""
interpcodes = []
for mod_type in sorted(model.config.modifier_settings.keys()):
code = model.config.modifier_settings[mod_type]["interpcode"]
interpcodes.append((mod_type, code))
# sort since different orderings result in equivalent models,
# but distinct strings
spec_str = json.dumps(model.spec, sort_keys=True)
return (spec_str, tuple(interpcodes))
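# Sketch of how this key supports caching (mirroring _YIELD_STDEV_CACHE above);
# model, parameters and result are placeholders for the caller's objects:
#
#   cache: Dict[Any, Any] = {}
#   cache[(_hashable_model_key(model), tuple(parameters))] = result
#   hit = cache.get((_hashable_model_key(model), tuple(parameters)))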
def yield_stdev(
model: pyhf.pdf.Model,
parameters: np.ndarray,
uncertainty: np.ndarray,
corr_mat: np.ndarray,
) -> Tuple[List[List[float]], List[float]]:
"""Calculates symmetrized yield standard deviation of a model, per bin and channel.
Returns both the uncertainties per bin (in a list of channels), and the uncertainty
of the total yield per channel (again, for a list of channels). To calculate the
uncertainties for the total yield, the function internally treats the sum of yields
per channel like another channel with one bin. The results of this function are
cached to speed up subsequent calls with the same arguments.
Args:
model (pyhf.pdf.Model): the model for which to calculate the standard deviations
for all bins
parameters (np.ndarray): central values of model parameters
uncertainty (np.ndarray): uncertainty of model parameters
corr_mat (np.ndarray): correlation matrix
Returns:
Tuple[List[List[float]], List[float]]:
- list of channels, each channel is a list of standard deviations per bin
- list of standard deviations per channel
"""
# check whether results are already stored in cache
cached_results = _YIELD_STDEV_CACHE.get(
(
_hashable_model_key(model),
tuple(parameters),
tuple(uncertainty),
corr_mat.data.tobytes(),
),
None,
)
if cached_results is not None:
# return results from cache
return cached_results
# the lists up_variations and down_variations will contain the model distributions
# with all parameters varied individually within uncertainties
# indices: variation, channel, bin
# following the channels contained in the model, there are additional entries with
# yields summed per channel (internally treated like additional channels) to get the
# per-channel uncertainties
up_variations = []
down_variations = []
# calculate the model distribution for every parameter varied up and down
# within the respective uncertainties
for i_par in range(model.config.npars):
# central parameter values, but one parameter varied within uncertainties
up_pars = parameters.copy().astype(float) # ensure float for correct addition
up_pars[i_par] += uncertainty[i_par]
down_pars = parameters.copy().astype(float)
down_pars[i_par] -= uncertainty[i_par]
# total model distribution with this parameter varied up
up_comb = pyhf.tensorlib.to_numpy(
model.expected_data(up_pars, include_auxdata=False)
)
# turn into list of channels
up_yields = [
up_comb[model.config.channel_slices[ch]] for ch in model.config.channels
]
# append list of yields summed per channel
up_yields += [np.asarray([sum(chan_yields)]) for chan_yields in up_yields]
up_variations.append(up_yields)
# total model distribution with this parameter varied down
down_comb = pyhf.tensorlib.to_numpy(
model.expected_data(down_pars, include_auxdata=False)
)
# turn into list of channels
down_yields = [
down_comb[model.config.channel_slices[ch]] for ch in model.config.channels
]
# append list of yields summed per channel
down_yields += [np.asarray([sum(chan_yields)]) for chan_yields in down_yields]
down_variations.append(down_yields)
# convert to awkward arrays for further processing
up_variations_ak = ak.from_iter(up_variations)
down_variations_ak = ak.from_iter(down_variations)
# calculate symmetric uncertainties for all components
sym_uncs = (up_variations_ak - down_variations_ak) / 2
# calculate total variance, indexed by channel and bin (per-channel numbers act like
# additional channels with one bin each)
if np.count_nonzero(corr_mat - np.diagflat(np.ones_like(parameters))) == 0:
# no off-diagonal contributions from correlation matrix (e.g. pre-fit)
total_variance = np.sum(np.power(sym_uncs, 2), axis=0)
else:
# full calculation including off-diagonal contributions
# with v as vector of variations (each element contains yields under variation)
# and M as correlation matrix, calculate variance as follows:
# variance = sum_i sum_j v[i] * M[i, j] * v[j]
# where the product between elements of v again is elementwise (multiplying bin
# yields), and the final variance shape is the same as element of v (yield
        # uncertainties per bin, per channel)
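        # A compact sketch of the same calculation (assuming sym_uncs has been
        # converted to a plain numpy array v of shape (n_pars, n_bins)):
        #
        #   total_variance = np.einsum("ib,ij,jb->b", v, corr_mat, v)
        #
        # which is sum_i sum_j v[i] * M[i, j] * v[j], evaluated per bin.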
parameters is to be
x -> North, y -> East and z -> **DOWN**.
.. note:: All input values in **SI** units(!) and output in **Eotvos**!
Parameters:
* xp, yp, zp : arrays
Arrays with the x, y, and z coordinates of the computation points.
* prisms : list of :class:`~fatiando.mesher.Prism`
The density model used to calculate the gravitational effect.
Prisms must have the property ``'density'``. Prisms that don't have
this property will be ignored in the computations. Elements of *prisms*
that are None will also be ignored. *prisms* can also be a
:class:`~fatiando.mesher.PrismMesh`.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the prisms. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
"""
if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same length!")
size = len(xp)
    res = numpy.zeros(size, dtype=float)
for prism in prisms:
if prism is None or ('density' not in prism.props and dens is None):
continue
if dens is None:
density = prism.props['density']
else:
density = dens
x1, x2 = prism.x1, prism.x2
y1, y2 = prism.y1, prism.y2
z1, z2 = prism.z1, prism.z2
_prism.gxx(xp, yp, zp, x1, x2, y1, y2, z1, z2, density, res)
res *= G * SI2EOTVOS
return res
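# Example usage (a sketch; assumes the usual fatiando imports and that this
# module is available as fatiando.gravmag.prism):
#
#   from fatiando import mesher, gridder
#   from fatiando.gravmag import prism
#   model = [mesher.Prism(-1000, 1000, -1000, 1000, 100, 2000, {'density': 2670})]
#   xp, yp, zp = gridder.regular((-5000, 5000, -5000, 5000), (50, 50), z=-150)
#   gxx_eotvos = prism.gxx(xp, yp, zp, model)   # result in Eotvos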
def gxy(xp, yp, zp, prisms, dens=None):
"""
Calculates the :math:`g_{xy}` gravity gradient tensor component.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> **DOWN**.
.. note:: All input values in **SI** units(!) and output in **Eotvos**!
.. warning::
This component has singularities when the computation
point is aligned with the corners of the prism on the bottom side.
    In these cases, the computation point is shifted slightly to avoid these
singularities. Unfortunately, this means that the result will not be as
accurate **on those points**.
Parameters:
* xp, yp, zp : arrays
Arrays with the x, y, and z coordinates of the computation points.
* prisms : list of :class:`~fatiando.mesher.Prism`
The density model used to calculate the gravitational effect.
Prisms must have the property ``'density'``. Prisms that don't have
this property will be ignored in the computations. Elements of *prisms*
that are None will also be ignored. *prisms* can also be a
:class:`~fatiando.mesher.PrismMesh`.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the prisms. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
"""
if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same length!")
size = len(xp)
    res = numpy.zeros(size, dtype=float)
for prism in prisms:
if prism is None or ('density' not in prism.props and dens is None):
continue
if dens is None:
density = prism.props['density']
else:
density = dens
x1, x2 = prism.x1, prism.x2
y1, y2 = prism.y1, prism.y2
z1, z2 = prism.z1, prism.z2
_prism.gxy(xp, yp, zp, x1, x2, y1, y2, z1, z2, density, res)
res *= G * SI2EOTVOS
return res
def gxz(xp, yp, zp, prisms, dens=None):
"""
Calculates the :math:`g_{xz}` gravity gradient tensor component.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> **DOWN**.
.. note:: All input values in **SI** units(!) and output in **Eotvos**!
.. warning::
This component has singularities when the computation
point is aligned with the corners of the prism on the east side.
    In these cases, the computation point is shifted slightly to avoid these
singularities. Unfortunately, this means that the result will not be as
accurate **on those points**.
Parameters:
* xp, yp, zp : arrays
Arrays with the x, y, and z coordinates of the computation points.
* prisms : list of :class:`~fatiando.mesher.Prism`
The density model used to calculate the gravitational effect.
Prisms must have the property ``'density'``. Prisms that don't have
this property will be ignored in the computations. Elements of *prisms*
that are None will also be ignored. *prisms* can also be a
:class:`~fatiando.mesher.PrismMesh`.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the prisms. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
"""
if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same length!")
size = len(xp)
    res = numpy.zeros(size, dtype=float)
for prism in prisms:
if prism is None or ('density' not in prism.props and dens is None):
continue
if dens is None:
density = prism.props['density']
else:
density = dens
x1, x2 = prism.x1, prism.x2
y1, y2 = prism.y1, prism.y2
z1, z2 = prism.z1, prism.z2
_prism.gxz(xp, yp, zp, x1, x2, y1, y2, z1, z2, density, res)
res *= G * SI2EOTVOS
return res
def gyy(xp, yp, zp, prisms, dens=None):
"""
Calculates the :math:`g_{yy}` gravity gradient tensor component.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> **DOWN**.
.. note:: All input values in **SI** units(!) and output in **Eotvos**!
Parameters:
* xp, yp, zp : arrays
Arrays with the x, y, and z coordinates of the computation points.
* prisms : list of :class:`~fatiando.mesher.Prism`
The density model used to calculate the gravitational effect.
Prisms must have the property ``'density'``. Prisms that don't have
this property will be ignored in the computations. Elements of *prisms*
that are None will also be ignored. *prisms* can also be a
:class:`~fatiando.mesher.PrismMesh`.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the prisms. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
"""
if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same length!")
size = len(xp)
    res = numpy.zeros(size, dtype=float)
for prism in prisms:
if prism is None or ('density' not in prism.props and dens is None):
continue
if dens is None:
density = prism.props['density']
else:
density = dens
x1, x2 = prism.x1, prism.x2
y1, y2 = prism.y1, prism.y2
z1, z2 = prism.z1, prism.z2
_prism.gyy(xp, yp, zp, x1, x2, y1, y2, z1, z2, density, res)
res *= G * SI2EOTVOS
return res
def gyz(xp, yp, zp, prisms, dens=None):
"""
Calculates the :math:`g_{yz}` gravity gradient tensor component.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> **DOWN**.
.. note:: All input values in **SI** units(!) and output in **Eotvos**!
.. warning::
This component has singularities when the computation
point is aligned with the corners of the prism on the north side.
    In these cases, the computation point is shifted slightly to avoid these
singularities. Unfortunately, this means that the result will not be as
accurate **on those points**.
Parameters:
* xp, yp, zp : arrays
Arrays with the x, y, and z coordinates of the computation points.
* prisms : list of :class:`~fatiando.mesher.Prism`
The density model used to calculate the gravitational effect.
Prisms must have the property ``'density'``. Prisms that don't have
this property will be ignored in the computations. Elements of *prisms*
that are None will also be ignored. *prisms* can also be a
:class:`~fatiando.mesher.PrismMesh`.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the prisms. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
"""
if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same length!")
size = len(xp)
    res = numpy.zeros(size, dtype=float)
    for prism in prisms:
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Query scope tree implementation."""
from __future__ import annotations
import textwrap
import typing
import weakref
from . import pathid
class InvalidScopeConfiguration(Exception):
def __init__(self, msg: str, *,
offending_node: 'ScopeTreeNode',
existing_node: 'ScopeTreeNode') -> None:
super().__init__(msg)
self.offending_node = offending_node
self.existing_node = existing_node
class ScopeTreeNode:
unique_id: typing.Optional[int]
"""A unique identifier used to map scopes on sets."""
path_id: typing.Optional[pathid.PathId]
"""Node path id, or None for branch nodes."""
fenced: bool
"""Whether the subtree represents a SET OF argument."""
protect_parent: bool
"""Whether the subtree represents a scope that must not affect parents."""
unnest_fence: bool
"""Prevent unnesting in parents."""
optional: bool
"""Whether this node represents an optional path."""
children: typing.Set['ScopeTreeNode']
"""A set of child nodes."""
namespaces: typing.Set[str]
"""A set of namespaces used by paths in this branch.
When a path node is pulled up from this branch,
and its namespace matches anything in `namespaces`,
the namespace will be stripped. This is used to
implement "semi-detached" semantics used by
views declared in a WITH block."""
def __init__(self, *, path_id: typing.Optional[pathid.PathId]=None,
fenced: bool=False, unique_id: typing.Optional[int]=None):
self.unique_id = unique_id
self.path_id = path_id
self.fenced = fenced
self.protect_parent = False
self.unnest_fence = False
self.optional = False
self.children = set()
self.namespaces = set()
self._parent = None
def __repr__(self):
name = 'ScopeFenceNode' if self.fenced else 'ScopeTreeNode'
return (f'<{name} {self.path_id!r} at {id(self):0x}>')
def _copy(self, parent: 'ScopeTreeNode') -> 'ScopeTreeNode':
cp = self.__class__(
path_id=self.path_id,
fenced=self.fenced)
cp.optional = self.optional
cp.unnest_fence = self.unnest_fence
cp.namespaces = set(self.namespaces)
cp.unique_id = self.unique_id
cp._set_parent(parent)
for child in self.children:
child._copy(parent=cp)
return cp
@property
def name(self):
return self._name(debug=False)
def _name(self, debug):
if self.path_id is None:
return f'FENCE' if self.fenced else f'BRANCH'
else:
pid = self.path_id.pformat_internal(debug=debug)
return f'{pid}{" [OPT]" if self.optional else ""}'
def debugname(self, fuller=False):
parts = [f'{self._name(debug=fuller)}']
if self.unique_id:
parts.append(f'uid:{self.unique_id}')
if self.namespaces:
parts.append(','.join(self.namespaces))
if self.unnest_fence:
parts.append('no-unnest')
parts.append(f'0x{id(self):0x}')
return ' '.join(parts)
@property
def ancestors(self) -> typing.Iterator['ScopeTreeNode']:
"""An iterator of node's ancestors, including self."""
node = self
while node is not None:
yield node
node = node.parent
@property
def strict_ancestors(self) -> typing.Iterator['ScopeTreeNode']:
"""An iterator of node's ancestors, not including self."""
node = self.parent
while node is not None:
yield node
node = node.parent
@property
def ancestors_and_namespaces(self) \
-> typing.Iterator[typing.Tuple['ScopeTreeNode',
typing.FrozenSet[str]]]:
"""An iterator of node's ancestors and namespaces, including self."""
namespaces = frozenset()
node = self
while node is not None:
namespaces |= node.namespaces
yield node, namespaces
node = node.parent
@property
def path_children(self) -> typing.Iterator['ScopeTreeNode']:
"""An iterator of node's children that have path ids."""
return filter(lambda p: p.path_id is not None, self.children)
def get_all_paths(self):
paths = set()
if self.path_id:
paths.add(self.path_id)
else:
paths.update(p.path_id for p in self.path_children)
return paths
@property
def descendants(self) -> typing.Iterator['ScopeTreeNode']:
"""An iterator of node's descendants including self top-first."""
yield self
yield from self.strict_descendants
@property
def strict_descendants(self) -> typing.Iterator['ScopeTreeNode']:
"""An iterator of node's descendants not including self top-first."""
for child in tuple(self.children):
yield child
yield from child.strict_descendants
@property
def strict_descendants_and_namespaces(self) \
-> typing.Iterator[typing.Tuple['ScopeTreeNode',
typing.FrozenSet[str]]]:
"""An iterator of node's descendants and namespaces.
Does not include self. Top-first.
"""
for child in tuple(self.children):
yield child, child.namespaces
desc_ns = child.strict_descendants_and_namespaces
for desc, desc_namespaces in desc_ns:
yield desc, child.namespaces | desc_namespaces
@property
def path_descendants(self) -> typing.Iterator['ScopeTreeNode']:
"""An iterator of node's descendants that have path ids."""
return filter(lambda p: p.path_id is not None, self.descendants)
def get_all_path_nodes(self, *, include_subpaths: bool=True): # XXX
return list(self.path_descendants)
@property
def descendant_namespaces(self) -> typing.Set[str]:
"""An set of namespaces declared by descendants."""
namespaces = set()
for child in self.descendants:
namespaces.update(child.namespaces)
return namespaces
@property
def unfenced_descendants(self) -> typing.Iterator['ScopeTreeNode']:
"""An iterator of node's unfenced descendants including self."""
yield self
for child in tuple(self.children):
if not child.fenced:
yield from child.unfenced_descendants
@property
def strict_unfenced_descendants(self) -> typing.Iterator['ScopeTreeNode']:
"""An iterator of node's unfenced descendants."""
for child in tuple(self.children):
if not child.fenced:
yield from child.unfenced_descendants
@property
def fence(self) -> 'ScopeTreeNode':
"""The nearest ancestor fence (or self, if fence)."""
if self.fenced:
return self
else:
return self.parent_fence
@property
def parent(self) -> typing.Optional['ScopeTreeNode']:
"""The parent node."""
if self._parent is None:
return None
else:
return self._parent()
@property
def parent_fence(self) -> typing.Optional['ScopeTreeNode']:
"""The nearest strict ancestor fence."""
for ancestor in self.strict_ancestors:
if ancestor.fenced:
return ancestor
return None
@property
def root(self) -> 'ScopeTreeNode':
"""The root of this tree."""
node = self
while node.parent is not None:
node = node.parent
return node
def attach_child(self, node: 'ScopeTreeNode') -> None:
"""Attach a child node to this node.
This is a low-level operation, no tree validation is
        performed. For safe tree modification, use attach_subtree().
"""
if node.path_id is not None:
for child in self.children:
if child.path_id == node.path_id:
raise InvalidScopeConfiguration(
f'{node.path_id} is already present in {self!r}')
if node.unique_id is not None:
for child in self.children:
if child.unique_id == node.unique_id:
return
node._set_parent(self)
def attach_fence(self) -> 'ScopeTreeNode':
"""Create and attach an empty fenced node."""
fence = ScopeTreeNode(fenced=True)
self.attach_child(fence)
return fence
def attach_branch(self) -> 'ScopeTreeNode':
"""Create and attach an empty branch node."""
fence = ScopeTreeNode()
self.attach_child(fence)
return fence
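    # Minimal sketch of building nested scopes with the helpers above:
    #
    #   root = ScopeTreeNode(fenced=True)   # top-level fence
    #   branch = root.attach_branch()       # unfenced grouping node
    #   fence = branch.attach_fence()       # fenced subtree (e.g. a SET OF argument)
    #
    # attach_path() below builds a similar chain automatically from a PathId's
    # prefixes.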
def attach_path(self, path_id: pathid.PathId) -> None:
"""Attach a scope subtree representing *path_id*."""
subtree = parent = ScopeTreeNode(fenced=True)
is_lprop = False
for prefix in reversed(list(path_id.iter_prefixes(include_ptr=True))):
if prefix.is_ptr_path():
is_lprop = True
continue
new_child = ScopeTreeNode(path_id=prefix)
parent.attach_child(new_child)
if not (is_lprop or prefix.is_linkprop_path()):
parent = new_child
is_lprop = False
self.attach_subtree(subtree)
def attach_subtree(self, node: 'ScopeTreeNode') -> None:
"""Attach a subtree to this node.
*node* is expected to be a balanced scope tree and may be modified
by this function.
        If *node* is not a path node (path_id is None), it is discarded,
        and its descendants are attached directly. The tree balance is
maintained.
"""
if node.path_id is not None:
# Wrap path node
wrapper_node = ScopeTreeNode(fenced=True)
wrapper_node.attach_child(node)
node = wrapper_node
dns = node.descendant_namespaces
for descendant in node.path_descendants:
path_id = descendant.path_id.strip_namespace(dns)
visible = self.find_visible(path_id)
if visible is not None:
# This path is already present in the tree, discard,
# but keep its OPTIONAL status, if any.
descendant.remove()
if descendant.optional:
visible.optional = True
elif descendant.parent_fence is node:
# Unfenced path.
# First, find any existing descendant with the same path_id.
# If not found, find any _unfenced_ node that is a child of
# any of our ancestors.
# If found, attach the node directly to its parent fence
# and remove all other occurrences.
existing, existing_ns = self.find_descendant_and_ns(path_id)
unnest_fence = False
parent_fence = None
if existing is None:
existing, unnest_fence = self.find_unfenced(path_id)
if existing is not None:
parent_fence = existing.parent_fence
else:
parent_fence = self.fence
if existing is not None:
if parent_fence.find_child(path_id) is None:
if (unnest_fence
and parent_fence.find_child(
path_id, in_branches=True) is None):
if descendant.parent.path_id:
offending_node = descendant.parent
else:
offending_node = descendant
raise InvalidScopeConfiguration(
f'reference to '
f'{offending_node.path_id.pformat()!r} '
f'changes the interpretation of '
f'{existing.path_id.pformat()!r} '
f'elsewhere in the query',
offending_node=offending_node,
existing_node=existing
)
parent_fence.remove_descendants(path_id)
existing.path_id = existing.path_id.strip_namespace(
existing_ns)
parent_fence.attach_child(existing)
# Discard the node from the subtree being attached.
existing.fuse_subtree(descendant)
for descendant in tuple(node.children):
# Attach whatever is remaining in the subtree.
for pd in descendant.path_descendants:
if pd.path_id.namespace:
to_strip = set(pd.path_id.namespace) & node.namespaces
pd.path_id = pd.path_id.strip_namespace(to_strip)
self.attach_child(descendant)
def fuse_subtree(self, node):
node.remove()
if node.path_id is not None:
if node.optional:
self.optional = True
subtree = ScopeTreeNode(fenced=True)
for child in tuple(node.children):
subtree.attach_child(child)
else:
subtree = node
self.attach_subtree(subtree)
def remove_subtree(self, node):
"""Remove the given subtree from this node."""
if node not in self.children:
raise KeyError(f'{node} is not a child of {self}')
node._set_parent(None)
def remove_descendants(self, path_id: pathid.PathId) -> None:
"""Remove all descendant nodes matching *path_id*."""
matching = set()
for node in self.descendants:
if _paths_equal_to_shortest_ns(node.path_id, path_id):
matching.add(node)
for node in matching:
node.remove()
def mark_as_optional(self, path_id: pathid.PathId) -> None:
"""Indicate that *path_id* is used as an | |
self._ops = inside_ops
# Compute inside and outside tensor
inputs, outputs, insides = select.compute_boundary_ts(inside_ops)
# Compute passthrough tensors, silently ignoring the non-passthrough ones.
all_tensors = frozenset(inputs + outputs + list(insides))
self._passthrough_ts = [t for t in passthrough_ts if t not in all_tensors]
# Set inputs and outputs.
self._input_ts = inputs + self._passthrough_ts
self._output_ts = outputs + self._passthrough_ts
else:
self._graph = None
self._passthrough_ts = []
self._input_ts = []
self._output_ts = []
self._ops = []
def __copy__(self):
"""Create a copy of this subgraph.
    Note that this class is a "view", copying it only creates another view and
does not copy the underlying part of the `tf.Graph`.
Returns:
A new identical instance of the original subgraph view.
"""
cls = self.__class__
result = cls.__new__(cls)
for k, v in iteritems(self.__dict__):
if k == "_graph":
setattr(result, k, v)
else:
setattr(result, k, list(v)) # copy the list
return result
def _assign_from(self, other):
"""Assign other to itself.
Args:
other: another subgraph-view.
    Raises:
      TypeError: if other is not a SubGraphView.
"""
if not isinstance(other, SubGraphView):
raise TypeError("Expected SubGraphView, got: {}".format(type(other)))
# pylint: disable=protected-access
self._graph = other._graph
self._ops = list(other._ops)
self._passthrough_ts = list(other._passthrough_ts)
self._input_ts = list(other._input_ts)
self._output_ts = list(other._output_ts)
# pylint: enable=protected-access
def copy(self):
"""Return a copy of itself.
    Note that this class is a "view", copying it only creates another view and
does not copy the underlying part of the tf.Graph.
Returns:
A new instance identical to the original one.
"""
return copy.copy(self)
def _remap_default(self, remove_input_map=True, remove_output_map=True):
"""Remap in the place the inputs and/or outputs to the default mapping.
Args:
remove_input_map: if True the input map is reset to the default one.
remove_output_map: if True the output map is reset to the default one.
"""
if not remove_input_map and not remove_output_map:
return
# Compute inside and outside tensor
inputs, outputs, _ = select.compute_boundary_ts(self._ops)
if remove_input_map:
self._input_ts = list(inputs) + self._passthrough_ts
if remove_output_map:
self._output_ts = list(outputs) + self._passthrough_ts
def remap_default(self, remove_input_map=True, remove_output_map=True):
"""Remap the inputs and/or outputs to the default mapping.
Args:
remove_input_map: if True the input map is reset to the default one.
remove_output_map: if True the output map is reset to the default one.
Returns:
A new modified instance of the original subgraph view with its
input and/or output mapping reset to the default one.
"""
res = self.copy()
res._remap_default(remove_input_map, remove_output_map) # pylint: disable=protected-access
return res
def _remap_inputs(self, new_input_indices):
"""Remap the inputs of the subgraph in-place."""
_check_within_range(
new_input_indices, len(self._input_ts), repetition=False)
self._input_ts = [self._input_ts[i] for i in new_input_indices]
def _remap_outputs(self, new_output_indices):
"""Remap the outputs of the subgraph in-place."""
_check_within_range(
new_output_indices, len(self._output_ts), repetition=True)
self._output_ts = [self._output_ts[i] for i in new_output_indices]
def _remap_outputs_make_unique(self):
"""Remap the outputs in place so that all the tensors appears only once."""
output_ts = list(self._output_ts)
self._output_ts = []
util.concatenate_unique(self._output_ts, output_ts)
def _remap_outputs_to_consumers(self):
"""Remap the outputs in place to match the number of consumers."""
self._remap_outputs_make_unique()
output_ts = list(self._output_ts)
self._output_ts = []
for t in output_ts:
self._output_ts += [t] * len(t.consumers())
def remap_outputs_make_unique(self):
"""Remap the outputs so that all the tensors appears only once."""
res = copy.copy(self)
res._remap_outputs_make_unique() # pylint: disable=protected-access
return res
def remap_outputs_to_consumers(self):
"""Remap the outputs to match the number of consumers."""
res = copy.copy(self)
res._remap_outputs_to_consumers() # pylint: disable=protected-access
return res
def _remove_unused_ops(self, control_inputs=True):
"""Remove unused ops in place.
Args:
control_inputs: if True, control inputs are used to detect used ops.
"""
ops = select.get_walks_union_ops(
self.connected_inputs,
self.connected_outputs,
within_ops=self._ops,
control_inputs=control_inputs)
self._ops = [op for op in self._ops if op in ops]
def remove_unused_ops(self, control_inputs=True):
"""Remove unused ops.
Args:
control_inputs: if True, control inputs are used to detect used ops.
Returns:
A new subgraph view which only contains used operations.
"""
res = copy.copy(self)
res._remove_unused_ops(control_inputs) # pylint: disable=protected-access
return res
def remap_inputs(self, new_input_indices):
"""Remap the inputs of the subgraph.
If the inputs of the original subgraph are [t0, t1, t2], remapping to [2,0]
    will create a new instance whose inputs are [t2, t0].
Note that this is only modifying the view: the underlying `tf.Graph` is not
affected.
Args:
new_input_indices: an iterable of integers representing a mapping between
the old inputs and the new ones. This mapping can be under-complete and
must be without repetitions.
Returns:
A new modified instance of the original subgraph view with remapped
inputs.
"""
res = self.copy()
res._remap_inputs(new_input_indices) # pylint: disable=protected-access
return res
def remap_outputs(self, new_output_indices):
"""Remap the output of the subgraph.
    If the outputs of the original subgraph are [t0, t1, t2], remapping to
    [1, 1, 0] will create a new instance whose outputs are [t1, t1, t0].
Note that this is only modifying the view: the underlying tf.Graph is not
affected.
Args:
new_output_indices: an iterable of integers representing a mapping between
the old outputs and the new ones. This mapping can be under-complete and
can have repetitions.
Returns:
A new modified instance of the original subgraph view with remapped
outputs.
"""
res = copy.copy(self)
res._remap_outputs(new_output_indices) # pylint: disable=protected-access
return res
def remap(self, new_input_indices=None, new_output_indices=None):
"""Remap the inputs and outputs of the subgraph.
Note that this is only modifying the view: the underlying tf.Graph is not
affected.
Args:
new_input_indices: an iterable of integers representing a mapping between
the old inputs and the new ones. This mapping can be under-complete and
must be without repetitions.
new_output_indices: an iterable of integers representing a mapping between
the old outputs and the new ones. This mapping can be under-complete and
can have repetitions.
Returns:
A new modified instance of the original subgraph view with remapped
inputs and outputs.
"""
res = copy.copy(self)
if new_input_indices is not None:
res._remap_inputs(new_input_indices) # pylint: disable=protected-access
if new_output_indices is not None:
res._remap_outputs(new_output_indices) # pylint: disable=protected-access
return res
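  # Sketch of the remapping semantics documented above (tensor names are
  # placeholders):
  #
  #   sgv = SubGraphView(inside_ops)        # inputs [t0, t1, t2], outputs [u0, u1]
  #   sgv.remap_inputs([2, 0]).inputs       # -> [t2, t0]
  #   sgv.remap_outputs([1, 1, 0]).outputs  # -> [u1, u1, u0]
  #   sgv.remap(new_input_indices=[0], new_output_indices=[1])  # both at once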
def find_op_by_name(self, op_name):
"""Return the op named op_name.
Args:
op_name: the name to search for
Returns:
The op named op_name.
Raises:
ValueError: if the op_name could not be found.
AssertionError: if the name was found multiple time.
"""
res = [op for op in self._ops if op.name == op_name]
if not res:
raise ValueError("{} not in subgraph.".format(op_name))
if len(res) > 1:
raise AssertionError("More than 1 op named: {}!".format(op_name))
return res[0]
def __str__(self):
if not self:
return "SubGraphView: empty"
def op_name(op):
return op.name
def tensor_name(t):
if t in self._passthrough_ts:
return "{} *".format(t.name)
else:
return t.name
def print_list(name, iterable, get_name):
if iterable:
print("** {}[{}]:".format(name, len(iterable)), file=res)
print("\n".join([" {}".format(get_name(elem)) for elem in iterable]),
file=res)
else:
print("** {}: empty".format(name), file=res)
res = StringIO()
print("SubGraphView (graphid={}):".format(id(self.graph)), file=res)
print_list("ops", self._ops, op_name)
print_list("inputs", self._input_ts, tensor_name)
print_list("outputs", self._output_ts, tensor_name)
return res.getvalue()
@property
def graph(self):
"""The underlying `tf.Graph`."""
return self._graph
@property
def ops(self):
"""The operations in this subgraph view."""
return self._ops
@property
def inputs(self):
"""The input tensors of this subgraph view."""
return util.ListView(self._input_ts)
@property
def connected_inputs(self):
"""The connected input tensors of this subgraph view."""
return [t for t in self._input_ts if t not in self._passthrough_ts]
@property
def outputs(self):
"""The output tensors of this subgraph view."""
return util.ListView(self._output_ts)
@property
def connected_outputs(self):
"""The connected output tensors of this subgraph view."""
return [t for t in self._output_ts if t not in self._passthrough_ts]
@property
def passthroughs(self):
"""The passthrough tensors, going straight from input to output."""
return util.ListView(self._passthrough_ts)
def __bool__(self):
"""Allows for implicit boolean conversion."""
return self._graph is not None
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def op(self, op_id):
"""Get an op by its index."""
return self._ops[op_id]
def is_passthrough(self, t):
"""Check whether a tensor is passthrough."""
return t in self._passthrough_ts
def __enter__(self):
"""Allow Python context to minimize the life time of a subgraph view.
A subgraph view is meant to be a lightweight and transient object. A short
lifetime will alleviate the "out-of-sync" issue mentioned earlier. For that
reason, a SubGraphView instance can be used within a Python context. For
example:
from tensorflow.contrib import graph_editor as ge
with ge.make_sgv(...) as sgv:
print(sgv)
Returns:
Itself.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def input_index(self, t):
| |
"""Cryptocurrency Context Controller"""
__docformat__ = "numpy"
# pylint: disable=R0904, C0302, R1710, W0622, C0201, C0301
import argparse
from typing import List
from datetime import datetime, timedelta
import pandas as pd
from prompt_toolkit.completion import NestedCompleter
from binance.client import Client
from gamestonk_terminal.rich_config import console
from gamestonk_terminal.parent_classes import BaseController
from gamestonk_terminal.cryptocurrency.pycoingecko_helpers import calc_change
from gamestonk_terminal.cryptocurrency.due_diligence import pycoingecko_model
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.helper_funcs import (
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
EXPORT_ONLY_RAW_DATA_ALLOWED,
parse_known_args_and_warn,
check_positive,
valid_date_in_past,
)
from gamestonk_terminal.menu import session
from gamestonk_terminal.cryptocurrency.due_diligence import (
coinpaprika_view,
binance_view,
pycoingecko_view,
finbrain_crypto_view,
binance_model,
coinbase_model,
)
from gamestonk_terminal.cryptocurrency.cryptocurrency_helpers import (
FIND_KEYS,
display_all_coins,
load,
find,
plot_chart,
)
import gamestonk_terminal.config_terminal as cfg
# pylint: disable=import-outside-toplevel
CRYPTO_SOURCES = {
"bin": "Binance",
"cg": "CoinGecko",
"cp": "CoinPaprika",
"cb": "Coinbase",
}
class CryptoController(BaseController):
"""Crypto Controller"""
CHOICES_COMMANDS = ["headlines", "chart", "load", "coins", "find", "prt"]
CHOICES_MENUS = ["ta", "dd", "ov", "disc", "onchain", "defi", "nft", "pred"]
DD_VIEWS_MAPPING = {
"cg": pycoingecko_view,
"cp": coinpaprika_view,
"bin": binance_view,
}
def __init__(self, queue: List[str] = None):
"""Constructor"""
super().__init__("/crypto/", queue)
self.symbol = ""
self.current_coin = ""
self.current_df = pd.DataFrame()
self.current_currency = ""
self.source = ""
self.coin_map_df = pd.DataFrame()
self.current_interval = ""
self.price_str = ""
if session and gtff.USE_PROMPT_TOOLKIT:
choices: dict = {c: {} for c in self.controller_choices}
choices["coins"]["--source"] = {c: {} for c in CRYPTO_SOURCES.keys()}
choices["load"]["--source"] = {c: {} for c in CRYPTO_SOURCES.keys()}
choices["find"]["--source"] = {c: {} for c in CRYPTO_SOURCES.keys()}
choices["find"]["-k"] = {c: {} for c in FIND_KEYS}
choices["headlines"] = {c: {} for c in finbrain_crypto_view.COINS}
# choices["prt"]["--vs"] = {c: {} for c in coingecko_coin_ids} # list is huge. makes typing buggy
self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
"""Print help"""
source_txt = CRYPTO_SOURCES.get(self.source, "?") if self.source != "" else ""
has_ticker_start = "" if self.current_coin else "[unvl]"
has_ticker_end = "" if self.current_coin else "[/unvl]"
help_text = f"""[cmds]
load load a specific cryptocurrency for analysis
find find coins in a certain source
coins find coins and check map across multiple sources[/cmds]
[param]Coin: [/param]{self.current_coin}
[param]Source: [/param]{source_txt}
[cmds]
headlines crypto sentiment from 15+ major news headlines [src][Finbrain][/src]{has_ticker_start}
chart view a candle chart for a specific cryptocurrency
prt potential returns tool - check how much upside if ETH reaches BTC market cap{has_ticker_end}
[/cmds][menu]
> disc discover trending cryptocurrencies, e.g.: top gainers, losers, top sentiment
> ov overview of the cryptocurrencies, e.g.: market cap, DeFi, latest news, top exchanges, stables
> onchain information on different blockchains, e.g.: eth gas fees, whale alerts, DEXes info
> defi decentralized finance information, e.g.: dpi, llama, tvl, lending, borrow, funding
> nft non-fungible tokens, e.g.: today drops{has_ticker_start}
> dd due-diligence for loaded coin, e.g.: coin information, social media, market stats
> ta technical analysis for loaded coin, e.g.: ema, macd, rsi, adx, bbands, obv
> pred prediction techniques e.g.: regression, arima, rnn, lstm, conv1d, monte carlo[/menu]
{has_ticker_end}
"""
console.print(text=help_text, menu="Cryptocurrency")
def call_prt(self, other_args):
"""Process prt command"""
if self.current_coin:
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="prt",
description="Potential Returns Tool"
"Tool to check returns if loaded coin reaches provided price or other crypto market cap"
"Uses CoinGecko to grab coin data (price and market cap).",
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"--vs", help="Coin to compare with", dest="vs", type=str, default=None
)
group.add_argument(
"-p",
"--price",
help="Desired price",
dest="price",
type=int,
default=None,
)
group.add_argument(
"-t",
"--top",
help="Compare with top N coins",
dest="top",
type=int,
default=None,
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "--vs")
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
if ns_parser.vs:
coin_found = pycoingecko_model.check_coin(ns_parser.vs)
if not coin_found:
console.print(
f"VS Coin '{ns_parser.vs}' not found in CoinGecko\n"
)
return
pycoingecko_view.display_coin_potential_returns(
self.coin_map_df["CoinGecko"],
coin_found,
ns_parser.top,
ns_parser.price,
)
else:
console.print(
"No coin selected. Use 'load' to load the coin you want to look at.\n"
)
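    # Example invocations from the crypto menu (a sketch; a coin must be
    # loaded first with `load`):
    #
    #   prt --vs ethereum   # returns if the loaded coin reaches ETH's market cap
    #   prt -p 100000       # returns if the loaded coin reaches a price of 100000
    #   prt -t 10           # compare against the top 10 coins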
def call_coins(self, other_args):
"""Process coins command"""
parser = argparse.ArgumentParser(
prog="coins",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""Shows list of coins available on CoinGecko, CoinPaprika and Binance.If you provide name of
coin then in result you will see ids of coins with best match for all mentioned services.
If you provide ALL keyword in your search query, then all coins will be displayed. To move over coins you
can use pagination mechanism with skip, top params. E.g. coins ALL --skip 100 --limit 30 then all coins
from 100 to 130 will be displayed. By default skip = 0, limit = 10.
If you won't provide source of the data everything will be displayed (CoinGecko, CoinPaprika, Binance).
If you want to search only in given source then use --source flag. E.g. if you want to find coin with name
uniswap on CoinPaprika then use: coins uniswap --source cp --limit 10
""",
)
parser.add_argument(
"-c",
"--coin",
help="Coin you search for",
dest="coin",
required="-h" not in other_args,
type=str,
)
parser.add_argument(
"-s",
"--skip",
default=0,
dest="skip",
help="Skip n of records",
type=check_positive,
)
parser.add_argument(
"-l",
"--limit",
default=10,
dest="limit",
help="Limit of records",
type=check_positive,
)
parser.add_argument(
"--source",
dest="source",
help="Source of data.",
type=str,
choices=CRYPTO_SOURCES.keys(),
)
if other_args and not other_args[0][0] == "-":
other_args.insert(0, "-c")
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
display_all_coins(
coin=ns_parser.coin,
source=ns_parser.source,
top=ns_parser.limit,
skip=ns_parser.skip,
show_all=bool("ALL" in other_args),
export=ns_parser.export,
)
def call_load(self, other_args):
"""Process load command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="load",
description="Load crypto currency to perform analysis on."
"Available data sources are CoinGecko, CoinPaprika, Binance, Coinbase"
"By default main source used for analysis is CoinGecko (cg). To change it use --source flag",
)
parser.add_argument(
"-c",
"--coin",
help="Coin to get",
dest="coin",
type=str,
required="-h" not in other_args,
)
parser.add_argument(
"--source",
help="Source of data",
dest="source",
choices=("cp", "cg", "bin", "cb"),
default="cg",
required=False,
)
parser.add_argument(
"-s",
"--start",
type=valid_date_in_past,
default=(datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d"),
dest="start",
help="The starting date (format YYYY-MM-DD) of the crypto",
)
parser.add_argument(
"--vs",
help="Quote currency (what to view coin vs)",
dest="vs",
default="usd",
type=str,
)
parser.add_argument(
"-i",
"--interval",
help="Interval to get data (Only available on binance/coinbase)",
dest="interval",
default="1day",
type=str,
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-c")
ns_parser = parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            delta = (datetime.now() - ns_parser.start).days
source = ns_parser.source
for arg in ["--source", source]:
if arg in other_args:
other_args.remove(arg)
# TODO: protections in case None is returned
(
self.current_coin,
self.source,
self.symbol,
self.coin_map_df,
self.current_df,
self.current_currency,
) = load(
coin=ns_parser.coin,
source=ns_parser.source,
should_load_ta_data=True,
days=delta,
interval=ns_parser.interval,
vs=ns_parser.vs,
)
if self.symbol:
self.current_interval = ns_parser.interval
first_price = self.current_df["Close"].iloc[0]
last_price = self.current_df["Close"].iloc[-1]
second_last_price = self.current_df["Close"].iloc[-2]
interval_change = calc_change(last_price, second_last_price)
since_start_change = calc_change(last_price, first_price)
if isinstance(self.current_currency, str):
self.price_str = f"""Current Price: {round(last_price,2)} {self.current_currency.upper()}
Performance in interval ({self.current_interval}): {'[green]' if interval_change > 0 else "[red]"}{round(interval_change,2)}%{'[/green]' if interval_change > 0 else "[/red]"}
Performance since {ns_parser.start.strftime('%Y-%m-%d')}: {'[green]' if since_start_change > 0 else "[red]"}{round(since_start_change,2)}%{'[/green]' if since_start_change > 0 else "[/red]"}""" # noqa
console.print(
f"""
Loaded {self.current_coin} against {self.current_currency} from {CRYPTO_SOURCES[self.source]} source
{self.price_str}
"""
) # noqa
def call_chart(self, other_args):
"""Process chart command"""
if self.current_coin:
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="chart",
description="""Display chart for loaded coin. You can specify currency vs which you want
to show chart and also number of days to get data for.""",
)
if self.source == "cp":
parser.add_argument(
"--vs",
default="usd",
dest="vs",
help="Currency to display vs coin",
choices=["usd", "btc", "BTC", "USD"],
type=str,
)
parser.add_argument(
"-d",
"--days",
default=30,
dest="days",
help="Number of days to get data for",
type=check_positive,
)
if self.source == "cg":
parser.add_argument(
"--vs", default="usd", dest="vs", help="Currency to display vs coin"
)
parser.add_argument(
"-d",
"--days",
default=30,
dest="days",
help="Number of days to get data for",
)
if self.source == "bin":
client = Client(cfg.API_BINANCE_KEY, cfg.API_BINANCE_SECRET)
interval_map = {
"1day": client.KLINE_INTERVAL_1DAY,
"3day": client.KLINE_INTERVAL_3DAY,
"1hour": client.KLINE_INTERVAL_1HOUR,
"2hour": client.KLINE_INTERVAL_2HOUR,
"4hour": client.KLINE_INTERVAL_4HOUR,
"6hour": client.KLINE_INTERVAL_6HOUR,
"8hour": client.KLINE_INTERVAL_8HOUR,
"12hour": client.KLINE_INTERVAL_12HOUR,
"1week": client.KLINE_INTERVAL_1WEEK,
"1min": client.KLINE_INTERVAL_1MINUTE,
"3min": client.KLINE_INTERVAL_3MINUTE,
"5min": client.KLINE_INTERVAL_5MINUTE,
"15min": client.KLINE_INTERVAL_15MINUTE,
"30min": client.KLINE_INTERVAL_30MINUTE,
"1month": client.KLINE_INTERVAL_1MONTH,
}
_, quotes = binance_model.show_available_pairs_for_given_symbol(
self.current_coin
)
parser.add_argument(
"--vs",
help="Quote currency (what to view coin vs)",
dest="vs",
type=str,
default="USDT",
choices=quotes,
)
parser.add_argument(
"-i",
"--interval",
help="Interval to get data",
choices=list(interval_map.keys()),
dest="interval",
default="1day",
type=str,
)
parser.add_argument(
"-l",
"--limit",
dest="limit",
default=100,
help="Number to get",
type=check_positive,
)
if self.source == "cb":
interval_map = {
"1min": 60,
"5min": 300,
"15min": 900,
"1hour": 3600,
"6hour": 21600,
"24hour": 86400,
"1day": 86400,
}
_, quotes = coinbase_model.show_available_pairs_for_given_symbol(
self.current_coin
)
                if not quotes:
console.print(
f"Couldn't find any quoted coins for provided symbol {self.current_coin}"
)
return
parser.add_argument(
"--vs",
help="Quote currency (what to view coin vs)",
dest="vs",
type=str,
default="USDT" if "USDT" in quotes else quotes[0],
choices=quotes,
)
parser.add_argument(
"-i",
"--interval",
help="Interval to get data",
choices=list(interval_map.keys()),
dest="interval",
default="1day",
type=str,
)
parser.add_argument(
"-l",
"--limit",
dest="limit",
default=100,
help="Number to get",
type=check_positive,
)
ns_parser = | |
= modified_access_conditions.if_match
if_none_match = None
if modified_access_conditions is not None:
if_none_match = modified_access_conditions.if_none_match
if_tags = None
if modified_access_conditions is not None:
if_tags = modified_access_conditions.if_tags
comp = "properties"
# Construct URL
url = self.set_http_headers.metadata['url']
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
# Construct headers
header_parameters = {}
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
if blob_cache_control is not None:
header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
if blob_content_type is not None:
header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
if blob_content_md5 is not None:
header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
if blob_content_encoding is not None:
header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
if blob_content_language is not None:
header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
if blob_content_disposition is not None:
header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
if lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_tags is not None:
header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise models.StorageErrorException(response, self._deserialize)
if cls:
response_headers = {
'ETag': self._deserialize('str', response.headers.get('ETag')),
'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
}
return cls(response, None, response_headers)
set_http_headers.metadata = {'url': '/{containerName}/{blob}'}
async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
"""The Set Blob Metadata operation sets user-defined metadata for the
specified blob as one or more name-value pairs.
:param timeout: The timeout parameter is expressed in seconds. For
more information, see <a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>
:type timeout: int
:param metadata: Optional. Specifies a user-defined name-value pair
associated with the blob. If no name-value pairs are specified, the
operation will copy the metadata from the source blob or file to the
destination blob. If one or more name-value pairs are specified, the
destination blob is created with the specified metadata, and metadata
is not copied from the source blob or file. Note that beginning with
version 2009-09-19, metadata names must adhere to the naming rules for
C# identifiers. See Naming and Referencing Containers, Blobs, and
Metadata for more information.
:type metadata: str
:param request_id: Provides a client-generated, opaque value with a 1
KB character limit that is recorded in the analytics logs when storage
analytics logging is enabled.
:type request_id: str
:param lease_access_conditions: Additional parameters for the
operation
:type lease_access_conditions:
~azure.storage.blob.models.LeaseAccessConditions
:param cpk_info: Additional parameters for the operation
:type cpk_info: ~azure.storage.blob.models.CpkInfo
:param cpk_scope_info: Additional parameters for the operation
:type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
:param modified_access_conditions: Additional parameters for the
operation
:type modified_access_conditions:
~azure.storage.blob.models.ModifiedAccessConditions
:param callable cls: A custom type or function that will be passed the
direct response
:return: None or the result of cls(response)
:rtype: None
:raises:
:class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
"""
error_map = kwargs.pop('error_map', None)
lease_id = None
if lease_access_conditions is not None:
lease_id = lease_access_conditions.lease_id
encryption_key = None
if cpk_info is not None:
encryption_key = cpk_info.encryption_key
encryption_key_sha256 = None
if cpk_info is not None:
encryption_key_sha256 = cpk_info.encryption_key_sha256
encryption_algorithm = None
if cpk_info is not None:
encryption_algorithm = cpk_info.encryption_algorithm
encryption_scope = None
if cpk_scope_info is not None:
encryption_scope = cpk_scope_info.encryption_scope
if_modified_since = None
if modified_access_conditions is not None:
if_modified_since = modified_access_conditions.if_modified_since
if_unmodified_since = None
if modified_access_conditions is not None:
if_unmodified_since = modified_access_conditions.if_unmodified_since
if_match = None
if modified_access_conditions is not None:
if_match = modified_access_conditions.if_match
if_none_match = None
if modified_access_conditions is not None:
if_none_match = modified_access_conditions.if_none_match
if_tags = None
if modified_access_conditions is not None:
if_tags = modified_access_conditions.if_tags
comp = "metadata"
# Construct URL
url = self.set_metadata.metadata['url']
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
# Construct headers
header_parameters = {}
if metadata is not None:
header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
if lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
if encryption_key is not None:
header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
if encryption_key_sha256 is not None:
header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
if encryption_algorithm is not None:
header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
if encryption_scope is not None:
header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_tags is not None:
header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise models.StorageErrorException(response, self._deserialize)
if cls:
response_headers = {
'ETag': self._deserialize('str', response.headers.get('ETag')),
'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
}
return cls(response, None, response_headers)
set_metadata.metadata = {'url': '/{containerName}/{blob}'}
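    # A hedged usage sketch for the generated operation above. Callers normally go
    # through the higher-level blob client, but the call shape follows the signature
    # defined here; 'etag' and 'serialized_metadata' are illustrative values.
    #
    #   conditions = models.ModifiedAccessConditions(if_match=etag)
    #   await operations.set_metadata(metadata=serialized_metadata,
    #                                 modified_access_conditions=conditions)
    #
    # Only the conditions that are actually set are serialized into conditional
    # request headers (If-Match here); the None checks above skip the rest.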
async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
"""[Update] The Lease Blob operation establishes and manages a lock on a
blob for write and delete operations.
:param timeout: The timeout parameter is expressed in seconds. For
more information, see <a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>
:type timeout: int
:param duration: Specifies the duration of the lease, in seconds, or
negative one (-1) for a lease that never expires. A non-infinite lease
can be between 15 and 60 seconds. A lease duration cannot be changed
using renew or change.
:type duration: int
:param proposed_lease_id: Proposed lease ID, in a GUID string format.
The Blob service returns 400 (Invalid request) if the proposed lease
ID is not in the correct format. See Guid Constructor (String) for a
list of valid GUID string formats.
:type proposed_lease_id: str
:param request_id: Provides a client-generated, opaque value with a 1
KB character limit that is recorded in the analytics logs when storage
analytics logging is enabled.
:type request_id: str
:param modified_access_conditions: Additional parameters for the
operation
:type modified_access_conditions:
~azure.storage.blob.models.ModifiedAccessConditions
:param callable cls: A custom type or function that will be passed the
direct response
:return: None or the result of cls(response)
:rtype: None
:raises:
:class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
"""
error_map = kwargs.pop('error_map', None)
if_modified_since = None
if modified_access_conditions is not None:
if_modified_since = modified_access_conditions.if_modified_since
if_unmodified_since = None
if modified_access_conditions is not None:
if_unmodified_since = modified_access_conditions.if_unmodified_since
if_match = None
if modified_access_conditions is not None:
if_match = modified_access_conditions.if_match
if_none_match = None
if modified_access_conditions is not None:
if_none_match = modified_access_conditions.if_none_match
if_tags = None
if modified_access_conditions is not None:
if_tags = modified_access_conditions.if_tags
comp = "lease"
action = "acquire"
# Construct URL
url = self.acquire_lease.metadata['url']
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
# Construct headers
header_parameters = {}
if duration is not None:
header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
if proposed_lease_id is not None:
header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", | |
import json
from queue import Queue
from threading import Thread
from uuid import uuid4
import logging
import tornado.escape
import tornado
import tornado.web
from tornado.options import options, define
import tornado.httpserver
import tornado.ioloop
import pika.adapters.tornado_connection
from pyasynch.encoder import Encoder, decoder
LOGGER = logging.getLogger(__name__)
import pika
import pika.adapters
from pyasynch import deserialize_address, serialize_address
from pyasynch.node import SystemNode
class WebHandler(tornado.web.RequestHandler):
def prepare(self):
super(WebHandler, self).prepare()
self.json_data = None
if self.request.body:
try:
self.json_data = tornado.escape.json_decode(self.request.body)
except ValueError:
# TODO: handle the error
pass
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header('Access-Control-Allow-Methods', 'PUT,POST, GET, OPTIONS, DELETE')
self.set_header("Access-Control-Allow-Headers","*")
self.set_header("Access-Control-Allow-Headers", "access-control-allow-origin,authorization,content-type")
self.set_header('Access-Control-Allow-Credentials','true')
self.set_header('Access-Control-Expose-Headers', "true")
def get_argument(self, name, default=object(), strip=True):
# TODO: there's more arguments in the default get_argument() call
# TODO: handle other method types
if self.request.method in ['POST', 'PUT'] and self.json_data:
return self.json_data.get(name, default)
else:
return super(WebHandler, self).get_argument(name, default, strip=strip)
@tornado.web.asynchronous
def post(self, method):
try:
headers = self.request.headers
ip = self.request.remote_ip
url = self.request.path
request = self.json_data
additionals = {'ip':ip}
            req = {**(request or {}), **additionals}
data = self.invoke(method, headers, req)
response = {"route": url, "payload": data}
self.write(json.dumps(response, cls=Encoder))
self.finish()
except Exception as e:
self.error(500, str(e))
def error(self, code, message):
self.clear()
self.set_status(code)
self.write(json.dumps({"error": message}, cls=Encoder))
self.finish()
@tornado.web.asynchronous
def options(self,method):
# no body
self.set_status(204)
self.finish()
@tornado.web.asynchronous
def get(self, method):
try:
headers = self.request.headers
ip = self.request.remote_ip
url = self.request.path
request = self.json_data
additionals = {'ip':ip}
            req = {**(request or {}), **additionals}
data = self.invoke(method, headers, req)
response = {"route": url, "payload": data}
self.write(json.dumps(response, cls=Encoder))
self.finish()
except Exception as e:
self.error(500, str(e))
def invoke(self, method, headers, message, **kwargs):
parts = method.split('/')
cluster_id = parts[0]
app_id = parts[1]
correlation_id = headers.get('correlation_id', None)
        session_id = headers.get('session_id', None)
return self.application.endpoint.invoke(cluster_id, app_id, correlation_id, session_id, message)
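        # Example of the routing above: a request to /math/add yields
        # cluster_id='math' and app_id='add', which Endpoint.invoke (on the
        # application's endpoint, not shown here) is expected to dispatch to the
        # 'add' handler of the node registered under 'math' via register_node().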
class WebServer(tornado.web.Application):
' Tornado Webserver Application...'
def __init__(self, endpoint):
# Url to its handler mapping.
self.endpoint = endpoint
handlers = [
(r"/(?P<method>\S+)", WebHandler)
]
# Other Basic Settings..
settings = dict(
# cookie_secret = options.cookie_secret,
# login_url="/signin",
# template_path = os.path.join(os.path.dirname(__file__),"templates"),
# static_path = os.path.join(os.path.dirname(__file__),"static"),
# xsrf_cookies=True,
debug=True
)
# Initialize Base class also.
tornado.web.Application.__init__(self, handlers, **settings)
class Endpoint(object):
"""This is an example consumer that will handle unexpected interactions
with RabbitMQ such as channel and connection closures.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
EXCHANGE = 'message'
EXCHANGE_TYPE = 'topic'
def __init__(self, url, inbound, publish_interval=0.1, threaded=False, port=8081):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP url to connect with
"""
self._threaded = threaded
self._connection = None
self._channel = None
self._acked = 0
self._nacked = 0
self._deliveries = []
self._closing = False
self._consumer_tag = None
self._message_number = 0
self._stopping = False
self._url = url
self.INBOUND_QUEUE = inbound
self._outbounds = []
self.PUBLISH_INTERVAL = publish_interval
self.INBOUND_ROUTING_KEY = self.INBOUND_QUEUE
self.inputs = Queue()
self._outputs = Queue()
self._nodes = {}
self._enabled_delivery = False
self._nodes['system'] = SystemNode(self)
self._redirects = {}
self._port = port
def send(self, address, message, reply_to=None, error_to=None, session_id=None):
"""Sends a message to a specified address
:address: pin://endpoint_id/cluster_id/app_id
:message: json message dict to be sent
:reply_to: Optional reply pin:// address
:error_to: Optional error pin:// address
:session_id: Optional conversational session id
:rtype: str
"""
endpoint_id, cluster_id, app_id = deserialize_address(address)
if endpoint_id in self._outbounds:
correlation_id = str(uuid4())
self._outputs.put(
[endpoint_id, cluster_id, app_id, correlation_id, session_id, message, reply_to, error_to])
return correlation_id
return None
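    # A minimal usage sketch (endpoint and route names are illustrative; actual
    # delivery still requires the connection and ioloop to be running):
    #
    #   endpoint = Endpoint('amqp://guest:guest@localhost:5672/%2F', 'my_service')
    #   endpoint.redirect('pin://my_service/cluster/reply',
    #                     'pin://other_service/cluster/on_reply')
    #   cid = endpoint.send('pin://other_service/math/add', {'a': 1, 'b': 2},
    #                       reply_to='pin://my_service/cluster/reply')
    #
    # send() only queues the message when the target endpoint id is already known
    # as an outbound (redirect() registers it); otherwise it returns None.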
def routes(self):
"""Returns a full list of the available inbound endpoints"""
ret = []
for cluster_id in self._nodes:
method_list = [app_id for app_id in dir(self._nodes[cluster_id]) if
callable(getattr(self._nodes[cluster_id], app_id))]
for app_id in method_list:
if not app_id.startswith('_'):
ret.append(serialize_address(self.INBOUND_QUEUE, cluster_id, app_id))
return ret
def redirect(self, source, target):
'''
Redirects an output of an endpoint to another endpoint
:param source:
:param target:
:return:
'''
if deserialize_address(source)[0] != self.INBOUND_QUEUE:
return
if source not in self._redirects:
self._redirects[source] = []
if target not in self._redirects[source]:
target_id = deserialize_address(target)[0]
if target_id not in self._outbounds:
self._outbounds.append(target_id)
self._redirects[source].append(target)
def register_node(self, node_id, node_reference):
"""This method adds a new application to the current consumer part, by specification
:cluster_id: the Cluster id string
:cluster_reference: The Cluster class reference
"""
if node_id == 'system':
raise Exception('Cluster with name "system" cannot be registered')
if node_id not in self._nodes:
LOGGER.info('Registering cluster: {0}'.format(node_id))
self._nodes[node_id] = node_reference
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
LOGGER.info('Connecting to %s', self._url)
return pika.adapters.tornado_connection.TornadoConnection(pika.URLParameters(self._url),
self.on_connection_open,
stop_ioloop_on_close=False)
#return pika.SelectConnection(pika.URLParameters(self._url),
# self.on_connection_open,
# stop_ioloop_on_close=False)
def close_connection(self):
"""This method closes the connection to RabbitMQ."""
LOGGER.info('Closing connection')
self._closing = True
self._connection.close()
def add_on_connection_close_callback(self):
"""This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
LOGGER.info('Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
self._channel = None
if self._closing:
self._connection.ioloop.stop()
else:
LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s',
reply_code, reply_text)
self._connection.add_timeout(5, self.reconnect)
def on_connection_open(self, unused_connection):
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused_connection: pika.SelectConnection
"""
LOGGER.info('Connection opened')
self.add_on_connection_close_callback()
self.open_channel()
def reconnect(self):
"""Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
# This is the old connection IOLoop instance, stop its ioloop
self._connection.ioloop.stop()
if not self._closing:
# Create a new connection
self._connection = self.connect()
# There is now a new connection, needs a new ioloop to run
self._connection.ioloop.start()
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
LOGGER.info('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
LOGGER.warning('Channel %i was closed: (%s) %s',
channel, reply_code, reply_text)
if not self._closing:
self._connection.close()
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.info('Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
self.setup_exchange(self.EXCHANGE)
def setup_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
LOGGER.info('Declaring exchange %s', exchange_name)
self._channel.exchange_declare(self.on_exchange_declareok,
exchange_name,
self.EXCHANGE_TYPE)
def on_exchange_declareok(self, unused_frame):
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
"""
LOGGER.info('Exchange declared')
self.setup_inbound_queue(self.INBOUND_QUEUE)
for outbound in self._outbounds:
self.setup_outbound_queue(outbound)
def setup_inbound_queue(self, queue_name):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
"""
LOGGER.info('Declaring queue %s', queue_name)
self._channel.queue_declare(self.on_inbound_queue_declareok, queue_name)
def setup_outbound_queue(self, queue_name):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
"""
LOGGER.info('Declaring | |
import types,string
from logs import log_error,log_info
import traceback
from scan_rtl import compute1
MathOptsStr = '~ ! & && ~& !& ^ !^ ~^ | || ~| !|'
MathOpts = MathOptsStr.split()
Between = '_HIER_'
def constant_inputs(Current,Env):
Lits = []
for Dst,Src,_,_ in Current.hard_assigns:
if is_literal(Src,Current):
Lits.append((Dst,Src))
for Inst in Current.insts:
Obj = Current.insts[Inst]
for Pin in Obj.conns:
Con = Obj.conns[Pin]
for Lit,Repl in Lits:
if same_expr(Con,Lit,Current):
print('replacement same input %s %s'%(Con,Repl))
Obj.conns[Pin]=Repl
def remove_buffers(Current,Env):
Renames=prepare_trans_table(Current)
print('renames len=%d'%len(Renames.keys()))
for ind,(Dst,Src,A,B) in enumerate(Current.hard_assigns):
Src1 = translate_expr(Src,Renames)
Current.hard_assigns[ind]=(Dst,Src1,A,B)
for Inst in Current.insts:
Obj = Current.insts[Inst]
for Pin in Obj.conns:
Sig = Obj.conns[Pin]
Sig1 = translate_expr(Sig,Renames)
Obj.conns[Pin]=Sig1
def translate_expr(Src,Renames):
if type(Src)is str:
if Src in Renames:
return Renames[Src][1]
else:
return Src
if type(Src)is tuple:
return Src
if type(Src)is list:
if Src[0]=='const':
return Src
if Src[0] in MathOpts:
res=[Src[0]]
for X in Src[1:]:
Y = translate_expr(X,Renames)
                res.append(Y)
return res
return Src
def output_sig(Dst,Current):
if Dst not in Current.nets:
return False
Dir,Wid = Current.nets[Dst]
return 'output' in Dir
def prepare_trans_table(Current):
Holder={}
Renames={}
for Dst,Src,_,_ in Current.hard_assigns:
if (type(Dst)is str) and (not output_sig(Dst,Current)) and simple_sig_source(Src):
Holder[str(Dst)]=(Dst,Src)
for Key in Holder:
print('holder',Key,Holder[Key])
for Key in Holder:
Renames[Key]=Holder[Key]
Hards=[]
for Dst,Src,A,B in Current.hard_assigns:
if str(Dst) not in Renames:
Hards.append((Dst,Src,A,B))
Current.hard_assigns=Hards
Ok=True
while Ok:
Ok=False
Keys = list(Renames.keys())
for Key in Keys:
(Dst,Src)=Renames[Key]
Key1 = str(Src)
if Key1 in Holder:
(Dst1,Src1)=Holder[Key1]
Renames[Key]=(Dst,Src1)
Ok=True
return Renames
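# Example of the transitive buffer collapsing above: given
#   assign b = a;
#   assign c = b;
# Holder first maps 'b'->'a' and 'c'->'b'; the fix-point loop then resolves 'c'
# directly to 'a', and both buffer assigns are dropped from Current.hard_assigns.
# Signals declared as outputs are never renamed, so port names are preserved.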
def simple_sig_source(Src):
if not Src:
return True
if type(Src)is str:
return True
if type(Src)is tuple:
return True
if type(Src)is list:
if Src[0] in ['const','subbit','subbus']:
return True
return False
def is_literal(Expr,Current):
if type(Expr)is tuple:
return True
if type(Expr)is list:
return Expr[0] in ['const','bin','hex']
return False
def same_expr(Aex,Bex,Current):
Alist = make_sig_list(Aex,Current)
Blist = make_sig_list(Bex,Current)
if len(Alist)!=len(Blist):
return False
return Alist==Blist
MoreDontFlattens = ['and','or','not','nand','nor','xor']
def flatten(Current,Whome,Env,load_module):
Current.create_stat_types()
if 'and' not in Env.DontFlattens:
Env.DontFlattens.extend(MoreDontFlattens)
dones = 0
if (Whome in Current.insts):
Type = Current.insts[Whome].Type
if Type not in Env.Modules:
load_module(Type,Env)
if Type not in Env.Modules:
Env.DontFlattens.append(Type)
print('dont flatten %s (from %s) because not loaded'%(Type,Current.Module))
else:
flatten_inst(Current,Whome,Env.Modules)
else:
Insts= list(Current.insts.keys())
for Inst in Insts:
if (Whome=='*')or(Current.insts[Inst].Type==Whome)or(Current.insts[Inst].Type in Whome):
Type = Current.insts[Inst].Type
if (Type not in Env.DontFlattens):
if Type not in Env.Modules:
load_module(Type,Env)
if Type not in Env.Modules:
Env.DontFlattens.append(Type)
print('dont flatten %s (from %s) because not loaded'%(Type,Current.Module))
else:
flatten_inst(Current,Inst,Env.Modules)
print('FLATTEN',Inst,Type)
dones += 1
return dones,(Inst,Type)
def flatten_deep(Current,Whome,Env,load_module):
Dones=1
Max = 30
while (Dones>0) and (Max>0):
Dones,Who = flatten(Current,Whome,Env,load_module)
print('dones',Dones)
Max -= 1
def flatten_inst(Current,InstName,modules):
Type = Current.insts[InstName].Type
Instobj = Current.insts[InstName]
Conns = Instobj.conns
Son = modules[Type]
for Param in Son.parameters:
Current.parameters[Param]=Son.parameters[Param]
for Param in Son.localparams:
Current.localparams[Param]=Son.localparams[Param]
Translate = prepare_mapping_table(Current,Instobj,modules)
for (Dst,Src,Dly,Stren) in Son.hard_assigns:
Dst1 = flatten_trans(InstName,Dst,Translate,Son)
Src1 = flatten_trans(InstName,Src,Translate,Son)
Current.add_hard_assign(Dst1,Src1,Dly,Stren)
Current.check_net_def(Dst1)
Current.check_net_def(Src1)
for Soninst in Son.insts:
Sonobj = Son.insts[Soninst]
Sontype = Sonobj.Type
if (Soninst in Current.insts)or(Current.stat_types[Type]>1)or(Current.deepInstNames):
Soninst = combine_inst_name(InstName,Sonobj.Name)
Obj = Current.add_inst(Sontype,Soninst)
for Sonprm in Sonobj.params:
Val = Sonobj.params[Sonprm]
Obj.add_param(Sonprm,Val)
for Net in Son.nets:
Dir,Wid = Son.nets[Net]
if (Dir in ['wire','reg'])and(type(Wid)is tuple)and(Wid[0]=='double'):
Current.nets['%s_%s'%(InstName,Net)]=(Dir,Wid)
for Sonpin in Sonobj.conns:
Sonsig = Sonobj.conns[Sonpin]
Sonsig1 = flatten_trans(InstName,Sonsig,Translate,Son)
Current.check_net_def(Sonsig1)
Current.add_conn(Soninst,Sonpin,Sonsig1)
if InstName not in Current.insts:
        log_error('inst %s for flat is not in %s' % (InstName,Current.Module))
Current.del_inst(InstName)
def combine_inst_name(Inst,Son):
if (Son[0]=='\\'):
Son=Son[1:]
Soninst = '%s%s%s'%(Inst,Between,Son)
Soninst = Soninst.replace('[','_')
Soninst = Soninst.replace(']','_')
Soninst = Soninst.replace('\\','')
Soninst = Soninst.replace(' ','')
return Soninst
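# Worked example of the hierarchical renaming above: flattening instance 'u_core'
# whose child instance is the escaped identifier '\u_alu[3] ' yields the flat name
# 'u_core_HIER_u_alu_3_' -- the leading backslash and trailing space are dropped
# and '[' / ']' are mapped to '_'.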
def check_instance_against_module(Current,Instobj,modules):
Type = Instobj.Type
Son = modules[Type]
Conns = list(Instobj.conns.keys())
Conns.sort()
Exts=[]
for Net in Son.nets:
(Dir,_) = Son.nets[Net]
if Dir not in ['wire','reg']:
Exts.append(Net)
Exts.sort()
Bads=0
for A1 in Conns:
if A1 not in Exts:
log_error('check_instance_against_module connected pin=%s of instance=%s of module=%s not in definition of module %s'%(A1,Instobj.Name,Current.Module,Type))
Bads += 1
if (Bads>0):
for Ext in Exts:
log_info('ext: %s'%Ext)
for A2 in Exts:
if A2 not in Conns:
print('check_instance_against_module external pin=%s of module %s not connected at instance=%s in module=%s'%(A2,Type,Instobj.Name,Current.Module))
def prepare_mapping_table(Current,Instobj,modules):
check_instance_against_module(Current,Instobj,modules)
Type = Instobj.Type
Son = modules[Type]
Translate={}
for SonPin in Instobj.conns:
SonSig = Instobj.conns[SonPin]
try:
(Dir,HsLs)=Son.nets[SonPin]
except:
print('prepare_mapping_table SonPin=%s (%s/%s) is not net in %s '%(SonPin,Instobj.Type,Instobj.Name,Current.Module))
Dir='wire'
HsLs=0,0
if (type(HsLs)is tuple)and(HsLs[0]=='double'):
H,L = total_width(Current,HsLs)
elif type(HsLs) is tuple:
H = Son.compute_int(HsLs[0])
L = Son.compute_int(HsLs[1])
else:
H = HsLs
L = HsLs
Sigs = make_sig_list(SonSig,Current)
Range = len(Sigs)
if SonSig and (len(Sigs)!= (H-L+1)):
log_error('wrong connection width for pin %s for sig %s lensigs=%d h=%d l=%d sontype=%s father=%s'%(SonPin,SonSig,len(Sigs),H,L,Type,Current.Module))
Range = min(Range,(H-L+1))
if not SonSig:
Range=(H-L+1)
Translate[SonPin]=False
j=0
for i in range(Range-1,-1,-1):
PP = '%s[%d]'%(SonPin,i)
Translate[PP]=False
j+=1
elif (len(Sigs)==1):
Translate[SonPin]=Sigs[0]
else:
Translate[SonPin]=SonSig
j=0
for i in range(Range-1,-1,-1):
PP = '%s[%d]'%(SonPin,i)
Translate[PP]=Sigs[j]
j+=1
return Translate
def total_width(Son,HsLs):
    if type(HsLs) is not tuple:
return 1
if type(HsLs)is tuple:
if len(HsLs)==2:
H = Son.compute_int(HsLs[0])
L = Son.compute_int(HsLs[1])
return H,L
H = Son.compute_int(HsLs[1][0])
L = Son.compute_int(HsLs[1][1])
Wid0 = (H-L)+1
H = Son.compute_int(HsLs[2][0])
L = Son.compute_int(HsLs[2][1])
Wid1 = (H-L)+1
H = Wid0*Wid1-1
L = 0
return (H,L)
def make_sig_list(Sig,Mod):
if not Sig:
return [Sig]
if (type(Sig)is str):
if Sig in Mod.nets:
Dir,HsLs = Mod.nets[Sig]
if (HsLs==0):
return [Sig]
elif (HsLs[0]=='double'):
H,L = total_width(Mod,HsLs)
res=[]
for II in range(H,-1,-1):
res.append(['subbit',Sig,II])
return res
else:
return make_sig_list(['subbus',Sig,HsLs[0],HsLs[1]],Mod)
if Sig[0]=='\\':
return make_sig_list(Sig[1:],Mod)
if Sig=='$unconnected':
return [Sig]
log_error('sig for flatten %s not found in %s'%(Sig,Mod.Module),True)
return [Sig]
if (type(Sig)is list)and(Sig[0]=='bin'):
return make_sig_list(['const',Sig[1],'b%s'%Sig[2]],Mod)
if (type(Sig)is list)and(Sig[0]=='hex'):
return make_sig_list(['const',Sig[1],'h%s'%Sig[2]],Mod)
if (type(Sig)is list)and(Sig[0]=='const'):
Wid = Mod.compute_int(Sig[1])
if (Wid==1):
return [ ['const',Sig[1],Sig[2]] ]
Base = Sig[2][0]
Data = Sig[2][1:]
if (Base=='b'):
Val = int(Data,2)
elif (Base=='h'):
Val = int(Data,16)
elif (Base=='d'):
Val = int(Data,10)
else:
Val=0
log_error('bad base for const %s"%s'%(Sig[1],Sig[2]))
res=[]
for i in range(Wid):
V = (Val>>i)&1
res.append(['const',1,'b%d'%V])
res.reverse()
return res
if (type(Sig)is list)and(Sig[0]=='subbit'):
Bus = Sig[1]
_,Wid = Mod.nets[Bus]
if (type(Wid)is tuple)and(len(Wid)==3)and(Wid[0]=='double'):
H1 = Mod.compute_int(Wid[1][0])
L1 = Mod.compute_int(Wid[1][1])
H2 = Mod.compute_int(Wid[2][0])
L2 = Mod.compute_int(Wid[2][1])
Pos = Mod.compute_int(Sig[2])
Mul = (H2-L2+1)
res=[]
for i in range(H1,L1-1,-1):
res.append(['subbit',Bus,str(i+Mul*Pos)])
return res
return [Sig]
if (type(Sig)is list)and(Sig[0]=='subbus'):
Bus = Sig[1]
_,Wid = Mod.nets[Bus]
if len(Sig)==3:
H = int(Sig[2][0])
L = int(Sig[2][1])
elif len(Sig)==4:
H = compute1(Sig[2],Mod)
L = compute1(Sig[3],Mod)
else:
log_error('bus indexing failed %s "%s" '%(Bus,Sig))
H=1
L=0
if (type(Wid)is tuple)and(len(Wid)==3)and(Wid[0]=='double'):
H1 = Mod.compute_int(Wid[1][0])
L1 = Mod.compute_int(Wid[1][1])
H2 = Mod.compute_int(Wid[2][0])
L2 = Mod.compute_int(Wid[2][1])
res=[]
Mul = (H2-L2+1)
for JJ in range(H,L-1,-1):
for II in range(H1,L1-1,-1):
res.append(['subbit',Bus,str(II+Mul*JJ)])
return res
else:
res=[]
for i in range(H,L-1,-1):
res += [ ['subbit',Bus,str(i)]]
return res
if (type(Sig)is list)and(Sig[0]=='curly'):
res=[]
for Item in Sig[1:]:
res += make_sig_list(Item,Mod)
return res
log_error('make_sig_list failed on %s'%(str(Sig)))
return [Sig]
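# Bit-blasting examples for make_sig_list (assuming 'data' is declared in Mod.nets
# with a plain (3, 0) width and that compute1/compute_int return plain ints as-is):
#
#   make_sig_list('data', Mod)
#   -> [['subbit', 'data', '3'], ['subbit', 'data', '2'],
#       ['subbit', 'data', '1'], ['subbit', 'data', '0']]
#
#   make_sig_list(['const', 4, 'hA'], Mod)      # 0xA == 1010 in binary, MSB first
#   -> [['const', 1, 'b1'], ['const', 1, 'b0'], ['const', 1, 'b1'], ['const', 1, 'b0']]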
NCCON=0
def flatten_trans(Inst,Sig,Translate,SonMod):
global NCCON
X=flatten_trans2(Inst,Sig,Translate,SonMod)
if (X==''):
X = 'nc_con%d'%(NCCON)
NCCON+=1
return X
def relax_inst(Inst):
Soninst=Inst
if (Inst[0]=='\\'):
Soninst = Inst[1:]
if (Inst[-1]==' '):
Soninst = Inst[:-1]
Soninst = Soninst.replace('[','_')
Soninst = Soninst.replace(']','_')
Soninst = Soninst.replace('\\','')
Soninst = Soninst.replace(' ','')
# Soninst = Soninst.replace('.','_')
return Soninst
def flatten_trans2(Inst,Sig,Translate,SonMod):
if (Sig=='$unconnected'):
return '$unconnected'
if (Sig==False):
return False
if (Sig==None):
return None
if (type(Sig)is str)and("'" in Sig):
return Sig
if (type(Sig)is str)and(Sig in Translate):
return Translate[Sig]
if (type(Sig)is str)and(Sig not in Translate):
if (Sig==''):
return ''
if (Sig in SonMod.nets):
Dir,Wid = SonMod.nets[Sig]
if type(Wid)is tuple:
if len(Wid)==2:
H = SonMod.compute_int(Wid[0])
L = SonMod.compute_int(Wid[1])
return flatten_trans(Inst,['subbus',Sig,H,L],Translate,SonMod)
elif (len(Wid)==3)and(Wid[0]=='double'):
H,L = total_width(SonMod,Wid)
return flatten_trans(Inst,['subbus',Sig,H,L],Translate,SonMod)
if (Sig[0]=='\\'):
Sig = Sig[1:]
Inst1 = relax_inst(Inst)
if (Sig[0]=='\\'):
Sig = relax_inst(Sig)
return '%s_%s'%(Inst1,Sig)
if (type(Sig)is list)and(Sig[0]=='const'):
return Sig
if (type(Sig)is list)and(Sig[0] in ['bin','hex']):
Lii = make_sig_list(Sig,SonMod)
if len(Lii)==1:
return Lii[0]
return ['curly']+Lii
if (type(Sig)is tuple):
return Sig
if (type(Sig)is list)and(Sig[0]=='repeat'):
Many = SonMod.compute_int(Sig[1])
LL = make_sig_list(Sig[2],SonMod)
return ['curly']+LL * Many
if (type(Sig)is list)and(Sig[0]=='subbit'):
Bus=Sig[1]
if (Bus[0]=='\\'):
Bus = relax_inst(Bus)
Ind=Sig[2]
Bsig = '%s[%s]'%(Bus,Ind)
if (Bsig in Translate):
return Translate[Bsig]
return ['subbit',Inst+'_'+Bus,Ind]
if (type(Sig)is list)and(Sig[0]=='subbus'):
Bus=Sig[1]
if (Bus[0]=='\\'):
Bus = relax_inst(Bus)
if len(Sig)==3:
H = SonMod.compute_int(Sig[2][0])
L = SonMod.compute_int(Sig[2][1])
elif len(Sig)==4:
H = SonMod.compute_int(Sig[2])
L = SonMod.compute_int(Sig[3])
else:
log_error('bus indexing failed %s "%s" '%(Bus,Sig))
H=1
L=0
res=[]
for i in range(H,L-1,-1):
res += [flatten_trans(Inst,['subbit',Bus,str(i)],Translate,SonMod)]
return ['curly']+res
if (type(Sig)is list)and(Sig[0]=='curly'):
res = ['curly']
for X in Sig[1:]:
Y = flatten_trans(Inst,X,Translate,SonMod)
res += [Y]
return res
if (type(Sig)is list)and(Sig[0]=='question'):
res = ['question']
for X in Sig[1:]:
Y = flatten_trans(Inst,X,Translate,SonMod)
res += [Y]
return res
if (type(Sig)is list)and(Sig[0] in MathOpts):
res = [Sig[0]]
for X | |
#!/usr/bin/env python
"""
The handles all the UI elements in the Mosaic tab.
Hazen 10/18
"""
import numpy
from PyQt5 import QtCore, QtGui, QtWidgets
import storm_control.sc_library.hdebug as hdebug
import storm_control.steve.coord as coord
import storm_control.steve.imageCapture as imageCapture
import storm_control.steve.positions as positions
import storm_control.steve.qtdesigner.sections_ui as sectionsUi
import storm_control.steve.steveItems as steveItems
import storm_control.steve.steveModule as steveModule
class SectionItem(steveItems.SteveItem):
brush = QtGui.QBrush(QtGui.QColor(255,255,255,0))
counter = 0
data_type = "section"
deselected_pen = QtGui.QPen(QtGui.QColor(0,0,255))
fields = ["x", "y", "angle"]
selected_pen = QtGui.QPen(QtGui.QColor(255,0,0))
ellipse_size = 1
def __init__(self, a_point = None, **kwds):
super().__init__(**kwds)
self.a_point = None
self.angle = 0
# Not used. The previous version of Steve kept track
# of the section number so we maintain that.
self.index = self.counter
self.counter += 1
self.x_size = coord.umToPix(self.ellipse_size)
self.y_size = coord.umToPix(self.ellipse_size)
self.graphics_item = QtWidgets.QGraphicsEllipseItem(0, 0, self.x_size, self.y_size)
self.graphics_item.setBrush(self.brush)
self.graphics_item.setPen(self.deselected_pen)
self.graphics_item.setZValue(999.0)
self.setLocation(a_point)
def changeField(self, field, df):
if (field == "x"):
self.movePosition(df, 0.0)
elif (field == "y"):
self.movePosition(0.0, df)
elif (field == "angle"):
self.angle += df
if (self.angle > 360.0):
self.angle -= 360.0
if (self.angle < 0.0):
self.angle += 360.0
else:
assert False, "No field " + field + "!"
def getAngle(self):
return self.angle
def getField(self, field):
# These need to match self.fields.
if (field == "x"):
return self.a_point.x_um
elif (field == "y"):
return self.a_point.y_um
elif (field == "angle"):
return self.angle
else:
assert False, "No field " + field + "!"
def getLocation(self):
return self.a_point
def movePosition(self, dx_um, dy_um):
a_point = coord.Point(self.a_point.x_um + dx_um,
self.a_point.y_um + dy_um,
"um")
self.setLocation(a_point)
def saveItem(self, directory, name_no_extension):
a_list = [self.index, self.a_point.x_um, self.a_point.y_um, self.angle]
return "{0:0d},{1:.2f},{2:.2f},{3:.2f}".format(*a_list)
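        # Example of the row produced above (and parsed back by SectionItemLoader.load):
        # index 3 at (12.5um, -7.25um) with a 90 degree rotation serializes to
        # "3,12.50,-7.25,90.00".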
def setAngle(self, angle):
self.angle = angle
def setLocation(self, a_point):
self.a_point = a_point
self.graphics_item.setPos(a_point.x_pix - 0.5 * self.x_size,
a_point.y_pix - 0.5 * self.y_size)
def setSelected(self, selected):
"""
If the object is selected, increase it's z value and change the pen
color, otherwise set the object's z value and pen color back to the
unselected values.
"""
if selected:
self.graphics_item.setZValue(1999.0)
self.graphics_item.setPen(self.selected_pen)
else:
self.graphics_item.setZValue(999.0)
self.graphics_item.setPen(self.deselected_pen)
def setVisible(self, visible):
self.graphics_item.setVisible(visible)
class SectionItemLoader(steveItems.SteveItemLoader):
"""
Creates a SectionItem from saved data.
"""
def load(self, directory, index, x, y, angle):
section_item = SectionItem(a_point = coord.Point(float(x), float(y), "um"))
section_item.setAngle(float(angle))
return section_item
class Sections(steveModule.SteveModule):
"""
This is the main class / the interface with steve.
"""
@hdebug.debug
def __init__(self, image_capture = None, **kwds):
super().__init__(**kwds)
self.image_capture = image_capture
self.initialized = False
SectionItem.ellipse_size = self.parameters.get("ellipse_size")
SectionItem.deselected_pen.setWidth(self.parameters.get("pen_width"))
SectionItem.selected_pen.setWidth(self.parameters.get("pen_width"))
self.ui = sectionsUi.Ui_Form()
self.ui.setupUi(self)
# Hide some things we don't use.
self.ui.backgroundComboBox.hide()
self.ui.backgroundLabel.hide()
self.ui.moveAllSectionsCheckBox.hide()
self.ui.showFeaturesCheckBox.hide()
self.ui.thresholdLabel.hide()
self.ui.thresholdSlider.hide()
# Model to store sections.
self.sections_model = QtGui.QStandardItemModel()
self.sections_model.setHorizontalHeaderLabels([""] + SectionItem.fields)
# Section renderer.
self.sections_renderer = SectionsRenderer(scene = self.item_store.getScene())
# View to manipulate sections.
self.sections_table_view = SectionsTableView(item_store = self.item_store,
step_size = self.parameters.get("step_size"))
self.sections_table_view.setModel(self.sections_model)
self.sections_table_view.setTitleBar(self.ui.sectionsGroupBox)
self.sections_table_view.horizontalHeader().setStretchLastSection(True)
self.sections_table_view.horizontalHeader().setMinimumSectionSize(20)
layout = QtWidgets.QVBoxLayout(self.ui.sectionsGroupBox)
layout.addWidget(self.sections_table_view)
layout.setContentsMargins(0,0,0,0)
self.ui.sectionsGroupBox.setLayout(layout)
# View to display section renders.
self.sections_view = SectionsView()
layout = QtWidgets.QVBoxLayout(self.ui.sectionsDisplayFrame)
layout.addWidget(self.sections_view)
self.ui.sectionsDisplayFrame.setLayout(layout)
# Connect signals.
self.ui.foregroundOpacitySlider.valueChanged.connect(self.handleForegroundOpacitySlider)
self.sections_model.itemChanged.connect(self.handleItemChanged)
self.sections_table_view.currentChangedEvent.connect(self.handleCurrentChangedEvent)
self.sections_view.changeSizeEvent.connect(self.handleChangeSizeEvent)
self.sections_view.changeZoomEvent.connect(self.handleChangeZoomEvent)
self.sections_view.pictureEvent.connect(self.handlePictureEvent)
self.sections_view.positionEvent.connect(self.handlePositionEvent)
self.sections_view.updateEvent.connect(self.handleUpdateEvent)
# Set mosaic file loader. This handles loading SectionItems from a mosaic file.
self.item_store.addLoader(SectionItem.data_type, SectionItemLoader())
def addSection(self, a_point, a_angle):
"""
Add a single section to the scene and to the model.
"""
# Create section item.
section_item = SectionItem(a_point = a_point)
section_item.setAngle(a_angle)
# Add to scene.
self.item_store.addItem(section_item)
# Add to model.
self.addSectionItem(section_item)
def addSectionItem(self, section_item):
"""
Add a single section item to the model.
"""
# Add to model. The elements in a row all share the same item.
row = []
item = SectionsStandardItem(section_item = section_item)
item.setCheckable(True)
row.append(item)
for field in section_item.fields:
row.append(SectionsStandardItem(field = field,
section_item = section_item))
self.sections_model.appendRow(row)
self.sections_table_view.updateTitle()
# Resize if this is the first element added.
if not self.initialized:
self.sections_table_view.resizeColumnsToContents()
self.initialized = True
def currentTabChanged(self, tab_index):
if (tab_index == 1):
for elt in self.item_store.itemIterator(item_type = SectionItem):
elt.setVisible(False)
else:
for elt in self.item_store.itemIterator(item_type = SectionItem):
elt.setVisible(True)
def handleAddSection(self, ignored):
"""
This is called by the popup menu in the mosaic tab or a
key press event in the mosiacs view.
"""
self.addSection(self.mosaic_event_coord, 0)
def handleChangeSizeEvent(self, width, height):
self.sections_renderer.setRenderSize(width, height)
self.updateSectionView()
def handleChangeZoomEvent(self, new_scale):
self.sections_renderer.setRenderScale(new_scale)
self.updateSectionView()
def handleCurrentChangedEvent(self):
self.updateSectionView()
def handleForegroundOpacitySlider(self, new_value):
self.sections_view.changeOpacity(new_value)
def handleItemChanged(self, item):
"""
This is called whenever a sections values changes.
"""
self.updateSectionView()
def handlePictureEvent(self, pict_type):
"""
Take pictures at/around each section location.
"""
movie_queue = []
# Single picture at each section.
if (pict_type == "s1"):
for item in self.sectionsStandardItemIterator():
movie_queue.append(item.getSectionItem().getLocation())
# Three picture spiral at each section.
elif (pict_type == "s3"):
for item in self.sectionsStandardItemIterator():
movie_queue.append(item.getSectionItem().getLocation())
movie_queue += imageCapture.createSpiral(3)
# Five picture spiral at each section.
elif (pict_type == "s5"):
for item in self.sectionsStandardItemIterator():
movie_queue.append(item.getSectionItem().getLocation())
movie_queue += imageCapture.createSpiral(5)
# Picture grid at each section.
elif (pict_type == "g"):
for item in self.sectionsStandardItemIterator():
movie_queue.append(item.getSectionItem().getLocation())
movie_queue += imageCapture.createGrid(*self.image_capture.getGridSize())
if (len(movie_queue) > 0):
self.image_capture.takeMovies(movie_queue)
def handlePositionEvent(self):
"""
Add a position at each section.
"""
#
# When we change back to the mosaic tab the Positions class will
        # update its model by querying the item store, so it is
# sufficient to just add the new positions to the item store.
#
for item in self.sectionsStandardItemIterator():
pos_item = positions.PositionItem(a_point = item.getSectionItem().getLocation())
self.item_store.addItem(pos_item)
self.updateSectionView()
def handleUpdateEvent(self):
self.updateSectionView()
def mosaicLoaded(self):
# Clear the current sections model. We need to do this otherwise
# we'll get duplicates of whatever is currently in the model.
self.sections_model.clear()
for section_item in self.item_store.itemIterator(item_type = SectionItem):
self.addSectionItem(section_item)
def sectionsStandardItemIterator(self):
for i in range(self.sections_model.rowCount()):
index = self.sections_model.index(i,0)
item = self.sections_model.itemFromIndex(index)
if isinstance(item, SectionsStandardItem):
yield item
def updateSectionView(self):
"""
Update the image in the section view.
"""
# FIXME? Usually only the background or the foreground will need to
# be updated, not both. This could be more efficient.
# Create background image.
counts = 0
numpy_bg = None
for item in self.sectionsStandardItemIterator():
if (item.checkState() == QtCore.Qt.Checked):
temp = self.sections_renderer.renderSectionNumpy(item.getSectionItem())
if numpy_bg is not None:
numpy_bg += temp
else:
numpy_bg = temp
counts += 1
if numpy_bg is not None:
numpy_bg = numpy_bg/float(counts)
numpy_bg = numpy_bg.astype(numpy.uint8)
image = QtGui.QImage(numpy_bg.data,
numpy_bg.shape[1],
numpy_bg.shape[0],
QtGui.QImage.Format_RGB32)
image.ndarray = numpy_bg
pixmap = QtGui.QPixmap.fromImage(image)
pixmap.qimage = image
self.sections_view.setBackgroundPixmap(pixmap)
# Create foreground image.
current_item = self.sections_model.itemFromIndex(self.sections_table_view.currentIndex())
if isinstance(current_item, SectionsStandardItem):
pixmap = self.sections_renderer.renderSectionPixmap(current_item.getSectionItem())
self.sections_view.setForegroundPixmap(pixmap)
self.sections_view.update()
class SectionsRenderer(QtWidgets.QGraphicsView):
"""
Handles rendering sections. It works by using the same QGraphicsScene as displayed in
    the Mosaic tab. To render a section, it centers on the section, applies the section's
    rotation angle and the render scale, then grabs the contents of its viewport.
    This object is not actually visible in the UI.
"""
def __init__(self, scene = None, **kwds):
super().__init__(**kwds)
self.scale = 0.5
self.setScene(scene)
self.setRenderHint(QtGui.QPainter.SmoothPixmapTransform)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
def renderSectionNumpy(self, section_item):
"""
Draw the section pixmap & convert to a numpy array.
"""
pixmap = self.renderSectionPixmap(section_item)
image = pixmap.toImage()
ptr = image.bits()
ptr.setsize(image.byteCount())
numpy_array = numpy.asarray(ptr).reshape(image.height(), image.width(), 4).astype(numpy.float)
return numpy_array
# # I'm not sure why, but ptr will sometimes be "None" so we need to catch this.
# if (type(ptr) != type(None)):
# ptr.setsize(image.byteCount())
# numpy_array = numpy.asarray(ptr).reshape(image.height(), image.width(), 4).astype(numpy.float)
# return numpy_array
# else:
# return False
def renderSectionPixmap(self, section_item):
"""
Draw the section pixmap.
"""
a_point = section_item.getLocation()
self.centerOn(a_point.x_pix, a_point.y_pix)
transform = QtGui.QTransform()
transform.rotate(section_item.getAngle())
transform.scale(self.scale, self.scale)
self.setTransform(transform)
return self.grab()
def setRenderScale(self, new_scale):
self.scale = new_scale
def setRenderSize(self, width, height):
self.setFixedSize(width, height)
class SectionsStandardItem(QtGui.QStandardItem):
def __init__(self, field = None, section_item = None, **kwds):
super().__init__(**kwds)
self.field = field
self.section_item = section_item
self.updateSectionText()
def changeValue(self, df):
self.section_item.changeField(self.field, df)
self.updateSectionText()
def getSectionItem(self):
return self.section_item
def setSelected(self, selected):
self.section_item.setSelected(selected)
def updateSectionText(self):
if self.field is not None:
self.setText("{0:.2f}".format(self.section_item.getField(self.field)))
class SectionsTableView(QtWidgets.QTableView):
currentChangedEvent = QtCore.pyqtSignal()
def __init__(self, item_store = None, step_size = None, **kwds):
super().__init__(**kwds)
self.initialized_widths = False
self.item_store = item_store
self.step_size = step_size
# Disable direct editting.
self.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.setToolTip("'w','s' to change selected cell value, 'backspace' to delete row, arrow keys to change cells.")
def currentChanged(self, current, previous):
"""
Called when the currently selected item in the table changes.
"""
previous_item = self.model().itemFromIndex(previous)
if isinstance(previous_item, SectionsStandardItem):
previous_item.setSelected(False)
current_item = self.model().itemFromIndex(current)
if isinstance(current_item, SectionsStandardItem):
current_item.setSelected(True)
self.currentChangedEvent.emit()
def keyPressEvent(self, event):
current_column = self.currentIndex().column()
current_item = self.model().itemFromIndex(self.currentIndex())
if isinstance(current_item, SectionsStandardItem) and (current_column | |
d1.meanlogd1)
np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2)
np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3)
np.testing.assert_allclose(d2.meanu, d1.meanu)
np.testing.assert_allclose(d2.meanv, d1.meanv)
fits_name = 'output/nnnc_fits.fits'
dddc.write(fits_name)
dddc4 = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
dddc4.read(fits_name)
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
d2 = getattr(dddc4, perm)
d1 = getattr(dddc, perm)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.meand1, d1.meand1)
np.testing.assert_allclose(d2.meand2, d1.meand2)
np.testing.assert_allclose(d2.meand3, d1.meand3)
np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1)
np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2)
np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3)
np.testing.assert_allclose(d2.meanu, d1.meanu)
np.testing.assert_allclose(d2.meanv, d1.meanv)
try:
import h5py
except ImportError:
print('Skipping hdf5 output file, since h5py not installed.')
return
hdf5_name = 'output/nnnc_hdf5.hdf5'
dddc.write(hdf5_name)
dddc5 = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
dddc5.read(hdf5_name)
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
d2 = getattr(dddc5, perm)
d1 = getattr(dddc, perm)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.meand1, d1.meand1)
np.testing.assert_allclose(d2.meand2, d1.meand2)
np.testing.assert_allclose(d2.meand3, d1.meand3)
np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1)
np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2)
np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3)
np.testing.assert_allclose(d2.meanu, d1.meanu)
np.testing.assert_allclose(d2.meanv, d1.meanv)
@timer
def test_direct_count_cross12():
# Check the 1-2 cross correlation
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
cat1 = treecorr.Catalog(x=x1, y=y1)
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
cat2 = treecorr.Catalog(x=x2, y=y2)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat1, cat2)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri_122 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_212 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_221 = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(ngal):
for k in range(j+1,ngal):
dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2)
dik = np.sqrt((x1[i]-x2[k])**2 + (y1[i]-y2[k])**2)
djk = np.sqrt((x2[j]-x2[k])**2 + (y2[j]-y2[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x2[k],y2[k])
true_ntri = true_ntri_122
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw(x2[j],y2[j],x1[i],y1[i],x2[k],y2[k])
true_ntri = true_ntri_212
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw(x2[j],y2[j],x2[k],y2[k],x1[i],y1[i])
true_ntri = true_ntri_221
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw(x1[i],y1[i],x2[k],y2[k],x2[j],y2[j])
true_ntri = true_ntri_122
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw(x2[k],y2[k],x1[i],y1[i],x2[j],y2[j])
true_ntri = true_ntri_212
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw(x2[k],y2[k],x2[j],y2[j],x1[i],y1[i])
true_ntri = true_ntri_221
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
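# The v coordinate is signed by triangle orientation: counter-clockwise triangles keep
# v > 0 and clockwise ones get v < 0, which is why the v axis spans 2*nvbins bins
# (negative v falls in the first nvbins, positive v in the last nvbins).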
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
# With the regular NNNCorrelation class, we end up with the sum of all permutations.
true_ntri_sum = true_ntri_122 + true_ntri_212 + true_ntri_221
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Now repeat with the full CrossCorrelation class, which distinguishes the permutations.
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
dddc.process(cat1, cat2)
#print('true_ntri_122 = ',true_ntri_122)
#print('diff = ',dddc.n1n2n3.ntri - true_ntri_122)
np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri_122)
np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri_122)
np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri_212)
np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri_221)
np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri_212)
np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri_221)
# Repeat with binslop = 0
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat1, cat2)
#print('binslop > 0: ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat1, cat2)
#print('max_top = 0: ddd.ntri = ',ddd.ntri)
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Split into patches to test the list-based version of the code.
cat1 = treecorr.Catalog(x=x1, y=y1, npatch=10)
cat2 = treecorr.Catalog(x=x2, y=y2, npatch=10)
ddd.process(cat1, cat2)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
dddc.process(cat1, cat2)
np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri_122)
np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri_122)
np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri_212)
np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri_221)
np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri_212)
np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri_221)
@timer
def test_direct_spherical():
# Repeat in spherical coords
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 200 # Put everything at large y, so small angle on sky
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w)
min_sep = 1.
bin_size = 0.2
nrbins = 10
nubins = 5
nvbins = 5
ddd = treecorr.NNNCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
sep_units='deg', brute=True)
ddd.process(cat, num_threads=2)
r = np.sqrt(x**2 + y**2 + z**2)
x /= r; y /= r; z /= r
true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int)
true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
rad_min_sep = min_sep * coord.degrees / coord.radians
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
d12 = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2)
d23 = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2 + (z[j]-z[k])**2)
d31 = np.sqrt((x[k]-x[i])**2 + (y[k]-y[i])**2 + (z[k]-z[i])**2)
d3, d2, d1 = sorted([d12, d23, d31])
rindex = np.floor(np.log(d2/rad_min_sep) / bin_size).astype(int)
if rindex < 0 or rindex >= nrbins: continue
if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k
elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j
elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i
elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k
elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j
elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i
else: assert False
# Now use ii, jj, kk rather than i,j,k, to get the indices
# that correspond to the points in the right order.
u = d3/d2
v = (d1-d2)/d3
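# Orientation test: the expression below is the scalar triple product
# ((p_jj - p_ii) x (p_kk - p_ii)) . p_ii, whose sign encodes the winding direction of
# the triangle as seen from outside the unit sphere; v is negated when it is positive.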
if ( ((x[jj]-x[ii])*(y[kk]-y[ii]) - (x[kk]-x[ii])*(y[jj]-y[ii])) * z[ii] +
((y[jj]-y[ii])*(z[kk]-z[ii]) - (y[kk]-y[ii])*(z[jj]-z[ii])) * x[ii] +
((z[jj]-z[ii])*(x[kk]-x[ii]) - (z[kk]-z[ii])*(x[jj]-x[ii])) * y[ii] ) > 0:
v = -v
uindex = np.floor(u / bin_size).astype(int)
assert 0 <= uindex < nubins
vindex = np.floor((v+1) / bin_size).astype(int)
assert 0 <= vindex < 2*nvbins
www = w[i] * w[j] * w[k]
true_ntri[rindex,uindex,vindex] += 1
true_weight[rindex,uindex,vindex] += www
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
# Check that running via the corr3 script works correctly.
config = treecorr.config.read_config('configs/nnn_direct_spherical.yaml')
cat.write(config['file_name'])
treecorr.corr3(config)
data = fitsio.read(config['nnn_file_name'])
np.testing.assert_allclose(data['r_nom'], ddd.rnom.flatten())
np.testing.assert_allclose(data['u_nom'], ddd.u.flatten())
np.testing.assert_allclose(data['v_nom'], ddd.v.flatten())
np.testing.assert_allclose(data['ntri'], ddd.ntri.flatten())
np.testing.assert_allclose(data['DDD'], ddd.weight.flatten())
# Repeat with binslop = 0
# And don't do any top-level recursion so we actually test not going to the leaves.
ddd = treecorr.NNNCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
sep_units='deg', bin_slop=0, max_top=0)
ddd.process(cat)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
@timer
def test_direct_arc():
# Repeat the spherical test with metric='Arc'
ngal = 5
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 200 # Large angles this time.
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w)
min_sep = 1.
max_sep = 180.
nrbins = 50
nubins = 5
nvbins = 5
bin_size = np.log((max_sep / min_sep)) / nrbins
ubin_size = 0.2
vbin_size = 0.2
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins,
nubins=nubins, ubin_size=ubin_size,
nvbins=nvbins, vbin_size=vbin_size,
sep_units='deg', brute=True)
ddd.process(cat, metric='Arc')
r = np.sqrt(x**2 + y**2 + z**2)
x /= r; y /= r; z /= r
true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int)
true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
c = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra, dec)]
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
d12 = c[i].distanceTo(c[j]) / coord.degrees
d23 = c[j].distanceTo(c[k]) / coord.degrees
d31 = c[k].distanceTo(c[i]) / coord.degrees
'''
Created on 30-Dec-2018
@author: vijay
'''
import wx
from wx import TreeCtrl
from wx.lib.mixins.treemixin import ExpansionState
from src.view.util.FileOperationsUtil import FileOperations
import logging.config
from src.view.constants import LOG_SETTINGS
from src.view.other.TreeData import TreeSearch, viewdataList
logging.config.dictConfig(LOG_SETTINGS)
logger = logging.getLogger('extensive')
##################################################
class OtherViewTreeFrame(wx.Frame):
def __init__(self, parent, title, size=(313, 441),
style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE | wx.SUNKEN_BORDER | wx.STAY_ON_TOP):
style = style & (~wx.MINIMIZE_BOX)
wx.Frame.__init__(self, parent, -1, title, size=size,
style=style)
self.Bind(wx.EVT_CLOSE, self.OnCloseFrame)
self.SetMinSize((100, 100))
self.fileOperations = FileOperations()
# set frame icon
icon = wx.Icon()
icon.CopyFromBitmap(self.fileOperations.getImageBitmap(imageName='eclipse16.png'))
self.SetIcon(icon)
sizer = wx.BoxSizer(wx.VERTICAL)
self.buttonPanel = CreateButtonPanel(self)
####################################################################
self.otherViewTreePanel = OtherViewTreePanel(self)
####################################################################
sizer.Add(self.otherViewTreePanel, 1, wx.EXPAND)
sizer.Add(self.buttonPanel, 0, wx.EXPAND)
self.Bind(wx.EVT_CHAR_HOOK, self.OnKeyUP)
self.SetSizer(sizer)
self.Center()
# self.createStatusBar()
self.Show(True)
# self.Bind(wx.EVT_SIZE, self.OnSize)
def OnKeyUP(self, event):
# print "KEY UP!"
keyCode = event.GetKeyCode()
if keyCode == wx.WXK_ESCAPE:
self.Close()
event.Skip()
def OnCloseFrame(self, event):
self.Destroy()
def OnSize(self, event):
hsize = event.GetSize()
logger.debug(hsize)
class CreateButtonPanel(wx.Panel):
def __init__(self, parent=None, *args, **kw):
wx.Panel.__init__(self, parent, id=-1)
self.parent = parent
sizer = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
okButton = wx.Button(self, 50, "Open", (20, 220))
okButton.SetToolTip("Open the selected view.")
self.Bind(wx.EVT_BUTTON, self.onOkClick, okButton)
cancelButton = wx.Button(self, 51, "Cancel", (20, 220))
cancelButton.SetToolTip("Close this dialog.")
self.Bind(wx.EVT_BUTTON, self.onCancelButtonClick, cancelButton)
# b.SetBitmap(images.Mondrian.Bitmap,
# wx.LEFT # Left is the default, the image can be on the other sides too
# #wx.RIGHT
# #wx.TOP
# #wx.BOTTOM
# )
hbox.Add(okButton)
hbox.Add(cancelButton)
# sizer.Add(cancelButton, 0, wx.ALIGN_RIGHT | wx.RIGHT | wx.BOTTOM)
sizer.Add(hbox, 0, wx.ALIGN_RIGHT | wx.RIGHT | wx.BOTTOM, 5)
# sizer.Add(vBox, 1, wx.EXPAND , 0)
self.SetAutoLayout(True)
self.SetSizer(sizer)
def onOkClick(self, event):
logger.debug('onOkClick')
# TODO : need to implement
# sqlExecuter=SQLExecuter()
# obj=sqlExecuter.getObject()
# if len(obj[1])==0:
# sqlExecuter.createOpalTables()
# sqlExecuter.addNewConnectionRow(self.GetParent().CreateOpenConnectionPanel.filePath, self.GetParent().CreateOpenConnectionPanel.connectionNameText.GetValue())
# data = self.GetTopLevelParent().createImportingCsvPanel.data
# tableName = self.GetTopLevelParent().createImportingCsvPanel.tableNameText.GetValue()
# fileOperations = FileOperations()
# # data = fileOperations.readCsvFile(filePath=filePath, columnNameFirstRow=True, delimiter=",", quotechar='|')
# # print(len(data))
# # print(data)
# createTableScript = fileOperations.createTableScript(tableName=tableName, columnHeader=data[0])
# print(createTableScript)
# sqlList = fileOperations.sqlScript(tableName=tableName, data=data)
# print(sqlList)
# connectionName = self.GetTopLevelParent().connectionName
# importStatus = SQLUtils().importingData(connectionName=connectionName, sqlList=sqlList)
# dlg = wx.MessageDialog(self, "Some status",
# 'Importing data status',
# wx.OK | wx.ICON_INFORMATION
# #wx.YES_NO | wx.NO_DEFAULT | wx.CANCEL | wx.ICON_INFORMATION
# )
# dlg.ShowModal()
# dlg.Destroy()
self.GetTopLevelParent().Destroy()
def onCancelButtonClick(self, event):
logger.debug('onCancelButtonClick')
self.GetTopLevelParent().Destroy()
class OtherViewTreePanel(wx.Panel):
def __init__(self, parent=None, *args, **kw):
wx.Panel.__init__(self, parent, id=-1)
self.parent = parent
# self.fileOperations = FileOperations()
self.connDict = dict()
vBox = wx.BoxSizer(wx.VERTICAL)
####################################################################
self.treeMap = {}
self.tree = OtherViewBaseTreePanel(self)
self.filter = wx.SearchCtrl(self, style=wx.TE_PROCESS_ENTER)
self.filter.SetDescriptiveText("Type filter search text")
self.filter.ShowCancelButton(True)
self.filter.Bind(wx.EVT_TEXT, self.RecreateTree)
self.filter.Bind(wx.EVT_SEARCHCTRL_CANCEL_BTN, lambda e: self.filter.SetValue(''))
self.filter.Bind(wx.EVT_TEXT_ENTER, self.OnSearch)
self.tree.Bind(wx.EVT_TREE_ITEM_EXPANDED, self.OnItemExpanded)
self.tree.Bind(wx.EVT_TREE_ITEM_COLLAPSED, self.OnItemCollapsed)
self.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged)
self.tree.Bind(wx.EVT_LEFT_DOWN, self.OnTreeLeftDown)
# self.tree.SelectItem(self.root)
searchMenu = wx.Menu()
item = searchMenu.AppendRadioItem(-1, "Full search")
self.Bind(wx.EVT_MENU, self.OnSearchMenu, item)
item = searchMenu.AppendRadioItem(-1, "Sample Content")
self.Bind(wx.EVT_MENU, self.OnSearchMenu, item)
self.filter.SetMenu(searchMenu)
self.RecreateTree()
####################################################################
vBox.Add(self.filter , 0, wx.EXPAND | wx.ALL)
vBox.Add(self.tree , 1, wx.EXPAND | wx.ALL)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(vBox, 1, wx.EXPAND , 0)
self.SetSizer(sizer)
def OnSearchMenu(self, event):
# Catch the search type (name or content)
searchMenu = self.filter.GetMenu().GetMenuItems()
fullSearch = searchMenu[1].IsChecked()
if fullSearch:
self.OnSearch()
else:
self.RecreateTree()
def OnSearch(self, event=None):
value = self.filter.GetValue()
if not value:
self.RecreateTree()
return
wx.BeginBusyCursor()
#
# for category, items in _treeList:
# self.searchItems[category] = []
# for childItem in items:
# # if SearchDemo(childItem, value):
# self.searchItems[category].append(childItem)
wx.EndBusyCursor()
self.RecreateTree()
#---------------------------------------------
def constructNode(self, parent=None, treeData=None):
logger.debug(treeData)
for treeItem in treeData:
itemId = self.tree.AppendItem(parent, treeItem.name, image=self.tree.iconsDictIndex[treeItem.imageName])
self.tree.SetItemData(itemId, treeItem)
if treeItem.child:
# for childItem in treeItem.child:
self.constructNode(parent=itemId, treeData=treeItem.child)
def RecreateTree(self, evt=None):
searchMenu = self.filter.GetMenu().GetMenuItems()
fullSearch = searchMenu[1].IsChecked()
if evt:
if fullSearch:
# Do not scan all the demo files for every character
# the user inputs; use wx.EVT_TEXT_ENTER instead
return
expansionState = self.tree.GetExpansionState()
current = None
item = self.tree.GetSelection()
if item:
prnt = self.tree.GetItemParent(item)
if prnt:
current = (self.tree.GetItemText(item),
self.tree.GetItemText(prnt))
self.tree.Freeze()
self.tree.DeleteAllItems()
self.root = self.tree.AddRoot("Other View")
self.tree.SetItemImage(self.root, self.tree.iconsDictIndex['other_view.png'])
self.tree.SetItemData(self.root, 0)
treeFont = self.tree.GetFont()
catFont = self.tree.GetFont()
# The native treectrl on MSW has a bug where it doesn't draw
# all of the text for an item if the font is larger than the
# default. It seems to be clipping the item's label as if it
# was the size of the same label in the default font.
if 'wxMSW' not in wx.PlatformInfo:
treeFont.SetPointSize(treeFont.GetPointSize() + 2)
treeFont.SetWeight(wx.BOLD)
catFont.SetWeight(wx.BOLD)
# self.tree.SetItemFont(self.root, treeFont)
firstChild = None
selectItem = None
filter = self.filter.GetValue()
count = 0
treeSearch = TreeSearch()
searchText = self.filter.GetValue()
if searchText.strip() == '':
searchText = None
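# searchedNodes presumably returns the subset of viewdataList whose names match the
# filter text (or the full list when searchText is None), and constructNode below
# recreates those items recursively under the root.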
treeItems = treeSearch.searchedNodes(dataList=viewdataList, searchText=searchText)
self.constructNode(parent=self.root, treeData=treeItems)
# for category, items in _treeList:
# category, items
# count += 1
# if filter:
# if fullSearch:
# items = self.searchItems[category]
# else:
# items = [item for item in items if filter.lower() in item.lower()]
# if items:
# child = self.tree.AppendItem(self.root, category, image=count)
# self.tree.SetItemFont(child, catFont)
# self.tree.SetItemData(child, count)
# if not firstChild: firstChild = child
# for childItem in items:
# image = count
# # if DoesModifiedExist(childItem):
# # image = len(_demoPngs)
# theDemo = self.tree.AppendItem(child, childItem, image=image)
# self.tree.SetItemData(theDemo, count)
# self.treeMap[childItem] = theDemo
# if current and (childItem, category) == current:
# selectItem = theDemo
# self.tree.Expand(self.root)
if firstChild:
self.tree.Expand(firstChild)
if filter:
self.tree.ExpandAll()
elif expansionState:
self.tree.SetExpansionState(expansionState)
if selectItem:
self.skipLoad = True
self.tree.SelectItem(selectItem)
self.skipLoad = False
self.tree.Thaw()
self.searchItems = {}
#---------------------------------------------
def RecreateTree1(self, evt=None):
# Catch the search type (name or content)
searchMenu = self.filter.GetMenu().GetMenuItems()
fullSearch = searchMenu[1].IsChecked()
if evt:
if fullSearch:
# Do not scan all the demo files for every character
# the user inputs; use wx.EVT_TEXT_ENTER instead
return
expansionState = self.tree.GetExpansionState()
current = None
item = self.tree.GetSelection()
if item:
prnt = self.tree.GetItemParent(item)
if prnt:
current = (self.tree.GetItemText(item),
self.tree.GetItemText(prnt))
self.tree.Freeze()
self.tree.DeleteAllItems()
self.root = self.tree.AddRoot("Preferences")
self.tree.SetItemImage(self.root, 0)
self.tree.SetItemData(self.root, 0)
treeFont = self.tree.GetFont()
catFont = self.tree.GetFont()
# The native treectrl on MSW has a bug where it doesn't draw
# all of the text for an item if the font is larger than the
# default. It seems to be clipping the item's label as if it
# was the size of the same label in the default font.
if 'wxMSW' not in wx.PlatformInfo:
treeFont.SetPointSize(treeFont.GetPointSize() + 2)
treeFont.SetWeight(wx.BOLD)
catFont.SetWeight(wx.BOLD)
self.tree.SetItemFont(self.root, treeFont)
firstChild = None
selectItem = None
filter = self.filter.GetValue()
count = 0
# for category, items in _treeList:
# category, items
# count += 1
# if filter:
# if fullSearch:
# items = self.searchItems[category]
# else:
# items = [item for item in items if filter.lower() in item.lower()]
# if items:
# child = self.tree.AppendItem(self.root, category, image=count)
# self.tree.SetItemFont(child, catFont)
# self.tree.SetItemData(child, count)
# if not firstChild: firstChild = child
# for childItem in items:
# image = count
# # if DoesModifiedExist(childItem):
# # image = len(_demoPngs)
# theDemo = self.tree.AppendItem(child, childItem, image=image)
# self.tree.SetItemData(theDemo, count)
# self.treeMap[childItem] = theDemo
# if current and (childItem, category) == current:
# selectItem = theDemo
self.tree.Expand(self.root)
if firstChild:
self.tree.Expand(firstChild)
if filter:
self.tree.ExpandAll()
elif expansionState:
self.tree.SetExpansionState(expansionState)
if selectItem:
self.skipLoad = True
self.tree.SelectItem(selectItem)
self.skipLoad = False
self.tree.Thaw()
self.searchItems = {}
#---------------------------------------------
def OnItemExpanded(self, event):
item = event.GetItem()
logger.debug("OnItemExpanded: %s" , self.tree.GetItemText(item))
if self.tree.GetItemParent(item):
self.tree.SetItemImage(item, self.tree.iconsDictIndex['eclipse_open_folder.png'])
event.Skip()
#---------------------------------------------
def OnItemCollapsed(self, event):
item = event.GetItem()
logger.debug("OnItemCollapsed: %s", self.tree.GetItemText(item))
if self.tree.GetItemParent(item):
self.tree.SetItemImage(item, self.tree.iconsDictIndex['folderType_filter.png'])
event.Skip()
#---------------------------------------------
def OnTreeLeftDown(self, event):
# reset the overview text if the tree item is clicked on again
pt = event.GetPosition();
item, flags = self.tree.HitTest(pt)
if item and item == self.tree.GetSelection():
print(self.tree.GetItemText(item) + " Overview")
event.Skip()
#---------------------------------------------
def OnSelChanged(self, event):
# if self.dying or not self.loaded or self.skipLoad:
# return
# self.StopDownload()
try:
item = event.GetItem()
itemText = self.tree.GetItemText(item)
logger.debug(itemText)
opalPreference = self.GetTopLevelParent()
if opalPreference:
# rightPanel=opalPreference.rightPanelItem.GetParent()
# opalPreference.rightPanelItem.Hide()
# opalPreference.rightPanelItem.Hide()
# opalPreference.rightPanelItem=opalPreference.getPreferencePanelObj(rightPanel,preferenceName=itemText)
# opalPreference.rightPanelItem.Show(True)
# opalPreference.rightPanelItem.Layout()
pnl_children = list()
if hasattr(opalPreference, 'pnl'):
pnl_children = opalPreference.pnl.GetChildren()
for pnl in pnl_children:
# print(pnl)
if pnl.GetName() == 'rightPanel':
opalPreference = self.GetTopLevelParent()
for child in pnl.GetChildren():
# if 'preference' in child.name.lower():
child.Hide()
# break
# child.opalPreference.getPreferencePanelObj(pnl,preferenceName=itemText)
rightPanelItem = opalPreference.getPreferencePanelObj(pnl, preferenceName=itemText)
opalPreference.addPanel(rightPanelItem)
pnl.Layout()
pnl.Refresh()
pnl.Fit()
opalPreference.Layout()
# print(opalPreference.GetChildrenCount())
# opalPreference.GetChildrenCount().rightpanel.Refresh()
if hasattr(opalPreference, 'mgr'):
opalPreference.mgr.Update()
except Exception:
logger.exception('Failed to update the preference panel for the selected tree item.')
# self.UpdateNotebook(preferenceName=itemText)
class OtherViewBaseTreePanel(ExpansionState, TreeCtrl):
'''
Left navigation tree in preferences page
'''
def __init__(self, parent):
TreeCtrl.__init__(self, parent, style=wx.TR_HIDE_ROOT | wx.TR_DEFAULT_STYLE |
wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.BORDER_NONE)
self._il = None
self.BuildTreeImageList()
# if USE_CUSTOMTREECTRL:
# self.SetSpacing(10)
# self.SetWindowStyle(self.GetWindowStyle() & ~wx.TR_LINES_AT_ROOT)
self.SetInitialSize((100, 80))
def AppendItem(self, parent, text, image=-1, wnd=None):
item = TreeCtrl.AppendItem(self, parent, text, image=image)
return item
def BuildTreeImageList(self):
# imgList = wx.ImageList(16, 16)
#
# for png in _demoPngs:
# imgList.Add(catalog[png].GetBitmap())
#
# # add the image for modified demos.
# imgList.Add(catalog["custom"].GetBitmap())
#
# self.AssignImageList(imgList)
if self._il:
self._il.Destroy()
self._il = None
self._il = wx.ImageList(16, 16)
self.SetImageList(self._il)
self.ImageList.RemoveAll()
self.iconsDictIndex = {}
count = 0
self.fileOperations = FileOperations()
imageNameSet=set()
for data in viewdataList:
for dx in data:
if isinstance(dx, list):
for d in dx:
imageNameSet.add(d[2])
imageNameSet.add(data[2])
imageNameList=list(imageNameSet)
# for imageName in ['preference.png', 'folderType_filter.png', 'eclipse_open_folder.png', 'fileType_filter.png', 'usb.png', 'stop.png',
# 'java.png', 'python_module.png', 'xml.png', "other_view.png", 'console_view.png', 'register_view.png',
# 'debug_view.png' , 'history_view.png', 'compare_view.png', 'breakpoint_view.png', 'watchlist_view.png',
# 'history_view.png', 'synch_synch.png', 'variable_view.png', | |
print('Processing pulseOx log: '+log_fname+'.puls')
if 'slr' in os.path.basename(log_fname):
print('\t[\'slr\'-type physiolog]')
time_puls, puls_values, epi_acqtime_puls, epi_event_puls, acq_window_puls = dsc_extract_physio.read_physiolog(log_fname+'.puls', sampling_period=20) # extract physio signal
reps_table_puls, slices_table_puls = dsc_extract_physio.sort_event_times(epi_acqtime_puls, epi_event_puls) # sort event times
nrep_pulseOxLog = np.sum(reps_table_puls[:, 1])
if nAcqs != nrep_pulseOxLog:
raise ValueError('Number of repetitions in image is different from the number of repetitions recorded in pulseOx physiolog.')
# get acquisition time for each slice
repsAcqTime[1:, :, 0] = np.squeeze(slices_table_puls[np.where(reps_table_puls[:, 1] == 1), :]).T
else:
print('\t[\'CMRR\'-type physiolog]')
time_puls, trigger_start_times_puls, trigger_end_times_puls, puls_values, acq_window_puls, acqStartTime_puls = dsc_extract_physio.read_physiolog_cmrr(log_fname+'.puls')
triggerStartTimes_imgOnly_puls = dsc_extract_physio.extract_acqTimes_cmrr(trigger_start_times_puls, acqTime_firstImg, acqStartTime_puls, trigger_end_times_puls)
repsAcqTime[1:, :, 0] = np.tile(triggerStartTimes_imgOnly_puls, (nSlices, 1)) + np.tile(TR/nSlices * np.arange(0, nSlices), (nAcqs, 1)).T
else:
print('\nNo log found for pulseOx.')
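# Without a physiolog, fall back to nominal timing: repetitions are assumed to start
# every TR with slices spread evenly across the TR, and a dummy time axis sampled at
# 20 ms (the physiolog rate) is generated so downstream code still works.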
repsAcqTime[1:, :, 0] = TR*np.tile(np.arange(0, nAcqs), (nSlices, 1)) + np.tile(TR/nSlices*np.arange(0, nSlices), (nAcqs, 1)).T
time_puls = np.arange(np.min(repsAcqTime), np.max(repsAcqTime), step=20)
puls_values = None
# take the mean acquisition time across slices for the whole rep (SC)
repsAcqTime[0, :, 0] = np.mean(repsAcqTime[1:, :, 0], axis=0)
# respiration ----------------------------
if os.path.exists(log_fname+'.resp'):
print('Processing respiration log: '+log_fname+'.resp')
if 'slr' in os.path.basename(log_fname):
print('\t[\'slr\'-type physiolog]')
time_resp, resp_values, epi_acqtime_resp, epi_event_resp, acq_window_resp = dsc_extract_physio.read_physiolog(log_fname+'.resp', sampling_period=20) # extract physio signal
reps_table_resp, slices_table_resp = dsc_extract_physio.sort_event_times(epi_acqtime_resp, epi_event_resp) # sort event times
nrep_respLog = np.sum(reps_table_resp[:, 1])
if nAcqs != nrep_respLog:
raise ValueError('Number of repetitions in image is different from the number of repetitions recorded in respiration physiolog.')
# get acquisition time for each slice
repsAcqTime[1:, :, 1] = np.squeeze(slices_table_resp[np.where(reps_table_resp[:, 1] == 1), :]).T
else:
print('\t[\'CMRR\'-type physiolog]')
time_resp, trigger_start_times_resp, trigger_end_times_resp, resp_values, acq_window_resp, acqStartTime_resp = dsc_extract_physio.read_physiolog_cmrr(log_fname+'.resp')
else:
print('\nNo log found for respiration.\n')
repsAcqTime[1:, :, 1] = TR*np.tile(np.arange(0, nAcqs), (nSlices, 1)) + np.tile(TR/nSlices*np.arange(0, nSlices), (nAcqs, 1)).T
time_resp = np.arange(np.min(repsAcqTime), np.max(repsAcqTime), step=20)
resp_values = None
# take the mean acquisition time across slices for the whole rep (SC)
repsAcqTime[0, :, 1] = np.mean(repsAcqTime[1:, :, 1], axis=0)
# merge the two physiological signal into one array each (for time and physio values)
if time_puls.size > time_resp.size:
time_resp = np.hstack((time_resp, time_puls[time_resp.size:]))
resp_values = np.pad(resp_values, (0, puls_values.size - resp_values.size), 'reflect')
elif time_puls.size < time_resp.size:
time_puls = np.hstack((time_puls, time_resp[time_puls.size:]))
puls_values = np.pad(puls_values, (0, resp_values.size - puls_values.size), 'reflect')
timePhysio = np.vstack((time_puls, time_resp)).T
valuesPhysio = np.vstack((puls_values, resp_values)).T
return repsAcqTime, timePhysio, valuesPhysio
def plot_pulseOx_and_resp(pulseTime, pulseVal, pulseAcqTimes, respTime, respVal, respAcqTime, ofname=''):
fig, ((ax1)) = plt.subplots(1, 1, figsize=(20, 9.5))
ax1.plot(pulseTime, pulseVal, color='red', label='PulseOx signal')
ax1.plot(respTime, respVal, color='blue', label='Respiration signal')
for acqtime in pulseAcqTimes:
ax1.axvline(x=acqtime, ymin=0, ymax=.5, color='red', lw=0.8, label='reps' if np.where(pulseAcqTimes==acqtime)[0][0] == 0 else "_nolegend_")
for acqtime in respAcqTime:
ax1.axvline(x=acqtime, ymin=.5, ymax=1, color='blue', lw=0.8, label='reps' if np.where(respAcqTime==acqtime)[0][0] == 0 else "_nolegend_")
ax1.legend()
ax1.grid()
fig.show()
if ofname:
ax1.set_title('Saved to: ' + ofname + '.png')
fig.savefig(ofname+'.png')
plt.close()
def plot_signal_vs_resp(respTime, respSignal, mriTime, mriSignal, ofname=''):
# interpolate respiration signal to MRI signal sampling
respSignalSampledToMRISignal = np.interp(mriTime, respTime, respSignal)
# remove points where respiration signal is saturated
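# 0 and 4095 are the extremes of the (presumably 12-bit) physio trace, so samples at
# those values are treated as clipped/saturated and excluded from the comparison.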
mriSignal_noRespSat = np.delete(mriSignal, np.where((respSignalSampledToMRISignal == 0) | (respSignalSampledToMRISignal == 4095)))
respSignal_noRespSat = np.delete(respSignalSampledToMRISignal, np.where((respSignalSampledToMRISignal == 0) | (respSignalSampledToMRISignal == 4095)))
mriTime_noRespSat = np.delete(mriTime, np.where((respSignalSampledToMRISignal == 0) | (respSignalSampledToMRISignal == 4095)))
# interpolate MRI signal to respiration signal sampling
mriSignalSampledToRespSignal = np.interp(respTime, mriTime, mriSignal)
mriSignalSampledToRespSignal = mriSignalSampledToRespSignal[np.abs(respTime - np.min(mriTime)).argmin():np.abs(respTime - np.max(mriTime)).argmin()]
respTimeCropToMRI = respTime[np.abs(respTime - np.min(mriTime)).argmin():np.abs(respTime - np.max(mriTime)).argmin()]
respSignalCropToMRI = respSignal[np.abs(respTime - np.min(mriTime)).argmin():np.abs(respTime - np.max(mriTime)).argmin()]
# remove points where respiration signal is saturated
mriSignalOverSampled_noRespSat = np.delete(mriSignalSampledToRespSignal, np.where((respSignalCropToMRI == 0) | (respSignalCropToMRI == 4095)))
respSignalCropToMRI_noRespSat = np.delete(respSignalCropToMRI, np.where((respSignalCropToMRI == 0) | (respSignalCropToMRI == 4095)))
respTimeCropToMRI_noRespSat = np.delete(respTimeCropToMRI, np.where((respSignalCropToMRI == 0) | (respSignalCropToMRI == 4095)))
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(20, 9.7))
plt.subplots_adjust(wspace=0.3, left=0.05, right=0.95, hspace=0.3, bottom=0.05, top=0.95)
ax1.set_title("Signal vs time")
ax1.set_xlabel('Time (ms)')
ax1.set_ylabel('Signal', color='green')
ax1.grid(which='both')
ax1.plot(mriTime/1000, mriSignal, linewidth=1, marker='+', markersize=7.0, color='green', label='$S_{MRI}$: COV='+str(round(100*np.std(mriSignal)/np.mean(mriSignal), 2))+'%')
ax1.plot(respTimeCropToMRI/1000, mriSignalSampledToRespSignal, linewidth=1.2, marker=None, color='gray', label='$S_{MRI} interp$: COV='+str(round(100*np.std(mriSignalSampledToRespSignal)/np.mean(mriSignalSampledToRespSignal), 2))+'%')
ax1.tick_params(axis='y', labelcolor='green')
ax1.legend(loc="lower left")
ax1_resp = ax1.twinx()
ax1_resp.set_ylabel('Signal')
ax1_resp.grid(which='both')
ax1_resp.plot(respTime/1000, respSignal, linewidth=1, marker=None, color='blue', label='$S_{resp}$: COV=' + str(round(100 * np.std(respSignal) / np.mean(respSignal), 2)) + '%')
ax1_resp.plot(mriTime/1000, respSignalSampledToMRISignal, linewidth=0, marker='+', color='red', label='$S_{resp}$: COV=' + str(round(100 * np.std(respSignalSampledToMRISignal) / np.mean(respSignalSampledToMRISignal), 2)) + '%')
ax1_resp.plot(mriTime_noRespSat/1000, respSignal_noRespSat, linewidth=0, marker='+', color='blue', label='$S_{resp}$ no sat')
ax1_resp.tick_params(axis='y', labelcolor='blue')
ax1_resp.legend(loc="lower right")
ax2.set_title("MRI signal vs Respiration signal: (Pearson\'s R, p-value)={}".format(tuple(np.round(scipy.stats.pearsonr(mriSignalOverSampled_noRespSat, respSignalCropToMRI_noRespSat), decimals=4))))
ax2.set_xlabel('Respiration signal')
ax2.set_ylabel('MRI signal (interpolated to respiration sampling)')
ax2.grid(which='both')
# ax2.plot(respSignalSampledToMRISignal, mriSignal, linewidth=0, marker='+', markersize=7.0, color='tab:red', label='all points')
# ax2.plot(respSignal_noRespSat, mriSignal_noRespSat, linewidth=0, marker='+', markersize=7.0, color='tab:blue', label='without respiration signal saturation')
# ax2.plot(respSignalCropToMRI, mriSignalSampledToRespSignal, linewidth=0, marker='+', markersize=7.0, color='tab:orange', label='all points')
ax2.plot(respSignalCropToMRI_noRespSat, mriSignalOverSampled_noRespSat, linewidth=0, marker='+', markersize=7.0, color='tab:green', label='without respiration signal saturation')
ax2.legend()
ax3.set_title("Signal vs time interpolated to respiration sampling") # --------------------------------------------
ax3.set_xlabel('Time (ms)')
ax3.set_ylabel('Signal', color='green')
ax3.grid(which='both')
ax3.plot(respTimeCropToMRI/1000, mriSignalSampledToRespSignal, linewidth=0, marker='.', markersize=3.0, color='tab:red', label='$S_{MRI} interp to resp$')
ax3.plot(respTimeCropToMRI_noRespSat/1000, mriSignalOverSampled_noRespSat, linewidth=0, marker='.', markersize=3.0, color='green', label='$S_{MRI} interp to resp NO RESP SAT$')
ax3.tick_params(axis='y', labelcolor='green')
ax3.legend(loc="lower left")
ax3_resp = ax3.twinx()
ax3_resp.set_ylabel('Signal')
ax3_resp.plot(respTimeCropToMRI/1000, respSignalCropToMRI, linewidth=0, marker='.', markersize=3.0, color='tab:red', label='$S_{resp}$ crop')
ax3_resp.plot(respTimeCropToMRI_noRespSat/1000, respSignalCropToMRI_noRespSat, linewidth=0, marker='.', markersize=3.0, color='blue', label='$S_{resp}$ NO RESP SAT')
ax3_resp.tick_params(axis='y', labelcolor='blue')
ax3_resp.legend(loc="lower right")
ax3_respPeriod = ax3.twinx()
respSignalMax, respSignalMin = peakdet(respSignalCropToMRI, 300)
respPeriod = np.append(np.nan, np.diff(respTimeCropToMRI[respSignalMax[:, 0]]))/1000
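# The respiration period is estimated as the time between successive maxima of the
# respiration trace (peakdet with a prominence threshold of 300); a NaN is prepended
# so the period array lines up with the detected peak times.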
ax3_respPeriod.plot(respTimeCropToMRI[respSignalMax[:, 0]]/1000, respPeriod, linewidth=3.0, marker='+', markersize=10, color='tab:pink', label='Resp period')
ax3_respPeriod.tick_params(axis='y', labelcolor='tab:pink')
ax3_respPeriod.set_ylabel('Resp period in s (mean = '+str(round(np.mean(respPeriod[1:]), 2))+' ['+str(np.min(respPeriod[1:]))+', '+str(np.max(respPeriod[1:]))+'])', color='tab:pink')
for tPeak in respTimeCropToMRI[respSignalMax[:, 0]]/1000:
ax3_respPeriod.axvline(x=tPeak, linestyle='-', color='tab:pink', linewidth=1.0)
ax3_corr = ax3.twinx()
ax3_corr.plot(respTimeCropToMRI_noRespSat/1000, scipy.signal.correlate(mriSignalOverSampled_noRespSat, respSignalCropToMRI_noRespSat, mode='same', method='direct'), linewidth=1, marker=None, markersize=0, color='tab:orange', label='Cross-corr')
ax3_corr.legend(loc="upper right")
ax4.set_title("FFT") # --------------------------------------------------------------------------------------------
# respSignal_FFT = np.fft.fft((respSignalCropToMRI - np.mean(respSignalCropToMRI))/np.std(respSignalCropToMRI))
# mriSignal_FFT = np.fft.fft((mriSignalSampledToRespSignal - np.mean(mriSignalSampledToRespSignal))/np.std(mriSignalSampledToRespSignal))
# freq = np.fft.fftfreq(respTimeCropToMRI.size, d=respTimeCropToMRI[1]-respTimeCropToMRI[0]) # in MHz
# idx_f0 = np.where(freq == 0)[0]
# idx_ascending_freq = np.argsort(freq)
freqResMRI, respSignalResMRI_FFT = fft_warpper(mriTime, respSignalSampledToMRISignal, increase_res_factor=5)
freqResMRI, mriSignalResMRI_FFT = fft_warpper(mriTime, mriSignal, increase_res_factor=5)
ax4.set_xlabel('Frequency (Hz)')
ax4.set_ylabel('Signal')
ax4.grid(which='both')
# ax4.plot(freq[idx_ascending_freq]*1000, np.abs(respSignal_FFT[idx_ascending_freq]), linewidth=0.9, marker='.', markersize=0, color='black', label='$S_{resp}$')
# ax4.plot(freq[idx_ascending_freq]*1000, np.abs(mriSignal_FFT[idx_ascending_freq]), linewidth=0.9, marker='.', markersize=0, color='green', label='$S_{MRI}\ interp\ to\ resp$')
ax4.plot(freqResMRI*1000, respSignalResMRI_FFT, label='$S_{resp}\ res\ MRI$', linewidth=0.9, marker='+', markersize=0, color='black')
ax4.plot(freqResMRI*1000, mriSignalResMRI_FFT, label='$S_{MRI}\ res\ MRI$', linewidth=0.9, marker='+', markersize=0, color='green')
ax4.axvspan(xmin=1/np.max(respPeriod[1:]), xmax=1/np.min(respPeriod[1:]), label='respiration frequency range', color='tab:pink', alpha=0.2)
ax4.legend(loc="upper right")
ax4.set_xlim(left=0, right=1.5)
ax4_corr = ax4.twinx()
ax4_corr.plot(freqResMRI*1000, scipy.signal.correlate(respSignalResMRI_FFT, mriSignalResMRI_FFT, mode='same', method='direct'), label='Cross-corr', linewidth=1, marker=None, markersize=0, color='tab:orange')
ax4_corr.legend(loc="lower right")
plt.show(block=True)
if ofname:
fig.suptitle('Saved to: '+ofname+'_signal_vs_resp.png')
fig.savefig(ofname+'_signal_vs_resp.png')
def calculateB1Factor(timePulseOx, signalPulseOx, measuredFAB1map, b1mapVoltage, DSCvoltage, selectedFlipAngle, T1=1251):
# calculate subject's cardiac cycle
pulseOxSignalMax, pulseOxSignalMin = peakdet(signalPulseOx, 600)
cardiacPeriods = np.diff(timePulseOx[pulseOxSignalMax[:, 0].astype(int)]) # in milliseconds
cardiacPeriodMean = np.mean(cardiacPeriods)
# calculate required excitation flip angle
excFArequired = 180 - np.arccos(np.exp(-cardiacPeriodMean/T1))*180/np.pi # scalar
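# Equivalently, the excitation angle alpha is chosen so that cos(alpha) = -exp(-TR/T1)
# with TR = the mean cardiac period, since alpha = 180 - arccos(exp(-TR/T1)) implies
# cos(180 - alpha) = exp(-TR/T1).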
# actual B1
B1actual = (measuredFAB1map * DSCvoltage/b1mapVoltage)/45.0 # matrix
# actual excitation flip angle
excFAactual = selectedFlipAngle * B1actual # matrix
# actual refocusing flip angle
refocFAactual = 180 * B1actual # matrix
# final factor of used signal
usedSignalFactor = (excFAactual/excFArequired) * (refocFAactual/180) # matrix
return usedSignalFactor
def discardWrongTRs(TReff, timePulseOx, signalPulseOx, mriSignal, repsAcqTime_PulseOx, repsAcqTime_Resp, outPlotFname=''):
"""
Detect points where sequence missed a cardiac window, loosing steady state.
Normal cardiac beat varies between 700 and 1400 ms, anything above 1400 ms is probably due to a missed trigger.
:param TReff:
:param mriSignal:
:return:
"""
# calculate subject's cardiac cycle
pulseOxSignalMax, pulseOxSignalMin = peakdet(signalPulseOx, 599, outPlotFname=outPlotFname)
cardiacPeriods = np.diff(timePulseOx[pulseOxSignalMax[:, 0].astype(int)]) # in milliseconds
# find a threshold to detect missed triggers
cardiacPeriodMean_withOutliers = np.mean(cardiacPeriods)
cardiacPeriodStd_withOutliers = np.std(cardiacPeriods)
print('\nMean +/- SD cardiac cycle WITH outliers (ms) = %d +/- %d' % (cardiacPeriodMean_withOutliers, cardiacPeriodStd_withOutliers))
cardiacPeriods_withoutOutliers = cardiacPeriods[(cardiacPeriods < cardiacPeriodMean_withOutliers + 2*cardiacPeriodStd_withOutliers) & (cardiacPeriods > cardiacPeriodMean_withOutliers - 2*cardiacPeriodStd_withOutliers)]
cardiacPeriodMean_withoutOutliers = np.mean(cardiacPeriods_withoutOutliers)
cardiacPeriodStd_withoutOutliers = np.std(cardiacPeriods_withoutOutliers)
print('Mean +/- SD cardiac cycle WITHOUT outliers (ms) = %d +/- %d' % (cardiacPeriodMean_withoutOutliers, cardiacPeriodStd_withoutOutliers))
# discard acquisitions with effective TR outside the mean cardiac cycle +/- 4 SD (mean and SD computed without outliers)
idxAcqWithBadTR = np.argwhere((TReff >= cardiacPeriodMean_withoutOutliers+4*cardiacPeriodStd_withoutOutliers) | (TReff <= cardiacPeriodMean_withoutOutliers-4*cardiacPeriodStd_withoutOutliers))
# also discard the repetition following a missed trigger AND the first two repetitions of the set
idxAcqToDiscard = np.concatenate((np.array([[0], [1]]), idxAcqWithBadTR, idxAcqWithBadTR[idxAcqWithBadTR[:,0] < (TReff.size-1), :]+1))
idxAcqToDiscard = np.unique(idxAcqToDiscard)
# discard data
mriSignal_TRfiltered = np.delete(mriSignal, idxAcqToDiscard, axis=-1)
repsAcqTime_PulseOx_TRfiltered = np.delete(repsAcqTime_PulseOx, idxAcqToDiscard, axis=-1)
repsAcqTime_Resp_TRfiltered = np.delete(repsAcqTime_Resp, idxAcqToDiscard, axis=-1)
print('\nDiscarded '+str(len(idxAcqToDiscard))+' points due to inconsistent effective TR.')
# plot filtering results if asked
if outPlotFname and len(mriSignal.shape) == 1:
fig, ((ax1, ax2)) = plt.subplots(2, 1, figsize=(20, 9.7))
plt.subplots_adjust(left=0.05, right=0.95, hspace=0.25, bottom=0.05, top=0.9)
ax2.set_title("Effective TR")
ax2.set_xlabel('Time (s)')
ax2.set_ylabel('Effective TR (ms)')
ax2.grid(which='both')
ax2.plot(repsAcqTime_PulseOx/1000, TReff, linewidth=0, marker='+', markersize=7.0, color='red', label='Discarded points')
ax2.plot(repsAcqTime_PulseOx_TRfiltered/1000, np.delete(TReff, idxAcqToDiscard, | |
# Recipe from the scroix/nodel-recipes repository.
'''Lightweight modbus control.'''
# REVISION HISTORY
# 21-Jan-2018
# Support for read-only unsigned 16-bit MODBUS registers (use Custom)
#
# 20-Jan-2018 (minor, non-functional)
# Uses the 'request_queue' from the toolkit to manually handle any packet fragmentation that is possible with
# MODBUS' length-delimeted packetised stream over TCP. This is very unlikely when direct from Advantech MODBUS hardware
# but possible when port-forwarding.
TCP_PORT = 502
DEFAULT_BOUNCE = 1.2 # the default bounce time (1200 ms)
param_ipAddress = Parameter({ "title":"IP address", "order": next_seq(), "schema": { "type":"string" },
"desc": "The IP address of the unit."})
CUSTOM = 'Custom'
ADAM_6050 = 'Advantech ADAM 6050 (12xDI 6xDO)'
ADAM_6060 = 'Advantech ADAM 6060 (6xDI 6xrelay)'
DEVICE_CONFIGS = { ADAM_6050: { 'coils':
[ {'startAddr': 0, 'count': 12, 'prefix': 'Input', 'readOnly': True},
{'startAddr': 16, 'count': 6, 'prefix': 'Output', 'readOnly': False} ],
'registers': []
},
ADAM_6060: { 'coils':
[ {'startAddr': 0, 'count': 6, 'prefix': 'Input', 'readOnly': True},
{'startAddr': 16, 'count': 6, 'prefix': 'Relay', 'readOnly': False} ],
'registers': []
}
}
param_modbusDevice = Parameter({'title': 'Modbus device', 'order': next_seq(), 'schema': {'type': 'string', 'enum': [CUSTOM, ADAM_6050, ADAM_6060]}})
param_coilBanks = Parameter({ 'title': 'Custom coil banks', 'order': next_seq(), 'schema': { 'type': 'array', 'items': {
'type': 'object', 'properties': {
'startAddr': {'type': 'integer', 'title': 'Start address', 'order': next_seq()},
'count': {'type': 'integer', 'title': 'Count', 'order': next_seq()},
'prefix': {'type': 'string', 'title': 'Prefix', 'order': next_seq(), 'desc': 'e.g "Input" or "Output"'},
'readOnly': {'type': 'boolean', 'title': 'Read-only?', 'order': next_seq()}
} } } })
param_registerBanks = Parameter({ 'title': 'Custom register banks', 'order': next_seq(), 'schema': { 'type': 'array', 'items': {
'type': 'object', 'properties': {
'startAddr': {'type': 'integer', 'title': 'Start address', 'order': next_seq()},
'count': {'type': 'integer', 'title': 'Count', 'order': next_seq()},
'prefix': {'type': 'string', 'title': 'Prefix', 'order': next_seq(), 'desc': 'e.g "Input" or "Output"'},
'readOnly': {'type': 'boolean', 'title': '(RESERVED) Read-only? (only read-only for now)', 'order': next_seq()}
} } } })
local_event_SyncErrors = LocalEvent({'title': 'Sync errors', 'group': 'Status', 'schema': {'type': 'object', 'title': 'Details', 'properties': {
'count': {'type': 'integer', 'title': 'Count', 'order': 1},
'last': {'type': 'string', 'title': 'Last occurrence', 'order': 2}
}}})
local_event_ShowLog = LocalEvent({'title': 'Show log', 'order': 9998, 'group': 'Debug', 'schema': {'type': 'boolean'}})
# hold the list of poller functions
pollers = list()
def main(arg = None):
tcp.setDest('%s:%s'% (param_ipAddress, TCP_PORT))
# lookup the config based on the device
deviceConfig = DEVICE_CONFIGS.get(param_modbusDevice)
if deviceConfig != None:
for info in deviceConfig.get('coils') or []:
bindCoilBank(info)
for info in deviceConfig.get('registers') or []:
bindRegisterBank(info)
else:
# 'Custom' and everything will fallthrough to here
for info in param_coilBanks or []:
bindCoilBank(info)
for info in param_registerBanks or []:
bindRegisterBank(info)
def bindCoilBank(info):
startAddr = info['startAddr']
prefix = info['prefix']
count = info['count']
readOnly = info['readOnly']
coilEvents = list()
for i in range(info['count']):
(event, configEvent) = bindCoil(prefix, i+1, startAddr+i, readOnly)
coilEvents.append((event, configEvent))
pollGap = 0.08 if readOnly else 2.0
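# Read-only banks (physical inputs) are polled rapidly (~80 ms between reads), while
# writable banks are refreshed slowly (every 2 s) since their state is also updated
# whenever a coil is written.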
def onBankResponse(seqNum, values):
for (es, v) in zip(coilEvents, values):
invert = safeGet(es[1].getArg(), 'invert', False)
es[0].emitIfDifferent(v if not invert else not v)
call_safe(lambda: readBank(seqNum), pollGap)
def readBank(seqNum):
# chain next call (instead of locked timer)
if seqNum != sequence[0]:
# stop this chain
print '(connection %s ended)' % seqNum
return
modbus_readCoils(startAddr, count, lambda values: onBankResponse(seqNum, values))
pollers.append(readBank)
def bindCoil(prefix, index, addr, readOnly):
event = Event('%s %s State' % (prefix, index), {'group': '"%s" coils\' states' % prefix, 'order': next_seq(), 'schema': {'type': 'boolean'}})
configEvent = Event('%s %s Config' % (prefix, index), {'group': '"%s" coils\' config' % prefix, 'order': next_seq(), 'schema': {'type': 'object', 'title': 'Params', 'properties': {
'invert': {'type': 'boolean', 'title': 'Invert', 'order': next_seq()},
'label': {'type': 'string', 'title': 'Label', 'order': next_seq()}
}}})
label = safeGet(configEvent.getArg(), 'label', None)
if label != None:
event2 = Event('%s State' % label, {'group': 'Labelled coils\' states', 'order': next_seq()+8000, 'schema': {'type': 'boolean'}})
event.addEmitHandler(lambda arg: event2.emit(arg))
if not readOnly:
def handler(arg):
modbus_writeCoil(addr, arg, lambda state: event.emit(state))
group = '%s %s coil' % (prefix, index)
stateAction = Action('%s %s State' % (prefix, index), handler, {'title': 'State', 'group': group, 'order': next_seq(), 'schema': {'type': 'boolean'}})
closeAction = Action('%s %s Close' % (prefix, index), lambda arg: stateAction.call(True), {'title': 'Close', 'group': group, 'order': next_seq()})
openAction = Action('%s %s Open' % (prefix, index), lambda arg: stateAction.call(False), {'title': 'Open', 'group': group, 'order': next_seq()})
timer = Timer(lambda: stateAction.call(stateAction.getArg() != True), 0)
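# The shared timer simply toggles the coil each time it fires: 'Bounce' closes the coil
# and schedules a toggle after DEFAULT_BOUNCE seconds (a momentary pulse), while
# 'Flash' closes it and keeps toggling at DEFAULT_BOUNCE intervals until stopped.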
def bounceHandler(arg=None):
stateAction.call(True)
timer.setDelay(DEFAULT_BOUNCE)
timer.start()
bounceAction = Action('%s %s Bounce' % (prefix, index), bounceHandler, {'title': 'Bounce', 'group': group, 'order': next_seq()})
def flashHandler(arg=None):
stateAction.call(True)
timer.setDelay(DEFAULT_BOUNCE)
timer.setInterval(DEFAULT_BOUNCE)
timer.start()
flashAction = Action('%s %s Flash' % (prefix, index), lambda arg: flashHandler() if arg else timer.stop(), {'title': 'Flash', 'group': group, 'order': next_seq(), 'schema': {'type': 'boolean'}})
if label != None:
group = '%s coil' % label
stateAction2 = Action('%s State' % label, lambda arg: stateAction.call(arg), {'title': 'State', 'group': group, 'order': next_seq()+8000, 'schema': {'type': 'boolean'}})
closeAction2 = Action('%s Close' % label, lambda arg: closeAction.call(arg), {'title': 'Close', 'group': group, 'order': next_seq()+8000})
openAction2 = Action('%s Open' % label, lambda arg: openAction.call(arg), {'title': 'Open', 'group': group, 'order': next_seq()+8000})
bounceAction2 = Action('%s Bounce' % label, lambda arg: bounceAction.call(arg), {'title': 'Bounce', 'group': group, 'order': next_seq()+8000})
flashAction2 = Action('%s Flash' % label, lambda arg: flashAction.call(arg), {'title': 'Flash', 'group': group, 'order': next_seq()+8000, 'schema': {'type': 'boolean'}})
return (event, configEvent)
def bindRegisterBank(info):
startAddr = info['startAddr']
prefix = info['prefix']
count = info['count']
readOnly = True # info.get('readOnly') is not used yet; registers are read-only for now
registerEvents = list()
for i in range(info['count']):
(event, configEvent) = bindRegister(prefix, i+1, startAddr+i, readOnly)
registerEvents.append((event, configEvent))
pollGap = 0.08 if readOnly else 2.0
def onRegisterResponse(seqNum, values):
for (es, v) in zip(registerEvents, values):
es[0].emitIfDifferent(v)
call_safe(lambda: readRegister(seqNum), pollGap)
def readRegister(seqNum):
# chain next call (instead of locked timer)
if seqNum != sequence[0]:
# stop this chain
print '(connection %s ended)' % seqNum
return
modbus_readRegisters(startAddr, count, lambda values: onRegisterResponse(seqNum, values))
pollers.append(readRegister)
def bindRegister(prefix, index, addr, readOnly):
# 'readOnly' not used yet
event = Event('%s %s Value' % (prefix, index), {'group': '"%s" registers\' values' % prefix, 'order': next_seq(), 'schema': {'type': 'integer'}})
configEvent = Event('%s %s Config' % (prefix, index), {'group': '"%s" registers\' config' % prefix, 'order': next_seq(), 'schema': {'type': 'object', 'title': 'Params', 'properties': {
'label': {'type': 'string', 'title': 'Label', 'order': next_seq()}
}}})
label = safeGet(configEvent.getArg(), 'label', None)
if label != None:
event2 = Event('%s Value' % label, {'group': 'Labelled registers\' values', 'order': next_seq()+8000, 'schema': {'type': 'integer'}})
event.addEmitHandler(lambda arg: event2.emit(arg))
return (event, configEvent)
sequence = [0]
def connected():
console.info('TCP connected')
# don't let commands rush through
tcp.clearQueue()
queue.clearQueue()
# start all the poller
seqNum = sequence[0]
console.info('(new sequence %s)' % seqNum)
for f in pollers:
f(seqNum)
def received(data):
lastReceive[0] = system_clock()
if local_event_ShowLog.getArg():
print 'RECV: [%s]' % data.encode('hex')
# ensure buffer doesn't blow out i.e. greater than a reasonable sized "massive" value
if len(recvBuffer) > 4096:
console.warn('The incoming buffer is too large which might indicate protocol corruption; dropping it')
del recvBuffer[:]
# extend the recv buffer
recvBuffer.extend(data)
processBuffer()
def sent(data):
if local_event_ShowLog.getArg():
print 'SENT: [%s]' % data.encode('hex')
def disconnected():
console.warn('TCP disconnected')
# reset sequence (which will stop pollers)
tcp.clearQueue()
queue.clearQueue()
newSeq = sequence[0] + 1
sequence[0] = newSeq
def timeout():
console.warn('TCP timeout (recycling TCP connection if connected)')
tcp.drop()
tcp = TCP(connected=connected,
received=received,
sent=sent,
disconnected=disconnected,
timeout=timeout,
sendDelimiters=None,
receiveDelimiters=None)
def protocolTimeout():
console.log('MODBUS timeout; flushing buffers and dropping TCP connection for good measure')
tcp.drop()
queue.clearQueue()
del recvBuffer[:]
# MODBUS using no delimeters within its binary protocol so must use a
# custom request queue
queue = request_queue(timeout=protocolTimeout)
# the full receive buffer
recvBuffer = list()
# example response packet:
# 00:93 00:00 00:05 01:01:02:fd:0f
# TID (2 bytes) Protocol (2 bytes) Length, n (2 bytes) n bytes...
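# Worked example for the frame above: bytes 4-5 are 00:05, so messageLen = 5 and the
# full frame is 2 + 2 + 2 + 5 = 11 bytes -- TID 00:93, protocol 00:00, length 00:05,
# then unit id 01, function code 01 (read coils), byte count 02 and coil data fd:0f.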
def processBuffer():
while True:
bufferLen = len(recvBuffer)
if bufferLen < 6:
# not big enough yet, let it grow
return
# got at least 6 bytes (fifth and sixth hold the length)
messageLen = toInt16(recvBuffer, 4)
# work out expected length (incl. header)
fullLen = 2 + 2 + 2 + messageLen
# check if we've got at least one full packet
if bufferLen >= fullLen:
# grab that packet from the buffer
message = ''.join(recvBuffer[:fullLen])
del recvBuffer[:fullLen]
# | |
return self
for season in self._season:
if season == requested_season:
self._index = index
break
index += 1
return self
def _dataframe_fields(self):
"""
Creates a dictionary of all fields to include with DataFrame.
With the result of the calls to class properties changing based on the
class index value, the dictionary should be regenerated every time the
index is changed when the dataframe property is requested.
Returns
-------
dictionary
Returns a dictionary where the keys are the shortened ``string``
attribute names and the values are the actual value for each
attribute for the specified index.
"""
fields_to_include = {
'adjusted_yards_per_attempt': self.adjusted_yards_per_attempt,
'assists_on_tackles': self.assists_on_tackles,
'completed_passes': self.completed_passes,
'extra_points_made': self.extra_points_made,
'field_goals_made': self.field_goals_made,
'fumbles_forced': self.fumbles_forced,
'fumbles_recovered': self.fumbles_recovered,
'fumbles_recovered_for_touchdown':
self.fumbles_recovered_for_touchdown,
'games': self.games,
'height': self.height,
'interceptions': self.interceptions,
'interceptions_returned_for_touchdown':
self.interceptions_returned_for_touchdown,
'interceptions_thrown': self.interceptions_thrown,
'kickoff_return_touchdowns': self.kickoff_return_touchdowns,
'name': self.name,
'other_touchdowns': self.other_touchdowns,
'pass_attempts': self.pass_attempts,
'passes_defended': self.passes_defended,
'passing_completion': self.passing_completion,
'passing_touchdowns': self.passing_touchdowns,
'passing_yards': self.passing_yards,
'passing_yards_per_attempt': self.passing_yards_per_attempt,
'player_id': self.player_id,
'plays_from_scrimmage': self.plays_from_scrimmage,
'points': self.points,
'position': self.position,
'punt_return_touchdowns': self.punt_return_touchdowns,
'quarterback_rating': self.quarterback_rating,
'receiving_touchdowns': self.receiving_touchdowns,
'receiving_yards': self.receiving_yards,
'receiving_yards_per_reception':
self.receiving_yards_per_reception,
'receptions': self.receptions,
'rush_attempts': self.rush_attempts,
'rush_touchdowns': self.rush_touchdowns,
'rush_yards': self.rush_yards,
'rush_yards_per_attempt': self.rush_yards_per_attempt,
'rushing_and_receiving_touchdowns':
self.rushing_and_receiving_touchdowns,
'sacks': self.sacks,
'safeties': self.safeties,
'season': self.season,
'solo_tackles': self.solo_tackles,
'tackles_for_loss': self.tackles_for_loss,
'team_abbreviation': self.team_abbreviation,
'total_tackles': self.total_tackles,
'total_touchdowns': self.total_touchdowns,
'two_point_conversions': self.two_point_conversions,
'weight': self.weight,
'yards_from_scrimmage': self.yards_from_scrimmage,
'yards_from_scrimmage_per_play':
self.yards_from_scrimmage_per_play,
'yards_recovered_from_fumble': self.yards_recovered_from_fumble,
'yards_returned_from_interceptions':
self.yards_returned_from_interceptions,
'yards_returned_per_interception':
self.yards_returned_per_interception,
'year': self.year
}
return fields_to_include
@property
def dataframe(self):
"""
Returns a ``pandas DataFrame`` containing all other relevant class
properties and values where each index is a different season plus the
career stats.
"""
temp_index = self._index
rows = []
indices = []
if not self._season:
return None
for season in self._season:
self._index = self._season.index(season)
rows.append(self._dataframe_fields())
indices.append(season)
self._index = temp_index
return pd.DataFrame(rows, index=[indices])
@property
def season(self):
"""
Returns a ``string`` of the season in the format 'YYYY', such as
'2017'. If no season was requested, the career stats will be returned
for the player and the season will default to 'Career'.
"""
return self._season[self._index]
@property
def team_abbreviation(self):
"""
Returns a ``string`` of the team's abbreviation, such as 'PURDUE' for
the Purdue Boilermakers.
"""
return self._team_abbreviation[self._index]
@property
def position(self):
"""
Returns a ``string`` of the player's primary position.
"""
# If the position is left blank for the career stats, it will show
# the player as not having a position. Since player stats default to
# career, this will make it appear no players have a position. Instead,
# default to the most recent season.
if self.season == 'Career' and self._position[self._index] == '':
index = self._season.index(self._most_recent_season)
return self._position[index]
return self._position[self._index]
@property
def height(self):
"""
Returns a ``string`` of the player's height in the format
"feet-inches".
"""
return self._height
@property
def weight(self):
"""
Returns an ``int`` of the player's weight in pounds.
"""
try:
return int(self._weight.replace('lb', ''))
except AttributeError:
return None
@property
def year(self):
"""
Returns a ``string`` of the player's class designation, such as 'FR' for
freshmen.
"""
return self._year[self._index]
@_int_property_decorator
def games(self):
"""
Returns an ``int`` of the number of games the player participated in.
"""
return self._games
@_int_property_decorator
def completed_passes(self):
"""
Returns an ``int`` of the number of completed passes the player threw.
"""
return self._completed_passes
@_int_property_decorator
def attempted_passes(self):
"""
Returns an ``int`` of the number of passes the player attempted.
"""
warnings.warn('Warning: "attempted_passes" is deprecated and will '
'be removed in a future release. Please use '
'"pass_attempts" instead for identical functionality.',
DeprecationWarning)
return self._pass_attempts
@_int_property_decorator
def pass_attempts(self):
"""
Returns an ``int`` of the number of passes the player attempted.
"""
return self._pass_attempts
@_float_property_decorator
def passing_completion(self):
"""
Returns a ``float`` of the percentage of passes that were caught by a
receiver. Percentage ranges from 0-100.
"""
return self._passing_completion
@_int_property_decorator
def passing_yards(self):
"""
Returns an ``int`` of the total number of yards the player gained from
passing the ball.
"""
return self._passing_yards
@_int_property_decorator
def passing_touchdowns(self):
"""
Returns an ``int`` of the number of touchdowns passes the player has
thrown.
"""
return self._passing_touchdowns
@_int_property_decorator
def interceptions_thrown(self):
"""
Returns an ``int`` of the number of interceptions the player has
thrown.
"""
return self._interceptions_thrown
@_float_property_decorator
def adjusted_yards_per_attempt(self):
"""
Returns a ``float`` of the adjusted number of yards gained per passing
attempt, equal to (yards + 20 * pass_touchdowns - 45 * interceptions) /
pass_attempts.
"""
return self._adjusted_yards_per_attempt
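# Worked example: 3000 passing yards, 20 passing touchdowns and 10 interceptions on
# 350 attempts gives (3000 + 20*20 - 45*10) / 350 = 2950 / 350, roughly 8.4 adjusted
# yards per attempt.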
@_float_property_decorator
def quarterback_rating(self):
"""
Returns a ``float`` of the player's quarterback rating.
"""
return self._quarterback_rating
@_int_property_decorator
def rush_attempts(self):
"""
Returns an ``int`` of the number of rushing plays the player attempted.
"""
return self._rush_attempts
@_int_property_decorator
def rush_yards(self):
"""
Returns an ``int`` of the number of rushing yards the player gained.
"""
return self._rush_yards
@_float_property_decorator
def rush_yards_per_attempt(self):
"""
Returns a ``float`` of the average number of yards gained per rushing
attempt.
"""
return self._rush_yards_per_attempt
@_int_property_decorator
def rush_touchdowns(self):
"""
Returns an ``int`` of the number of rushing touchdowns the player
scored.
"""
return self._rush_touchdowns
@_int_property_decorator
def receptions(self):
"""
Returns an ``int`` of the number of receptions the player made.
"""
return self._receptions
@_int_property_decorator
def receiving_yards(self):
"""
Returns an ``int`` of the number of receiving yards the player gained.
"""
return self._receiving_yards
@_float_property_decorator
def receiving_yards_per_reception(self):
"""
Returns a ``float`` of the average number of yards the player gained
per reception.
"""
return self._receiving_yards_per_reception
@_int_property_decorator
def receiving_touchdowns(self):
"""
Returns an ``int`` of the number of touchdowns the player scored after
receiving a pass.
"""
return self._receiving_touchdowns
@_int_property_decorator
def plays_from_scrimmage(self):
"""
Returns an ``int`` of the combined number of rushing attempts and
receptions the player had.
"""
return self._plays_from_scrimmage
@_int_property_decorator
def yards_from_scrimmage(self):
"""
Returns an ``int`` of the total number of yards gained from scrimmage
for both rushing and receiving.
"""
return self._yards_from_scrimmage
@_float_property_decorator
def yards_from_scrimmage_per_play(self):
"""
Returns a ``float`` of the average number of yards gained per rushing
attempt and/or reception.
"""
return self._yards_from_scrimmage_per_play
@_int_property_decorator
def rushing_and_receiving_touchdowns(self):
"""
Returns an ``int`` of the combined number of rushing and receiving
touchdowns the player scored.
"""
return self._rushing_and_receiving_touchdowns
@_int_property_decorator
def solo_tackles(self):
"""
Returns an ``int`` of the number of tackles the player made by himself.
"""
return self._solo_tackles
@_int_property_decorator
def assists_on_tackles(self):
"""
Returns an ``int`` of the number of assists the player made on tackles.
"""
return self._assists_on_tackles
@_int_property_decorator
def total_tackles(self):
"""
Returns an ``int`` of the number of tackles the player made.
"""
return self._total_tackles
@_float_property_decorator
def tackles_for_loss(self):
"""
Returns a ``float`` of the number of tackles for a loss the player
made.
"""
return self._tackles_for_loss
@_float_property_decorator
def sacks(self):
"""
Returns a ``float`` of the number of times the player sacked a
quarterback.
"""
return self._sacks
@_int_property_decorator
def interceptions(self):
"""
Returns an ``int`` of the number of times the player intercepted a
pass.
"""
return self._interceptions
@_int_property_decorator
def yards_returned_from_interceptions(self):
"""
Returns an ``int`` of the number of yards the player returned after
intercepting a pass.
"""
return self._yards_returned_from_interceptions
@_float_property_decorator
def yards_returned_per_interception(self):
"""
Returns a ``float`` of the average number of yards the player returns
after intercepting a pass.
"""
return self._yards_returned_per_interception
@_int_property_decorator
def interceptions_returned_for_touchdown(self):
"""
Returns an ``int`` of the number of touchdowns the player has scored
after intercepting a pass. Commonly referred to as a 'Pick-6'.
"""
return self._interceptions_returned_for_touchdown
@_int_property_decorator
def passes_defended(self):
"""
Returns an ``int`` of the number of passes the player has defended as a
defensive player.
"""
return self._passes_defended
@_int_property_decorator
def fumbles_recovered(self):
"""
Returns an ``int`` of the number of fumbles the player has recovered.
"""
return self._fumbles_recovered
@_int_property_decorator
def yards_recovered_from_fumble(self):
"""
Returns an ``int`` of the number of yards the player gained after
recovering a fumble.
"""
return self._yards_recovered_from_fumble
@_int_property_decorator
def fumbles_recovered_for_touchdown(self):
"""
Returns an ``int`` of the number of touchdowns the player has scored
after recovering a fumble.
"""
return self._fumbles_recovered_for_touchdown
@_int_property_decorator
def fumbles_forced(self):
"""
Returns an ``int`` of the number of times the player forced a fumble.
"""
return self._fumbles_forced
@_int_property_decorator
def punt_return_touchdowns(self):
"""
Returns an ``int`` of the number of punts the player returned for a
touchdown.
"""
return self._punt_return_touchdowns
@_int_property_decorator
def kickoff_return_touchdowns(self):
"""
Returns an ``int`` of the number of kickoffs the player returned for a
touchdown.
"""
return self._kickoff_return_touchdowns
# answerl.append(rel16_loc)
# answerl.append(rel17_loc)
# answerl.append(rel18_loc)
# answerl.append(rel19_loc)
answerr.append(rel20_ans)
# answer.append(rel21_ans)
# answer.append(rel22_ans)
answerr.append(rel23_ans)
# answer.append(rel24_ans)
# answer.append(rel25_ans)
answerr.append(rel26_ans)
# answer.append(rel27_ans)
# answer.append(rel28_ans)
# answer.append(rel29_ans)
answerrl.append(rel20_loc)
# answerl.append(rel21_loc)
# answerl.append(rel22_loc)
answerrl.append(rel23_loc)
# answerl.append(rel24_loc)
# answerl.append(rel25_loc)
answerrl.append(rel26_loc)
# answerl.append(rel27_loc)
# answerl.append(rel28_loc)
# answerl.append(rel29_loc)
# answer.append(rel30_ans)
# answer.append(rel31_ans)
answerr.append(rel32_ans)
# answer.append(rel33_ans)
answerr.append(rel34_ans)
answerr.append(rel35_ans)
# answer.append(rel36_ans)
# answer.append(rel37_ans)
# answer.append(rel38_ans)
# answer.append(rel39_ans)
# answerl.append(rel30_loc)
# answerl.append(rel31_loc)
answerrl.append(rel32_loc)
# answerl.append(rel33_loc)
answerrl.append(rel34_loc)
answerrl.append(rel35_loc)
# answerl.append(rel36_loc)
# answerl.append(rel37_loc)
# answerl.append(rel38_loc)
# answerl.append(rel39_loc)
#answer_tensor = torch.cat((torch.stack(answer), torch.stack(answerl).view(-1)), dim=0)
answer_tensor = torch.cat((torch.stack(answerr).view(-1), torch.stack(answerrl).view(-1)), dim=0)
return answer_tensor
def get_answer_all(self, objects):
answer = []
answerl = []
answerr = []
answerrl = []
task_tensor = torch.FloatTensor([[1, 0, 0, 0, 2], [2, 0, 0, 0, 2], [0, 1, 3, 0, 4]])
for i in range(task_tensor.size(0)):
cur_task = task_tensor[i]
ask_for = int(cur_task[4]) + 2
am_I_the_query_object = []
for object in objects:
a=1
if cur_task[0]>0:
a = a * (object[3]==cur_task[0])
if cur_task[1]>0:
a = a * (object[4]==cur_task[1])
if cur_task[2]>0:
a = a * (object[5]==cur_task[2])
if cur_task[3]>0:
a = a * (object[6]==cur_task[3])
am_I_the_query_object.append(a)
ind_query_object = torch.nonzero(torch.ByteTensor(am_I_the_query_object))
que_cond = (ind_query_object.size(0) == 1)
rel_inds = self.spatial_neighbours(objects, que_cond, ind_query_object)
if que_cond:
que_ans = (objects[ind_query_object][ask_for]).int() - 1
rel_ans, rel_loc = self.create_spatial_answers(objects, rel_inds, ask_for)
que_loc = (objects[ind_query_object][:2]).int()
else:
que_ans = torch.ones([]).int() * -1
que_loc = torch.ones([2]).int() * -1
rel_ans = torch.ones([4]).int() * -1
rel_loc = torch.ones([8]).int() * -1
answer.append(que_ans)
answerl.append(que_loc)
answerr.append(rel_ans)
answerrl.append(rel_loc)
answer_tensor = torch.cat((torch.stack(answer), torch.stack(answerl).view(-1)), dim=0)
# answer_tensor = torch.cat((torch.stack(answerr).view(-1), torch.stack(answerrl).view(-1)), dim=0)
return answer_tensor
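# Note (an inference from the attribute checks elsewhere in this file, not a documented
# spec): each row of task_tensor appears to encode
# [color, material, shape, size, attribute_to_ask_for], with 0 meaning "don't care".
# For example, the row [1, 0, 0, 0, 2] selects objects whose color code is 1 (cyan in
# the indicator lists below) and asks for the attribute stored at object index
# 2 + 2 = 4, i.e. the material.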
'''
def get_answer_loc_ext(self, objects):
answer = []
answerl = []
answerr = []
answerrl = []
# equal_material questions
is_it_a_big_object = [object[6] == 1 for object in objects]
is_it_a_small_object = [object[6] == 2 for object in objects]
is_it_a_cyan_object = [object[3] == 1 for object in objects]
is_it_a_blue_object = [object[3] == 2 for object in objects]
is_it_a_yellow_object = [object[3] == 3 for object in objects]
is_it_a_purple_object = [object[3] == 4 for object in objects]
is_it_a_red_object = [object[3] == 5 for object in objects]
is_it_a_green_object = [object[3] == 6 for object in objects]
is_it_a_grey_object = [object[3] == 7 for object in objects]
is_it_a_brown_object = [object[3] == 8 for object in objects]
am_I_a_cylinder = [(object[5] == 3).float() for object in objects]
am_I_a_cube = [(object[5] == 2).float() for object in objects]
am_I_a_sphere = [(object[5] == 1).float() for object in objects]
am_I_a_rubber = [(object[4] == 1).float() for object in objects]
am_I_a_metal = [(object[4] == 2).float() for object in objects]
am_I_a_small_cylinder = [(object[6]==2 and object[5]==3).float() for object in objects]
am_I_a_small_sphere = [(object[6]==2 and object[5]==1).float() for object in objects]
am_I_a_big_sphere = [(object[6] == 1 and object[5] == 1).float() for object in objects]
am_I_a_big_cube = [(object[6] == 1 and object[5] == 2).float() for object in objects]
am_I_a_big_cylinder = [(object[6] == 1 and object[5] == 3).float() for object in objects]
am_I_a_small_cube = [(object[6] == 2 and object[5] == 2).float() for object in objects]
am_I_a_purple_rubber = [(object[4] == 1 and object[3] == 4).float() for object in objects]
am_I_a_cyan_rubber = [(object[4] == 1 and object[3] == 1).float() for object in objects]
am_I_a_blue_rubber = [(object[4] == 1 and object[3] == 2).float() for object in objects]
am_I_a_grey_rubber = [(object[4] == 1 and object[3] == 7).float() for object in objects]
am_I_a_blue_metal = [(object[4] == 2 and object[3] == 2).float() for object in objects]
am_I_a_red_metal = [(object[4] == 2 and object[3] == 5).float() for object in objects]
am_I_a_green_sphere = [(object[5] == 1 and object[3] == 6).float() for object in objects]
am_I_a_red_cylinder = [(object[3] == 5 and object[5] == 3).float() for object in objects]
am_I_a_grey_cylinder = [(object[3] == 7 and object[5] == 3).float() for object in objects]
am_I_a_yellow_cylinder = [(object[3] == 3 and object[5] == 3).float() for object in objects]
am_I_a_cyan_cylinder = [(object[3] == 1 and object[5] == 3).float() for object in objects]
am_I_a_purple_cylinder = [(object[3] == 4 and object[5] == 3).float() for object in objects]
am_I_a_red_cube = [(object[3] == 5 and object[5] == 2).float() for object in objects]
am_I_a_brown_cube = [(object[3] == 8 and object[5] == 2).float() for object in objects]
am_I_a_red_sphere = [(object[3] == 5 and object[5] == 1).float() for object in objects]
am_I_a_blue_sphere = [(object[3] == 2 and object[5] == 1).float() for object in objects]
am_I_a_cyan_sphere = [(object[3] == 1 and object[5] == 1).float() for object in objects]
am_I_big_and_red = [(object[6] == 1 and object[3] == 5).float() for object in objects]
am_I_big_and_purple = [(object[6] == 1 and object[3] == 4).float() for object in objects]
am_I_big_and_cyan = [(object[6] == 1 and object[3] == 1).float() for object in objects]
am_I_big_and_grey = [(object[6] == 1 and object[3] == 7).float() for object in objects]
am_I_big_and_blue = [(object[6] == 1 and object[3] == 2).float() for object in objects]
am_I_big_and_green = [(object[6] == 1 and object[3] == 6).float() for object in objects]
am_I_big_and_brown = [(object[6] == 1 and object[3] == 8).float() for object in objects]
am_I_big_and_metal = [(object[6] == 1 and object[4] == 2).float() for object in objects]
am_I_big_and_rubber = [(object[6] == 1 and object[4] == 1).float() for object in objects]
am_I_small_and_rubber = [(object[6] == 2 and object[4] == 1).float() for object in objects]
am_I_small_and_metal = [(object[6] == 2 and object[4] == 2).float() for object in objects]
am_I_small_and_grey = [(object[6] == 2 and object[3] == 7).float() for object in objects]
am_I_small_and_cyan = [(object[6] == 2 and object[3] == 1).float() for object in objects]
am_I_small_and_brown = [(object[6] == 2 and object[3] == 8).float() for object in objects]
am_I_a_metal_sphere = [(object[4] == 2 and object[5] == 1).float() for object in objects]
am_I_a_metal_cube = [(object[4] == 2 and object[5] == 2).float() for object in objects]
am_I_a_metal_cylinder = [(object[4] == 2 and object[5] == 3).float() for object in objects]
am_I_a_rubber_cylinder = [(object[4] == 1 and object[5] == 3).float() for object in objects]
am_I_a_rubber_cube = [(object[4] == 1 and object[5] == 2).float() for object in objects]
ind_big_object = torch.nonzero(torch.ByteTensor(is_it_a_big_object))
ind_small_object = torch.nonzero(torch.ByteTensor(is_it_a_small_object))
ind_cyan_object = torch.nonzero(torch.ByteTensor(is_it_a_cyan_object))
ind_blue_object = torch.nonzero(torch.ByteTensor(is_it_a_blue_object))
ind_yellow_object = torch.nonzero(torch.ByteTensor(is_it_a_yellow_object))
ind_purple_object = torch.nonzero(torch.ByteTensor(is_it_a_purple_object))
ind_red_object = torch.nonzero(torch.ByteTensor(is_it_a_red_object))
ind_green_object = torch.nonzero(torch.ByteTensor(is_it_a_green_object))
ind_grey_object = torch.nonzero(torch.ByteTensor(is_it_a_grey_object))
ind_brown_object = torch.nonzero(torch.ByteTensor(is_it_a_brown_object))
ind_cylinder_object = torch.nonzero(torch.ByteTensor(am_I_a_cylinder))
ind_cube_object = torch.nonzero(torch.ByteTensor(am_I_a_cube))
ind_sphere_object = torch.nonzero(torch.ByteTensor(am_I_a_sphere))
ind_rubber_object = torch.nonzero(torch.ByteTensor(am_I_a_rubber))
ind_metal_object = torch.nonzero(torch.ByteTensor(am_I_a_metal))
ind_red_big_object = torch.nonzero(torch.ByteTensor(am_I_big_and_red))
ind_yellow_cylinder_object = torch.nonzero(torch.ByteTensor(am_I_a_yellow_cylinder))
ind_cyan_cylinder_object = torch.nonzero(torch.ByteTensor(am_I_a_cyan_cylinder))
ind_red_cylinder_object = torch.nonzero(torch.ByteTensor(am_I_a_red_cylinder))
ind_grey_cylinder_object = torch.nonzero(torch.ByteTensor(am_I_a_grey_cylinder))
ind_purple_cylinder_object = torch.nonzero(torch.ByteTensor(am_I_a_purple_cylinder))
ind_red_cube_object = torch.nonzero(torch.ByteTensor(am_I_a_red_cube))
ind_brown_cube_object = torch.nonzero(torch.ByteTensor(am_I_a_brown_cube))
ind_blue_sphere_object = torch.nonzero(torch.ByteTensor(am_I_a_blue_sphere))
ind_cyan_sphere_object = torch.nonzero(torch.ByteTensor(am_I_a_cyan_sphere))
ind_purple_rubber_object = torch.nonzero(torch.ByteTensor(am_I_a_purple_rubber))
ind_cyan_rubber_object = torch.nonzero(torch.ByteTensor(am_I_a_cyan_rubber))
ind_blue_rubber_object = torch.nonzero(torch.ByteTensor(am_I_a_blue_rubber))
ind_grey_rubber_object = torch.nonzero(torch.ByteTensor(am_I_a_grey_rubber))
ind_blue_metal_object = torch.nonzero(torch.ByteTensor(am_I_a_blue_metal))
ind_red_metal_object = torch.nonzero(torch.ByteTensor(am_I_a_red_metal))
ind_small_cylinder_object = torch.nonzero(torch.ByteTensor(am_I_a_small_cylinder))
ind_big_sphere_object = torch.nonzero(torch.ByteTensor(am_I_a_big_sphere))
ind_big_cube_object = torch.nonzero(torch.ByteTensor(am_I_a_big_cube))
ind_big_cylinder_object = torch.nonzero(torch.ByteTensor(am_I_a_big_cylinder))
ind_metal_sphere_object = torch.nonzero(torch.ByteTensor(am_I_a_metal_sphere))
ind_metal_cube_object = torch.nonzero(torch.ByteTensor(am_I_a_metal_cube))
ind_metal_cylinder_object = torch.nonzero(torch.ByteTensor(am_I_a_metal_cylinder))
ind_rubber_cylinder_object = torch.nonzero(torch.ByteTensor(am_I_a_rubber_cylinder))
ind_rubber_cube_object = torch.nonzero(torch.ByteTensor(am_I_a_rubber_cube))
ind_small_cube_object = torch.nonzero(torch.ByteTensor(am_I_a_small_cube))
ind_small_sphere_object = torch.nonzero(torch.ByteTensor(am_I_a_small_sphere))
ind_green_sphere_object = torch.nonzero(torch.ByteTensor(am_I_a_green_sphere))
ind_small_rubber_object = torch.nonzero(torch.ByteTensor(am_I_small_and_rubber))
ind_small_metal_object = torch.nonzero(torch.ByteTensor(am_I_small_and_metal))
ind_small_grey_object = torch.nonzero(torch.ByteTensor(am_I_small_and_grey))
ind_small_brown_object = torch.nonzero(torch.ByteTensor(am_I_small_and_brown))
ind_red_sphere_object = torch.nonzero(torch.ByteTensor(am_I_a_red_sphere))
ind_big_purple_object = torch.nonzero(torch.ByteTensor(am_I_big_and_purple))
ind_big_blue_object = torch.nonzero(torch.ByteTensor(am_I_big_and_blue))
ind_big_brown_object = torch.nonzero(torch.ByteTensor(am_I_big_and_brown))
ind_big_red_object = torch.nonzero(torch.ByteTensor(am_I_big_and_red))
ind_big_green_object = torch.nonzero(torch.ByteTensor(am_I_big_and_green))
ind_big_cyan_object = torch.nonzero(torch.ByteTensor(am_I_big_and_cyan))
ind_big_grey_object = torch.nonzero(torch.ByteTensor(am_I_big_and_grey))
ind_small_cyan_object = torch.nonzero(torch.ByteTensor(am_I_small_and_cyan))
ind_big_metal_object = torch.nonzero(torch.ByteTensor(am_I_big_and_metal))
ind_big_rubber_object = torch.nonzero(torch.ByteTensor(am_I_big_and_rubber))
# equal material
que0_cond = (ind_purple_object.size(0) == 1)
rel0_inds = self.spatial_neighbours(objects, que0_cond, ind_purple_object)
que1_cond = (ind_red_metal_object.size(0)==1)
rel1_inds = self.spatial_neighbours(objects, que1_cond, ind_red_metal_object)
que2_cond = (ind_big_rubber_object.size(0) == 1)
rel2_inds = self.spatial_neighbours(objects, que2_cond, ind_big_rubber_object)
que3_cond = (ind_small_brown_object.size(0)==1)
rel3_inds = self.spatial_neighbours(objects, que3_cond, ind_small_brown_object)
que4_cond = (ind_red_object.size(0) == 1)
rel4_inds = self.spatial_neighbours(objects, que4_cond, ind_red_object)
que5_cond = (ind_big_blue_object.size(0)==1)
rel5_inds = self.spatial_neighbours(objects, que5_cond, ind_big_blue_object)
que6_cond = (ind_big_grey_object.size(0) == 1)
rel6_inds = self.spatial_neighbours(objects, que6_cond, ind_big_grey_object)
que7_cond = (ind_small_object.size(0) == 1)
rel7_inds = self.spatial_neighbours(objects, que7_cond, ind_small_object)
que8_cond = (ind_rubber_object.size(0) == 1)
rel8_inds = self.spatial_neighbours(objects, que8_cond, ind_rubber_object)
que9_cond = (ind_big_cyan_object.size(0) == 1)
rel9_inds = self.spatial_neighbours(objects, que9_cond, ind_big_cyan_object)
que10_cond = (ind_rubber_cylinder_object.size(0) == 1)
rel10_inds = self.spatial_neighbours(objects, que10_cond, ind_rubber_cylinder_object)
que11_cond = (ind_metal_cube_object.size(0) == 1)
rel11_inds = self.spatial_neighbours(objects, que11_cond, ind_metal_cube_object)
que12_cond = (ind_grey_rubber_object.size(0) == 1)
rel12_inds = self.spatial_neighbours(objects, que12_cond, ind_grey_rubber_object)
que13_cond = (ind_metal_sphere_object.size(0) == 1)
rel13_inds = self.spatial_neighbours(objects, que13_cond, ind_metal_sphere_object)
que14_cond = (ind_cyan_object.size(0) == 1)
rel14_inds = self.spatial_neighbours(objects, que14_cond, ind_cyan_object)
que15_cond = (ind_cylinder_object.size(0) == 1)
rel15_inds = self.spatial_neighbours(objects, que15_cond, ind_cylinder_object)
que16_cond = (ind_blue_metal_object.size(0) == 1)
rel16_inds = self.spatial_neighbours(objects, que16_cond, ind_blue_metal_object)
que17_cond = (ind_red_object.size(0) == 1)
rel17_inds = self.spatial_neighbours(objects, que17_cond, ind_red_object)
que18_cond = (ind_rubber_object.size(0) == 1)
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, logout
from django.contrib.auth import login as signin
from django.http.response import HttpResponse, HttpResponseRedirect
from django.http import JsonResponse
import os
# from django.core.context_processors import csrf
from blog.models import Category, Gal_Image, Post, Menu, Page, Image_File, Banner
from blog.forms import CategoryForm, PostForm, MenuForm, PageForm, bannerForm, PasswordForm
from events.forms import EventForm
from events.models import Event
from PIL import Image
from django.core.files.base import File as fle
from django.core.files.storage import default_storage
from django.db.models import Max
@csrf_exempt
def upload_photos(request):
'''
takes an image uploaded from the redactor editor,
stores it in the database and returns its URL to the editor'''
if request.FILES.get("upload"):
f = request.FILES.get("upload")
obj = Image_File.objects.create(upload=f, is_image=True)
size = (128, 128)
x = f.name
z = 'thumb' + f.name
y = open(x, 'wb')
for i in f.chunks():
y.write(i)
y.close()
im = Image.open(x)
im.thumbnail(size)
im.save(z)
imdata = open(z, 'rb')
obj.thumbnail.save(z, fle(imdata))
imdata.close()
# obj.thumbnail = imdata
os.remove(x)
os.remove(z)
upurl = default_storage.url(obj.upload.url)
return HttpResponse("""
<script type='text/javascript'>
window.parent.CKEDITOR.tools.callFunction({0}, '{1}');
</script>""".format(request.GET['CKEditorFuncNum'], upurl))
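# Note (illustrative; this follows the standard CKEditor file-browser convention, which
# is assumed rather than documented here): the editor opens this view with a
# CKEditorFuncNum query parameter, and the returned <script> calls
# CKEDITOR.tools.callFunction(<that number>, <uploaded file URL>) in the parent window
# so the editor can insert the image it just uploaded.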
@csrf_exempt
def recent_photos(request):
''' returns all the images from the database '''
imgs = []
for obj in Image_File.objects.filter(is_image=True).order_by("-date_created"):
upurl = default_storage.url(obj.upload.url)
thumburl = default_storage.url(obj.thumbnail.url)
imgs.append({'src': upurl, 'thumb': thumburl, 'is_image': True})
return render_to_response('admin/browse.html', {'files': imgs})
def login(request):
if request.user.is_authenticated:
if request.user.is_superuser:
posts = Post.objects.all().count()
categoryies = Category.objects.all().count()
menus = Menu.objects.all().count()
pages = Page.objects.all().count()
events = Event.objects.all().count()
return render_to_response("admin/index.html",
{'posts': posts,
'categoryies': categoryies,
'menus': menus,
'pages': pages,
'events': events})
return HttpResponseRedirect("/")
if request.method == "POST":
user = authenticate(email=request.POST.get("email"),
password=request.POST.get("password"))
if user is not None:
if user.is_superuser and user.is_active:
signin(request, user)
data = {"error": False}
return JsonResponse(data)
data = {"error": True,
"message": "Your account is not yet activated!"}
return JsonResponse(data)
data = {"error": True,
"message": "Username and password were incorrect."}
return JsonResponse(data)
return render(request, "admin/login.html")
@login_required
def category_list(request):
category_list = Category.objects.all()
return render_to_response('admin/category-list.html',
{'category_list': category_list})
@login_required
def post_list(request):
post_list = Post.objects.all().order_by('id')
return render_to_response('admin/post-list.html', {'post_list': post_list})
@login_required
def event_list(request):
event_list = Event.objects.all().order_by('id')
return render_to_response('admin/event-list.html',
{'event_list': event_list})
@login_required
def menu_list(request):
menu_list = Menu.objects.filter(parent=None)
return render_to_response('admin/menu-list.html', {'menu_list': menu_list})
@login_required
def page_list(request):
page_list = Page.objects.all()
return render_to_response('admin/page-list.html', {'page_list': page_list})
@login_required
def banner_list(request):
banner_list = Banner.objects.all()
return render_to_response('admin/banner-list.html',
{'banner_list': banner_list})
@login_required
def add_category(request):
if request.method == 'GET':
category_list = Category.objects.all()
return render(request, 'admin/category-add.html',
{'category_list': category_list})
validate_category = CategoryForm(request.POST)
errors = {}
if validate_category.is_valid():
new_category = validate_category.save(commit=False)
new_category.save()
data = {"data": 'Category created successfully', "error": False}
return JsonResponse(data)
for k in validate_category.errors:
errors[k] = validate_category.errors[k][0]
return JsonResponse(errors)
@login_required
def add_post(request):
if request.method == 'GET':
category_list = Category.objects.all()
post_list = Post.objects.all()
return render(request, 'admin/post-add.html',
{'category_list': category_list, 'post_list': post_list})
validate_post = PostForm(request.POST)
errors = {}
if validate_post.is_valid():
new_post = validate_post.save(commit=False)
if 'image' not in request.FILES:
errors['image'] = 'Please upload Image'
return JsonResponse(errors)
if request.FILES['image']:
new_post.image = request.FILES['image']
new_post.save()
photos = request.FILES.getlist('photos')
for p in photos:
img = Gal_Image.objects.create(image=p)
new_post.photos.add(img)
data = {"data": 'Post created successfully', "error": False}
return JsonResponse(data)
if 'image' not in request.FILES:
validate_post.errors['image'] = 'Please upload Image'
return JsonResponse(validate_post.errors)
@login_required
def add_event(request):
if request.method == 'GET':
event_list = Event.objects.all()
return render(request, 'admin/event-add.html',
{'event_list': event_list})
validate_event = EventForm(request.POST)
errors = {}
if validate_event.is_valid():
if validate_event.cleaned_data['end_date'] and validate_event.cleaned_data['start_date']:
if validate_event.cleaned_data['start_date'] > validate_event.cleaned_data['end_date']:
errors['date_err'] = 'Start Date should not be greater than End Date'
return JsonResponse(errors)
if 'image' not in request.FILES:
errors['image'] = 'Please upload Image'
return JsonResponse(errors)
new_event = validate_event.save(commit=False)
new_event.image = request.FILES['image']
new_event.save()
data = {"data": 'event created successfully', "error": False}
return JsonResponse(data)
for k in validate_event.errors:
errors[k] = validate_event.errors[k][0]
if 'image' not in request.FILES:
errors['image'] = 'Please upload Image'
return JsonResponse(errors)
@login_required
def delete_category(request, pk):
category = Category.objects.get(pk=pk)
category.delete()
return HttpResponseRedirect('/admin/category/list/')
@login_required
def delete_post(request, pk):
post = Post.objects.get(pk=pk)
image_path = post.image.url
for img in post.photos.all():
photo_path = img.image.url
try:
os.remove(photo_path)
except FileNotFoundError:
pass
try:
os.remove(image_path)
except FileNotFoundError:
pass
post.delete()
return HttpResponseRedirect('/admin/article/list/')
@login_required
def edit_category(request, pk):
if request.method == "GET":
category = Category.objects.get(pk=pk)
category_list = Category.objects.all()
return render(request, 'admin/category-edit.html',
{'category_list': category_list, 'category': category})
c = Category.objects.get(pk=pk)
validate_category = CategoryForm(request.POST, instance=c)
errors = {}
if validate_category.is_valid():
validate_category.save()
data = {"data": 'Category edited successfully', "error": False}
return JsonResponse(data)
for k in validate_category.errors:
errors[k] = validate_category.errors[k][0]
return JsonResponse(errors)
@login_required
def edit_post(request, pk):
if request.method == "GET":
post = Post.objects.get(pk=pk)
category_list = Category.objects.all()
post_list = Post.objects.all()
return render(request, 'admin/post-edit.html',
{'post': post, 'post_list': post_list,
'category_list': category_list})
p = Post.objects.get(pk=pk)
validate_post = PostForm(request.POST, instance=p)
errors = {}
if validate_post.is_valid():
new_post = validate_post.save(commit=False)
if 'image' in request.FILES:
image_path = p.image.url
try:
os.remove(image_path)
except Exception:
pass
new_post.image = request.FILES['image']
new_post.save()
photos = request.FILES.getlist('photos')
for p in photos:
img = Gal_Image.objects.create(image=p)
new_post.photos.add(img)
return JsonResponse({"data": 'Post edited successfully', "error": False})
for k in validate_post.errors:
errors[k] = validate_post.errors[k][0]
return JsonResponse(errors)
@login_required
def edit_event(request, pk):
if request.method == "GET":
event = Event.objects.get(pk=pk)
event_list = Event.objects.all()
return render(request, 'admin/event-edit.html',
{'event': event, 'event_list': event_list})
e = Event.objects.get(pk=pk)
validate_event = EventForm(request.POST, instance=e)
errors = {}
if validate_event.is_valid():
if validate_event.cleaned_data['end_date'] and validate_event.cleaned_data['start_date']:
if validate_event.cleaned_data['start_date'] > validate_event.cleaned_data['end_date']:
errors['date_err'] = 'Start Date should not be greater than End Date'
return JsonResponse(errors)
new_event = validate_event.save(commit=False)
if 'image' in request.FILES:
image_path = e.image.url
try:
os.remove(image_path)
except FileNotFoundError:
pass
new_event.image = request.FILES['image']
new_event.save()
return JsonResponse({"data": 'event edited successfully', "error": False})
for k in validate_event.errors:
errors[k] = validate_event.errors[k][0]
return JsonResponse(errors)
@login_required
def delete_event(request, pk):
event = Event.objects.get(pk=pk)
image_path = event.image.url
try:
os.remove(image_path)
except FileNotFoundError:
pass
event.delete()
return HttpResponseRedirect('/admin/event/list/')
def admin_logout(request):
logout(request)
return HttpResponseRedirect('/')
@login_required
def add_menu(request):
if request.method == 'GET':
menu_list = Menu.objects.filter(parent=None)
return render(request, 'admin/menu-add.html', {'menu_list': menu_list})
validate_menu = MenuForm(request.POST)
errors = {}
if request.POST['slug'] == "":
errors['slug'] = 'This field is required'
if request.POST['name'] == "":
errors['name'] = 'This field is required'
# if len(errors)>0:
# return HttpResponse(json.dumps(errors))
if validate_menu.is_valid():
new_menu = validate_menu.save(commit=False)
lvl_count = Menu.objects.filter(parent=new_menu.parent).count()
new_menu.lvl = lvl_count + 1
new_menu.save()
return JsonResponse({"data": 'Menu created successfully', "error": False})
for e in validate_menu.errors:
errors[e] = validate_menu.errors[e][0]
return JsonResponse(errors)
@login_required
def edit_menu(request, pk):
if request.method == 'GET':
menu = Menu.objects.get(pk=pk)
menu_list = Menu.objects.filter(parent=None)
return render(request, 'admin/menu-edit.html',
{'menu_list': menu_list, 'menu': menu})
m = Menu.objects.get(pk=pk)
old_parent = m.parent
validate_menu = MenuForm(request.POST, instance=m)
errors = {}
if validate_menu.is_valid():
menu = validate_menu.save(commit=False)
if old_parent == menu.parent:
menu.save()
else:
lvl_count = Menu.objects.filter(parent=menu.parent).count()
menu.lvl = lvl_count + 1
menu.save()
return JsonResponse({"data": 'Menu Edited successfully', "error": False})
for e in validate_menu.errors:
errors[e] = validate_menu.errors[e][0]
return JsonResponse(errors)
@login_required
def delete_menu(request, pk):
curent_menu = Menu.objects.get(pk=pk)
menu_parent = curent_menu.parent
menu_lvl = curent_menu.lvl
max_lvl = Menu.objects.filter(
parent=menu_parent).aggregate(Max('lvl'))['lvl__max']
Menu.objects.get(pk=pk).delete()
if max_lvl != 1:
for m in Menu.objects.filter(parent=menu_parent,
lvl__gt=menu_lvl, lvl__lte=max_lvl):
m.lvl -= 1
m.save()
return HttpResponseRedirect('/admin/menu/list/')
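# Illustrative note (an assumption about the intended ordering scheme): menus under the
# same parent occupy consecutive 'lvl' positions 1..N, so deleting the menu at lvl=2
# out of [1, 2, 3, 4] shifts the menus at lvl 3 and 4 down to 2 and 3, keeping the
# sequence gap-free. menu_lvl_up/menu_lvl_down below swap adjacent positions in the
# same sequence.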
@login_required
def menu_state(request, pk):
menu = Menu.objects.get(pk=pk)
if menu.is_active is True:
menu.is_active = False
menu.save()
else:
menu.is_active = True
menu.save()
return HttpResponseRedirect('/admin/menu/list/')
@login_required
def menu_lvl_up(request, pk):
m_parent = Menu.objects.get(pk=pk).parent
curent_menu = Menu.objects.get(pk=pk)
up_menu = Menu.objects.get(parent=m_parent, lvl=curent_menu.lvl - 1)
curent_menu.lvl = curent_menu.lvl - 1
up_menu.lvl = up_menu.lvl + 1
curent_menu.save()
up_menu.save()
return HttpResponseRedirect('/admin/menu/list/')
@login_required
def menu_lvl_down(request, pk):
m_parent = Menu.objects.get(pk=pk).parent
curent_menu = Menu.objects.get(pk=pk)
down_menu = Menu.objects.get(parent=m_parent, lvl=curent_menu.lvl + 1)
curent_menu.lvl = curent_menu.lvl + 1
down_menu.lvl = down_menu.lvl - 1
curent_menu.save()
down_menu.save()
return HttpResponseRedirect('/admin/menu/list/')
@login_required
def post_state(request, pk):
post = Post.objects.get(pk=pk)
if post.is_active is True:
post.is_active = False
post.save()
else:
post.is_active = True
post.save()
return HttpResponseRedirect('/admin/article/list/')
@login_required
def event_state(request, pk):
event = Event.objects.get(pk=pk)
if event.is_active:
event.is_active = False
event.save()
else:
event.is_active = True
event.save()
return HttpResponseRedirect('/admin/event/list/')
@login_required
def delete_gal_image(request, pk, pid):
img = Gal_Image.objects.get(pk=pk)
image_path = img.image.url
try:
os.remove(image_path)
except FileNotFoundError:
pass
img.delete()
return HttpResponseRedirect('/admin/article/edit/' + pid)
@login_required
def delete_page_images(request, pk, pid):
img = Gal_Image.objects.get(pk=pk)
image_path = img.image.url
try:
os.remove(image_path)
except FileNotFoundError:
pass
img.delete()
return HttpResponseRedirect('/admin/page/edit/' + pid)
@login_required
def add_page(request):
if request.method == 'GET':
page_list = Page.objects.all()
return render(request, 'admin/page-add.html', {'page_list': page_list})
validate_page = PageForm(request.POST)
errors = {}
if validate_page.is_valid():
new_page = validate_page.save()
photos = request.FILES.getlist('photos')
for p in photos:
img = Gal_Image.objects.create(image=p)
new_page.photos.add(img)
new_page.save()
return JsonResponse({'data': 'Page Created successfully', "error": False})
for e in validate_page.errors:
errors[e] = validate_page.errors[e][0]
return JsonResponse(errors)
@login_required
def edit_page(request, pk):
if request.method == 'GET':
page = Page.objects.get(pk=pk)
page_list = Page.objects.all()
return render(request, 'admin/page-edit.html',
{'page': page, 'page_list': page_list})
p = Page.objects.get(pk=pk)
validate_page = PageForm(request.POST, instance=p)
errors = {}
if validate_page.is_valid():
page = validate_page.save()
photos = request.FILES.getlist('photos')
for p in photos:
img = Gal_Image.objects.create(image=p)
page.photos.add(img)
page.save()
return JsonResponse({'data': 'Page edited successfully', "error": False})
for e in validate_page.errors:
errors[e] = validate_page.errors[e][0]
return JsonResponse(errors)
@login_required
def delete_page(request, pk):
page = Page.objects.get(pk=pk)
page.delete()
return HttpResponseRedirect('/admin/page/list/')
@login_required
def change_password(request):
if request.method == 'GET':
return render(request, 'admin/change-pwd.html')
validate_password = PasswordForm(request.POST)
errors = {}
if validate_password.is_valid():
# Repository: lcit/metrics_delin
import os
import sys
import json
import re
import glob
import pickle
import random
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import time
__all__ = ["json_read", "json_write", "pickle_read", "pickle_write",
"mkdir", "sort_nicely", "find_files", "render_segments", "interpolate_new_nodes",
"plot_graph", "load_graph_txt", "save_graph_txt", "oversampling_graph",
"shift_graph", "crop_graph", "length_path", "find_closest",
"uniform_node_sampling", "node_degree", "is_intersection", "is_end_point",
"is_control_nodes", "is_intersection", "relabel_nodes", "undersampling_graph",
"simplify_graph_ramer_douglas_peucker", "f1_score", "edges_count", "is_empty"]
def json_read(filename):
try:
with open(os.path.abspath(filename)) as f:
data = json.load(f)
return data
except:
raise ValueError("Unable to read JSON {}".format(filename))
def json_write(filename, data):
try:
directory = os.path.dirname(os.path.abspath(filename))
if not os.path.exists(directory):
os.makedirs(directory)
with open(os.path.abspath(filename), 'w') as f:
json.dump(data, f, indent=2)
except:
raise ValueError("Unable to write JSON {}".format(filename))
def pickle_read(filename):
with open(filename, "rb") as f:
data = pickle.load(f)
return data
def pickle_write(filename, data):
directory = os.path.dirname(os.path.abspath(filename))
if not os.path.exists(directory):
os.makedirs(directory)
with open(filename, 'wb') as f:
pickle.dump(data, f)
def mkdir(directory):
directory = os.path.abspath(directory)
if not os.path.exists(directory):
os.makedirs(directory)
def sort_nicely(l):
""" Sort the given list in the way that humans expect.
"""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key=alphanum_key)
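# Illustrative example (not part of the original module): digit runs are compared as
# integers, so
#   sort_nicely(["img10.png", "img2.png", "img1.png"])
# returns ["img1.png", "img2.png", "img10.png"], whereas plain sorted() would place
# "img10.png" before "img2.png".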
def find_files(file_or_folder, hint=None, recursive=False):
# make sure to use ** in file_or_folder when using recursive
# ie find_files("folder/**", "*.json", recursive=True)
import os
import glob
if hint is not None:
file_or_folder = os.path.join(file_or_folder, hint)
filenames = [f for f in glob.glob(file_or_folder, recursive=recursive)]
filenames = sort_nicely(filenames)
filename_files = []
for filename in filenames:
if os.path.isfile(filename):
filename_files.append(filename)
return filename_files
def render_segments(segments, filename=None, height=3072, width=3072, thickness=4):
if isinstance(segments, np.ndarray):
segments = segments.tolist()
from PIL import Image, ImageDraw
im = Image.new('RGB', (int(width), int(height)), (0, 0, 0))
draw = ImageDraw.Draw(im)
for p1,p2 in segments:
xy = [round(x) for x in p1]+[round(x) for x in p2]
draw.line(xy, fill=(255,255,255), width=thickness)
if filename is not None:
mkdir(os.path.dirname(filename))
im.save(filename)
return np.array(im)
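# Illustrative usage sketch (input layout inferred from the drawing loop above):
# 'segments' is an iterable of ((x1, y1), (x2, y2)) point pairs, e.g.
#   render_segments([((0, 0), (100, 100)), ((100, 0), (0, 100))], filename="out.png")
# draws white lines on a black height x width canvas, optionally saves it, and returns
# the image as a numpy array.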
def plot_graph(graph, node_size=20, font_size=-1,
node_color='y', edge_color='y',
linewidths=2, offset=np.array([0,0]), **kwargs):
pos = dict({n:np.reshape(graph.nodes[n]['pos'], (2,))+offset for n in graph.nodes()})
nx.draw_networkx(graph, pos=pos, node_size=node_size, node_color=node_color,
edge_color=edge_color, font_size=font_size, **kwargs)
#plt.gca().invert_yaxis()
plt.legend()
def load_graph_txt(filename):
G = nx.Graph()
nodes = []
edges = []
i = 0
switch = True
with open(filename, "r") as f:
for line in f:
line = line.strip()
if len(line)==0 and switch:
switch = False
continue
if switch:
x,y = line.split(' ')
G.add_node(i, pos=(float(x),float(y)))
i+=1
else:
idx_node1, idx_node2 = line.split(' ')
G.add_edge(int(idx_node1),int(idx_node2))
return G
def save_graph_txt(G, filename):
mkdir(os.path.dirname(filename))
nodes = list(G.nodes())
file = open(filename, "w+")
for n in nodes:
file.write("{:.6f} {:.6f}\r\n".format(G.nodes[n]['pos'][0], G.nodes[n]['pos'][1]))
file.write("\r\n")
for s,t in G.edges():
file.write("{} {}\r\n".format(nodes.index(s), nodes.index(t)))
file.close()
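# Sketch of the text format used by load_graph_txt/save_graph_txt above (reconstructed
# from the code, not from separate documentation):
#
#   10.000000 20.000000      <- one "x y" node position per line
#   30.000000 40.000000
#                            <- a single blank line separates nodes from edges
#   0 1                      <- one "i j" edge per line, indexing the node list above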
def edges_count(G):
return len(G.edges())
def is_empty(G):
return len(G.edges())==0
def interpolate_new_nodes(p1, p2, spacing=2):
_p1 = np.reshape(p1, (2,))
_p2 = np.reshape(p2, (2,))
diff = _p1-_p2
segment_length = np.linalg.norm(diff)
new_node_pos = _p1 -diff*np.linspace(0,1,int(np.round(segment_length/spacing)+1))[1:-1,None]
return new_node_pos
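# Illustrative example (not from the original source): for p1=(0, 0), p2=(10, 0) and
# spacing=2 the segment length is 10, np.linspace(0, 1, 6)[1:-1] gives the interior
# fractions 0.2, 0.4, 0.6, 0.8, and the returned positions are (2,0), (4,0), (6,0),
# (8,0); the two endpoints themselves are never returned.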
def oversampling_graph(G, spacing=20):
"""
Add new regularly spaced nodes in each edge.
The distance between nodes connected by an edge will be
approximately equal to the parameter 'spacing'.
"""
G_ = G.copy()
edges = list(G_.edges())
for s,t in edges:
new_nodes_pos = interpolate_new_nodes(G_.nodes[s]['pos'], G_.nodes[t]['pos'], spacing)
if len(new_nodes_pos)>0:
G_.remove_edge(s,t)
n = max(G_.nodes())+1
for i,n_pos in enumerate(new_nodes_pos):
G_.add_node(n+i, pos=tuple(n_pos))
G_.add_edge(s,n)
for _ in range(len(new_nodes_pos)-1):
G_.add_edge(n,n+1)
n+=1
G_.add_edge(n,t)
return G_
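# Usage sketch (hypothetical two-node graph, illustrating the spacing parameter):
#   G = nx.Graph()
#   G.add_node(0, pos=(0.0, 0.0)); G.add_node(1, pos=(100.0, 0.0)); G.add_edge(0, 1)
#   G_dense = oversampling_graph(G, spacing=20)
# G_dense should contain 4 extra nodes along the edge, spaced roughly 20 units apart.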
def undersampling_graph(G, spacing=10, inplace=False):
if inplace:
_G = G
else:
_G = G.copy()
def distance(g, n1, n2):
return np.sqrt((g.nodes[n1]['pos'][0]-g.nodes[n2]['pos'][0])**2+\
(g.nodes[n1]['pos'][1]-g.nodes[n2]['pos'][1])**2)
_spacing = spacing/2
# shuffling the nodes is necessary to avoid
# making a long sequence of segments a single long straight one
nodes = list(_G.nodes())
random.shuffle(nodes)
for n in nodes:
# change only the nodes that have two adjacent edges
if len(_G.edges(n))==2:
ajacent_nodes = list(nx.neighbors(_G, n))
d1 = distance(_G, n, ajacent_nodes[0])
d2 = distance(_G, n, ajacent_nodes[1])
if d1<_spacing or d2<_spacing:
_G.add_edge(ajacent_nodes[0], ajacent_nodes[1])
_G.remove_node(n)
return _G
def simplify_graph_ramer_douglas_peucker(G, epsilon=5, verbose=True, inplace=False):
import rdp
if inplace:
_G = G
else:
_G = G.copy()
start = time.time()
def f():
start = time.time()
nodes = list(_G.nodes())
random.shuffle(nodes)
changed = False
for n in nodes:
if verbose:
delta = time.time()-start
if delta>5:
start = time.time()
if verbose:
print("Ramer-Douglas-Peucker remaining nodes:", len(_G.nodes()))
ajacent_nodes = list(nx.neighbors(_G, n))
if n in ajacent_nodes:
ajacent_nodes.remove(n)
if len(ajacent_nodes)==2:
node_triplet = [_G.nodes[ajacent_nodes[0]]['pos'],
_G.nodes[n]['pos'],
_G.nodes[ajacent_nodes[1]]['pos']]
if len(rdp.rdp(node_triplet, epsilon=epsilon))==2:
_G.add_edge(*ajacent_nodes)
_G.remove_node(n)
changed = True
return changed
while True:
if not f():
break
if verbose:
print("Ramer-Douglas-Peucker remaining nodes:", len(_G.nodes()))
return _G
def shift_graph(G, shift_x, shift_y):
G_ = G.copy()
for _,data in G_.nodes(data=True):
x,y = data['pos']
x,y = x+shift_x,y+shift_y
if isinstance(data['pos'], np.ndarray):
data['pos'] = np.array([x,y])
else:
data['pos'] = (x,y)
return G_
def crop_graph_naive(G, xmin=None, ymin=None, xmax=None, ymax=None):
G_ = G.copy()
for n in list(G_.nodes()):
p = G_.nodes[n]['pos']
if p[0]>=xmin and p[0]<xmax and p[1]>=ymin and p[1]<ymax:
pass
else:
G_.remove_node(n)
return G_
def segments_intersection_point(line1, line2):
xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
def det(a, b):
return a[0] * b[1] - a[1] * b[0]
div = det(xdiff, ydiff)
if div == 0:
return None
d = (det(*line1), det(*line2))
x = det(d, xdiff) / div
y = det(d, ydiff) / div
return x, y
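# Illustrative example (not part of the original module): the determinant solve treats
# the two segments as infinite lines, e.g.
#   segments_intersection_point(((0, 0), (2, 2)), ((0, 2), (2, 0)))
# returns (1.0, 1.0), while parallel inputs return None.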
def segment_intersection_point_to_box(segment, xmin, ymin, xmax, ymax):
bs = [((xmin, ymin),(xmin, ymax)),
((xmin, ymin),(xmax, ymin)),
((xmin, ymax),(xmax, ymax)),
((xmax, ymin),(xmax, ymax))]
P = np.array([b[0] for b in bs])
Q = np.array([b[1] for b in bs])
p1, p2 = segment
p1_out = p1[0]<xmin or p1[0]>=xmax or p1[1]<ymin or p1[1]>=ymax
p2_out = p2[0]<xmin or p2[0]>=xmax or p2[1]<ymin or p2[1]>=ymax
if not p1_out and not p2_out:
return None
if p1_out and not p2_out:
X = np.reshape(p1, (1,2))
S, D, id = closest_points_on_segments(X, P, Q)
idx_closer_segment = np.argmin(D[0])
new_p1 = segments_intersection_point(bs[idx_closer_segment], segment)
return (new_p1, p2)
elif p2_out and not p1_out:
X = np.reshape(p2, (1,2))
S, D, id = closest_points_on_segments(X, P, Q)
idx_closer_segment = np.argmin(D[0])
new_p2 = segments_intersection_point(bs[idx_closer_segment], segment)
return (p1, new_p2)
def crop_graph(G, xmin=None, ymin=None, xmax=None, ymax=None):
G_ = G.copy()
for s,t in list(G_.edges()):
p1 = G_.nodes[s]['pos']
p2 = G_.nodes[t]['pos']
p1_out = p1[0]<xmin or p1[0]>=xmax or p1[1]<ymin or p1[1]>=ymax
p2_out = p2[0]<xmin or p2[0]>=xmax or p2[1]<ymin or p2[1]>=ymax
if p1_out and p2_out:
G_.remove_edge(s,t)
elif not p1_out and not p2_out:
pass
elif p1_out:
new_seg = segment_intersection_point_to_box((p1,p2), xmin, ymin, xmax, ymax)
new_node = max(G_.nodes())+1
G_.add_node(new_node, pos=new_seg[0])
G_.add_edge(new_node, t)
G_.remove_edge(s, t)
elif p2_out:
new_seg = segment_intersection_point_to_box((p1,p2), xmin, ymin, xmax, ymax)
new_node = max(G_.nodes())+1
G_.add_node(new_node, pos=new_seg[1])
G_.add_edge(s, new_node)
G_.remove_edge(s, t)
# remove nodes that are not attached to any edge
for n in list(G_.nodes()):
if len(G_.edges(n))==0:
G_.remove_node(n)
return G_
def length_path(G, path):
length = 0
for i in range(len(path)-1):
p1 = np.array(G.nodes[path[i]]['pos'])
p2 = np.array(G.nodes[path[i+1]]['pos'])
length += np.linalg.norm(p1-p2)
return length
def find_closest(point, points):
dists = np.linalg.norm(points-point[None], axis=1)
idx_min = np.argmin(dists)
dist_min = dists[idx_min]
return dist_min, idx_min
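# Illustrative example (not from the original source):
#   points = np.array([[0.0, 0.0], [10.0, 0.0], [0.0, 5.0]])
#   dist, idx = find_closest(np.array([1.0, 1.0]), points)
# gives idx == 0 and dist == sqrt(2): the row index of the nearest point and its
# Euclidean distance.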
def node_degree(G, node):
return len(G.edges(node))
def is_intersection(G, node):
return node_degree(G, node)>2
def is_end_point(G, node):
return node_degree(G, node)==1
def is_control_nodes(G, node):
return is_intersection(G, node) or is_end_point(G, node)
def uniform_node_sampling(G, dist_matching=25, max_node_probe=10000):
start = time.time()
nodes = list(G.nodes())
# limit the number of nodes probed; otherwise this function becomes slow
random.shuffle(nodes)
nodes = nodes[:max_node_probe]
nodes_pos = np.vstack([G.nodes[n]['pos'] for n in nodes])
xmin, ymin = nodes_pos.min(0)
xmax, ymax = nodes_pos.max(0)
random_node = None
for _ in range(10000):
x = np.random.uniform(low=xmin, high=xmax)
y = np.random.uniform(low=ymin, high=ymax)
random_position = np.array([x,y])
dists = np.linalg.norm(nodes_pos-random_position[None], axis=1)
idx_min = np.argmin(dists)
if dists[idx_min]>dist_matching:
random_node = nodes[idx_min]
break
if random_node is None:
random_node = np.random.choice(G.nodes())
print("uniform_node_sampling: node picked from the set of nodes of the graph!")
return random_node
def uniform_node_sampling_with_snapping(G, dist_matching=25):
nodes_pos_gt = np.vstack([G.nodes[n]['pos'] for n in G.nodes()])
xmin, ymin = nodes_pos_gt.min(0)
xmax, ymax = nodes_pos_gt.max(0)
edges = list(G.edges())
P = np.array([G.nodes[s]['pos'] for s,t in edges])
Q = np.array([G.nodes[t]['pos'] for s,t in edges])
for _ in range(100):
xs = np.random.uniform(low=xmin, high=xmax, size=100)
ys = np.random.uniform(low=ymin, high=ymax, size=100)
random_positions = np.vstack([xs, ys]).T
S, D, id = | |
"There cannot be a happy end, for claw will slash and tooth will rend.",
"There cannot be a happy end, for claw will slash and tooth will rend.",
"You should be in bed.",
"I know where she is.",
"What do you think of climate change?"
"Nature always avenges herself on those who insult her",
"What is magic?",
"Magic is the art and science of causing change to occur in conformity with will. ",
"Are you magic?",
"Any sufficiently advanced technology is indistinguishable from magic.",
"Lucy, calm down.",
"Lucy, calm down.",
"I... I see something horrible happening.",
"You're just having another nightmare.",
"I'm scared.",
"Relax... It's only a bad dream.",
"Jonathan. You're working too hard.",
"It's not good for you.",
"I'm worried about you.",
"Good morning.",
"Ah, Harker, please come up here.",
"Yes, Mr. Renfield.",
"I have a very important job for you. And I don't trust anyone else to do it, but you.",
"You're very kind, Mr. Renfield.",
"Count Dracula...he sent me a letter from Transylvania.",
"He wants to buy a house around here.",
"I'm aware that it's a long trip to go there.",
"Where is that?",
"Somewhere over the Carpathian mountains, isn't it?",
"Yes...and there will be a large commission.",
"Oh, then I could get a larger house for Lucy.",
"She deserves a nicer house.",
"Well, this won't be easy.",
"It'll take you a long time...you'll sweat a lot...and perhaps you'll also...spill some blood.",
"It'll be good for me to get out of the city...get away from these canals...which go nowhere.",
"Transylvania...there it is, beyond the forest.",
"It's a wonderful place.",
"A little gloomy, maybe, but very exciting.",
"The Count is looking for a beautiful old house.",
"I'm going to show him the big house, near your place.",
"But it's been abandoned for years.",
"It looks like a haunted house.",
"I'm sure he'll feel very comfortable there.",
"Anyway, we'll offer it to him.",
"I'd like you to leave soon.",
"Soon? You mean, today?",
"Absolutely. I even prepared the papers.",
"It's bad business to let him wait.",
"Very well.",
"If you insist, I shall leave today.",
"Today? Are you serious?",
"Yes, to a castle in Transylvania.",
"I hear it's a strange place infested with wolves and people who see ghosts.",
"Jonathan, it scares me...I don't want you to go.",
"I'll be fine.",
"You'll be in danger.",
"Don't go.",
"Wolves. Bandits. And ghosts.",
"Don't go. I won't allow it.",
"Let's go to the ocean, now.",
"To the place where we fell in love.",
"Lucy...I don't know.",
"Sometimes I feel so ignorant.",
"Jonathan, I really have to say this...even if you think it comes from...the weak heart of a woman...your wife.",
"Yes?",
"It really comes from the strength of my heart. I feel a kind of...dark force...like a nameless deadly fear.",
"Lucy is the dearest thing in the world to me...take care of her.",
"Mina, treat her as a sister.",
"Yes.",
"Innkeeper! Quick, my supper. I must go to the castle of Count Dracula tonight.",
"Are you really going there?",
"Yes. Why?",
"It's not a safe place.",
"Why do you want to go there?",
"At midnight evil spirits come out. And people disappear without a trace.",
"Oh, come on, that's all superstition.",
"Not at all. Bad things will happen to you if you go there.",
"Besides, no coachman will dare take you there... your horse needs a rest for several days.",
"I'll find another horse.",
"Don't go, sir.",
"I know a few people...who have been on the other side...and they can tell you.",
"He also says you shouldn't go there, young man.",
"He says there's a great chasm on the way that swallows the unwary.",
"If that doesn't get you, he says the towering crags will.",
"And in the Borgo pass... the light suddenly divides and... the land begins to rise towards the heavens... and then it drops. ",
"No one has ever returned from that place.",
"The travellers say that no such castle exists there... except maybe in the imagination of man.",
"There used to be a castle there. But now it is a ghost castle. It's only a ruin.",
"And whoever enters into that land of phantoms is lost... and never returns.",
"Of vampires and bloodsuckers.",
"Of corpses which devour their own flesh.",
"Of incubuses and succubuses.",
"Of the living dead who follow strangers",
"in the night...and attack them.",
"Hmm...",
"beyond death... a curse that will last till the end of time.",
"The curse of Nosferatu.",
"Coachman, could you take me to the Borgo pass?",
"I'll pay you well.",
"There is no road to the Borgo pass.",
"But it's right there.",
"I need your coach.",
"I haven't got a coach.",
"Then I'll take one of your horses.",
"I'll double the price.",
"Sorry, sir, but I don't have any horses. Don't you see?",
"Well... I guess I'll have to walk there.",
"Count Dracula?!",
"I am Count Dracula.",
"Welcome to my castle.",
"You must be <NAME>.",
"Yes.",
"Come in.",
"The night is cold and you must be tired and hungry.",
"I've got the papers here.",
"This is the layout of the house... which I'm sure you'd like to see.",
"Yes, yes.",
"Please, help yourself.",
"Please, I'm afraid you must dine alone.",
"It's nearly midnight and I partake of nothing at this hour.",
"Unfortunately, the servants are not at our disposal.",
"So allow me to see to your comfort.",
"Listen.",
"Listen. The Children of the Night make their music.",
"Ah, young man...you're like the villagers who cannot place themselves in the soul of the hunter.",
"The knife is old and could be dirty.",
"It could give you blood poisoning.",
"Please let me do it. It's the oldest remedy in the world.",
"Oh, forget it. It's hardly worth mentioning. Just a little cut.",
"You... you know...it's only for the best.",
"Let's sit down for a while.",
"It's still many hours till dawn. And during the daytime I am always away.",
"Lucy, my love...there is no postal service from here... but I shall write a diary, preserving all my thoughts and feelings for you.",
"Last night after a tiresome journey, I finally reached my destination...the castle of Count Dracula.",
"I had a bad dream, but I hope it will pass.",
"This castle is so strange.",
"At times I wonder if it isn't part of that dream.",
"Everything about it looks so unreal. I don't attach importance to the sunshine anymore...or to glittering fountains, which youth is so fond of.",
"I love the darkness and the shadows. Where I can be alone with my thoughts.",
"I am the descendent of an old family. Time is an abyss, profound as a thousand nights.",
"Centuries come and go... to be unable to grow old is terrible.",
"That is not the worst. There are things more horrible than that.",
"Can you imagine enduring centuries...experiencing the same futility each day?",
"I'm glad you found such a large old house for me.",
"Very near your lodging, I understand.",
"Yes, it's just around the corner.",
"Ah... may I have a look at the contract?",
"What?...",
"What a lovely throat.",
"It's my wife, Lucy.",
"Your hand.",
"Your hand is so cold.",
"The document... the contract for the house, I must sign it immediately.",
"Yes, but we haven't settled on a price.",
"It doesn't matter between gentlemen... I accept whatever you find just.",
"How long did it take you to get here from Wismar?",
"Four weeks.",
"I...",
"...it takes a while to travel on land.",
"From the seed of Belial is the vampire born.",
"He who feeds on the blood of mankind... who, unredeemed, taketh his refuge in caves, tombs... coffins filled with the unblessed soil of cemeteries... wherein the black death has reaped its harvest...the plague.",
"Blood is life.",
"Jonathan!",
"She has a sudden fever.",
"Her pulse is too fast.",
"She needs to rest.",
"Call me if you think it is necessary...",
"...but I don't think it's something serious.",
"Oh, God, he must",
"be going to Wismar.",
"Lucy...Lucy's in danger.",
"I've got to get out of here.",
"It's strange...but the papers are all in order.",
"Let me see them...",
"From Varna to Wismar.",
"Garden soil for botanical experiments.",
"Open one of them.",
"I want to make sure.",
"This one.",
"Mother Superior...",
"stop the black coffins.",
"The patient who came yesterday had a seizure.",
"Which one?",
"The one who had bitten a cow.",
"Oh, yes.",
"We put him in isolation.",
"I'll go with you.",
"Blood is life.",
"Blood is life.",
"Has he done it for long?",
"He has, and he also refuses food.",
"Blood is life.",
"Let's go.",
"Help! Help me!",
"Be quiet... I hear the sounds of sails fluttering in the wind.",
"Still, there's no letter from him.",
"But, don't worry, Lucy... The mail from Transylvania is very slow.",
"Something has happened",
"God is far away when we need him.",
"Young man. You're not yet well.",
"You shouldn't travel.",
"The black coffins... I must get to Wismar before the coffins.",
"You should at least wait for the | |
import pytest
import numpy as np
from unittest.mock import MagicMock, patch
from qtpy import QtCore, QtGui
from qtpy.QtCore import Qt
from glue.utils.qt import get_qapp, process_events
from glue.core import Data, DataCollection
from glue.utils.qt import qt_to_mpl_color
from glue.app.qt import GlueApplication
from ..data_viewer import DataTableModel, TableViewer
from glue.core.edit_subset_mode import AndNotMode, OrMode, ReplaceMode
class TestDataTableModel():
def setup_method(self, method):
self.gapp = GlueApplication()
self.viewer = self.gapp.new_data_viewer(TableViewer)
self.data = Data(x=[1, 2, 3, 4], y=[2, 3, 4, 5])
self.gapp.data_collection.append(self.data)
self.viewer.add_data(self.data)
self.model = DataTableModel(self.viewer)
def teardown_method(self, method):
self.gapp.close()
self.gapp = None
def test_column_count(self):
assert self.model.columnCount() == 2
def test_column_count_hidden(self):
self.model.show_coords = True
assert self.model.columnCount() == 3
def test_header_data(self):
for i, c in enumerate(self.data.main_components):
result = self.model.headerData(i, Qt.Horizontal, Qt.DisplayRole)
assert result == c.label
for i in range(self.data.size):
result = self.model.headerData(i, Qt.Vertical, Qt.DisplayRole)
assert result == str(i)
def test_row_count(self):
assert self.model.rowCount() == 4
def test_data(self):
for i, c in enumerate(self.data.main_components):
for j in range(self.data.size):
idx = self.model.index(j, i)
result = self.model.data(idx, Qt.DisplayRole)
assert float(result) == self.data[c, j]
@pytest.mark.xfail
def test_data_2d(self):
self.data = Data(x=[[1, 2], [3, 4]], y=[[2, 3], [4, 5]])
self.model = DataTableModel(self.data)
for i, c in enumerate(self.data.main_components):
for j in range(self.data.size):
idx = self.model.index(j, i)
result = self.model.data(idx, Qt.DisplayRole)
assert float(result) == self.data[c].ravel()[j]
def check_values_and_color(model, data, colors):
for i in range(len(colors)):
for j, colname in enumerate(sorted(data)):
# Get index of cell
idx = model.index(i, j)
# Check values
value = model.data(idx, Qt.DisplayRole)
assert value == str(data[colname][i])
# Check colors
brush = model.data(idx, Qt.BackgroundRole)
if colors[i] is None:
assert brush is None
else:
assert qt_to_mpl_color(brush.color()) == colors[i]
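# Illustrative note (inferred from the calls below, not a separate spec): 'data' maps
# column name -> expected cell values in display order, and 'colors' holds one entry
# per row, either a matplotlib hex color for rows covered by a subset or None for rows
# that should have no background brush.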
def test_table_widget(tmpdir):
# Start off by creating a glue application instance with a table viewer and
# some data pre-loaded.
app = get_qapp()
d = Data(a=[1, 2, 3, 4, 5],
b=[3.2, 1.2, 4.5, 3.3, 2.2],
c=['e', 'b', 'c', 'a', 'f'])
dc = DataCollection([d])
gapp = GlueApplication(dc)
widget = gapp.new_data_viewer(TableViewer)
widget.add_data(d)
subset_mode = gapp._session.edit_subset_mode
# Create two subsets
sg1 = dc.new_subset_group('D <= 3', d.id['a'] <= 3)
sg1.style.color = '#aa0000'
sg2 = dc.new_subset_group('1 < D < 4', (d.id['a'] > 1) & (d.id['a'] < 4))
sg2.style.color = '#0000cc'
model = widget.ui.table.model()
# We now check what the data and colors of the table are, and try various
# sorting methods to make sure that things are still correct.
data = {'a': [1, 2, 3, 4, 5],
'b': [3.2, 1.2, 4.5, 3.3, 2.2],
'c': ['e', 'b', 'c', 'a', 'f']}
colors = ['#aa0000', '#380088', '#380088', None, None]
check_values_and_color(model, data, colors)
model.sort(1, Qt.AscendingOrder)
data = {'a': [2, 5, 1, 4, 3],
'b': [1.2, 2.2, 3.2, 3.3, 4.5],
'c': ['b', 'f', 'e', 'a', 'c']}
colors = ['#380088', None, '#aa0000', None, '#380088']
check_values_and_color(model, data, colors)
model.sort(2, Qt.AscendingOrder)
data = {'a': [4, 2, 3, 1, 5],
'b': [3.3, 1.2, 4.5, 3.2, 2.2],
'c': ['a', 'b', 'c', 'e', 'f']}
colors = [None, '#380088', '#380088', '#aa0000', None]
check_values_and_color(model, data, colors)
model.sort(0, Qt.DescendingOrder)
data = {'a': [5, 4, 3, 2, 1],
'b': [2.2, 3.3, 4.5, 1.2, 3.2],
'c': ['f', 'a', 'c', 'b', 'e']}
colors = [None, None, '#380088', '#380088', '#aa0000']
check_values_and_color(model, data, colors)
model.sort(0, Qt.AscendingOrder)
# We now modify the subsets using the table.
selection = widget.ui.table.selectionModel()
widget.toolbar.actions['table:rowselect'].toggle()
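# Helper that synthesizes a key press on the table widget and flushes the Qt event
# loop so the resulting selection/subset updates are applied before we assert on them.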
def press_key(key):
event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress, key, Qt.NoModifier)
app.postEvent(widget.ui.table, event)
app.processEvents()
process_events()
# We now use key presses to navigate down to the third row
press_key(Qt.Key_Tab)
press_key(Qt.Key_Down)
press_key(Qt.Key_Down)
process_events()
indices = selection.selectedRows()
# We make sure that the third row is selected
assert len(indices) == 1
assert indices[0].row() == 2
# At this point, the subsets haven't changed yet
np.testing.assert_equal(d.subsets[0].to_mask(), [1, 1, 1, 0, 0])
np.testing.assert_equal(d.subsets[1].to_mask(), [0, 1, 1, 0, 0])
# We specify that we are editing the second subset, and use a 'not' logical
# operation to remove the currently selected line from the second subset.
subset_mode.edit_subset = [d.subsets[1]]
subset_mode.mode = AndNotMode
press_key(Qt.Key_Enter)
np.testing.assert_equal(d.subsets[0].to_mask(), [1, 1, 1, 0, 0])
np.testing.assert_equal(d.subsets[1].to_mask(), [0, 1, 0, 0, 0])
# At this point, the selection should be cleared
indices = selection.selectedRows()
assert len(indices) == 0
# We move to the fourth row and now do an 'or' selection with the first
# subset.
press_key(Qt.Key_Down)
subset_mode.mode = OrMode
subset_mode.edit_subset = [d.subsets[0]]
press_key(Qt.Key_Enter)
np.testing.assert_equal(d.subsets[0].to_mask(), [1, 1, 1, 1, 0])
np.testing.assert_equal(d.subsets[1].to_mask(), [0, 1, 0, 0, 0])
# Finally we move to the fifth row and deselect all subsets so that
# pressing enter now creates a new subset.
press_key(Qt.Key_Down)
subset_mode.mode = ReplaceMode
subset_mode.edit_subset = None
press_key(Qt.Key_Enter)
np.testing.assert_equal(d.subsets[0].to_mask(), [1, 1, 1, 1, 0])
np.testing.assert_equal(d.subsets[1].to_mask(), [0, 1, 0, 0, 0])
np.testing.assert_equal(d.subsets[2].to_mask(), [0, 0, 0, 0, 1])
# Make the color for the new subset deterministic
dc.subset_groups[2].style.color = '#bababa'
# Now finally check saving and restoring session
session_file = tmpdir.join('table.glu').strpath
gapp.save_session(session_file)
gapp2 = GlueApplication.restore_session(session_file)
gapp2.show()
d = gapp2.data_collection[0]
widget2 = gapp2.viewers[0][0]
model2 = widget2.ui.table.model()
data = {'a': [1, 2, 3, 4, 5],
'b': [3.2, 1.2, 4.5, 3.3, 2.2],
'c': ['e', 'b', 'c', 'a', 'f']}
# Need to take into account new selections above
colors = ['#aa0000', '#380088', '#aa0000', "#aa0000", "#bababa"]
check_values_and_color(model2, data, colors)
def test_table_widget_session_no_subset(tmpdir):
# Regression test for a bug that caused table viewers with no subsets to
# not be restored correctly and instead raise an exception.
app = get_qapp() # noqa
d = Data(a=[1, 2, 3, 4, 5],
b=[3.2, 1.2, 4.5, 3.3, 2.2],
c=['e', 'b', 'c', 'a', 'f'], label='test')
dc = DataCollection([d])
gapp = GlueApplication(dc)
widget = gapp.new_data_viewer(TableViewer)
widget.add_data(d)
session_file = tmpdir.join('table.glu').strpath
gapp.save_session(session_file)
gapp2 = GlueApplication.restore_session(session_file)
gapp2.show()
gapp2.data_collection[0]
gapp2.viewers[0][0]
def test_change_components():
# Regression test for a bug that caused table viewers to not update when
# adding/removing components.
app = get_qapp() # noqa
d = Data(a=[1, 2, 3, 4, 5],
b=[3.2, 1.2, 4.5, 3.3, 2.2],
c=['e', 'b', 'c', 'a', 'f'], label='test')
dc = DataCollection([d])
gapp = GlueApplication(dc)
viewer = gapp.new_data_viewer(TableViewer)
viewer.add_data(d)
data_changed = MagicMock()
viewer.model.dataChanged.connect(data_changed)
# layoutChanged needs to be emitted for the new/removed columns to be
# registered (dataChanged is not enough)
layout_changed = MagicMock()
viewer.model.layoutChanged.connect(layout_changed)
assert data_changed.call_count == 0
assert layout_changed.call_count == 0
assert viewer.model.columnCount() == 2
d.add_component([3, 4, 5, 6, 2], 'z')
assert data_changed.call_count == 1
assert layout_changed.call_count == 1
assert viewer.model.columnCount() == 3
d.remove_component(d.id['z'])
assert data_changed.call_count == 2
assert layout_changed.call_count == 2
assert viewer.model.columnCount() == 2
def test_table_title():
app = get_qapp() # noqa
data1 = Data(a=[1, 2, 3, 4, 5], label='test1')
data2 = Data(a=[1, 2, 3, 4, 5], label='test2')
dc = DataCollection([data1, data2])
gapp = GlueApplication(dc)
viewer = gapp.new_data_viewer(TableViewer)
assert viewer.windowTitle() == 'Table'
viewer.add_data(data1)
assert viewer.windowTitle() == 'Table: test1'
viewer.add_data(data2)
assert viewer.windowTitle() == 'Table: test2'
def test_add_subset():
# Regression test for a bug that occurred when adding a subset
# directly to the table viewer.
data1 = Data(a=[1, 2, 3, 4, 5], label='test1')
data2 = Data(a=[1, 2, 3, 4, 5], label='test2')
dc = DataCollection([data1, data2])
dc.new_subset_group('test subset 1', data1.id['a'] > 2)
gapp = GlueApplication(dc)
viewer = gapp.new_data_viewer(TableViewer)
viewer.add_subset(data1.subsets[0])
assert len(viewer.state.layers) == 2
assert not viewer.state.layers[0].visible
assert viewer.state.layers[1].visible
dc.new_subset_group('test subset 2', data1.id['a'] <= 2)
assert len(viewer.state.layers) == 3
assert not viewer.state.layers[0].visible
assert viewer.state.layers[1].visible
assert viewer.state.layers[2].visible
viewer.remove_subset(data1.subsets[1])
assert len(viewer.state.layers) == 2
assert not viewer.state.layers[0].visible
assert viewer.state.layers[1].visible
viewer.add_subset(data1.subsets[1])
assert len(viewer.state.layers) == 3
assert not viewer.state.layers[0].visible
assert viewer.state.layers[1].visible
assert viewer.state.layers[2].visible
with pytest.raises(ValueError) as exc:
viewer.add_subset(data2.subsets[1])
assert exc.value.args[0] == 'subset parent data does not match existing table data'
def test_graceful_close_after_invalid(capsys):
# Regression test for a bug that caused an error if an invalid dataset
# was added to the viewer after the user had acknowledged the error.
d = Data(a=[[1, 2], [3, 4]], label='test')
dc = DataCollection([d])
gapp = GlueApplication(dc)
viewer = gapp.new_data_viewer(TableViewer)
gapp.show()
process_events()
with pytest.raises(ValueError, match='Can only use Table widget for 1D data'):
viewer.add_data(d)
viewer.close()
process_events()
# We use capsys here because the error is otherwise only apparent in stderr.
out, err = capsys.readouterr()
assert out.strip() == ""
assert err.strip() == ""
def test_incompatible_subset():
# Regression test for a bug that caused the table to be refreshed in an
# infinite loop if incompatible subsets were present.
data1 = Data(a=[1, 2, 3, 4, 5], label='test1')
data2 = Data(a=[1, 2, 3, 4, 5], label='test2')
dc = DataCollection([data1, data2])
gapp = GlueApplication(dc)
viewer = gapp.new_data_viewer(TableViewer)
viewer.add_data(data1)
dc.new_subset_group('test subset', data2.id['a'] > 2)
gapp.show()
process_events()
with patch.object(viewer.layers[0], '_refresh')
Metafield Value')
),
default='sema_html_value',
max_length=50
)
metafield_value_packaging_custom_value = TextField(
blank=True,
help_text='format: <html>...</html>'
)
metafield_value_fitments_choice = CharField(
choices=(
('sema_vehicles_value', 'SEMA Vehicles'),
('custom_fitments_metafield_value_value', 'Custom Fitments Metafield Value')
),
default='sema_vehicles_value',
max_length=50
)
metafield_value_fitments_custom_value = TextField(
blank=True,
help_text='format: [{"year", "make", "model", "submodel"}]',
verbose_name='Custom Fitments Metafields'
)
metafields_choice = CharField(
choices=(
('metafields_dict_all_value', 'All Metafields'),
('metafields_dict_packaging_value', 'Packaging Metafields'),
('metafields_dict_fitments_value', 'Fitments Metafields'),
('metafields_dict_custom_value', 'Custom Metafields')
),
default='metafields_dict_all_value',
max_length=50
)
metafields_custom_value = TextField(
blank=True,
help_text=(
'format: '
'[{"namespace", "key", "owner_resource", "value", "value_type"}]'),
)
tag_names_vendor_choice = CharField(
choices=(
('sema_brand_tag_names_value', 'SEMA Brand Tag Names'),
('custom_vendor_tag_names_value', 'Custom Vendor Tag Names')
),
default='sema_brand_tag_names_value',
max_length=50
)
tag_names_vendor_custom_value = TextField(
blank=True,
help_text='format: [""]'
)
tag_names_collection_choice = CharField(
choices=(
('sema_category_tag_names_value', 'SEMA Category Tag Names'),
('custom_collection_tag_names_value', 'Custom Collection Tag Names')
),
default='sema_category_tag_names_value',
max_length=50
)
tag_names_collection_custom_value = TextField(
blank=True,
help_text='format: [""]'
)
tags_choice = CharField(
choices=(
('tags_dict_all_value', 'All Tags'),
('tags_dict_vendor_value', 'Vendor Tags'),
('tags_dict_collection_value', 'Collection Tags'),
('tags_dict_custom_value', 'Custom Tags')
),
default='tags_dict_all_value',
max_length=50
)
tags_custom_value = TextField(
blank=True,
help_text='format: [{"name"}]'
)
image_urls_sema_choice = CharField(
choices=(
('sema_digital_asset_image_urls_value', 'SEMA Digital Asset Image URLs'),
('custom_sema_image_urls_value', 'Custom SEMA Image URLs')
),
default='sema_digital_asset_image_urls_value',
max_length=50
)
image_urls_sema_custom_value = TextField(
blank=True,
help_text='format: [""]'
)
image_urls_premier_choice = CharField(
choices=(
('premier_primary_image_urls_value', 'Premier Primary Image URLs'),
('custom_premier_image_urls_value', 'Custom Premier Image URLs')
),
default='premier_primary_image_urls_value',
max_length=50
)
image_urls_premier_custom_value = TextField(
blank=True,
help_text='format: [""]'
)
images_choice = CharField(
choices=(
('images_dict_all_value', 'All Images'),
('images_dict_sema_value', 'SEMA Images'),
('images_dict_premier_value', 'Premier Images'),
('images_dict_custom_value', 'Custom Images')
),
default='images_dict_sema_value',
max_length=50
)
images_custom_value = TextField(
blank=True,
help_text='[{"link"}]'
)
# <editor-fold desc="internal properties ...">
@property
def has_premier_product(self):
return bool(
self.product.item
and self.product.item.premier_product
)
@property
def has_sema_product(self):
return bool(
self.product.item
and self.product.item.sema_product
)
@property
def sema_product(self):
return (
self.product.item.sema_product
if self.has_sema_product else None
)
@property
def sema_brand(self):
return (
self.sema_product.dataset.brand
if self.has_sema_product
and self.sema_product.dataset.brand.is_relevant
else None
)
@property
def sema_categories(self):
return (
self.sema_product.categories.filter(
is_relevant=True
)
if self.has_sema_product else None
)
@property
def sema_vehicles(self):
return (
self.sema_product.vehicles.filter(
is_relevant=True
).order_by(
'base_vehicle__make_year__make__name',
'base_vehicle__model__name',
'submodel__name',
'base_vehicle__make_year__year__year'
)
if self.has_sema_product else None
)
@property
def sema_description_pies_attributes(self):
return (
self.sema_product.description_pies_attributes.filter(
is_relevant=True
)
if self.has_sema_product else None
)
@property
def sema_digital_assets_pies_attributes(self):
return (
self.sema_product.digital_assets_pies_attributes.filter(
is_relevant=True
)
if self.has_sema_product else None
)
@property
def premier_product(self):
return (
self.product.item.premier_product
if self.has_premier_product else None
)
@property
def shopify_product(self):
return self.product
@property
def shopify_variant(self):
return self.product.variants.first()
@property
def shopify_tags(self):
return self.product.tags.all()
@property
def shopify_metafields(self):
return self.product.metafields.all()
@property
def shopify_images(self):
return self.product.images.all()
def get_premier_product_attr_value(self, attr):
return getattr(self.premier_product, attr, None)
def get_sema_product_attr_value(self, attr):
return getattr(self.sema_product, attr, None)
def get_shopify_product_attr_value(self, attr):
return getattr(self.shopify_product, attr, None)
def get_shopify_variant_attr_value(self, attr):
return getattr(self.shopify_variant, attr, None)
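# Returns the value of the single description PIES attribute whose segment starts with the
# given prefix, or, when several match, a numbered join such as '1: first value, 2: second value'.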
def get_sema_description_pies_attribute_value(self, segment):
sema_description_pies_attributes = self.sema_description_pies_attributes
if not sema_description_pies_attributes:
return None
pies_attrs = sema_description_pies_attributes.filter(
segment__startswith=segment
)
if pies_attrs.count() == 0:
return None
elif pies_attrs.count() == 1:
return pies_attrs.first().value.strip()
else:
values = []
for index, pies_attr in enumerate(pies_attrs, start=1):
values.append(f'{index}: {pies_attr.value.strip()}')
return ', '.join(values)
def get_short_text_preview(self, value):
max_length = 200
if not isinstance(value, str):
return None
value_length = len(value)
if value_length <= max_length:
return value
return f'{value[:max_length]} (+{value_length - max_length})'
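# e.g. a 250-character string is truncated to its first 200 characters plus ' (+50)'.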
def get_short_images_preview(self, values):
max_length = 6
if not isinstance(values, list):
return None
value_length = len(values)
if value_length <= max_length:
return get_images_preview(values)
return (
get_images_preview(values[:max_length], width=50)
+ mark_safe(f'+({len(values) - max_length})')
)
# </editor-fold>
# <editor-fold desc="value properties ...">
@property
def premier_description_value(self):
field = 'description'
value = self.get_premier_product_attr_value(field)
if not value:
return None
return value.strip()
@property
def premier_weight_value(self):
field = 'weight'
value = self.get_premier_product_attr_value(field)
if not value:
return None
return round(value, 2)
@property
def premier_cost_cad_value(self):
field = 'cost_cad'
value = self.get_premier_product_attr_value(field)
if not value:
return None
return round(value, 2)
@property
def premier_cost_usd_value(self):
field = 'cost_usd'
value = self.get_premier_product_attr_value(field)
if not value:
return None
return round(value, 2)
@property
def premier_premier_part_number_value(self):
field = 'premier_part_number'
value = self.get_premier_product_attr_value(field)
if not value:
return None
return value.strip()
@property
def premier_upc_value(self):
field = 'upc'
value = self.get_premier_product_attr_value(field)
if not value:
return None
return value.strip()
@property
def premier_primary_image_urls_value(self):
field = 'primary_image'
value = self.get_premier_product_attr_value(field)
if not value:
return None
return [settings.COMPANY_HOST + value.url]
@property
def sema_description_def_value(self):
segment = 'C10_DEF'
return self.get_sema_description_pies_attribute_value(segment)
@property
def sema_description_des_value(self):
segment = 'C10_DES'
return self.get_sema_description_pies_attribute_value(segment)
@property
def sema_description_inv_value(self):
segment = 'C10_INV'
return self.get_sema_description_pies_attribute_value(segment)
@property
def sema_description_ext_value(self):
segment = 'C10_EXT'
return self.get_sema_description_pies_attribute_value(segment)
@property
def sema_description_tle_value(self):
segment = 'C10_TLE'
return self.get_sema_description_pies_attribute_value(segment)
@property
def sema_description_sho_value(self):
segment = 'C10_SHO'
return self.get_sema_description_pies_attribute_value(segment)
@property
def sema_description_mkt_value(self):
segment = 'C10_MKT'
return self.get_sema_description_pies_attribute_value(segment)
@property
def sema_description_key_value(self):
segment = 'C10_KEY'
return self.get_sema_description_pies_attribute_value(segment)
@property
def sema_description_asc_value(self):
segment = 'C10_ASC'
return self.get_sema_description_pies_attribute_value(segment)
@property
def sema_description_asm_value(self):
segment = 'C10_ASM'
return self.get_sema_description_pies_attribute_value(segment)
@property
def sema_description_fab_value(self):
segment = 'C10_FAB'
return self.get_sema_description_pies_attribute_value(segment)
@property
def sema_description_lab_value(self):
segment = 'C10_LAB'
return self.get_sema_description_pies_attribute_value(segment)
@property
def sema_description_shp_value(self):
segment = 'C10_SHP'
return self.get_sema_description_pies_attribute_value(segment)
@property
def sema_description_oth_value(self):
if not self.sema_description_pies_attributes:
return None
pies_attrs = self.sema_description_pies_attributes.exclude(
Q(segment__startswith='C10_DEF')
| Q(segment__startswith='C10_DES')
| Q(segment__startswith='C10_INV')
| Q(segment__startswith='C10_EXT')
| Q(segment__startswith='C10_TLE')
| Q(segment__startswith='C10_SHO')
| Q(segment__startswith='C10_MKT')
| Q(segment__startswith='C10_KEY')
| Q(segment__startswith='C10_ASC')
| Q(segment__startswith='C10_ASM')
| Q(segment__startswith='C10_FAB')
| Q(segment__startswith='C10_LAB')
| Q(segment__startswith='C10_SHP')
)
if pies_attrs.count() == 0:
return None
elif pies_attrs.count() == 1:
return pies_attrs.first().value.strip()
else:
values = []
for index, pies_attr in enumerate(pies_attrs, start=1):
values.append(f'{index}: {pies_attr.value.strip()}')
return ', '.join(values)
@property
def sema_html_value(self):
attr = 'clean_html'
if not self.sema_product:
return None
value = self.get_sema_product_attr_value(attr)
if not value:
return None
return value.strip()
@property
def sema_vehicles_value(self):
vehicles = self.sema_vehicles
if not vehicles:
return None
values = []
for vehicle in vehicles:
values.append(
{
'year': vehicle.base_vehicle.make_year.year.year,
'make': vehicle.base_vehicle.make_year.make.name,
'model': vehicle.base_vehicle.model.name,
'submodel': vehicle.submodel.name
}
)
return values
@property
def sema_brand_tag_names_value(self):
brand = self.sema_brand
if not brand:
return None
return [brand.tag_name]
@property
def sema_category_tag_names_value(self):
categories = self.sema_categories
if not categories:
return None
return [category.tag_name for category in categories]
@property
def sema_digital_asset_image_urls_value(self):
pies_attrs = self.sema_digital_assets_pies_attributes
if not pies_attrs:
return None
return [pies_attr.value for pies_attr in pies_attrs]
@property
def custom_title_value(self):
field = 'title_custom_value'
value = getattr(self, field)
if not value:
return None
return value.strip()
@property
def custom_body_html_value(self):
field = 'body_html_custom_value'
value = getattr(self, field)
if not value:
return None
return value.strip()
@property
def custom_variant_weight_value(self):
field = 'variant_weight_custom_value'
value = getattr(self, field)
if not value:
return None
return round(value, 2)
@property
def custom_variant_cost_value(self):
field = 'variant_cost_custom_value'
value = getattr(self, field)
if not value:
return None
return round(value, 2)
@property
def custom_variant_price_base_value(self):
field = 'variant_price_base_custom_value'
value = getattr(self, field)
if not value:
return None
return round(value, 2)
@property
def custom_variant_sku_value(self):
field = 'variant_sku_custom_value'
value = getattr(self, field)
if not value:
return None
return value.strip()
@property
def custom_variant_barcode_value(self):
field = 'variant_barcode_custom_value'
value = getattr(self, field)
if not value:
return None
return value.strip()
@property
def custom_packaging_metafield_value_value(self):
field = 'metafield_value_packaging_custom_value'
value = getattr(self, field)
if not value:
return None
return value.strip()
@property
def custom_fitments_metafield_value_value(self):
field = 'metafield_value_fitments_custom_value'
values = getattr(self, field)
if not values:
return None
return json.loads(values.strip())
@property
def custom_vendor_tag_names_value(self):
field = 'tag_names_vendor_custom_value'
values = getattr(self, field)
if not values:
return None
return json.loads(values.strip())
@property
def custom_collection_tag_names_value(self):
attr = 'tag_names_collection_custom_value'
values = getattr(self, attr)
if not values:
return None
return json.loads(values.strip())
@property
def custom_sema_image_urls_value(self):
attr = 'image_urls_sema_custom_value'
values = getattr(self, attr)
if not values:
return None
return json.loads(values.strip())
@property
def custom_premier_image_urls_value(self):
attr = 'image_urls_premier_custom_value'
values = getattr(self, attr)
if not values:
return None
return json.loads(values.strip())
@property
def metafields_dict_packaging_value(self):
choice_field = 'metafield_value_packaging_choice'
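# The choice field stores the *name* of the value property to use, hence the nested getattr.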
value = getattr(self, getattr(self, choice_field))
if not value:
return None
return [
{
'namespace': 'additional',
'key': 'packaging',
'owner_resource': ShopifyMetafield.PRODUCT_OWNER_RESOURCE,
'value': value,
'value_type': ShopifyMetafield.STRING_VALUE_TYPE
}
]
@property
def metafields_dict_fitments_value(self):
choice_field = 'metafield_value_fitments_choice'
values = getattr(self, getattr(self, choice_field))
if not values:
return None
return [
{
'namespace': 'additional',
'key': 'fitments',
'owner_resource': ShopifyMetafield.PRODUCT_OWNER_RESOURCE,
'value': json.dumps(values),
'value_type': ShopifyMetafield.JSON_VALUE_TYPE
}
]
@property
def metafields_dict_custom_value(self):
attr = 'metafields_custom_value'
values = getattr(self, attr)
if not values:
return None
return sorted(
json.loads(values.strip()),
key=lambda k: k['value']
)
@property
def metafields_dict_all_value(self):
attrs = [
'metafields_dict_packaging_value',
'metafields_dict_fitments_value',
'metafields_dict_custom_value'
]
metafields = []
for attr in attrs:
values = getattr(self, attr)
if values:
metafields += values
if not metafields:
return None
return sorted(metafields, key=lambda k: k['value'])
@property
def tags_dict_vendor_value(self):
choice_field = 'tag_names_vendor_choice'
values = getattr(self, getattr(self, choice_field))
if not values:
return None
return sorted(
[
{'name': tag_name}
for tag_name in values
],
key=lambda k: k['name']
)
@property
def tags_dict_collection_value(self):
choice_field = 'tag_names_collection_choice'
values = getattr(self, getattr(self, choice_field))
if not values:
return None
return sorted(
import os
import numpy as np
from tqdm import tqdm
from dotmap import DotMap
from itertools import chain
from collections import OrderedDict
from sklearn.cluster import KMeans
from scipy import stats
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import (
RobertaConfig,
RobertaModel,
RobertaForMaskedLM,
get_linear_schedule_with_warmup,
)
from src.utils import utils
from src.models.codelstm import CodeLSTMEncoder
from src.models.contracode import CodeTransformerEncoder
from src.models.monkeypatch import RobertaModel, RobertaForMaskedLM
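# NOTE: the monkeypatched RobertaModel / RobertaForMaskedLM shadow the versions imported from transformers above.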
from src.models.context import ContextEncoder, AttentionEncoder
from src.models.relation import RelationNetwork
from src.models.task import TaskEmbedding
from src.models.signatures import DistSign
from src.agents.base import BaseAgent
from src.objectives.prototype import batch_euclidean_dist
from src.datasets.feedback import MetaExamSolutions, SupervisedExamSolutions
class BaseCodeMetaAgent(BaseAgent):
def __init__(self, config):
super().__init__(config)
self.train_loss = []
self.train_acc = []
self.test_acc = []
self.temp = []
def _load_datasets(self):
if not self.config.cuda:
roberta_device = 'cpu'
else:
roberta_device = f'cuda:{self.config.gpu_device}'
self.train_dataset = MetaExamSolutions(
data_root=self.config.data_root,
n_shots=self.config.dataset.train.n_shots,
n_queries=self.config.dataset.test.n_queries,
train=True,
vocab=None,
train_frac=self.config.dataset.train_frac,
obfuscate_names=self.config.dataset.obfuscate_names,
max_num_var=self.config.dataset.max_num_var,
max_num_func=self.config.dataset.max_num_func,
max_seq_len=self.config.dataset.max_seq_len,
min_occ=self.config.dataset.min_occ,
augment_by_names=self.config.dataset.train.augment_by_names,
augment_by_rubric=self.config.dataset.train.augment_by_rubric,
roberta_rubric=self.config.dataset.train.roberta_rubric,
roberta_prompt=self.config.dataset.train.roberta_prompt,
roberta_tokenize=self.config.dataset.roberta_tokenize,
roberta_config=self.config.model.config,
roberta_device=roberta_device,
conservative=self.config.dataset.train.conservative,
cloze_tasks_factor=self.config.dataset.train.cloze_tasks_factor,
execution_tasks_factor=self.config.dataset.train.execution_tasks_factor,
smlmt_tasks_factor=self.config.dataset.train.smlmt_tasks_factor,
pad_to_max_num_class=self.config.optim.batch_size > 1,
hold_out_split=self.config.dataset.hold_out_split,
hold_out_category=self.config.dataset.hold_out_category,
enforce_binary=self.config.dataset.enforce_binary,
)
self.test_dataset = MetaExamSolutions(
data_root=self.config.data_root,
n_shots=self.config.dataset.train.n_shots,
n_queries=self.config.dataset.test.n_queries,
train=False,
vocab=self.train_dataset.vocab,
train_frac=self.config.dataset.train_frac,
obfuscate_names=self.config.dataset.obfuscate_names,
max_num_var=self.config.dataset.max_num_var,
max_num_func=self.config.dataset.max_num_func,
max_seq_len=self.config.dataset.max_seq_len,
min_occ=self.config.dataset.min_occ,
roberta_rubric=self.train_dataset.roberta_rubric,
roberta_prompt=self.config.dataset.train.roberta_prompt,
roberta_tokenize=self.config.dataset.roberta_tokenize,
roberta_config=self.config.model.config,
roberta_device=roberta_device,
pad_to_max_num_class=self.config.optim.batch_size > 1,
conservative=self.config.dataset.train.conservative,
cloze_tasks_factor=self.train_dataset.cloze_tasks_factor,
execution_tasks_factor=self.train_dataset.execution_tasks_factor,
smlmt_tasks_factor=self.config.dataset.train.smlmt_tasks_factor,
hold_out_split=self.config.dataset.hold_out_split,
hold_out_category=self.config.dataset.hold_out_category,
enforce_binary=self.config.dataset.enforce_binary,
)
def _load_loaders(self):
self.train_loader, self.train_len = self._create_dataloader(
self.train_dataset,
self.config.optim.batch_size,
shuffle=True,
)
self.test_loader, self.test_len = self._create_test_dataloader(
self.test_dataset,
self.config.optim.batch_size,
)
def _create_model(self):
if self.config.model.name == 'transformer':
vocab_size = self.train_dataset.vocab_size
model = CodeTransformerEncoder(
vocab_size,
d_model=self.config.model.d_model,
n_head=self.config.model.n_head,
n_encoder_layers=self.config.model.n_encoder_layers,
d_ff=self.config.model.d_ff,
dropout=0.1,
activation="relu",
norm=True,
pad_id=self.train_dataset.pad_index,
is_tam=self.config.model.task_tam,
is_tadam=self.config.model.task_tadam,
is_adapter=self.config.model.task_adapter,
)
elif self.config.model.name == 'roberta':
model = RobertaModel.from_pretrained(
self.config.model.config,
is_tam=self.config.model.task_tam,
is_tadam=self.config.model.task_tadam,
is_adapter=self.config.model.task_adapter,
)
# set everything to requires_grad = True
utils.reset_model_for_training(model)
if self.config.model.finetune:
for param in model.parameters():
param.requires_grad = False
for param in model.pooler.parameters():
param.requires_grad = True
# only allow some parameters to be finetuned
for param in model.encoder.layer[-self.config.model.finetune_layers:].parameters():
param.requires_grad = True
elif self.config.model.name == 'roberta_codesearch':
model = RobertaForMaskedLM.from_pretrained(
'roberta-base',
is_tam=self.config.model.task_tam,
is_tadam=self.config.model.task_tadam,
is_adapter=self.config.model.task_adapter,
)
# load the codesearch checkpoint
checkpoint = torch.load(
self.config.model.codesearch_checkpoint_path,
map_location='cpu',
)
raw_state_dict = checkpoint['state_dict']
state_dict = OrderedDict()
for k, v in raw_state_dict.items():
new_k = '.'.join(k.split('.')[1:])
state_dict[new_k] = v
model.load_state_dict(state_dict, strict=False)
model = model.roberta # only keep roberta
utils.reset_model_for_training(model)
if self.config.model.finetune:
for param in model.parameters():
param.requires_grad = False
# only allow some parameters to be finetuned
for param in model.encoder.layer[-self.config.model.finetune_layers:].parameters():
param.requires_grad = True
elif self.config.model.name == 'roberta_scratch':
config = RobertaConfig.from_pretrained(self.config.model.config)
model = RobertaModel(
config,
is_tadam=self.config.model.task_tadam,
is_adapter=self.config.model.task_adapter,
)
# set everything to requires_grad = True
utils.reset_model_for_training(model)
if self.config.model.finetune:
for param in model.parameters():
param.requires_grad = False
# only allow some parameters to be finetuned
for param in model.encoder.layer[-self.config.model.finetune_layers:].parameters():
param.requires_grad = True
elif self.config.model.name == 'lstm':
assert not self.config.model.task_tadam, "TADAM not supported for LSTMs."
assert not self.config.model.task_adapter, "Adapter not supported for LSTMs."
vocab_size = len(self.train_dataset.vocab['w2i'])
model = CodeLSTMEncoder(
vocab_size,
d_model=self.config.model.d_model,
n_encoder_layers=self.config.model.n_encoder_layers,
dropout=0.1,
is_tadam=self.config.model.task_tadam,
)
else:
raise Exception(f'Model {self.config.model.name} not supported.')
self.model = model.to(self.device)
d_model = self.config.model.d_model
bert_dim = 768
if self.config.model.task_concat:
# combine program embedding and rubric/question at the end of the forward pass
concat_fusor = TaskEmbedding(d_model+bert_dim*2, d_model, hid_dim=d_model)
self.concat_fusor = concat_fusor.to(self.device)
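# Learnable temperature: .to() on a Parameter can return a non-leaf copy, so detach it and
# re-enable grad to get a leaf tensor on the target device that the optimizer can update.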
tau = nn.Parameter(torch.ones(1)).to(self.device)
tau = tau.detach().requires_grad_(True)
self.tau = tau
def _all_parameters(self):
all_parameters = [self.model.parameters(), [self.tau]]
if self.config.model.task_concat:
all_parameters.append(self.concat_fusor.parameters())
return chain(*all_parameters)
def _create_optimizer(self):
if self.config.model.name in ['roberta', 'roberta_mlm', 'roberta_scratch']:
optimizer = torch.optim.AdamW(
self._all_parameters(),
lr=self.config.optim.learning_rate,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=0.01,
)
num_training_steps = len(self.train_dataset) * self.config.optim.num_epochs
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=self.config.optim.warmup_steps,
num_training_steps=num_training_steps,
)
self.optim = optimizer
self.scheduler = scheduler
self.config.optim.use_scheduler = True
else:
# this is the one used for Adam
self.optim = torch.optim.AdamW(
self._all_parameters(),
lr=self.config.optim.learning_rate,
betas=(0.9, 0.98),
weight_decay=self.config.optim.weight_decay,
)
if self.config.optim.use_scheduler:
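# Inverse square-root warm-up schedule (the "Noam" schedule from the Transformer paper).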
def schedule(step_num):
d_model = self.config.model.d_model
warmup_steps = self.config.optim.warmup_steps
step_num += 1
lrate = d_model**(-0.5) * min(step_num**(-0.5), step_num * warmup_steps**(-1.5))
return lrate
self.scheduler = torch.optim.lr_scheduler.LambdaLR(self.optim, schedule)
def train_one_epoch(self):
raise NotImplementedError
def eval_test(self):
raise NotImplementedError
def train(self):
for epoch in range(self.current_epoch, self.config.optim.num_epochs):
self.current_epoch = epoch
self.train_one_epoch()
if (self.config.validate and epoch % self.config.validate_freq == 0):
self.eval_test()
self.save_checkpoint()
if self.iter_with_no_improv > self.config.optim.patience:
self.logger.info("Exceeded patience. Stop training...")
break
def save_metrics(self):
out_dict = {
'model_state_dict': self.model.state_dict(),
'optim_state_dict': self.optim.state_dict(),
'tau': self.tau,
'epoch': self.current_epoch,
'iteration': self.current_iteration,
'loss': self.current_loss,
'val_iteration': self.current_val_iteration,
'val_metric': self.current_val_metric,
'config': self.config,
'train_acc': np.array(self.train_acc),
'train_loss': np.array(self.train_loss),
'test_acc': np.array(self.test_acc),
'temp': np.array(self.temp),
}
if self.config.model.task_concat:
out_dict['concat_fusor_state_dict'] = self.concat_fusor.state_dict()
return out_dict
def load_checkpoint(
self,
filename,
checkpoint_dir=None,
load_model=True,
load_optim=False,
load_epoch=False,
):
if checkpoint_dir is None:
checkpoint_dir = self.config.checkpoint_dir
filename = os.path.join(checkpoint_dir, filename)
try:
self.logger.info("Loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename, map_location='cpu')
if load_epoch:
self.current_epoch = checkpoint['epoch']
self.current_iteration = checkpoint['iteration']
self.current_val_iteration = checkpoint['val_iteration']
self.train_loss = list(checkpoint['train_loss'])
self.train_acc = list(checkpoint['train_acc'])
self.test_acc = list(checkpoint['test_acc'])
self.temp = list(checkpoint['temp'])
self.current_val_metric = checkpoint['val_metric']
if load_model:
model_state_dict = checkpoint['model_state_dict']
self.model.load_state_dict(model_state_dict)
self.tau.data = checkpoint['tau'].to(self.tau.device)
if self.config.model.task_concat:
concat_fusor_state_dict = checkpoint['concat_fusor_state_dict']
self.concat_fusor.load_state_dict(concat_fusor_state_dict)
if load_optim:
optim_state_dict = checkpoint['optim_state_dict']
self.optim.load_state_dict(optim_state_dict)
self.logger.info("Checkpoint loaded successfully from '{}' at (epoch {}) at (iteration {})\n"
.format(filename, checkpoint['epoch'], checkpoint['iteration']))
return checkpoint
except OSError as e:
self.logger.info("Checkpoint doesnt exists: [{}]".format(filename))
raise e
class CodePrototypeNetAgent(BaseCodeMetaAgent):
def compute_loss(
self,
support_features,
support_targets,
query_features,
query_targets,
):
batch_size, nway, nquery, dim = query_features.size()
prototypes = torch.mean(support_features, dim=2)
query_features_flat = query_features.view(batch_size, nway * nquery, dim)
# batch-based euclidean dist between prototypes and query_features_flat
# dists: batch_size x nway * nquery x nway
dists = self.tau * batch_euclidean_dist(query_features_flat, prototypes)
logprobas = F.log_softmax(-dists, dim=2).view(batch_size, nway, nquery, -1)
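# Gather the log-probability of the true class for each query example; negating gives the NLL.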
loss = -logprobas.gather(3, query_targets.unsqueeze(3)).squeeze()
loss = loss.view(-1).mean()
acc = utils.get_accuracy(logprobas.view(batch_size, nway*nquery, -1),
query_targets.view(batch_size, nway*nquery))
return loss, acc, logprobas
def compute_masked_means(self, outputs, masks):
# we don't want to include padding tokens
# outputs : B x T x D
# masks : B x T
dim = outputs.size(2)
masks_dim = masks.unsqueeze(2).repeat(1, 1, dim)
# masked_outputs : B x T x D
masked_outputs = outputs * masks_dim # makes the masked entries 0
# masked_outputs: B x D / B x 1 => B x D
partition = torch.sum(masks, dim=1, keepdim=True)
masked_outputs = torch.sum(masked_outputs, dim=1) / partition
return masked_outputs
def forward(self, batch, n_shots, n_queries):
# NOTE: n_shots, n_queries are unused
support_toks = batch['support_toks'].to(self.device)
support_lens = batch['support_lens'].to(self.device)
support_masks = batch['support_masks'].to(self.device)
support_labs = batch['support_labs'].to(self.device)
query_toks = batch['query_toks'].to(self.device)
query_lens = batch['query_lens'].to(self.device)
query_masks = batch['query_masks'].to(self.device)
query_labs = batch['query_labs'].to(self.device)
rubric_embs = batch['rubric_embs'].to(self.device)
prompt_embs = batch['prompt_embs'].to(self.device)
bert_dim = batch['rubric_embs'].size(-1)
if self.config.override_n_shots != DotMap(): # NOTE: used in test time to vary supervision
assert self.config.override_n_shots <= support_toks.size(2)
if self.config.override_n_shots == 0:
# separate procedure for zero-shot
return self.zero_shot_forward(batch, n_shots, n_queries)
# if > 0, we can just pretend like we have less
support_toks = support_toks[:, :, :self.config.override_n_shots, :].contiguous()
support_lens = support_lens[:, :, :self.config.override_n_shots].contiguous()
support_masks = support_masks[:, :, :self.config.override_n_shots, :].contiguous()
support_labs = support_labs[:, :, :self.config.override_n_shots].contiguous()
batch_size = support_toks.size(0)
n_ways = support_toks.size(1)
n_support = support_toks.size(2)
n_query = query_toks.size(2)
seq_len = support_toks.size(-1)
# support_toks: batch_size*n_ways*n_support x seq_len
support_toks = support_toks.view(-1, seq_len)
support_lens = support_lens.view(-1)
support_masks = support_masks.view(-1, seq_len).long()
query_toks = query_toks.view(-1, seq_len)
query_lens = query_lens.view(-1)
query_masks = query_masks.view(-1, seq_len).long()
# rubric_embs: batch_size*n_ways x bert_dim
rubric_embs = rubric_embs.view(-1, bert_dim)
support_rubric_embs = rubric_embs.unsqueeze(1).repeat(1, n_support, 1)
# support_rubric_embs: batch_size*n_ways*n_support x bert_dim
support_rubric_embs = support_rubric_embs.view(-1, bert_dim)
# query_rubric_embs: batch_size*n_ways*n_query x bert_dim
query_rubric_embs = rubric_embs.unsqueeze(1).repeat(1, n_query, 1)
query_rubric_embs = query_rubric_embs.view(-1, bert_dim)
# prompt_embs: batch_size*n_ways x bert_dim
prompt_embs = prompt_embs.view(-1, bert_dim)
support_prompt_embs = prompt_embs.unsqueeze(1).repeat(1, n_support, 1)
# support_prompt_embs: batch_size*n_ways*n_support x bert_dim
support_prompt_embs = support_prompt_embs.view(-1, bert_dim)
query_prompt_embs = prompt_embs.unsqueeze(1).repeat(1, n_query, 1)
# query_prompt_embs: batch_size*n_ways*n_query x bert_dim
query_prompt_embs = query_prompt_embs.view(-1, bert_dim)
if self.config.model.name == 'lstm':
# support_tam_features : ... x 2 x bert_dim
# query_tam_features : ... x 2 x bert_dim
support_tam_features = torch.cat([support_rubric_embs.unsqueeze(1),
support_prompt_embs.unsqueeze(1)], dim=1)
query_tam_features = torch.cat([query_rubric_embs.unsqueeze(1),
query_prompt_embs.unsqueeze(1)], dim=1)
# support_features: batch_size*n_ways*n_support x dim
# query_features: batch_size*n_ways*n_query x dim
support_features = self.model(
support_toks,
support_lens,
tam_embeds=support_tam_features,
)
query_features = self.model(
query_toks,
query_lens,
tam_embeds=query_tam_features,
)
else:
# support_features: batch_size*n_ways*n_support x T x dim
# query_features: batch_size*n_ways*n_query x T x dim
if self.config.model.task_tam:
# support_tam_features : ... x 2 x bert_dim
# query_tam_features : ... x 2 x bert_dim
support_tam_features = torch.cat([support_rubric_embs.unsqueeze(1),
support_prompt_embs.unsqueeze(1)], dim=1)
query_tam_features = torch.cat([query_rubric_embs.unsqueeze(1),
query_prompt_embs.unsqueeze(1)], dim=1)
support_features = self.model(
input_ids=support_toks,
attention_mask=support_masks,
tam_embeds=support_tam_features,
)[0]
query_features = self.model(
input_ids=query_toks,
attention_mask=query_masks,
tam_embeds=query_tam_features,
)[0]
elif self.config.model.task_adapter or self.config.model.task_tadam:
# NOTE: we assume we don't use adapter/tadam/tam at the same time.
support_task_features = torch.cat([support_rubric_embs, support_prompt_embs], dim=1)
query_task_features = torch.cat([query_rubric_embs, query_prompt_embs], dim=1)
support_features = self.model(
input_ids=support_toks,
attention_mask=support_masks,
tadam_or_adapter_embeds=support_task_features,
)[0]
query_features = self.model(
input_ids=query_toks,
attention_mask=query_masks,
tadam_or_adapter_embeds=query_task_features,
)[0]
else:
support_features
# Repository: JonathanGailliez/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class BuildsOperations(object):
"""BuildsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The client API version. Constant value: "2018-02-01-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-02-01-preview"
self.config = config
def list(
self, resource_group_name, registry_name, filter=None, top=None, skip_token=None, custom_headers=None, raw=False, **operation_config):
"""Gets all the builds for a registry.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param filter: The builds filter to apply on the operation.
:type filter: str
:param top: $top is supported for get list of builds, which limits the
maximum number of builds to return.
:type top: int
:param skip_token: $skipToken is supported on get list of builds,
which provides the next page in the list of builds.
:type skip_token: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Build
:rtype:
~azure.mgmt.containerregistry.v2018_02_01_preview.models.BuildPaged[~azure.mgmt.containerregistry.v2018_02_01_preview.models.Build]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.BuildPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.BuildPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/builds'}
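# Usage sketch (hypothetical names; assumes `builds_ops` is a BuildsOperations instance wired
# to an authenticated management client):
#     for build in builds_ops.list('my-rg', 'myregistry', top=10):
#         ...  # each item is deserialized into a Build model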
def get(
self, resource_group_name, registry_name, build_id, custom_headers=None, raw=False, **operation_config):
"""Gets the detailed information for a given build.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param build_id: The build ID.
:type build_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Build or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.containerregistry.v2018_02_01_preview.models.Build
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'buildId': self._serialize.url("build_id", build_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Build', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/builds/{buildId}'}
def _update_initial(
self, resource_group_name, registry_name, build_id, is_archive_enabled=None, custom_headers=None, raw=False, **operation_config):
build_update_parameters = models.BuildUpdateParameters(is_archive_enabled=is_archive_enabled)
# Construct URL
url = self.update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'buildId': self._serialize.url("build_id", build_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(build_update_parameters, 'BuildUpdateParameters')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Build', response)
if response.status_code == 201:
deserialized = self._deserialize('Build', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, registry_name, build_id, is_archive_enabled=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Patch the build properties.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param build_id: The build ID.
:type build_id: str
:param is_archive_enabled: The value that indicates whether archiving
is enabled or not.
:type is_archive_enabled: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns Build or
ClientRawResponse<Build> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.containerregistry.v2018_02_01_preview.models.Build]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.containerregistry.v2018_02_01_preview.models.Build]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
build_id=build_id,
is_archive_enabled=is_archive_enabled,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Build', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/builds/{buildId}'}
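# Usage sketch (hypothetical names): update() returns an LROPoller, so block on the result:
#     poller = builds_ops.update('my-rg', 'myregistry', '<buildId>', is_archive_enabled=True)
#     build = poller.result()  # waits for the ARM long-running operation to complete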
def get_log_link(
self, resource_group_name, registry_name, build_id, custom_headers=None, raw=False, **operation_config):
"""Gets a link to download the build logs.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param build_id: The build ID.
:type build_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: BuildGetLogResult or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.containerregistry.v2018_02_01_preview.models.BuildGetLogResult
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_log_link.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'buildId': self._serialize.url("build_id", build_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BuildGetLogResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_log_link.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/builds/{buildId}/getLogLink'}
def _cancel_initial(
self, resource_group_name, registry_name, build_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.cancel.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'buildId': self._serialize.url("build_id", build_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is | |
xml = xmlDoc.toxml('utf-8')
xml = xml.replace('<svg', '<svg width="%ipx" height="%ipx"' % (width_in_px, height_in_px), 1)
return xml
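# (The helper above injects explicit pixel width/height attributes into the <svg> root element.)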
# data should be an object of the ResultAlternativeConcernTableData class
def convertAlternativeConcernSessionResultToSvg(data):
########### Settings ###########
fontSize = 30
f = ImageFont.truetype(DENDROGRAM_FONT_LOCATION, fontSize)
newWordFontSize = 20
newWordF = ImageFont.truetype(DENDROGRAM_FONT_LOCATION, newWordFontSize)
newWord = 'new'
newWordColor = (243, 178, 126)
headerColor = (255, 255, 255)
headerFontSize = 30
headerFontName = 'arial'
headerConcern = 'Concern'
headerAlternative = 'Alternative'
fHeader = ImageFont.truetype(DENDROGRAM_FONT_LOCATION, headerFontSize)
yHeaderOffset = 10
fontName = 'arial'
xWordCellSpace = 5 # space between a word and the right and left line of the cell (in pixels)
yWordCellSpace = 5 # space between the biggest word and the top and bottom line of the cell (in pixels)
concernTableYOffset = 5 # offset from the top element. In pixels
alternativeTableYOffset = 5 # offset from the top element. In pixels
concernTableXOffset = 5 # offset from the right element. In pixels
alternativeTableXOffset = 40 # offset from the right element. In pixels
xNewWordOffset = 5 # offset of the new word from the most left element. In pixels
tableLineThickness = 1 # in pixels
tableLineColor = (202, 217, 237) # value in rgb
tableCellWordColor = (0, 0, 0) # value in rgb
tableHeaderCellWordColor = (185, 187, 189) # value in rgb
concernTableHeaderBackground = (232, 234, 237)
alternativeTableHeaderBackground = (232, 234, 237)
tableBackgroundColor1 = (214, 233, 246)
tableBackgroundColor2 = (189, 209, 247)
shadowXOffset = 3 # value in pixels
shadowYOffset = 3 # value in pixels
shadowBlurSize = 4 # how big the bluer should be
########### End settings ###########
tempData = __TableData___()
alternativeTableData = []
concernTableData = []
alternativeTableHeader = ['name', 'times cited']
concernTableHeader = ['left', 'right', 'times cited']
concernsNRows = len(data.concerns) + 1 # +1 because of the header
alternativesNRows = len(data.alternatives) + 1 # +1 because of the header
concernsNCols = len(concernTableHeader)
alternativesNCols = len(alternativeTableHeader)
concernCellWidths = [0 for x in xrange(concernsNCols)] # @UnusedVariable
alternativeCellWidths = [0 for x in xrange(alternativesNCols)] # @UnusedVariable
concernCellHeight = 0
alternativeCellHeight = 0
concernTableTotalXOffset = 0
alternativeTableTotalXOffset = 0
hasNewConcerns = False
newWordSize = newWordF.getsize(newWord)
headerMaxHeight = 0
concernTableTotalYOffset = 0
alternativeTableTotalYOffset = 0
#################### Pre-processing ####################
# create the correct format for the table data
# concern
i = 1
j = 0
concernTableData.append(concernTableHeader)
while i < concernsNRows:
row = []
row.append(data.concerns[i - 1][0])
row.append(data.concerns[i - 1][1])
row.append(data.concerns[i - 1][2])
if data.concerns[i - 1][5] == True:
hasNewConcerns = True
concernTableData.append(row)
# calculate the cell height and width
while j < concernsNCols:
word = row[j]
if type(word) != StringType and type(word) != UnicodeType:
word = str(word)
size = f.getsize(word)
if size[0] > concernCellWidths[j]:
concernCellWidths[j] = size[0]
if size[1] > concernCellHeight:
concernCellHeight = size[1]
j += 1
j = 0
i += 1
# alternative
i = 1
j = 0
alternativeTableData.append(alternativeTableHeader)
while i < alternativesNRows:
row = []
row.append(data.alternatives[i - 1][0])
row.append(data.alternatives[i - 1][1])
alternativeTableData.append(row)
# calculate the cell height and width
while j < alternativesNCols:
word = row[j]
if type(word) != StringType and type(word) != UnicodeType:
word = str(word)
size = f.getsize(word)
if size[0] > alternativeCellWidths[j]:
alternativeCellWidths[j] = size[0]
if size[1] > alternativeCellHeight:
alternativeCellHeight = size[1]
j += 1
j = 0
i += 1
# account for the header word sizes (a header may be wider than its column data)
# concern
i = 0
while i < concernsNCols:
size = f.getsize(concernTableData[0][i])
if size[0] > concernCellWidths[i]:
concernCellWidths[i] = size[0]
if size[1] > concernCellHeight:
concernCellHeight = size[1]
i += 1
# alternative
i = 0
while i < alternativesNCols:
size = f.getsize(alternativeTableData[0][i])
if size[0] > alternativeCellWidths[i]:
alternativeCellWidths[i] = size[0]
if size[1] > alternativeCellHeight:
alternativeCellHeight = size[1]
i += 1
# calculate the max height of the headers
size = fHeader.getsize(headerConcern)
if size[1] > headerMaxHeight:
headerMaxHeight = size[1]
size = fHeader.getsize(headerAlternative)
if size[1] > headerMaxHeight:
headerMaxHeight = size[1]
# add xWordCellSpace * 2 to each position of the array (in place)
concernCellWidths[:] = [x + (xWordCellSpace * 2) for x in concernCellWidths]
alternativeCellWidths[:] = [x + (xWordCellSpace * 2) for x in alternativeCellWidths]
concernTableTotalXOffset += concernTableXOffset
alternativeTableTotalXOffset += concernTableTotalXOffset + sum(concernCellWidths) + (
(concernsNCols + 1) * tableLineThickness) + alternativeTableXOffset
if hasNewConcerns:
alternativeTableTotalXOffset += xNewWordOffset + newWordSize[0]
# calculate the total y offset for the alternative and concern table
concernTableTotalYOffset = yHeaderOffset + headerMaxHeight + concernTableYOffset
alternativeTableTotalYOffset = yHeaderOffset + headerMaxHeight + alternativeTableYOffset
#################### End pre-processing ####################
imp = SvgDOMImplementation()
xmlDoc = imp.createSvgDocument()
root = xmlDoc.documentElement
root.setXmlns('http://www.w3.org/2000/svg')
root.setVersion('1.1')
globalDefNode = xmlDoc.createDefsNode()
root.appendChild(globalDefNode)
##########create the shadow filter ##########
filterNode = xmlDoc.createFilterNode()
filterNode.setId('shadow1')
filterNode.setX(0)
filterNode.setY(0)
filterNode.setWidth('150%')
filterNode.setHeight('150%')
tempNode = xmlDoc.createFeOffsetNode()
tempNode.setDx(shadowXOffset)
tempNode.setDy(shadowYOffset)
tempNode.setResult('offOut')
tempNode.setIn('SourceGraphic')
filterNode.appendChild(tempNode)
tempNode = xmlDoc.createFeGaussianBlurNode()
tempNode.setResult('blurOut')
tempNode.setIn('offOut')
tempNode.setStdDeviation(shadowBlurSize)
filterNode.appendChild(tempNode)
tempNode = xmlDoc.createFeBlendNode()
tempNode.setIn('SourceGraphic')
tempNode.setIn2('blurOut')
tempNode.setMode('normal')
filterNode.appendChild(tempNode)
globalDefNode.appendChild(filterNode)
########## end create the shadow filter ##########
########## Create the glow filter ##########
filterNode = xmlDoc.createFilterNode()
filterNode.setId('glow1')
filterNode.setFilterUnits('userSpaceOnUse')
filterNode.setX(0)
filterNode.setY(0)
filterNode.setWidth(400)
filterNode.setHeight(400)
tempNode = xmlDoc.createFeGaussianBlurNode()
tempNode.setIn('SourceGraphic')
tempNode.setStdDeviation(25)
filterNode.appendChild(tempNode)
globalDefNode.appendChild(filterNode)
########## End create the glow filter ##########
########## Create the headers of the concerns and alternative tables ##########
headerGroup = xmlDoc.createGNode()
headerGroup.setId('headerGroup')
# concern
tempNode = xmlDoc.createSvgTextNode(concernTableXOffset, yHeaderOffset + headerMaxHeight - 5, headerConcern)
tempNode.setFontFamily(headerFontName)
tempNode.setFontSize(str(headerFontSize) + 'px')
tempNode.setColor(createColorRGBString(headerColor))
headerGroup.appendChild(tempNode)
# alternative
tempNode = xmlDoc.createSvgTextNode(alternativeTableTotalXOffset, yHeaderOffset + headerMaxHeight - 5,
headerAlternative)
tempNode.setFontFamily(headerFontName)
tempNode.setFontSize(str(headerFontSize) + 'px')
tempNode.setColor(createColorRGBString(headerColor))
headerGroup.appendChild(tempNode)
########## End Create the headers of the concerns and alternative tables ##########
#################### Create the background of the header tables ####################
# concern
concernTableHeaderBackgroundGround = xmlDoc.createGNode()
concernTableHeaderBackgroundGround.setId('concernTableHeaderBackgroundGround')
tempNode = xmlDoc.createRectNode(concernTableTotalXOffset, concernTableTotalYOffset,
concernCellHeight + tableLineThickness,
sum(concernCellWidths) + (concernsNCols * tableLineThickness))
tempNode.setFill(createColorRGBString(concernTableHeaderBackground))
concernTableHeaderBackgroundGround.appendChild(tempNode)
# alternative
alternativeTableHeaderBackgroundGround = xmlDoc.createGNode()
alternativeTableHeaderBackgroundGround.setId('alternativeTableHeaderBackgroundGround')
tempNode = xmlDoc.createRectNode(alternativeTableTotalXOffset, alternativeTableTotalYOffset,
alternativeCellHeight + tableLineThickness,
sum(alternativeCellWidths) + (alternativesNCols * tableLineThickness))
tempNode.setFill(createColorRGBString(alternativeTableHeaderBackground))
alternativeTableHeaderBackgroundGround.appendChild(tempNode)
#################### End create the background of the header tables ####################
#################### Create the background of the data part of the tables ####################
# concern
i = 1
concernTableDataBackgrounGroup = xmlDoc.createGNode()
concernTableDataBackgrounGroup.setId('concernTableDataBackgrounGroup')
while i < concernsNRows:
x = concernTableTotalXOffset
y = concernTableTotalYOffset + (i * concernCellHeight) + (i * tableLineThickness) + tableLineThickness / 2
tempNode = xmlDoc.createRectNode(x, y, concernCellHeight + tableLineThickness,
sum(concernCellWidths) + tableLineThickness * concernsNCols)
if i % 2 == 0:
tempNode.setFill(createColorRGBString(tableBackgroundColor2))
else:
tempNode.setFill(createColorRGBString(tableBackgroundColor1))
concernTableDataBackgrounGroup.appendChild(tempNode)
i += 1
# alternative
i = 1
alternativeTableDataBackgrounGroup = xmlDoc.createGNode()
alternativeTableDataBackgrounGroup.setId('alternativeTableDataBackgrounGroup')
while i < alternativesNRows:
x = alternativeTableTotalXOffset
y = alternativeTableTotalYOffset + (i * alternativeCellHeight) + (
i * tableLineThickness) + tableLineThickness / 2
tempNode = xmlDoc.createRectNode(x, y, alternativeCellHeight + tableLineThickness,
sum(alternativeCellWidths) + tableLineThickness * alternativesNCols)
if i % 2 == 0:
tempNode.setFill(createColorRGBString(tableBackgroundColor2))
else:
tempNode.setFill(createColorRGBString(tableBackgroundColor1))
alternativeTableDataBackgrounGroup.appendChild(tempNode)
i += 1
#################### End create the background of the data part of the tables ####################
#################### Add the 'new' word next to the tables ####################
# concern
concernNewWordGroup = xmlDoc.createGNode()
concernNewWordGroup.setId('concernNewWordGroup')
i = 1
x = concernTableTotalXOffset + sum(concernCellWidths) + (concernsNCols + 1) * tableLineThickness + xNewWordOffset
while i < concernsNRows:
if data.concerns[i - 1][5] == True:
y = concernTableTotalYOffset + (i * concernCellHeight) + concernCellHeight / 2 + newWordSize[1] / 2 - 5
tempNode = xmlDoc.createSvgTextNode(x, y, newWord)
tempNode.setFontFamily(fontName)
tempNode.setFontSize(str(newWordFontSize) + 'px')
tempNode.setFill('none')
tempNode.setStroke(createColorRGBString(newWordColor))
tempNode.setStrokeWidth(1)
tempNode.setFilter('url(#shadow1)')
concernNewWordGroup.appendChild(tempNode)
i += 1
# alternative
alternativeNewWordGroup = xmlDoc.createGNode()
alternativeNewWordGroup.setId('alternativeNewWordGroup')
i = 1
x = alternativeTableTotalXOffset + sum(alternativeCellWidths) + (
alternativesNCols + 1) * tableLineThickness + xNewWordOffset
while i < alternativesNRows:
if data.alternatives[i - 1][2] == True:
y = alternativeTableTotalYOffset + (i * alternativeCellHeight) + alternativeCellHeight / 2 + newWordSize[
1] / 2 - 5
tempNode = xmlDoc.createSvgTextNode(x, y, newWord)
tempNode.setFontFamily(fontName)
tempNode.setFontSize(str(newWordFontSize) + 'px')
tempNode.setFill('none')
tempNode.setStroke(createColorRGBString(newWordColor))
tempNode.setStrokeWidth(1)
tempNode.setFilter('url(#shadow1)')
alternativeNewWordGroup.appendChild(tempNode)
i += 1
#################### End add the 'new' word next to the tables ####################
tempData.fontSize = fontSize
tempData.fontObject = f
tempData.fontName = fontName
tempData.lineThickness = tableLineThickness
tempData.tableLineColor = tableLineColor
tempData.tableCellWordColor = tableCellWordColor
tempData.tableHeaderCellWordColor = tableHeaderCellWordColor
tempData.yWordCellSpace = yWordCellSpace
tempData.xWordCellSpace = xWordCellSpace
tempData.cellHeight = concernCellHeight
tempData.cellWidths = concernCellWidths
tempData.nCols = concernsNCols
tempData.nRows = concernsNRows
tempData.tableData = concernTableData
tempData.yTableOffSet = concernTableTotalYOffset
tempData.xTableOffSet = concernTableTotalXOffset
xmlConcernTable = __createSvgTable__(tempData)
root.appendChild(concernTableHeaderBackgroundGround)
root.appendChild(concernTableDataBackgrounGroup)
root.appendChild(concernNewWordGroup)
root.appendChild(xmlConcernTable)
tempData.cellHeight = alternativeCellHeight
tempData.cellWidths = alternativeCellWidths
tempData.nCols = alternativesNCols
tempData.nRows = alternativesNRows
tempData.tableData = alternativeTableData
tempData.yTableOffSet = alternativeTableTotalYOffset
tempData.xTableOffSet = alternativeTableTotalXOffset
xmlAlternativeTable = __createSvgTable__(tempData)
# is an iterator
filelist = []
for f in fns:
filelist.extend(glob(f))
if base is None:
base = ''
# ensures that path from base ends with separator
if base:
base = os.path.join(base, "")
replaceparts = getData(base) # from base get parts
# ensures that extension starts with point "."
if isinstance(ext, basestring) and not ext.startswith("."):
ext = "." + ext # correct extension
status = []
for file in filelist:
parts = getData(file) # file parts
# replace drive
if replaceparts[0]:
parts[0] = replaceparts[0]
# replace root
if replaceparts[1]:
if folder is True:
parts[1] = os.path.join(replaceparts[1],
os.path.split(os.path.split(parts[1])[0])[1], "")
elif isinstance(folder, basestring):
parts[1] = os.path.join(replaceparts[1], folder, "")
else:
parts[1] = replaceparts[1]
# to replace basic name
if isinstance(name, basestring):
parts[2] = name.format(name=parts[2])
if isinstance(ext, basestring):
parts[3] = ext # replace extension
newfile = "".join(parts)
if not overwrite:
newfile = increment_if_exits(newfile)
try:
im = loader(file)
# image not loaded
if im is None:
status.append((file, 1, newfile))
continue
# image successfully saved
if simulate:
status.append((file, 0, newfile))
continue
else:
mkPath("".join(parts[:2]))
if cv2.imwrite(newfile, im):
status.append((file, 0, newfile))
continue
# image not saved
status.append((file, 2, newfile))
except:
# an error in the process
status.append((file, 3, newfile))
return status
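# Status codes collected above, one tuple (old_path, code, new_path) per input file:
#   0 = converted/saved (or would be, when simulate=True), 1 = image could not be loaded,
#   2 = cv2.imwrite failed, 3 = an exception occurred during processing.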
def checkLoaded(obj, fn="", raiseError=False):
"""
Simple function to determine if variable is valid.
:param obj: loaded object
:param fn: path of file
:param raiseError: if True and obj is None, raise an error
:return: None
"""
if obj is not None:
print(fn, " Loaded...")
else:
print(fn, " Could not be loaded...")
if raiseError:
raise
def loadcv(path, flags=-1, shape=None):
"""
Simple function to load using opencv.
:param path: path to image.
:param flags: openCV flags:
+-------+------------------------------+--------+
| value | openCV flag | output |
+=======+==============================+========+
| (1) | cv2.CV_LOAD_IMAGE_COLOR | BGR |
+-------+------------------------------+--------+
| (0) | cv2.CV_LOAD_IMAGE_GRAYSCALE | GRAY |
+-------+------------------------------+--------+
| (-1) | cv2.CV_LOAD_IMAGE_UNCHANGED | format |
+-------+------------------------------+--------+
:param shape: shape to resize image.
:return: loaded image
"""
im = cv2.imread(path, flags)
if shape:
im = cv2.resize(im, shape)
return im
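# Illustrative use of loadcv (the file name is hypothetical):
#   gray = loadcv("samples/photo.png", flags=0)                     # grayscale
#   small = loadcv("samples/photo.png", flags=1, shape=(320, 240))  # BGR, resized to 320x240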
def loadsfrom(path, flags=cv2.IMREAD_COLOR):
"""
Loads Image from URL or file.
:param path: filepath or url
:param flags: openCV flags:
+-------+------------------------------+--------+
| value | openCV flag | output |
+=======+==============================+========+
| (1) | cv2.CV_LOAD_IMAGE_COLOR | BGR |
+-------+------------------------------+--------+
| (0) | cv2.CV_LOAD_IMAGE_GRAYSCALE | GRAY |
+-------+------------------------------+--------+
| (-1) | cv2.CV_LOAD_IMAGE_UNCHANGED | format |
+-------+------------------------------+--------+
:return: decoded image
"""
if isinstance(path, basestring):
if path.endswith(".npy"): # reads numpy arrays
return np.lib.load(path, None)
resp = getFileHandle(path) # download the image
else:
resp = path # assume path is a file-like object ie. cStringIO or file
# nparr = np.asarray(bytearray(resp.read()), dtype=dtype) # convert it to
# a NumPy array
nparr = np.fromstring(resp.read(), dtype=np.uint8)
image = cv2.imdecode(nparr, flags=flags) # decode using OpenCV format
return image
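# Illustrative use of loadsfrom (URL and file names are hypothetical):
#   im = loadsfrom("http://example.com/pic.jpg")   # fetched with getFileHandle, decoded as BGR
#   arr = loadsfrom("cached_frame.npy")            # .npy paths bypass cv2 and load the array directly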
def interpretImage(toparse, flags):
"""
Interprets to get image.
:param toparse: string to parse or array. It can interpret:
* connection to server (i.e. host:port)
* path to file (e.g. /path_to_image/image_name.ext)
* URL to image (e.g. http://domain.com/path_to_image/image_name.ext)
* image as string (e.g. a numpy array converted to string)
* image itself (i.e. numpy array)
:param flags: openCV flags:
+-------+------------------------------+--------+
| value | openCV flag | output |
+=======+==============================+========+
| (1) | cv2.CV_LOAD_IMAGE_COLOR | BGR |
+-------+------------------------------+--------+
| (0) | cv2.CV_LOAD_IMAGE_GRAYSCALE | GRAY |
+-------+------------------------------+--------+
| (-1) | cv2.CV_LOAD_IMAGE_UNCHANGED | format |
+-------+------------------------------+--------+
:return: image or None if not successful
"""
# test it is from server
if string_is_socket_address(toparse): # process request to server
toparse = parseString(toparse, 5)
# test is object itself
if type(toparse).__module__ == np.__name__: # test numpy array
if flags == 1:
return im2shapeFormat(toparse, (0, 0, 3))
if flags == 0:
return im2shapeFormat(toparse, (0, 0))
return toparse
# test image in string
try:
return cv2.imdecode(toparse, flags)
except TypeError:
# test path to file or URL
return loadsfrom(toparse, flags)
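# Dispatch order implemented above: a "host:port" string is resolved through the server
# (parseString), a numpy array is reshaped to match the requested flags via im2shapeFormat,
# a decodable buffer goes through cv2.imdecode, and anything else (path or URL) falls back
# to loadsfrom. E.g. interpretImage("localhost:5000", 1) would query a running server,
# while interpretImage(existing_array, 0) simply converts it to the gray shape
# (the address here is only illustrative).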
class ImFactory(object):
"""
Image factory for RRToolbox: creates scripts to standardize image loading and
provides lazy loading (it can load images from disk with customized options
and/or create memory-mapped copies to load on demand) to conserve memory.
.. warning:: In development.
"""
_interpolations = {"nearest": 0, "bilinear": 1,
"bicubic": 2, "area": 3, "lanczos": 4}
_convertions = {}
def __init__(self, **kwargs):
"""
:param kwargs:
:return:
An image can be represented as a matrix of width "W" and height "H" whose elements,
called pixels, each encode the color at one point of a 2D plane. In openCV and many
other image-manipulation libraries, numpy arrays are the standard base representation
(numpy is a fast, powerful array library and one of the main modules for scientific
development in Python). A numpy matrix with n rows and m columns has shape (n,m),
which for an image is (H,W) and in Cartesian coordinates would be (y,x).
If an image is W,H = 100,100 then
dsize = (W,H) = (300,100) is equivalent to fsize = (fx,fy) = (3,1);
after the image is loaded into a numpy array it has shape
(n,m) = (rows,cols) = (H,W) = im.shape
"""
self.path = None # path to use to load image
self.mmap_mode = None # mapping file modes
self.mmap_path = None # path to create numpy file; None, do not create mapping file
self.w = None
self.h = None
self.fx = None
self.fy = None
self.convert = None
self.interpolation = None
self.throw = True
self.update(**kwargs)
# TODO not finished
def update(self, **kwargs):
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
else:
raise Exception("Not attribute '{}'".format(key))
def get_Func(self):
"""
gets the loading function
"""
pass
def get_code(self):
"""
get the script code
"""
pass
def get_errorFunc(self, path=None, throw=None):
def errorFunc(im):
if throw and im is None:
if checkFile(path):
if getData(path)[-1] in supported_formats:
raise Exception(
"Not enough permissions to load '{}'".format(path))
else:
raise Exception(
"Failed to load '{}'. Format not supported".format(path))
else:
raise Exception("Missing file '{}'".format(path))
return {None: errorFunc}
def get_loadFunc(self, flag=None):
def loadFunc(path):
return cv2.imread(path, flag)
return {"im": loadFunc}
def get_resizeFunc(self, dsize=None, dst=None, fx=None, fy=None, interpolation=None):
# see
# http://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html#resize
fx, fy, interpolation = fx or 0, fy or 0, interpolation or 0
def resizeFunc(im):
return cv2.resize(im, dsize, dst, fx, fy, interpolation)
return {"im": resizeFunc}
def get_mapFunc(self, flag=None, RGB=None, mpath=None, mode=None,
func=None, dsize=None, dst=None, fx=None, fy=None,
interpolation=None):
def mapFunc(path):
if mpath == "*": # save mmap in working directory
drive, dirname, (filename, ext) = "", "", getData(path)[-2:]
elif mpath: # save mmap in mpath
drive, dirname, filename, ext = getData(changedir(path, mpath))
else: # save mmap in image path
drive, dirname, filename, ext = getData(path)
# THIS CREATES ONE HASHED FILE
hashed = hash("{}{}{}{}{}{}".format(
flag, RGB, dsize, fx, fy, interpolation))
savepath = "{}{}{}{}.{}.npy".format(
drive, dirname, filename, ext, hashed)
try: # load from map
# mapper(savepath,None,mode,True)[0]#
return np.lib.load(savepath, mode)
except IOError: # create object and map
im = func(path)
if im is None: # this is regardless of throw flag
raise Exception("Failed to load image to map")
np.save(savepath, im)
# mapper(savepath,im,mode,True)[0]#
return np.lib.load(savepath, mode)
return {"im": mapFunc}
def get_transposeFunc(self):
def transposeFunc(im):
if len(im.shape) == 2:
return im.transpose(1, 0)
else:
# np.ascontiguousarray?
# http://stackoverflow.com/a/27601130/5288758
return im.transpose(1, 0, 2)
return {"im": transposeFunc}
def get_convertionFunc(self, code):
def convertionFunc(im):
return cv2.cvtColor(im, code)
return {"im": convertionFunc}
def get_np2qi(self):
return {"im": np2qi}
def loadFunc(flag=0, dsize=None, dst=None, fx=None, fy=None, interpolation=None,
mmode=None, mpath=None, throw=True, keepratio=True):
"""
Creates a function that loads image array from path, url,
server, string or directly from numpy array (supports databases).
:param flag: (default: 0) 0 to read as gray, 1 to read as BGR, -1 to
read as BGRA, 2 to read as RGB, -2 to read as RGBA.
It supports openCV flags:
* cv2.CV_LOAD_IMAGE_COLOR
* cv2.CV_LOAD_IMAGE_GRAYSCALE
* cv2.CV_LOAD_IMAGE_UNCHANGED
+-------+-------------------------------+--------+
| value | openCV flag | output |
+=======+===============================+========+
| (2) | N/A | RGB |
+-------+-------------------------------+--------+
| (1) | cv2.CV_LOAD_IMAGE_COLOR | BGR |
+-------+-------------------------------+--------+
| (0) | cv2.CV_LOAD_IMAGE_GRAYSCALE | GRAY |
+-------+-------------------------------+--------+
| (-1)  | cv2.CV_LOAD_IMAGE_UNCHANGED   | format |
+-------+-------------------------------+--------+
15, v + 7, v + 5, v + 13)])
mymesh.from_pydata(myvertex, [], myfaces)
mymesh.update(calc_edges=True)
if mat and bpy.context.scene.render.engine == 'CYCLES':
set_material(mywindow, matdata)
# --------------
# Blind Box
# --------------
if blind:
mybox = create_blind_box("Blind_box", sx, sy + blind_back + blind_rail, blind_height)
set_normals(mybox)
mybox.parent = mywindow
mybox.location.x = 0
mybox.location.y = -blind_back - sy
mybox.location.z = sz
if mat and bpy.context.scene.render.engine == 'CYCLES':
set_material(mybox, matdata)
# Lock
mybox.lock_location = (True, True, True)
mybox.lock_rotation = (True, True, True)
return win_size, p1, p2
# ------------------------------------------------------------------------------
# Create leaf window frame
#
# sX: Size in X axis
# sY: Size in Y axis
# sZ: Size in Z axis
# frame: size of external frame
# frame_L: size of main frame
# leafratio: ratio of leaf depth
# mat: Flag for creating materials
# matdata: Aluminum material
# external: create external frame flag
# blind: blind flag
# blind_height: height of blind box
# blind_back: front extension
# blind_rail: distance of the rail
# ------------------------------------------------------------------------------
def create_leaf_window_frame(mywindow, mymesh, sx, sy, sz, frame, frame_l, leafratio, mat, matdata, external,
blind, blind_height, blind_back, blind_rail):
myvertex = []
myfaces = []
# ===========================================================================
# Main frame_L
# ===========================================================================
x = sx / 2
z = sz
y = sy * leafratio
gap = 0.01
size = sy - y - 0.001 # thickness of the leaf
myvertex.extend([(-x, 0, 0),
(-x, 0, z),
(x, 0, z),
(x, 0, 0),
(-x + frame_l, 0, frame_l),
(-x + frame_l, 0, z - frame_l),
(x - frame_l, 0, z - frame_l),
(x - frame_l, 0, frame_l),
(-x + frame_l, -y, frame_l),
(-x + frame_l, -y, z - frame_l),
(x - frame_l, -y, z - frame_l),
(x - frame_l, -y, frame_l),
(-x + frame_l - gap, -y, frame_l - gap),
(-x + frame_l - gap, -y, z - frame_l + gap),
(x - frame_l + gap, -y, z - frame_l + gap),
(x - frame_l + gap, -y, frame_l - gap),
(-x + frame_l - gap, -sy, frame_l - gap),
(-x + frame_l - gap, -sy, z - frame_l + gap),
(x - frame_l + gap, -sy, z - frame_l + gap),
(x - frame_l + gap, -sy, frame_l - gap),
(-x, -sy, 0),
(-x, -sy, z),
(x, -sy, z),
(x, -sy, 0)])
# Faces
myfaces.extend([(1, 5, 4, 0), (21, 1, 0, 20), (17, 21, 20, 16), (16, 12, 13, 17), (12, 8, 9, 13),
(5, 9, 8, 4), (3, 7, 6, 2), (23, 3, 2, 22), (19, 23, 22, 18), (15, 19, 18, 14),
(11, 15, 14, 10), (6, 7, 11, 10), (0, 3, 23, 20), (21, 22, 2, 1), (17, 13, 14, 18),
(21, 17, 18, 22), (13, 9, 10, 14), (8, 11, 7, 4), (8, 12, 15, 11), (4, 7, 3, 0),
(12, 16, 19, 15), (16, 20, 23, 19), (9, 5, 6, 10), (1, 2, 6, 5)])
v = len(myvertex)
# ===========================================================================
# External front covers
# ===========================================================================
if external:
thick = 0.002 # aluminum thickness
x = sx
gap = -0.001
sidem = frame
box = 0
if blind:
box = blind_height
myvertex.extend([((-x / 2) - sidem, -sy, sz + sidem + box),
((x / 2) + sidem, -sy, sz + sidem + box),
((-x / 2) - sidem, -sy, -sidem),
((x / 2) + sidem, -sy, -sidem),
((-x / 2) - gap, -sy, sz + gap + box),
((x / 2) + gap, -sy, sz + gap + box),
((-x / 2) - gap, -sy, -gap),
((x / 2) + gap, -sy, -gap)])
myvertex.extend([((-x / 2) - sidem, -sy - thick, sz + sidem + box),
((x / 2) + sidem, -sy - thick, sz + sidem + box),
((-x / 2) - sidem, -sy - thick, -sidem),
((x / 2) + sidem, -sy - thick, -sidem),
((-x / 2) - gap, -sy - thick, sz + gap + box),
((x / 2) + gap, -sy - thick, sz + gap + box),
((-x / 2) - gap, -sy - thick, -gap),
((x / 2) + gap, -sy - thick, -gap)])
myfaces.extend([(v + 3, v + 1, v + 9, v + 11), (v + 9, v + 8, v + 0, v + 1), (v + 1, v + 5, v + 4, v + 0),
(v + 3, v + 7, v + 5, v + 1), (v + 7, v + 3, v + 2, v + 6),
(v + 0, v + 4, v + 6, v + 2), (v + 9, v + 13, v + 12, v + 8), (v + 11, v + 15, v + 13, v + 9),
(v + 15, v + 11, v + 10, v + 14), (v + 8, v + 12, v + 14, v + 10),
(v + 11, v + 3, v + 2, v + 10), (v + 2, v + 10, v + 8, v + 0), (v + 14, v + 12, v + 4, v + 6),
(v + 7, v + 6, v + 14, v + 15), (v + 5, v + 13, v + 12, v + 4),
(v + 15, v + 7, v + 5, v + 13)])
mymesh.from_pydata(myvertex, [], myfaces)
mymesh.update(calc_edges=True)
if mat and bpy.context.scene.render.engine == 'CYCLES':
set_material(mywindow, matdata)
# --------------
# Blind Box
# --------------
if blind:
mybox = create_blind_box("Blind_box", sx, sy + blind_back + blind_rail, blind_height)
set_normals(mybox)
mybox.parent = mywindow
mybox.location.x = 0
mybox.location.y = -blind_back - sy
mybox.location.z = sz
if mat and bpy.context.scene.render.engine == 'CYCLES':
set_material(mybox, matdata)
# Lock
mybox.lock_location = (True, True, True)
mybox.lock_rotation = (True, True, True)
return size
# ------------------------------------------------------------------------------
# Create rail window leaf
#
# objName: Name for the new object
# hand: Left or Right
# sX: Size in X axis
# sY: Size in Y axis
# sZ: Size in Z axis
# f: size of the frame_L
# pX: position X axis
# pY: position Y axis
# pZ: position Z axis
# mat: Flag for creating materials
# matdata: default material
# handle: create handle flag
# ------------------------------------------------------------------------------
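# Illustrative call (all values below are hypothetical, chosen only to show the
# argument order documented above):
#   create_rail_window_leaf("Leaf_R", "R", 0.60, 0.06, 1.00, 0.05,
#                           0.0, 0.0, 0.0, True, matdata, True)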
def create_rail_window_leaf(objname, hand, sx, sy, sz, f, px, py, pz, mat, matdata, handle):
myvertex = []
myfaces = []
v = 0
# ===========================================================================
# Horizontal pieces
# ===========================================================================
rail = 0.010 # rail width
t = sy - 0.002
p = ((t - rail) / 2) - 0.002
side = 0.02 # vertical rail
x = sx
z = sz
fz = f
if hand == "R":
x *= -1
f *= -1
# ------------------------
# frame
# ------------------------
myvertex.extend([(0, 0, 0),
(0, 0, z),
(x, 0, z),
(x, 0, 0),
(f, 0, fz),
(f, 0, z - fz),
(x - f, 0, z - fz),
(x - f, 0, fz),
(f, -t / 2, fz),
(f, -t / 2, z - fz),
(x - f, -t / 2, z - fz),
(x - f, -t / 2, fz),
(f, -t, fz),
(f, -t, z - fz),
(x - f, -t, z - fz),
(x - f, -t, fz),
(0, -t, 0),
(0, -t, z),
(x, -t, z),
(x, -t, 0)])
# ------------------------
# Side rails
# ------------------------
for z in (0, sz):
myvertex.extend([(0, -p, z),
(x, -p, z),
(0, -p, z + side),
(x, -p, z + side),
(0, -p - rail, z + side),
(x, -p - rail, z + side),
(0, -p - rail, z),
(x, -p - rail, z)])
side *= -1 # reverse
# Faces
myfaces.extend([(v + 10, v + 6, v + 7, v + 11), (v + 9, v + 8, v + 4, v + 5),
(v + 13, v + 12, v + 8, v + 9), (v + 14, v + 10, v + 11, v + 15),
(v + 6, v + 10, v + 9, v + 5),
(v + 9, v + 10, v + 14, v + 13), (v + 11, v + 7, v + 4, v +
"""
@package mi.instrument.nortek.vector.ooicore.driver
@file mi/instrument/nortek/vector/ooicore/driver.py
@author <NAME>, <NAME>
@brief Driver for the ooicore
Release notes:
Driver for vector
"""
from datetime import datetime
import os
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import DriverAsyncEvent
__author__ = '<NAME>, <NAME>'
__license__ = 'Apache 2.0'
import re
import base64
import struct
from mi.core.exceptions import SampleException
from mi.core.common import BaseEnum, Units
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.instrument.data_particle import DataParticle, DataParticleKey, DataParticleValue
from mi.instrument.nortek.driver import NortekDataParticleType, Parameter, InstrumentCmds, \
USER_CONFIG_DATA_REGEX, validate_checksum, NORTEK_COMMON_REGEXES
from mi.instrument.nortek.driver import NortekInstrumentDriver
from mi.instrument.nortek.driver import NortekInstrumentProtocol
from mi.instrument.nortek.driver import NortekProtocolParameterDict
from mi.instrument.nortek.driver import InstrumentPrompts
from mi.instrument.nortek.driver import NEWLINE
from mi.core.log import get_logger
log = get_logger()
VELOCITY_DATA_LEN = 24
VELOCITY_DATA_SYNC_BYTES = '\xa5\x10'
SYSTEM_DATA_LEN = 28
SYSTEM_DATA_SYNC_BYTES = '\xa5\x11\x0e\x00'
VELOCITY_HEADER_DATA_LEN = 42
VELOCITY_HEADER_DATA_SYNC_BYTES = '\xa5\x12\x15\x00'
VELOCITY_DATA_PATTERN = r'%s.{22}' % VELOCITY_DATA_SYNC_BYTES
VELOCITY_DATA_REGEX = re.compile(VELOCITY_DATA_PATTERN, re.DOTALL)
SYSTEM_DATA_PATTERN = r'%s.{24}' % SYSTEM_DATA_SYNC_BYTES
SYSTEM_DATA_REGEX = re.compile(SYSTEM_DATA_PATTERN, re.DOTALL)
VELOCITY_HEADER_DATA_PATTERN = r'%s.{38}' % VELOCITY_HEADER_DATA_SYNC_BYTES
VELOCITY_HEADER_DATA_REGEX = re.compile(VELOCITY_HEADER_DATA_PATTERN, re.DOTALL)
VECTOR_SAMPLE_REGEX = [VELOCITY_DATA_REGEX, SYSTEM_DATA_REGEX, VELOCITY_HEADER_DATA_REGEX]
class DataParticleType(NortekDataParticleType):
"""
List of data particles to collect
"""
VELOCITY = 'vel3d_cd_velocity_data'
VELOCITY_HEADER = 'vel3d_cd_data_header'
SYSTEM = 'vel3d_cd_system_data'
class VectorVelocityDataParticleKey(BaseEnum):
"""
Velocity Data Particles
"""
ANALOG_INPUT2 = "analog_input_2"
COUNT = "ensemble_counter"
PRESSURE = "seawater_pressure_mbar"
ANALOG_INPUT1 = "analog_input_1"
VELOCITY_BEAM1 = "turbulent_velocity_east"
VELOCITY_BEAM2 = "turbulent_velocity_north"
VELOCITY_BEAM3 = "turbulent_velocity_vertical"
AMPLITUDE_BEAM1 = "amplitude_beam_1"
AMPLITUDE_BEAM2 = "amplitude_beam_2"
AMPLITUDE_BEAM3 = "amplitude_beam_3"
CORRELATION_BEAM1 = "correlation_beam_1"
CORRELATION_BEAM2 = "correlation_beam_2"
CORRELATION_BEAM3 = "correlation_beam_3"
class VectorVelocityDataParticle(DataParticle):
"""
Routine for parsing velocity data into a data particle structure for the Vector sensor.
"""
_data_particle_type = DataParticleType.VELOCITY
def _build_parsed_values(self):
"""
Take the velocity data sample format and parse it into
values with appropriate tags.
@throws SampleException If there is a problem with sample creation
"""
log.debug('VectorVelocityDataParticle: raw data =%r', self.raw_data)
try:
unpack_string = '<2s4B2H3h6BH'
sync_id, analog_input2_lsb, count, pressure_msb, analog_input2_msb, pressure_lsw, analog_input1,\
velocity_beam1, velocity_beam2, velocity_beam3, amplitude_beam1, amplitude_beam2, amplitude_beam3, \
correlation_beam1, correlation_beam2, correlation_beam3, checksum = struct.unpack(unpack_string, self.raw_data)
if not validate_checksum('<11H', self.raw_data):
log.warn("Failed checksum in %s from instrument (%r)", self._data_particle_type, self.raw_data)
self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED
analog_input2 = analog_input2_msb * 0x100 + analog_input2_lsb
pressure = pressure_msb * 0x10000 + pressure_lsw
except Exception as e:
log.error('Error creating particle vel3d_cd_velocity_data, raw data: %r', self.raw_data)
raise SampleException(e)
result = [{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.ANALOG_INPUT2, DataParticleKey.VALUE: analog_input2},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.COUNT, DataParticleKey.VALUE: count},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.PRESSURE, DataParticleKey.VALUE: pressure},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.ANALOG_INPUT1, DataParticleKey.VALUE: analog_input1},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.VELOCITY_BEAM1, DataParticleKey.VALUE: velocity_beam1},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.VELOCITY_BEAM2, DataParticleKey.VALUE: velocity_beam2},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.VELOCITY_BEAM3, DataParticleKey.VALUE: velocity_beam3},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.AMPLITUDE_BEAM1, DataParticleKey.VALUE: amplitude_beam1},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.AMPLITUDE_BEAM2, DataParticleKey.VALUE: amplitude_beam2},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.AMPLITUDE_BEAM3, DataParticleKey.VALUE: amplitude_beam3},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.CORRELATION_BEAM1, DataParticleKey.VALUE: correlation_beam1},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.CORRELATION_BEAM2, DataParticleKey.VALUE: correlation_beam2},
{DataParticleKey.VALUE_ID: VectorVelocityDataParticleKey.CORRELATION_BEAM3, DataParticleKey.VALUE: correlation_beam3}]
log.debug('VectorVelocityDataParticle: particle=%s', result)
return result
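# Byte layout of the 24-byte velocity record unpacked above ('<2s4B2H3h6BH', little-endian):
#   0-1   sync (0xa5 0x10)        2     analog_input_2 LSB     3     ensemble counter
#   4     pressure MSB            5     analog_input_2 MSB     6-7   pressure LSW
#   8-9   analog_input_1          10-15 velocity beams 1-3 (signed 16-bit)
#   16-18 amplitude beams 1-3     19-21 correlation beams 1-3  22-23 checksum
# The checksum word is checked via validate_checksum('<11H', raw_data).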
class VectorVelocityHeaderDataParticleKey(BaseEnum):
"""
Velocity Header data particles
"""
TIMESTAMP = "date_time_string"
NUMBER_OF_RECORDS = "number_velocity_records"
NOISE1 = "noise_amp_beam1"
NOISE2 = "noise_amp_beam2"
NOISE3 = "noise_amp_beam3"
CORRELATION1 = "noise_correlation_beam1"
CORRELATION2 = "noise_correlation_beam2"
CORRELATION3 = "noise_correlation_beam3"
class VectorVelocityHeaderDataParticle(DataParticle):
"""
Routine for parsing velocity header data into a data particle structure for the Vector sensor.
"""
_data_particle_type = DataParticleType.VELOCITY_HEADER
def _build_parsed_values(self):
"""
Take the velocity header data sample format and parse it into
values with appropriate tags.
@throws SampleException If there is a problem with sample creation
"""
log.debug('VectorVelocityHeaderDataParticle: raw data =%r', self.raw_data)
try:
unpack_string = '<4s6sH8B20sH'
sync, timestamp, number_of_records, noise1, noise2, noise3, _, correlation1, correlation2, correlation3, _,\
_, cksum = struct.unpack(unpack_string, self.raw_data)
if not validate_checksum('<20H', self.raw_data):
log.warn("Failed checksum in %s from instrument (%r)", self._data_particle_type, self.raw_data)
self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED
timestamp = NortekProtocolParameterDict.convert_time(timestamp)
self.set_internal_timestamp((timestamp-datetime(1900, 1, 1)).total_seconds())
except Exception as e:
log.error('Error creating particle vel3d_cd_data_header, raw data: %r', self.raw_data)
raise SampleException(e)
result = [{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.TIMESTAMP, DataParticleKey.VALUE: str(timestamp)},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.NUMBER_OF_RECORDS, DataParticleKey.VALUE: number_of_records},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.NOISE1, DataParticleKey.VALUE: noise1},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.NOISE2, DataParticleKey.VALUE: noise2},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.NOISE3, DataParticleKey.VALUE: noise3},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.CORRELATION1, DataParticleKey.VALUE: correlation1},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.CORRELATION2, DataParticleKey.VALUE: correlation2},
{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.CORRELATION3, DataParticleKey.VALUE: correlation3}]
log.debug('VectorVelocityHeaderDataParticle: particle=%s', result)
return result
class VectorSystemDataParticleKey(BaseEnum):
"""
System data particles
"""
TIMESTAMP = "date_time_string"
BATTERY = "battery_voltage_dv"
SOUND_SPEED = "sound_speed_dms"
HEADING = "heading_decidegree"
PITCH = "pitch_decidegree"
ROLL = "roll_decidegree"
TEMPERATURE = "temperature_centidegree"
ERROR = "error_code"
STATUS = "status_code"
ANALOG_INPUT = "analog_input"
class VectorSystemDataParticle(DataParticle):
"""
Routine for parsing system data into a data particle structure for the Vector sensor.
"""
_data_particle_type = DataParticleType.SYSTEM
def _build_parsed_values(self):
"""
Take the system data sample format and parse it into
values with appropriate tags.
@throws SampleException If there is a problem with sample creation
"""
log.debug('VectorSystemDataParticle: raw data =%r', self.raw_data)
try:
unpack_string = '<4s6s2H4h2bHH'
sync, timestamp, battery, sound_speed, heading, pitch, roll, temperature, error, status, analog_input, cksum =\
struct.unpack_from(unpack_string, self.raw_data)
if not validate_checksum('<13H', self.raw_data):
log.warn("Failed checksum in %s from instrument (%r)", self._data_particle_type, self.raw_data)
self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED
timestamp = NortekProtocolParameterDict.convert_time(timestamp)
self.set_internal_timestamp((timestamp-datetime(1900, 1, 1)).total_seconds())
except Exception as e:
log.error('Error creating particle vel3d_cd_system_data, raw data: %r', self.raw_data)
raise SampleException(e)
result = [{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.TIMESTAMP, DataParticleKey.VALUE: str(timestamp)},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.BATTERY, DataParticleKey.VALUE: battery},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.SOUND_SPEED, DataParticleKey.VALUE: sound_speed},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.HEADING, DataParticleKey.VALUE: heading},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.PITCH, DataParticleKey.VALUE: pitch},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.ROLL, DataParticleKey.VALUE: roll},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.TEMPERATURE, DataParticleKey.VALUE: temperature},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.ERROR, DataParticleKey.VALUE: error},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.STATUS, DataParticleKey.VALUE: status},
{DataParticleKey.VALUE_ID: VectorSystemDataParticleKey.ANALOG_INPUT, DataParticleKey.VALUE: analog_input}]
log.debug('VectorSystemDataParticle: particle=%r', result)
return result
###############################################################################
# Driver
###############################################################################
class InstrumentDriver(NortekInstrumentDriver):
"""
InstrumentDriver subclass
Subclasses SingleConnectionInstrumentDriver with connection state
machine.
"""
########################################################################
# Protocol builder.
########################################################################
def _build_protocol(self):
"""
Construct the driver protocol state machine.
"""
self._protocol = Protocol(InstrumentPrompts, NEWLINE, self._driver_event)
###############################################################################
# Protocol
################################################################################
class Protocol(NortekInstrumentProtocol):
"""
Instrument protocol class
Subclasses NortekInstrumentProtocol
"""
NortekInstrumentProtocol.velocity_data_regex.extend(VECTOR_SAMPLE_REGEX)
NortekInstrumentProtocol.velocity_sync_bytes = VELOCITY_DATA_SYNC_BYTES
order_of_user_config = [
Parameter.TRANSMIT_PULSE_LENGTH,
Parameter.BLANKING_DISTANCE,
Parameter.RECEIVE_LENGTH,
Parameter.TIME_BETWEEN_PINGS,
Parameter.TIME_BETWEEN_BURST_SEQUENCES,
Parameter.NUMBER_PINGS,
Parameter.AVG_INTERVAL,
Parameter.USER_NUMBER_BEAMS,
Parameter.TIMING_CONTROL_REGISTER,
Parameter.POWER_CONTROL_REGISTER,
Parameter.A1_1_SPARE,
Parameter.B0_1_SPARE,
Parameter.B1_1_SPARE,
Parameter.COMPASS_UPDATE_RATE,
Parameter.COORDINATE_SYSTEM,
Parameter.NUMBER_BINS,
Parameter.BIN_LENGTH,
Parameter.MEASUREMENT_INTERVAL,
Parameter.DEPLOYMENT_NAME,
Parameter.WRAP_MODE,
Parameter.CLOCK_DEPLOY,
Parameter.DIAGNOSTIC_INTERVAL,
Parameter.MODE,
Parameter.ADJUSTMENT_SOUND_SPEED,
Parameter.NUMBER_SAMPLES_DIAGNOSTIC,
Parameter.NUMBER_BEAMS_CELL_DIAGNOSTIC,
Parameter.NUMBER_PINGS_DIAGNOSTIC,
Parameter.MODE_TEST,
Parameter.ANALOG_INPUT_ADDR,
Parameter.SW_VERSION,
Parameter.USER_1_SPARE,
Parameter.VELOCITY_ADJ_TABLE,
Parameter.COMMENTS,
Parameter.WAVE_MEASUREMENT_MODE,
Parameter.DYN_PERCENTAGE_POSITION,
Parameter.WAVE_TRANSMIT_PULSE,
Parameter.WAVE_BLANKING_DISTANCE,
Parameter.WAVE_CELL_SIZE,
Parameter.NUMBER_DIAG_SAMPLES,
Parameter.A1_2_SPARE,
Parameter.B0_2_SPARE,
Parameter.NUMBER_SAMPLES_PER_BURST,
Parameter.SAMPLE_RATE,
Parameter.ANALOG_OUTPUT_SCALE,
Parameter.CORRELATION_THRESHOLD,
Parameter.USER_3_SPARE,
Parameter.TRANSMIT_PULSE_LENGTH_SECOND_LAG,
Parameter.USER_4_SPARE,
Parameter.QUAL_CONSTANTS]
spare_param_values = {Parameter.A1_1_SPARE: '',
Parameter.B0_1_SPARE: '',
Parameter.B1_1_SPARE: '',
Parameter.USER_1_SPARE: '',
Parameter.A1_2_SPARE: '',
Parameter.B0_2_SPARE: '',
Parameter.USER_3_SPARE: '',
Parameter.USER_4_SPARE: ''}
########################################################################
# overridden superclass methods
########################################################################
def __init__(self, prompts, newline, driver_event):
"""
Protocol constructor.
@param prompts A BaseEnum class containing instrument prompts.
@param newline The newline.
@param driver_event Driver process event callback.
"""
NortekInstrumentProtocol.__init__(self, prompts, newline, driver_event)
# create chunker for processing instrument samples.
self._chunker = StringChunker(self.sieve_function)
@classmethod
def sieve_function(cls, raw_data):
"""
The method that detects data sample structures from instrument
Should be in the format [[structure_sync_bytes, structure_len]*]
"""
return_list = []
sieve_matchers = NORTEK_COMMON_REGEXES + cls.velocity_data_regex
for matcher in sieve_matchers:
for match in matcher.finditer(raw_data):
if matcher == VELOCITY_DATA_REGEX:
# two bytes is not enough for an accurate match
# check for a valid checksum
data = raw_data[match.start():match.end()]
if validate_checksum('<11H', data):
return_list.append((match.start(), match.end()))
else:
return_list.append((match.start(), match.end()))
log.debug("sieve_function: regex found %r", raw_data[match.start():match.end()])
return return_list
def _got_chunk(self, structure, timestamp):
"""
The base class got_data has gotten a structure from the chunker. Pass it to extract_sample
with the appropriate particle objects and REGEXes.
"""
if any((
self._extract_sample(VectorVelocityDataParticle, VELOCITY_DATA_REGEX, structure, timestamp),
self._extract_sample(VectorSystemDataParticle, SYSTEM_DATA_REGEX, structure, timestamp),
self._extract_sample(VectorVelocityHeaderDataParticle, VELOCITY_HEADER_DATA_REGEX, structure, timestamp),
)):
return
self._got_chunk_base(structure, timestamp)
def _update_params(self):
"""
Update the parameter dictionary. Issue the read config command. The response
needs to be saved to param dictionary.
"""
ret_config = self._do_cmd_resp(InstrumentCmds.READ_USER_CONFIGURATION, response_regex=USER_CONFIG_DATA_REGEX)
self._param_dict.update(ret_config)
self.spare_param_values[Parameter.A1_1_SPARE] = ret_config[24:26]
self.spare_param_values[Parameter.B0_1_SPARE] = ret_config[26:28]
self.spare_param_values[Parameter.B1_1_SPARE] = ret_config[28:30]
self.spare_param_values[Parameter.USER_1_SPARE] = ret_config[74:76]
self.spare_param_values[Parameter.A1_2_SPARE] = ret_config[448:450]
self.spare_param_values[Parameter.B0_2_SPARE] = ret_config[450:452]
self.spare_param_values[Parameter.USER_3_SPARE] = ret_config[460:462]
self.spare_param_values[Parameter.USER_4_SPARE] = ret_config[464:494]
def _create_set_output(self, parameters):
"""
load buffer with sync byte (A5), ID byte (01), and size word (# of words in little-endian form)
'user' configuration is 512 bytes = 256 words long = size 0x100
"""
output = ['\xa5\x00\x00\x01']
CHECK_SUM_SEED = 0xb58c
for param in self.order_of_user_config:
log.trace('_create_set_output: adding %s to list', param)
if param == Parameter.COMMENTS:
output.append(parameters.format(param).ljust(180, "\x00"))
elif param == Parameter.DEPLOYMENT_NAME:
output.append(parameters.format(param).ljust(6, "\x00"))
elif param == Parameter.QUAL_CONSTANTS:
output.append('\x00'.ljust(16, "\x00"))
elif param == Parameter.VELOCITY_ADJ_TABLE:
output.append(base64.b64decode(parameters.format(param)))
elif param in [Parameter.A1_1_SPARE, Parameter.B0_1_SPARE, Parameter.B1_1_SPARE, Parameter.USER_1_SPARE,
Parameter.A1_2_SPARE, Parameter.B0_2_SPARE, Parameter.USER_2_SPARE, Parameter.USER_3_SPARE]:
output.append(self.spare_param_values.get(param).ljust(2, "\x00"))
elif param in [Parameter.WAVE_MEASUREMENT_MODE, Parameter.WAVE_TRANSMIT_PULSE, Parameter.WAVE_BLANKING_DISTANCE,
Parameter.WAVE_CELL_SIZE, Parameter.NUMBER_DIAG_SAMPLES, Parameter.DYN_PERCENTAGE_POSITION]:
output.append('\x00'.ljust(2, "\x00"))
elif param == Parameter.USER_4_SPARE:
output.append(self.spare_param_values.get(param).ljust(30, "\x00"))
else:
output.append(parameters.format(param))
log.trace('_create_set_output: ADDED %s output size = %s', param, len(output))
log.debug("Created set output: %r with length: %s", output, len(output))
checksum = CHECK_SUM_SEED
output = "".join(output)
for word_index in range(0, len(output), 2):
word_value = NortekProtocolParameterDict.convert_word_to_int(output[word_index:word_index+2])
checksum = (checksum + word_value) % 0x10000
log.debug('_create_set_output: user checksum = %r', checksum)
output += (NortekProtocolParameterDict.word_to_string(checksum))
return output
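# Worked example of the checksum computed above (word values are made up): if the
# serialized output decoded to the 16-bit words 1 and 2, the appended checksum word
# would be (0xb58c + 1 + 2) % 0x10000 = 0xb58f, written out with word_to_string().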
########################################################################
# Private helpers.
########################################################################
def _build_param_dict(self):
"""
Overwrite base classes method.
Creates base class's param dictionary, then sets parameter values for those specific to this instrument.
"""
NortekInstrumentProtocol._build_param_dict(self)
self._param_dict.add(Parameter.TRANSMIT_PULSE_LENGTH,
r'^.{%s}(.{2}).*' % str(4),
lambda match: NortekProtocolParameterDict.convert_word_to_int(match.group(1)),
NortekProtocolParameterDict.word_to_string,
regex_flags=re.DOTALL,
type=ParameterDictType.INT,
visibility=ParameterDictVisibility.READ_WRITE,
display_name="Transmit Pulse Length",
description="Pulse duration of the transmitted signal.",
default_value=2,
units=Units.COUNTS,
startup_param=True,
direct_access=True)
self._param_dict.add(Parameter.BLANKING_DISTANCE,
r'^.{%s}(.{2}).*' % str(6),
| |
#eps = np.zeros(self.dim)+1e-8
#for i in range(self.dim):
# eps[i] += np.amax(np.abs(sol.y.T[:,i]))*(1e-5)
J = np.zeros((self.dim+1,self.dim+1))
t = np.linspace(0,init[-1],self.TN)
for p in range(self.dim):
pertp = np.zeros(self.dim)
pertm = np.zeros(self.dim)
pertp[p] = eps[p]
pertm[p] = -eps[p]
initp = init[:-1] + pertp
initm = init[:-1] + pertm
# get error in position estimate
solp = solve_ivp(self.rhs,[0,t[-1]],initp,
method=self.method,dense_output=True,
t_eval=t,
rtol=self.rtol,atol=self.atol)
solm = solve_ivp(self.rhs,[0,t[-1]],initm,
method=self.method,dense_output=True,
t_eval=t,
rtol=self.rtol,atol=self.atol)
yp = solp.y.T
ym = solm.y.T
J[:-1,p] = (yp[-1,:]-ym[-1,:])/(2*eps[p])
J[:-1,:-1] = J[:-1,:-1] - np.eye(self.dim)
tp = np.linspace(0,init[-1]+epstime,self.TN)
tm = np.linspace(0,init[-1]-epstime,self.TN)
# get error in time estimate
# use the unperturbed initial condition so only the period is perturbed
solp = solve_ivp(self.rhs,[0,tp[-1]],init[:-1],
method=self.method,
rtol=self.rtol,atol=self.atol)
solm = solve_ivp(self.rhs,[0,tm[-1]],init[:-1],
method=self.method,
rtol=self.rtol,atol=self.atol)
yp = solp.y.T
ym = solm.y.T
J[:-1,-1] = (yp[-1,:]-ym[-1,:])/(2*epstime)
J[-1,:] = np.append(self.rhs(0,init[:-1]),0)
#print(J)
sol = solve_ivp(self.rhs,[0,init[-1]],init[:-1],
method=self.method,
rtol=self.rtol,atol=self.atol)
y_final = sol.y.T[-1,:]
#print(np.dot(np.linalg.inv(J),J))
b = np.append(init[:-1]-y_final,0)
dy = np.dot(np.linalg.inv(J),b)
init += dy
print('LC rel. err =',np.linalg.norm(dy))
if False:
fig, axs = plt.subplots(nrows=self.dim,ncols=1)
for i,ax in enumerate(axs):
key = self.var_names[i]
ax.plot(sol.t,sol.y.T[:,i],label=key)
ax.legend()
axs[0].set_title('LC counter'+str(counter))
plt.tight_layout()
plt.show(block=True)
time.sleep(.1)
counter += 1
# find index of peak voltage and initialize.
peak_idx = np.argmax(sol.y.T[:,0])
#init = np.zeros(5)
#init[-1] = sol.t[-1]
#init[:-1] = np.array([-0.048536698617817,
# 0.256223512263409,
# 0.229445856262051,
# 0.438912900900591])
# run finalized limit cycle solution
sol = solve_ivp(self.rhs,[0,init[-1]],sol.y.T[peak_idx,:],
method=self.method,
t_eval=np.linspace(0,init[-1],self.TN),
rtol=self.rtol,atol=self.atol)
#print('warning: lc init set by hand')
#sol = solve_ivp(self.rhs,[0,init[-1]],init[:-1],
# method='LSODA',
# t_eval=np.linspace(0,init[-1],self.TN),
# rtol=self.rtol,atol=self.atol)
return sol.y.T,sol.t
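# Summary of the Newton/shooting step used above: with x0 = init[:-1] and T = init[-1],
# each iteration solves J*dy = b, where
#   J = [[ dPhi_T/dx0 - I ,  dPhi_T/dT ],
#        [ f(x0)          ,  0         ]]
# is assembled by finite differences, b = [x0 - Phi_T(x0), 0], and init += dy updates
# both the point on the orbit and the period; the last row pins the phase along f(x0).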
def load_monodromy(self):
"""
If monodromy data exists, load it; if it does not exist or a recompute is required, compute it here.
"""
if self.recompute_monodromy or \
not(os.path.isfile(self.monodromy_fname)):
initm = copy.deepcopy(self.eye)
r,c = np.shape(initm)
init = np.reshape(initm,r*c)
sol = solve_ivp(lib.monodromy3,[0,self.tLC[-1]],init,
args=(self,),t_eval=self.tLC,
method=self.method,
rtol=1e-13,atol=1e-13)
self.sol = sol.y.T
self.M = np.reshape(self.sol[-1,:],(r,c))
np.savetxt(self.monodromy_fname,self.M)
else:
self.M = np.loadtxt(self.monodromy_fname)
if False:
fig, axs = plt.subplots(nrows=self.dim,ncols=1,figsize=(10,10))
sol = solve_ivp(self.mono1,[0,self.tLC[-1]],[0,0,0,1],
args=(self.jacLC,),t_eval=self.tLC,
method=self.method,dense_output=True,
rtol=self.rtol,atol=self.atol)
for i,ax in enumerate(axs):
ax.plot(self.tLC,sol.y.T[:,i])
plt.tight_layout()
plt.show(block=True)
self.eigenvalues, self.eigenvectors = np.linalg.eig(self.M)
# get the second-largest eigenvalue (the largest is ~1) and its eigenvector
self.min_lam_idx = np.argsort(self.eigenvalues)[-2]
#print(self.min_lam_idx)
#print(self.eigenvalues[self.min_lam_idx])
self.lam = self.eigenvalues[self.min_lam_idx] # floquet mult.
self.kappa = np.log(self.lam)/self.T # floquet exponent
if np.sum(self.eigenvectors[:,self.min_lam_idx]) < 0:
self.eigenvectors[:,self.min_lam_idx] *= -1
print('eigenvalues',self.eigenvalues)
print('eigenvectors',self.eigenvectors)
#print(self.eigenvectors)
# print floquet multipliers
einv = np.linalg.inv(self.eigenvectors/2)
print('eig inverse',einv)
idx = np.argsort(np.abs(self.eigenvalues-1))[0]
#min_lam_idx2 = np.argsort(einv)[-2]
self.g1_init = self.eigenvectors[:,self.min_lam_idx]/2.
self.z0_init = einv[idx,:]
self.i0_init = einv[self.min_lam_idx,:]
print('min idx for prc',idx,)
#print('Monodromy',self.M)
#print('eigenvectors',self.eigenvectors)
print('g1_init',self.g1_init)
print('z0_init',self.z0_init)
print('i0_init',self.i0_init)
#print('Floquet Multiplier',self.lam)
print('* Floquet Exponent kappa =',self.kappa)
if False:
fig, axs = plt.subplots(nrows=self.dim,
ncols=self.dim,figsize=(10,10))
for i in range(self.dim):
for j in range(self.dim):
axs[i,j].plot(self.tLC,self.sol[:,j+i*self.dim])
axs[0,0].set_title('monodromy')
plt.tight_layout()
plt.show(block=True)
time.sleep(.1)
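# Notes on the quantities above: the eigenvalues of the monodromy matrix M are the
# Floquet multipliers of the limit cycle. The multiplier closest to 1 supplies the
# left eigenvector used for z0 (the phase response), while the selected multiplier
# lam gives g1 (right eigenvector) and i0 (left eigenvector), with Floquet exponent
# kappa = log(lam)/T.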
def load_g_sym(self):
# load het. functions h if they exist. otherwise generate.
#self.rule_g0 = {sym.Indexed('gx',0):s(0),sym.Indexed('gy',0):s(0)}
# create dict of gv0=0,gh0=0,etc for substitution later.
self.rule_g0 = {sym.Indexed('g'+name,0):
s(0) for name in self.var_names}
for key in self.var_names:
self.g['sym_'+key] = []
#self.g_sym = {k: [] for k in self.var_names}
# check that files exist
val = 0
for key in self.var_names:
val += not(lib.files_exist(self.g['sym_fnames_'+key]))
if val != 0:
files_do_not_exist = True
else:
files_do_not_exist = False
if self.recompute_g_sym or files_do_not_exist:
#print(self.recompute_g_sym,files_do_not_exist)
print('* Computing... g sym')
# create symbolic derivative
sym_collected = slib.generate_g_sym(self)
for i in range(self.miter):
for key in self.var_names:
expr = sym_collected[key].coeff(self.psi,i)
self.g['sym_'+key].append(expr)
#print(self.g_sym_fnames[key][i])
dill.dump(self.g['sym_'+key][i],
open(self.g['sym_fnames_'+key][i],'wb'),
recurse=True)
else:
for key in self.var_names:
self.g['sym_'+key] = lib.load_dill(self.g['sym_fnames_'+key])
"""
rule_tmp = {Indexed('g'+key,1):1 for key in self.var_names}
rule_tmp3 = {Indexed('g'+key,2):2 for key in self.var_names}
rule_tmp2 = {self.v:3,self.h:1.5,self.r:.1,self.w:1.2}
rule_tmp = {**rule_tmp,**self.rule_par,**rule_tmp2,**rule_tmp3}
print(rule_tmp)
expr_temp = self.g['sym_w'][2]
expr_temp = expr_temp.subs(rule_tmp)
print(sym.N(expr_temp))
lam_temp = lambdify(self.vars,expr_temp(*self.vars))
print(lam_temp(1,1,1,1))
"""
def load_g(self):
"""
load all Floquet eigenfunctions g or recompute
"""
self.g['dat'] = []
for key in self.var_names:
self.g['imp_'+key] = []
self.g['lam_'+key] = []
print('* Computing...', end=' ')
for i in range(self.miter):
print('g_'+str(i),end=', ')
fname = self.g['dat_fnames'][i]
#print('i,fname',i,fname)
file_does_not_exist = not(os.path.exists(fname))
if self.recompute_g or file_does_not_exist:
het_vec = self.interp_lam(i,self.g,fn_type='g')
data = self.generate_g(i,het_vec)
np.savetxt(self.g['dat_fnames'][i],data)
else:
data = np.loadtxt(fname)
if True:
fig, axs = plt.subplots(nrows=self.dim,ncols=1)
for j,ax in enumerate(axs):
key = self.var_names[j]
ax.plot(self.tLC,data[:,j],label=key)
ax.legend()
axs[0].set_title('g'+str(i))
print('g'+str(i)+' init',data[0,:])
print('g'+str(i)+' final',data[-1,:])
plt.tight_layout()
plt.show(block=True)
time.sleep(.1)
self.g['dat'].append(data)
for j,key in enumerate(self.var_names):
#print(len(self.tLC),len(data[:,j]))
fn_temp = interpb(self.tLC,data[:,j],self.T)
imp_temp = imp_fn('g'+key+'_'+str(i),self.fmod(fn_temp))
self.g['imp_'+key].append(imp_temp)
lam_temp = lambdify(self.t,self.g['imp_'+key][i](self.t))
self.g['lam_'+key].append(lam_temp)
# replacement rules.
thA = self.thA
thB = self.thB
self.rule_g = {} # g function
self.rule_g_AB = {} # coupling
for key in self.var_names:
for i in range(self.miter):
dictg = {sym.Indexed('g'+key,i):self.g['imp_'+key][i](self.t)}
dictA = {Indexed('g'+key+'A',i):self.g['imp_'+key][i](thA)}
dictB = {Indexed('g'+key+'B',i):self.g['imp_'+key][i](thB)}
self.rule_g.update(dictg)
self.rule_g_AB.update(dictA)
self.rule_g_AB.update(dictB)
print()
def generate_g(self,k,het_vec):
"""
generate Floquet eigenfunctions g
uses Newton's method
"""
# load kth expansion of g for k >= 0
if k == 0:
# g0 is 0. Do this to keep indexing simple.
return np.zeros((self.TN,len(self.var_names)))
if k == 1:
# pick correct normalization
#init = [0,self.g1_init[1],self.g1_init[2],self.g1_init[3]]
init = copy.deepcopy(self.g1_init)
else:
init = np.zeros(self.dim)
# find initial condition
if k == 1:
eps = 1e-2
backwards = False
rel_tol = 1e-7
alpha = 1
else:
eps = 1e-2
backwards = False
rel_tol = 1e-9
alpha = 1
"""
if k == 3:
backwards = True
rel_tol = 1e-9
alpha=0.2
elif k == 4:
backwards = True
rel_tol = 1e-9
alpha=0.7
else:
backwards = False
rel_tol = 1e-7
alpha=0.4
"""
init = lib.run_newton2(self,self.dg,init,k,het_vec,
max_iter=20,eps=eps,
rel_tol=rel_tol,rel_err=10,
exception=False,alpha=alpha,
backwards=backwards)
# get full solution
if backwards:
tLC = -self.tLC
else:
tLC = self.tLC
sol = solve_ivp(self.dg,[0,tLC[-1]],
init,args=(k,het_vec),
t_eval=tLC,method=self.method,
dense_output=True,
rtol=self.rtol,atol=self.atol)
if backwards:
gu = sol.y.T[::-1,:]
else:
gu = sol.y.T
return gu
def load_het_sym(self):
        # load heterogeneous terms for z and i if they exist; otherwise generate them.
for key in self.var_names:
self.z['sym_'+key] = []
self.i['sym_'+key] = []
# self.het1['sym_'+key] = []
#self.het1 = {'sym_'+k: [] for k in self.var_names}
# check that files exist
val = 0
for key in self.var_names:
val += not(lib.files_exist(self.z['sym_fnames_'+key]))
val += not(lib.files_exist(self.i['sym_fnames_'+key]))
val += not(lib.files_exist([self.A_fname]))
if val != 0:
files_do_not_exist = True
else:
files_do_not_exist = False
if self.recompute_het_sym or files_do_not_exist:
sym_collected = self.generate_het_sym()
for i in range(self.miter):
for key in self.var_names:
expr = sym_collected[key].coeff(self.psi,i)
expr = expr.subs(self.rule_g0)
self.z['sym_'+key].append(expr)
self.i['sym_'+key].append(expr)
#print('het1 key, i,expr', key, i,expr)
#print()
#print(self.g_sym_fnames[key][i])
dill.dump(self.z['sym_'+key][i],
open(self.z['sym_fnames_'+key][i],'wb'),
recurse=True)
dill.dump(self.i['sym_'+key][i],
open(self.i['sym_fnames_'+key][i],'wb'),
recurse=True)
# save matrix of a_i
dill.dump(self.A,open(self.A_fname,'wb'),recurse=True)
else:
self.A, = lib.load_dill([self.A_fname])
for key in self.var_names:
self.z['sym_'+key] = lib.load_dill(self.z['sym_fnames_'+key])
self.i['sym_'+key] = lib.load_dill(self.i['sym_fnames_'+key])
#lam = lambdify(self.t,self.het1['sym_'+key][1].subs(rule))
def generate_het_sym(self):
"""
Generate heterogeneous terms for integrating the Z_i and I_i terms.
        Returns
        -------
        dict
            Keys are the variable names; values are the corresponding
            heterogeneous expressions collected in powers of psi.
"""
# get the general expression for h in z before plugging in g,z.
# column vectors ax ay for use in matrix A = [ax ay]
self.a = {k: sym.zeros(self.dim,1) for k in self.var_names}
#self.ax = Matrix([[0],[0]])
#self.ay = Matrix([[0],[0]])
for i in range(1,self.trunc_derivative+1):
p1 = lib.kProd(i,self.dx_vec)
p2 = kp(p1,sym.eye(self.dim))
for j,key in enumerate(self.var_names):
d1 = lib.vec(lib.df(self.rhs_sym[j],self.x_vec,i+1))
#print((1/math.factorial(i)))
self.a[key] += (1/math.factorial(i))*p2*d1
self.A = sym.zeros(self.dim,self.dim)
for i,key in enumerate(self.var_names):
self.A[:,i] = self.a[key]
het = self.A*self.z['vec']
# expand all terms
out = {}
for i,key in enumerate(self.var_names):
het_key = sym.expand(het[i]).subs(self.rule_d2g)
het_key = sym.collect(het_key,self.psi)
het_key = sym.expand(het_key)
het_key = sym.collect(het_key,self.psi)
#print(key,het_key)
#print(i,key,het_key)
#print(sym.apart(expr))
#print(sym.collect(expr,self.psi,evaluate=False))
#het_key = sym.collect(het_key,self.psi,evaluate=False)
out[key] = het_key
#het = {key: sym.expand(het[i]).subs(self.rule_d2g)
# for i,key in enumerate(self.var_names)}
#self.hetx = sym.expand(het[0].subs([(self.dx,self.gx),(self.dy,self.gy)]))
#self.hety = sym.expand(het[1].subs([(self.dx,self.gx),(self.dy,self.gy)]))
# collect all psi terms into factors of pis^k
#self.het1_collected = {k: sym.collect(het[k],self.psi,evaluate=False)
# for k in self.var_names}
return out
def load_z(self):
"""
load all PRCs z or recompute
"""
self.z['dat'] = []
for key in self.var_names:
self.z['imp_'+key] = []
self.z['lam_'+key] = []
print('* Computing...', end=' ')
# yamtbx/dataproc/myspotfinder/command_line/spot_finder_backend.py (from repo 7l2icj/kamo_clone)
"""
Reference:
Python Multiprocessing with ZeroMQ
http://taotetek.net/2011/02/02/python-multiprocessing-with-zeromq/
"""
import iotbx.phil
import libtbx.phil
import os
import stat
import time
import datetime
import getpass
import zmq
import re
import Queue
import collections
#import sqlite3
import pysqlite2.dbapi2 as sqlite3
import threading
import traceback
import numpy
import cPickle as pickle
import hashlib
from PIL import Image
from multiprocessing import Process
#import inotify.adapters # use yamtbx.python -mpip install inotify
from yamtbx.dataproc.myspotfinder import shikalog
from yamtbx.dataproc.myspotfinder import config_manager
from yamtbx.dataproc.myspotfinder import spot_finder_for_grid_scan
from yamtbx.dataproc import bl_logfiles
from yamtbx.dataproc import eiger
from yamtbx import util
#DEBUG for inotify
#import logging
#logger = logging.getLogger("inotify.adapters")
#logger.setLevel(logging.DEBUG)
#handlers = logging.StreamHandler()
#handlers.setLevel(logging.DEBUG)
#handlers.setFormatter(logging.Formatter("%(asctime)-15s %(levelname)s : %(message)s"))
#logger.addHandler(handlers)
master_params_str = """\
topdir = None
.type = path
.help = Root directory
bl = 32xu 41xu 26b2 44xu 45xu
.type = choice(multi=False)
.help = Choose beamline where you start SHIKA
date = "today"
.type = str
  .help = Data collection date ("today" or %Y-%m-%d format)
blconfig = None
.type = path
.help = Override default blconfig path (/isilon/blconfig/bl$bl/)
nproc = 4
.type = int
.help = Number of processors used for spot finding
ports = 5557,5558,5559
.type = ints(size=3,value_min=1024,value_max=49151)
.help = Port numbers used by ZeroMQ.
dbdir = /isilon/cluster/log/shika/db
.type = path
.help = location to write sqlite3 db file.
logroot = /isilon/cluster/log/shika/
.type = path
mode = *eiger_streaming bsslog zoo watch_ramdisk
.type = choice(multi=False)
env = *oys ppu
.type = choice(multi=False)
  .help = Execution environment
eiger_host = "192.168.163.204"
.type = str
.help = "EIGER hostname or ip-address"
#incomplete_file_workaround = 0
# .type = float
# .help = wait given seconds after detecting new image
force_ssh_from = None
.type = str
.help = Users must not change this parameter.
only_check_in_last_hours = 1
.type = float
.help = "Only check diffscan.log modified during the last specified hours"
ramdisk_walk_interval = 2
.type = float
"""
params = None
def retry_until_success(f, arg=None):
args = (arg,) if arg is not None else ()
return util.retry_until_noexc(f, args, ntry=30, outf=shikalog.warning)
class DiffScanManager:
def __init__(self):
self.clear()
# __init__()
def clear(self):
self.scanlog = {} # directory: BssDiffscanLog object
self.found_imgs = set()
self.finished = {} # {filename:timestamp}; list of filenames of which analysis was completed
# clear()
def add_scanlog(self, slog):
slog = os.path.abspath(slog)
self.scanlog[os.path.dirname(slog)] = bl_logfiles.BssDiffscanLog(slog)
# add_scanlog()
def add_dir(self, slogdir):
self.add_scanlog(os.path.join(slogdir, "diffscan.log"))
# add_dir()
def update_scanlogs(self):
for logdir, slog in self.scanlog.items():
if os.path.isfile(slog.scanlog):
slog.parse()
else:
shikalog.error("diffraction scan log is not found!: %s" %slog.scanlog)
continue
# if no update since all images processed
mtime = os.path.getmtime(slog.scanlog)
if mtime == self.finished.get(slog.scanlog, -1): continue
# Update 'processed files' using database
dbfile = os.path.join(logdir, "_spotfinder", "shika.db")
for _ in xrange(10):
try:
if not os.path.exists(os.path.dirname(dbfile)): os.mkdir(os.path.dirname(dbfile))
con = sqlite3.connect(dbfile, timeout=30)
break
except sqlite3.OperationalError:
shikalog.warning("Connecting to %s failed. Retrying" % dbfile)
cur = con.cursor()
# status TABLE
#c = cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='status';")
c = retry_until_success(cur.execute, "SELECT name FROM sqlite_master WHERE type='table' AND name='status';")
if c.fetchone() is not None:
#cur.execute("select filename from status")
retry_until_success(cur.execute, "select filename from status")
processed_files = map(lambda x:os.path.join(logdir, x[0]), cur.fetchall())
print "debug::",processed_files
self.found_imgs.update(processed_files)
# check if all images are processed
if len(processed_files) == sum(map(lambda x: len(x.filename_idxes), slog.scans)):
shikalog.info("All %d images in %s are already processed. Will not check unless diffscan.log updated"%(len(processed_files), slog.scanlog))
self.finished[slog.scanlog] = mtime
#con.commit()
retry_until_success(con.commit)
con.close()
# update_scanlogs()
def get_unprocessed_images(self, env=None):
ret = []
self.update_scanlogs()
for slogdir, slog in self.scanlog.items():
for scan in slog.scans:
fcs = map(lambda x: (os.path.join(slogdir, x[0]), x[1]), scan.filename_idxes)
#print "fix=", fcs
#if env == "ppu": fcs_proxy = map(lambda x: (re.sub("^/isilon/users/", "/ramdisk/", x[0]), x[1]), fcs)
if env == "ppu": f_mod = lambda x: re.sub("^/isilon/users/", "/ramdisk/", x)
else: f_mod = lambda x: x
unproc = filter(lambda x: x[0] not in self.found_imgs and os.path.isfile(f_mod(x[0])), fcs)
ret.extend(map(lambda x:x+(scan,), unproc))
self.found_imgs.update(map(lambda x: x[0], ret))
return ret # (filename, idx, scan object)
# get_unprocessed_images()
def remove_found(self, files): # when user wants to recalculate..
self.found_imgs.difference_update(files)
# remove_found()
def needs_to_be_processed(self, filename):
"""
Check if the given file needs to be processed.
No need to process if
- not included in diffscan.log
- first image in row (only if BSS creates such non-sense files)
"""
scaninfo = self.get_scan_info(filename)
if scaninfo is None:
return False
# return True here *if* BSS no longer creates such non-sense files.
# this should be an option.
if scaninfo.is_shutterless():
r = scaninfo.get_file_number_based_on_template(filename)
num = int(r.group(1))
if scaninfo.hpoints > 1:
return num%(scaninfo.hpoints+1) != 0 # if remainder is 0, discard the image.
else:
return num != 0 # discard 000.img
else:
return True
# needs_to_be_processed()
def get_grid_coord(self, filename):
dirname = os.path.dirname(filename)
if dirname not in self.scanlog:
shikalog.warning("get_grid_coord(): directory is not found: %s" % dirname)
return None
return self.scanlog[dirname].get_grid_coord(os.path.basename(filename))
# get_grid_coord()
def get_scan_info(self, filename):
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
if not dirname in self.scanlog:
shikalog.warning("get_scan_info(): directory is not found: %s" % dirname)
return None
for scan in reversed(self.scanlog[dirname].scans):
if scan.match_file_with_template(filename):
return scan
shikalog.warning("get_scan_info(): Not in scans: %s" % dirname)
return None
# get_scan_info()
def get_gonio_xyz_phi(self, filename):
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
if not dirname in self.scanlog:
return None
for scan in reversed(self.scanlog[dirname].scans):
for f, c in scan.filename_coords:
if basename == f:
if scan.is_shutterless():
return list(c[0]) + [scan.fixed_spindle]
else:
return list(c[0]) + [scan.osc_start]
return None
# get_gonio_xyz_phi()
# class DiffScanManager
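def _diffscan_manager_usage_sketch():
    # Hedged usage sketch, not part of the original module: the manager is fed
    # directories containing a diffscan.log and is then polled for images that
    # have not been analysed yet. The path below is hypothetical.
    mgr = DiffScanManager()
    mgr.add_dir("/isilon/users/someuser/scan001")
    for filename, idx, scan in mgr.get_unprocessed_images(env="oys"):
        shikalog.info("unprocessed %s (idx=%s) at %s" % (
            filename, idx, mgr.get_grid_coord(filename)))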
class WatchScanlogThread:
def __init__(self, queue, topdir, beamline=None, expdate="today"):
self.queue = queue
self.topdir = topdir
self.interval = 5
self.thread = None
#self.latest_dir = None
self.keep_going = True
self.running = True
self.beamline = beamline
self.last_bsslog = None
self.last_bsslog_line = 0
self.expdate = None
if expdate != "today": self.expdate = datetime.datetime.strptime(expdate, "%Y-%m-%d")
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self._cached_dirs = {}
#self.thread.start()
def start(self, interval=None):
# Thread should be already started.
# Just start to notify the latest directory.
self.notify_latest_dir = True
#wx.PostEvent(self.parent, EventLogWatcherStarted())
if interval is not None:
self.interval = interval
# If accidentally stopped
if not self.is_running():
self.keep_going = True
self.running = True
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
pass
def is_running(self): return self.thread is not None and self.thread.is_alive()
def find_in_directory(self, topdir):
scanlogs = [] # (filename, date)
for root, dirnames, filenames in os.walk(topdir):
if "diffscan.log" in filenames:
scanlog = os.path.join(root, "diffscan.log")
scanlogs.append((scanlog, os.path.getmtime(scanlog)))
return scanlogs
# find_in_directory()
def find_in_bsslog(self, topdir):
def read_bsslog_line(l):
if self.beamline in ("41xu","45xu") and "/ramdisk/" in l: l = l.replace("/ramdisk/","/isilon/users/", 1) # XXX should check if Pilatus or not!!
if topdir not in l: return
l = l[l.index(topdir):]
if " " in l: l = l[:l.index(" ")]
if ",.img" in l: l = l[:l.index(",.img")]
return os.path.dirname(l)
basedate = datetime.datetime.today() if self.expdate is None else self.expdate
if self.last_bsslog is None:
shikalog.debug("checking yesterday's bss log")
self.last_bsslog = os.path.join(params.blconfig, "log",
(basedate - datetime.timedelta(days=1)).strftime("bss_%Y%m%d.log"))
if not os.path.isfile(self.last_bsslog):
shikalog.info("Yesterday's log not found: %s"%self.last_bsslog)
current_bsslog = os.path.join(params.blconfig, "log", basedate.strftime("bss_%Y%m%d.log"))
if self.last_bsslog is not None and self.last_bsslog != current_bsslog and os.path.isfile(self.last_bsslog):
shikalog.debug("reading last-log %s from %d" % (os.path.basename(self.last_bsslog), self.last_bsslog_line))
for i, l in enumerate(open(self.last_bsslog)):
if i <= self.last_bsslog_line: continue
# read last log!
found = read_bsslog_line(l)
if found is not None: self._cached_dirs[found] = time.time()
# reset for reading current log
self.last_bsslog_line = 0
if os.path.isfile(current_bsslog):
shikalog.debug("reading curr-log %s from %d" % (os.path.basename(current_bsslog), self.last_bsslog_line))
i = -1 # in case empty file
for i, l in enumerate(open(current_bsslog)):
if i <= self.last_bsslog_line: continue
# read current log!
found = read_bsslog_line(l)
if found is not None: self._cached_dirs[found] = time.time()
# set for next reading
self.last_bsslog_line = i
else:
shikalog.info("bsslog not found: %s"%current_bsslog)
self.last_bsslog = current_bsslog
scanlogs = map(lambda x: os.path.join(x, "diffscan.log"), self._cached_dirs)
uid = os.getuid()
scanlogs = filter(lambda x: os.path.isfile(x) and os.stat(x).st_uid==uid, scanlogs)
if params.only_check_in_last_hours is not None and params.only_check_in_last_hours > 0:
now = time.time()
last_seconds = params.only_check_in_last_hours*60*60
scanlogs = filter(lambda x: (now-os.path.getmtime(x))<last_seconds, scanlogs)
if scanlogs: shikalog.debug("found diffscan.log in bsslog: %s" % scanlogs)
for k in self._cached_dirs.keys():
# clear old cache
if time.time() - self._cached_dirs[k] > 60*5: del self._cached_dirs[k]
return map(lambda x: (x, os.path.getmtime(x)), scanlogs)
# find_in_bsslog()
def run_inner(self, method="bsslog"):
assert method in ("bsslog", "os.walk")
startt = time.time()
if method == "bsslog":
scanlogs = self.find_in_bsslog(self.topdir)
else:
scanlogs = self.find_in_directory(self.topdir)
shikalog.debug("WatchScanlogThread.run_inner(method=%s) took %.3f sec for finding" % (method,
time.time()-startt))
if len(scanlogs) > 0:
scanlogs.sort(key=lambda x:x[1], reverse=True)
for x in scanlogs: self.queue.put(x)
# run_inner()
def run(self):
def mysleep():
if self.interval < 1:
time.sleep(self.interval)
else:
for i in xrange(int(self.interval/.5)):
if self.keep_going:
time.sleep(.5)
# mysleep()
shikalog.info("WatchScanlogThread loop STARTED")
while self.keep_going:
#shikalog.debug("in | |
from typing import List, Union, Tuple
from functools import reduce
import re
import json
import shlex
import click
from prompt_toolkit.styles import Style
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.shortcuts import CompleteStyle
from prompt_toolkit.completion import (
Completer, FuzzyCompleter, Completion
)
from .pretty import PrettyArgument, PrettyOption
from .utils import HasKey
from . import globals as globs
from . import _colors as colors
COMPLETION_TREE = {}
def deep_get(dictionary, *keys):
return reduce(lambda d, key: d.get(key) if d else None, keys, dictionary)
def deep_set(dictionary, value, *keys):
for key in keys[:-1]: dictionary = dictionary[key]
dictionary[keys[-1]] = value
def html_escape(s: str):
return s.translate(str.maketrans(
{
"&": r"&",
"<": r"<",
">": r">",
"\"": r""",
"'": r"'",
}
))
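def _helpers_usage_sketch():
    # Hedged sketch, not part of the original module: exercises the three
    # helpers above on throwaway data.
    tree = {'shell': {'cmd': {'isGroup': False}}}
    assert deep_get(tree, 'shell', 'cmd', 'isGroup') is False
    deep_set(tree, True, 'shell', 'cmd', 'isGroup')
    assert deep_get(tree, 'shell', 'cmd', 'isGroup') is True
    assert html_escape('<b>&</b>') == '&lt;b&gt;&amp;&lt;/b&gt;'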
class ClickCompleter(Completer):
@staticmethod
def get_true_option_from_line(line) -> str:
tmp = line.rstrip()
tmp = re.sub(r"((,[\s]*\])|(,[\s]*)|((,\][\s]*)))$", '', tmp)
def read_words(words: List[str]) -> str:
for word in reversed(words):
if word.startswith('--'): return word
return None
try:
tmp_words = shlex.split(tmp, posix=False)
return read_words(tmp_words)
except:
tmp_words = tmp.split(' ')
if len(tmp_words) > 1:
tmp_words.pop()
return ClickCompleter.get_true_option_from_line(' '.join(tmp_words))
elif len(tmp_words):
try:
tmp_words = shlex.split(tmp, posix=False)
return read_words(tmp_words)
except: return None
else: return None
@staticmethod
def get_current_tuple_from_line(line) -> str:
tmp = line.rstrip()
tmp = re.sub(r"((,[\s]*\])|(,[\s]*)|((,\][\s]*)))$", '', tmp)
# no_quotes = re.sub('".*?"', '', tmp)
index = tmp.rfind('[', 0)
index2 = tmp.rfind(']', index) if index >= 0 else -1
ret = line[index:] if not index2 > index else None
return ret
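    # Illustrative examples (not from the original source) of the two static
    # helpers above:
    #   get_true_option_from_line('cmd --opt val')       -> '--opt'
    #   get_current_tuple_from_line('cmd --opt [1, "a"')  -> '[1, "a"'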
def get_completions(self, document, complete_event):
word: str = document.get_word_before_cursor()
line: str = document.current_line_before_cursor
original_line = line.rstrip()
original_words = original_line.split(' ')
original_words_prev = original_words.copy()
original_words_prev.pop()
line = ((' '.join(globs.__SHELL_PATH__) + ' ') if len(globs.__SHELL_PATH__) else '') + line
words = line.rstrip().split(' ')
lastword = words[len(words) - 1]
priorOption = words[len(words) - 2] if len(words) > 1 else None
try:
current_key = []
for i in range(0, len(words)):
if i < len(globs.__SHELL_PATH__): continue
try:
if deep_get(COMPLETION_TREE, *words[:len(words) - i]):
if len(original_words) > 2:
current_key = words[:(len(words) - (i - 1)) + len(globs.__SHELL_PATH__)]
if deep_get(COMPLETION_TREE, *current_key): break
elif '--' in lastword:
current_key = words[:len(words) - i]
else:
current_key = words[:len(words) - (i - 1)]
if deep_get(COMPLETION_TREE, *current_key): break
elif priorOption and '--' in priorOption:
current_key = words[:len(words) - (i + 1)]
if deep_get(COMPLETION_TREE, *current_key): break
else:
key = words[:len(words) - (i + 1)]
if deep_get(COMPLETION_TREE, *key):
current_key = key
break
except: break
obj = deep_get(COMPLETION_TREE, *current_key)
obj2 = deep_get(COMPLETION_TREE, *words)
true_option = ClickCompleter.get_true_option_from_line(line)
l = len(words)
c = len([x for x in words if '--' in x])
l -= c if c else 0
# Recommend Commands
if obj:
if obj2 and obj2['isGroup']:
commands = [k for k, v in obj2.items() if isinstance(obj2[k], dict) and not obj2[k]['isHidden']]
for key in commands:
if key.startswith(word):
h = html_escape(obj2[key]['_help'])
if not current_key == globs.__SHELL_PATH__:
yield Completion(
key,
start_position=-len(word),
display=HTML("<%s>%s</%s>" % (colors.COMPLETION_COMMAND_NAME, key, colors.COMPLETION_COMMAND_NAME)),
display_meta=HTML("<style %s><i>%s</i></style>" % (colors.COMPLETION_COMMAND_DESCRIPTION, h))
)
else:
yield Completion(
key,
start_position=-len(word),
display=HTML("<%s>%s</%s>" % (colors.COMPLETION_ROOT_COMMAND_NAME, key, colors.COMPLETION_ROOT_COMMAND_NAME)),
display_meta=HTML("<style %s><i>%s</i></style>" % (colors.COMPLETION_ROOT_COMMAND_DESCRIPTION, h))
)
elif obj['isGroup'] and not obj2:
root = deep_get(COMPLETION_TREE, words[0 + (l - 1)])
if root or line.count(' ') == 0:
if (root and line.count(' ') == 0) or not root:
commands = [k for k, v in obj.items() if isinstance(obj[k], dict) and not obj[k]['isHidden']]
for key in commands:
if key.startswith(word):
h = html_escape(obj[key]['_help'])
yield Completion(
key,
start_position=-len(word),
display=HTML("<%s>%s</%s>" % (colors.COMPLETION_ROOT_COMMAND_NAME, key, colors.COMPLETION_ROOT_COMMAND_NAME)),
display_meta=HTML("<style %s><i>%s</i></style>" % (colors.COMPLETION_ROOT_COMMAND_DESCRIPTION, h))
)
if len(current_key):
# Option Reflection Utilities
def get_option(name: str) -> PrettyOption:
if len(obj['_options']):
try: return [x for x in obj['_options'] if x[0] == name][0][1]
except IndexError: pass
return None
def get_option_names() -> List[str]:
expression = r'(?<=--)([a-zA-Z0-9]*)(?=\s)'
return re.findall(expression, line)
# HTML Display Style Utilities
def get_option_display_tag(option, value, isChoice=False, isBool=False) -> List[str]:
tag = colors.COMPLETION_CHOICE_DEFAULT
if isChoice:
try:
if len(option.choices.display_tags):
try:
tag = option.choices.display_tags[values.index(value)]
except: tag = option.choices.display_tags[values.pop()]
except: pass
try:
if len(option.type.display_tags):
try:
tag = option.type.display_tags[values.index(value)]
except: tag = option.type.display_tags[values.pop()]
except: pass
if isBool:
tag = colors.COMPLETION_CHOICE_BOOLEAN_TRUE if value == 'true' else colors.COMPLETION_CHOICE_BOOLEAN_FALSE
return tag
def get_option_literal_tuple_display_tag(tuple_type, value) -> List[str]:
tag = colors.COMPLETION_CHOICE_DEFAULT
isChoice = False
isBool = False
if 'Choice(' in str(tuple_type):
isChoice = True
values = [c for c in tuple_type.choices if c]
elif str(tuple_type) == 'bool':
isBool = True
values = ['true', 'false']
if isChoice:
try:
if len(tuple_type.display_tags):
try:
tag = tuple_type.display_tags[values.index(value)]
except: tag = tuple_type.display_tags[values.pop()]
except: pass
if isBool:
tag = colors.COMPLETION_CHOICE_BOOLEAN_TRUE if value == 'true' else colors.COMPLETION_CHOICE_BOOLEAN_FALSE
return tag
# Typed Tuple Parameter Completion Support
def get_literal_tuple_display(option: PrettyOption, word: str, mod=0) -> Tuple[List[str], HTML, HTML, int]:
Current_Tag_Begin = '<u><b>'
Current_Tag_End = '</b></u>'
def fix_json_string(tmp) -> str:
"""Transforms all \'\' and \`\` strings into \"\" strings"""
for match in re.finditer(r"(['`])(?:(?=(\\?))\2.)*?(\1)", tmp):
m = tmp[match.span()[0]: match.span()[1]]
if (m.startswith('"') or m.startswith("'") or m.startswith('`')) and (m.endswith('"') or m.endswith("'") or m.endswith('`')):
tmp = tmp[:match.start()] + ('"%s"' % m[1:-1]) + tmp[match.span()[1]:]
return tmp
def get_valid_json(w: str, recurse=False) -> Union[list, None]:
tmp = w.rstrip()
tmp = re.sub(r"((,[\s]*\])|(,[\s]*)|((,\][\s]*)))$", '', tmp)
tmp = re.sub(r"([,]{1,999}.(?<=,))", ',', tmp)
tmp = fix_json_string(tmp)
if tmp.startswith('['):
try:
if not tmp.endswith(']'): tmp += ']'
return json.loads(tmp)
except:
try: tmp_words = shlex.split(tmp, posix=False)
except: tmp_words = tmp.split(' ')
if len(tmp_words) > 1:
tmp_words.pop()
return get_valid_json(' '.join(tmp_words), True)
elif len(tmp_words):
try:
tmp_words = shlex.split(' '.join(tmp_words).rstrip(), posix=False)
return json.loads(' '.join(tmp_words))
except: return None
return None
return None
def get_tuple_displaylist(cur_json: list, remaining=True) -> List[str]:
types = []
if len(cur_json) and (len(cur_json) < len(option.literal_tuple_type)):
if remaining:
for tuple_type in option.literal_tuple_type[len(cur_json) + mod:]:
types.append('<style {}>{}</style>'.format(colors.COMPLETION_LITERAL_TUPLE_TYPE, html_escape(str(tuple_type))))
else:
for tuple_type in option.literal_tuple_type[:len(cur_json) + mod]:
types.append('<style {}>{}</style>'.format(colors.COMPLETION_LITERAL_TUPLE_TYPE_USED, html_escape(str(tuple_type))))
elif len(cur_json) and not remaining:
for tuple_type in option.literal_tuple_type[:len(cur_json) + mod]:
types.append('<style {}>{}</style>'.format(colors.COMPLETION_LITERAL_TUPLE_TYPE_USED, html_escape(str(tuple_type))))
else:
if remaining:
for tuple_type in option.literal_tuple_type[mod:]:
types.append('<style {}>{}</style>'.format(colors.COMPLETION_LITERAL_TUPLE_TYPE, html_escape(str(tuple_type))))
else:
types.append('<style {}>{}</style>'.format(colors.COMPLETION_LITERAL_TUPLE_TYPE_USED, html_escape(str(option.literal_tuple_type[0]))))
return types
def get_tuple_values(used_types, cur_json) -> List[str]:
if not len(word.rstrip()):
return ['[']
else:
if (len(cur_json) < len(option.literal_tuple_type) + mod):
values = []
isChoice = False
isBool = False
raw_type = option.literal_tuple_type[len(used_types) - 1 if len(used_types) else 0]
type_str = str(raw_type)
if type_str.startswith('Choice('):
isChoice = True
values = ['"%s"' % c for c in raw_type.choices if c]
elif "<class 'bool'>" in type_str:
isBool = True
values = ['true', 'false']
elif "<class 'float'>" in type_str:
values.append('0.0')
elif "<class 'int'>" in type_str:
values.append('0')
if not len(values):
values.append('\"\"')
return values
else:
return [']']
def get_tuple_value_display(used_types, cur_json) -> str:
if not len(word.rstrip()):
return '<b>[</b>'
elif len(used_types):
if (len(used_types) < len(option.literal_tuple_type)):
return '<style {}>{}</style>'.format(colors.COMPLETION_LITERAL_TUPLE_TYPE_CURRENT, html_escape(str(option.literal_tuple_type[len(used_types) - 1])))
else:
return '<b>]</b>'
else:
if len(cur_json):
return '<style {}>{}</style>'.format(colors.COMPLETION_LITERAL_TUPLE_TYPE_CURRENT, html_escape(str(option.literal_tuple_type[len(used_types) - 1])))
else:
return '<style {}>{}</style>'.format(colors.COMPLETION_LITERAL_TUPLE_TYPE_CURRENT, html_escape(str(option.literal_tuple_type[0])))
word_json = get_valid_json(word)
if not word_json: word_json = []
if len(word_json) >= len(option.literal_tuple_type): mod -= 1
used_types = get_tuple_displaylist(word_json, False)
remaining_types = get_tuple_displaylist(word_json)
vals = get_tuple_values(used_types, word_json)
disp_val = get_tuple_value_display(used_types, word_json)
index = len(word_json) - 1
disp = '<b>[</b>'
if len(used_types):
disp += ', '.join(used_types)
if len(used_types) < len(option.literal_tuple_type):
disp += ', ' + ', '.join(remaining_types)
disp += '<b>]</b>'
if index > -1:
disp = disp.replace(used_types[index + mod], '{}{}{}'.format(Current_Tag_Begin, used_types[index + mod], Current_Tag_End))
elif len(used_types):
disp = disp.replace(used_types[0], '{}{}{}'.format(Current_Tag_Begin, used_types[0], Current_Tag_End))
return (vals, HTML(disp_val), HTML(disp), index + mod)
# Recommend Option Parameters
if len(obj['_options']):
valid = True
option = get_option(true_option)
def get_option_args():
ret = []
for arg in reversed(original_words):
if arg == true_option: break
ret.append(arg)
return ret
if option:
option_nargs = 1
option_args = get_option_args()
if HasKey('nargs', option): option_nargs = option.nargs
if len(option_args):
if not option.literal_tuple_type:
if len(option_args) >= option_nargs: valid = False
else:
if ']' in option_args[0]: valid = False
if (not option.multiple) and (original_words_prev.count('--%s' % option.name) > 1):
valid = False
if option and valid:
if not (option.is_bool_flag or option.is_flag):
values = []
isChoice = False
isBool = False
invalid = False
if not option.literal:
                                # Option parameter is standard
disp_meta = None
if not '.Tuple object' in str(option.type):
# Standard Parameter
if option.type.name == 'choice':
isChoice = True
values = [c for c in option.type.choices if c]
elif option.choices and ('Choice' in str(type(option.choices))):
isChoice = True
values = [c for c in option.choices.choices if c]
elif option.type.name == 'boolean':
isBool = True
                                        values = ['true', 'false']
import sys
sys.path.append("..")
from common.utils import Node
#%%
def paintLineOn(buff, text, indent):
buff += indent + text + "\n"
return buff
def paint_type(typeName):
if typeName.value == "Generic":
base, *args = typeName.children
base, args = paint_type(base), [paint_type(arg) for arg in args]
return f"{base}<{', '.join(args)}>"
# handle combos
# TODO
if typeName.value == "TypeExpr":
op, left, right = typeName.children
left, right = paint_type(left), paint_type(right)
op = op.value
left, right = (left, right) if "mut" in left else (right, left)
if op == "*" :
return left + " " + right
if typeName.value == "ID":
name = typeName.children[0].value
if name == "Int":
return "i32"
if name == "String":
return "String"
if name == "Mut":
return "mut"
if name == "Obj":
return "HashMap<&str, Box<dyn Any + 'static>>"
return name
return "typeNotImplemented"
def paint_call(name, args):
argText = ", ".join(str(paint_expression(arg)) for arg in args)
if name == "print":
return 'println!("{:#?}", ' + argText + ")"
if name == "Box":
return f"Box::new({argText})"
if name == "Unbox":
return f"*{argText}"
return f"{name}({argText})"
def paint_expression(expr, currentIndent=""):
if expr.value == "String" or expr.value == "Number":
return expr.children[0].value
if expr.value == "ID":
return expr.children[0].value
if expr.value == "BinOp":
# TODO handle Glace-specific ops
op, left, right = expr.children
# Single Access
if right.value == "TypedDecl" and op.value == "'":
left = paint_expression(left, currentIndent)
vartype, varname = right.children
vartype = paint_type(vartype)
return f'{left}.get("{varname[1][0][0]}").unwrap().downcast_ref::<{vartype}>().unwrap()'
# Multiple access
if right.value == "BinOp" and op.value == "'":
out = paint_expression(left, currentIndent)
while right.value == "BinOp":
op, left, right = right.children
if op.value != "'":
raise NotImplementedError(f"Binary Operation ({op.value}) on Object get")
vartype, varname = left.children
vartype = paint_type(vartype)
out += f'.get("{varname[1][0][0]}").unwrap().downcast_ref::<{vartype}>().unwrap()'
vartype, varname = right.children
vartype = paint_type(vartype)
out += f'.get("{varname[1][0][0]}").unwrap().downcast_ref::<{vartype}>().unwrap()'
return out
left, right = paint_expression(left, currentIndent), paint_expression(right, currentIndent)
return f"{left} {op.value} {right}"
if expr.value == "Call":
if len(expr.children) > 1:
iden, *arg = expr.children
if iden.value == "ID":
name = iden[1][0][0]
return paint_call(name, arg)
else:
return paint_call(expr.children[0][1][0][0], Node("ID", [Node("", [])]))
if expr.value == "ComplexCall":
out = ""
iden, *extra = expr.children
out += iden[1][0][0]
for call in extra:
if call.value == "Parg":
out += "(" + \
", ".join(str(paint_expression(child, currentIndent))
for child in call.children) + ")"
if call.value == "Aidx":
if len(call.children) != 0:
out += "[" + str(paint_expression(call.children[0], currentIndent)) + "]"
else:
out += "[:]"
if call.value == "Spawn":
out += "{ " + \
", ".join(kwarg.children[0][1][0][0] + ": " + \
str(paint_expression(kwarg.children[1], currentIndent))
for kwarg in call.children) + " }"
if call.value == "Dcol":
out += "::" + call[1][0][1][0][0]
if call.value == "Dot":
out += "." + call[1][0][1][0][0]
return out
# Reworking this
if expr.value == "Object":
assigns = expr.children
out = "{\n" + currentIndent + "\t" + "let mut object: HashMap<&str, Box<dyn Any + 'static>> = HashMap::new();" + "\n"
for assign in assigns:
name, value = assign.children
if name.value == "ID":
name = name[1][0][0]
value = paint_expression(value, currentIndent+"\t")
out += currentIndent + "\t" + f'object.insert("{name}", Box::new({value}));' + "\n"
return out + currentIndent + "\t" + "object" + "\n" + currentIndent + "}"
if expr.value == "TypedDecl":
vartype, varname = expr.children
if expr.value == "Function":
default = paint_function("§§", expr, currentIndent)
return default.split("§§")[1][3:-2].replace("\n", "\n" + currentIndent + "\t")
if expr.value == "Block":
prg = paint_program(expr.children, currentIndent+"\t")
return "{\n" + prg + currentIndent + "}"
if expr.value == "Vector":
return "vec![" + \
", ".join(str(paint_expression(e, currentIndent)) for e in expr.children) + \
"]"
if expr.value == "Array":
return "[" + \
", ".join(str(paint_expression(e, currentIndent)) for e in expr.children) + \
"]"
if expr.value == "FixedArray":
type, count = expr.children
return "[" + paint_type(type) + " ; " + str(paint_expression(count)) + "]"
return "exprNotImplemented"
def paint_function(name, tree, currentIndent=""):
argument, body = tree.children
# Normal function
if body.value == "FunctionBody":
argsText = ""
if argument.children[0].value != "None":
argsText = ""
for argument in argument.children:
if argument.value == "TypedDecl":
argName, type = argument[1][1][1][0][0], paint_type(argument.children[0])
argsText += f"{argName}: {type}, "
argsText = argsText[:-2]
retType, retValue = body.children
outputType = paint_type(retType)
bodyText = ""
if retValue.value == "Block":
bodyText = paint_program(retValue.children, currentIndent+"\t")
else:
bodyText = currentIndent + "\t" + str(paint_expression(retValue, currentIndent)) + "\n"
outputText = f" -> {outputType}" if outputType != "Void" else ""
return f"fn {name}({argsText}){outputText} " + "{" + f"\n{bodyText}{currentIndent}" + "}\n"
# Lambda
else:
argsText = ""
# TODO same rework as above
if argument.value != "None":
if argument.children[0][1][0].value != "ID":
argsText = argument.children[0][1][0].value
else:
argName = argument.children[0][1][1][1][0][0]
type = paint_type(argument.children[0][1][0])
argsText = f"{argName} : {type}"
bodyText = compl = ""
if body.value == "Block":
bodyText = "{\n" + paint_program(body.children, currentIndent+"\t") + currentIndent + "}"
else:
bodyText = str(paint_expression(body, currentIndent))
if bodyText[0] == '|':
compl = " move"
newBody = ""
for i, e in enumerate(bodyText.splitlines()):
if i == 0:
newBody += e
else:
extraTab = "\t" if i != len(bodyText.splitlines())-1 else ""
newBody += "\n" + currentIndent + extraTab + e.strip()
bodyText = newBody
return f"let {name} = |{argsText}|{compl} {bodyText};" + "\n"
def paint_struct(name, tree, currentIndent=""):
_, *sections = tree.children
out = ""
for section in sections:
secName, *program = section.children
if secName[1][0].value == "data":
out += f"struct {name}" + " {\n" + currentIndent
for decl in program:
type, val = decl.children
type = paint_type(type)
out += "\t" + f"{val[1][0][0]}: {type}," + "\n" + currentIndent
out += "}\n" + currentIndent
if secName[1][0].value == "methods":
out += f"impl {name}" + " {\n" + currentIndent
for decl in program:
funcName, func = decl.children
funcName = funcName[1][0][0]
body = paint_function(funcName, func, currentIndent + "\t")
out += "\t" + body.replace(f"self: {name}", "&self")
out += "}\n" + currentIndent
if secName.value == "Generic":
if secName[1][0][1][0].value == "methods":
traitName = secName[1][1][1][0].value
out += f"impl {traitName} for {name}" + " {\n" + currentIndent
for decl in program:
funcName, func = decl.children
funcName = funcName[1][0][0]
body = paint_function(funcName, func, currentIndent + "\t")
out += "\t" + body.replace(f"self: {name}", "&self")
out += "}\n" + currentIndent
return out
def paint_trait(name, tree, currentIndent=""):
_, *sections = tree.children
out = ""
for section in sections:
secName, *program = section.children
if secName[1][0].value == "methods":
out += currentIndent + f"trait {name}" + " {\n" + currentIndent
for decl in program:
funcName, func = decl.children
funcName = funcName[1][0][0]
inTypes = ", ".join(f"{a[1][1][1][0][0]}: {a[1][0][1][0][0]}"
for a in func.children[0].children)
outType = paint_type(func.children[1])
body = f"fn {funcName}({inTypes}) -> {outType};".replace(" -> Void", "")
out += "\t" + body.replace(f"self: {name}", "&self") + "\n" + currentIndent
out += "}\n" + currentIndent
return out
#%%
def paint_program(instructions, currentIndent=""):
out = ""
for instr in instructions:
name, extra = instr
if name == "Use":
o = ""
for i, e in enumerate(extra):
if i != len(extra) - 1:
o += e[1][0][0] + "::"
else:
if len(e[1]) == 0:
o += "*;"
elif e.value == "ImPack":
o += "{" + ", ".join(a[1][0][0] for a in e[1]) + "};"
else:
o += e[1][0][0] + ";"
out = paintLineOn(out, f"use {o}", "")
if name == "If":
expr, block = extra
expr, block = paint_expression(expr, currentIndent), paint_program(block.children, currentIndent+"\t")
out = paintLineOn(out, f"if {expr} " + "{\n" + block + currentIndent + "}", currentIndent)
if name == "For":
expr, block = extra
expr, block = paint_expression(expr, currentIndent), paint_program(block.children, currentIndent+"\t")
out = paintLineOn(out, f"for {expr} " + "{\n" + block + currentIndent + "}", currentIndent)
if name == "TVDecl":
vartype, iden, value = extra
varname = iden.children[0].value
typeText = paint_type(vartype)
varvalue = paint_expression(value, currentIndent)
mods = ""
if "mut" in typeText:
mods = "mut"
typeText = typeText.replace("mut ", "")
out = paintLineOn(out, f"let {mods}{' ' if mods!='' else ''}{varname}: {typeText} = {varvalue};", currentIndent)
if name == "AutoDecl":
iden, value = extra
varname = iden.children[0].value
if value.value == "Function": # declare a function
functext = paint_function(varname, value, currentIndent)
out += currentIndent + functext
elif value.value == "MacroCall":
if value.children[0].children[0].value == "Struct":
structext = paint_struct(varname, value, currentIndent)
out += currentIndent + structext
if value.children[0].children[0].value in ["Blueprint", "Trait"]:
traitext = paint_trait(varname, value, currentIndent)
out += currentIndent + traitext
            else: # use the let keyword without a type annotation
node.args[1],
node.args[2],
node.args[3],
node.args[4]],
keywords=[])
elif len(node.args) == 6:
ast.fix_missing_locations(function_6_arg)
copy_node.body.append(function_6_arg)
new_node = ast.Call(
func=ast.Name(
id='__qmutpy_qgi_func__', ctx=ast.Load()),
args=[
node.func.value,
node.args[0],
node.args[1],
node.args[2],
node.args[3],
node.args[4],
node.args[5]],
keywords=[])
return new_node
def compare_gate_functions(self, f1, f2, discard_named_args=True):
if not discard_named_args:
return signature(f1) == signature(f2)
return len([arg for arg in signature(f1).parameters.values() if arg.default is arg.empty]) == \
len([arg for arg in signature(f2).parameters.values() if arg.default is arg.empty])
def equivalent_gates(self, discard_named_args=True):
existing_gate_names = ['ch', 'cp', 'cx', 'cy', 'cz', 'crx', 'cry', 'crz', 'ccx', 'cswap',
'csx', 'cu', 'cu1', 'cu3', 'dcx', 'h', 'i', 'id', 'iden', 'iswap',
'ms', 'p', 'r', 'rx', 'rxx', 'ry', 'ryy', 'rz', 'rzx', 'rzz', 's',
'sdg', 'swap', 'sx', 'x', 'y', 'z', 't', 'tdg', 'u', 'u1', 'u2',
'u3']
gate_functions = [o for o in getmembers(QuantumCircuit) if isfunction(o[1]) and o[0] in existing_gate_names]
gate_to_gate = { g: set() for g in existing_gate_names }
done = set()
for gate, func in gate_functions:
for gate_, func_ in gate_functions:
if gate == gate_:
continue
if self.compare_gate_functions(func, func_, discard_named_args):
gate_to_gate[gate].add(gate_)
                    gate_to_gate[gate_].add(gate_)  # the gate also lands in its own set; mutate_Call_0 removes it before picking a replacement
done.add(gate)
done.add(gate_)
return gate_to_gate
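def _attribute_rewrite_sketch():
    # Hedged sketch, not part of QMutPy: reproduces by hand the Attribute
    # rewrite that the mutation operators below perform, turning qc.x(0)
    # into qc.y(0) at the AST level (ast.unparse requires Python 3.9+).
    import ast
    tree = ast.parse("qc.x(0)")
    call = tree.body[0].value
    call.func = ast.Attribute(value=call.func.value, attr="y", ctx=ast.Load())
    ast.fix_missing_locations(tree)
    return ast.unparse(tree)  # "qc.y(0)"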
class QuantumGateReplacement(MutationOperator):
    # mutate only when Call.func is a Name that refers to a gate with equivalent gates
def should_mutate_Name(self, node):
return isinstance(node.func, ast.Name) and node.func.id in gates_set and len(gates_set[node.func.id]) > 0
    # mutate only when Call.func is an Attribute that refers to a gate with equivalent gates
def should_mutate_Attribute(self, node):
return isinstance(node.func, ast.Attribute) and node.func.attr in gates_set and len(gates_set[node.func.attr]) > 0
def mutate_Call_0(self, node):
# create gate set
global gates_set
gates_set = self.equivalent_gates()
if self.should_mutate_Name(node):
# remove same gate from equivalents
gates_set[node.func.id].remove(node.func.id)
# picks gate to mutate
gates_list = list(gates_set[node.func.id])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.id].remove(new_gate)
# mutates gate
mutated_qgate = ast.Name(new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
if self.should_mutate_Attribute(node):
# remove same gate from equivalents
gates_set[node.func.attr].remove(node.func.attr)
# picks gate to mutate
gates_list = list(gates_set[node.func.attr])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.attr].remove(new_gate)
# mutates gate
mutated_qgate = ast.Attribute(node.func.value, new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
raise MutationResign()
def mutate_Call_1(self, node):
if self.should_mutate_Name(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.id])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.id].remove(new_gate)
# mutates gate
mutated_qgate = ast.Name(new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
if self.should_mutate_Attribute(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.attr])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.attr].remove(new_gate)
# mutates gate
mutated_qgate = ast.Attribute(node.func.value, new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
raise MutationResign()
def mutate_Call_2(self, node):
if self.should_mutate_Name(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.id])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.id].remove(new_gate)
# mutates gate
mutated_qgate = ast.Name(new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
if self.should_mutate_Attribute(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.attr])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.attr].remove(new_gate)
# mutates gate
mutated_qgate = ast.Attribute(node.func.value, new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
raise MutationResign()
def mutate_Call_3(self, node):
if self.should_mutate_Name(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.id])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.id].remove(new_gate)
# mutates gate
mutated_qgate = ast.Name(new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
if self.should_mutate_Attribute(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.attr])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.attr].remove(new_gate)
# mutates gate
mutated_qgate = ast.Attribute(node.func.value, new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
raise MutationResign()
def mutate_Call_4(self, node):
if self.should_mutate_Name(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.id])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.id].remove(new_gate)
# mutates gate
mutated_qgate = ast.Name(new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
if self.should_mutate_Attribute(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.attr])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.attr].remove(new_gate)
# mutates gate
mutated_qgate = ast.Attribute(node.func.value, new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
raise MutationResign()
def mutate_Call_5(self, node):
if self.should_mutate_Name(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.id])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.id].remove(new_gate)
# mutates gate
mutated_qgate = ast.Name(new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
if self.should_mutate_Attribute(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.attr])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.attr].remove(new_gate)
# mutates gate
mutated_qgate = ast.Attribute(node.func.value, new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
raise MutationResign()
def mutate_Call_6(self, node):
if self.should_mutate_Name(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.id])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.id].remove(new_gate)
# mutates gate
mutated_qgate = ast.Name(new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
if self.should_mutate_Attribute(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.attr])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.attr].remove(new_gate)
# mutates gate
mutated_qgate = ast.Attribute(node.func.value, new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
raise MutationResign()
def mutate_Call_7(self, node):
if self.should_mutate_Name(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.id])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.id].remove(new_gate)
# mutates gate
mutated_qgate = ast.Name(new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
if self.should_mutate_Attribute(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.attr])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.attr].remove(new_gate)
# mutates gate
mutated_qgate = ast.Attribute(node.func.value, new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
raise MutationResign()
def mutate_Call_8(self, node):
if self.should_mutate_Name(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.id])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.id].remove(new_gate)
# mutates gate
mutated_qgate = ast.Name(new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
if self.should_mutate_Attribute(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.attr])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.attr].remove(new_gate)
# mutates gate
mutated_qgate = ast.Attribute(node.func.value, new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
raise MutationResign()
def mutate_Call_9(self, node):
if self.should_mutate_Name(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.id])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.id].remove(new_gate)
# mutates gate
mutated_qgate = ast.Name(new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
if self.should_mutate_Attribute(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.attr])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.attr].remove(new_gate)
# mutates gate
mutated_qgate = ast.Attribute(node.func.value, new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
raise MutationResign()
def mutate_Call_90(self, node):
if self.should_mutate_Name(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.id])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.id].remove(new_gate)
# mutates gate
mutated_qgate = ast.Name(new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
if self.should_mutate_Attribute(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.attr])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.attr].remove(new_gate)
# mutates gate
mutated_qgate = ast.Attribute(node.func.value, new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
raise MutationResign()
def mutate_Call_91(self, node):
if self.should_mutate_Name(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.id])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.id].remove(new_gate)
# mutates gate
mutated_qgate = ast.Name(new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
if self.should_mutate_Attribute(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.attr])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.attr].remove(new_gate)
# mutates gate
mutated_qgate = ast.Attribute(node.func.value, new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
raise MutationResign()
def mutate_Call_92(self, node):
if self.should_mutate_Name(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.id])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.id].remove(new_gate)
# mutates gate
mutated_qgate = ast.Name(new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
if self.should_mutate_Attribute(node):
# picks gate to mutate
gates_list = list(gates_set[node.func.attr])
new_gate = gates_list[0]
# removes picked gate from equivalents
gates_set[node.func.attr].remove(new_gate)
# mutates gate
mutated_qgate = ast.Attribute(node.func.value, new_gate, node.func.ctx)
return ast.Call(mutated_qgate, node.args, node.keywords)
raise MutationResign()
def compare_gate_functions(self, f1, f2, discard_named_args=True):
if not discard_named_args:
return signature(f1) == signature(f2)
return len([arg for arg in signature(f1).parameters.values() if arg.default is arg.empty]) == \
len([arg for arg in signature(f2).parameters.values() if arg.default is arg.empty])
def equivalent_gates(self, discard_named_args=True):
existing_gate_names = ['ch', 'cp', 'cx', 'cy', 'cz', 'crx', 'cry', 'crz', 'ccx', 'cswap',
'csx', 'cu', 'cu1', 'cu3', 'dcx', 'h', 'i', 'id', 'iden', 'iswap',
'ms', 'p', 'r', 'rx', 'rxx', 'ry', 'ryy', 'rz', 'rzx', 'rzz', 's',
'sdg', 'swap', 'sx', 'x', 'y', 'z', 't', 'tdg', 'u', 'u1', 'u2',
'u3']
gate_functions = [o for o in getmembers(QuantumCircuit) if isfunction(o[1]) and o[0] in existing_gate_names]
gate_to_gate = { g: set() for g in existing_gate_names }
done = set()
for gate, func in gate_functions:
for gate_, func_ in gate_functions:
if gate == gate_:
continue
if self.compare_gate_functions(func, func_, discard_named_args):
gate_to_gate[gate].add(gate_)
gate_to_gate[gate_].add(gate_)
done.add(gate)
done.add(gate_)
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp) # Not old_delete_timestamp!
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'lower')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'upper')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 2)
# Put new delete event
timestamp = next(ts_iter).internal
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
'lower', 'upper', meta_timestamp=meta_timestamp,
deleted=1))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp)
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 1)
# Put new event
timestamp = next(ts_iter).internal
meta_timestamp = next(ts_iter).internal
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
'lowerer', 'upperer', 3, 4,
meta_timestamp=meta_timestamp))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp)
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'lowerer')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'upperer')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 3)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 4)
# We'll use this later
in_between_timestamp = next(ts_iter).internal
# New update event, meta_timestamp increases
meta_timestamp = next(ts_iter).internal
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
'lowerer', 'upperer', 3, 4,
meta_timestamp=meta_timestamp))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp)
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'lowerer')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'upperer')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 3)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 4)
# Put event from after last put but before last post
timestamp = in_between_timestamp
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
'lowererer', 'uppererer', 5, 6,
meta_timestamp=meta_timestamp))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp)
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'lowererer')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'uppererer')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 5)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 6)
def test_merge_shard_ranges_deleted(self):
# Test ContainerBroker.merge_shard_ranges sets deleted attribute
ts_iter = make_timestamp_iter()
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
# put shard range
broker.merge_shard_ranges(ShardRange('a/o', next(ts_iter).internal))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM shard_range "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT count(*) FROM shard_range "
"WHERE deleted = 1").fetchone()[0], 0)
# delete shard range
broker.merge_shard_ranges(ShardRange('a/o', next(ts_iter).internal,
deleted=1))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM shard_range "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT count(*) FROM shard_range "
"WHERE deleted = 1").fetchone()[0], 1)
def test_make_tuple_for_pickle(self):
record = {'name': 'obj',
'created_at': '1234567890.12345',
'size': 42,
'content_type': 'text/plain',
'etag': 'hash_test',
'deleted': '1',
'storage_policy_index': '2',
'ctype_timestamp': None,
'meta_timestamp': None}
broker = ContainerBroker(':memory:', account='a', container='c')
expect = ('obj', '1234567890.12345', 42, 'text/plain', 'hash_test',
'1', '2', None, None)
result = broker.make_tuple_for_pickle(record)
self.assertEqual(expect, result)
record['ctype_timestamp'] = '2233445566.00000'
expect = ('obj', '1234567890.12345', 42, 'text/plain', 'hash_test',
'1', '2', '2233445566.00000', None)
result = broker.make_tuple_for_pickle(record)
self.assertEqual(expect, result)
record['meta_timestamp'] = '5566778899.00000'
expect = ('obj', '1234567890.12345', 42, 'text/plain', 'hash_test',
'1', '2', '2233445566.00000', '5566778899.00000')
result = broker.make_tuple_for_pickle(record)
self.assertEqual(expect, result)
@with_tempdir
def test_load_old_record_from_pending_file(self, tempdir):
# Test reading old update record from pending file
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(time(), 0)
record = {'name': 'obj',
'created_at': '1234567890.12345',
'size': 42,
'content_type': 'text/plain',
'etag': 'hash_test',
'deleted': '1',
'storage_policy_index': '2',
'ctype_timestamp': None,
'meta_timestamp': None}
# sanity check
self.assertFalse(os.path.isfile(broker.pending_file))
# simulate existing pending items written with old code,
# i.e. without content_type and meta timestamps
def old_make_tuple_for_pickle(_, record):
return (record['name'], record['created_at'], record['size'],
record['content_type'], record['etag'], record['deleted'],
record['storage_policy_index'])
_new = 'swift.container.backend.ContainerBroker.make_tuple_for_pickle'
with mock.patch(_new, old_make_tuple_for_pickle):
broker.put_record(dict(record))
self.assertTrue(os.path.getsize(broker.pending_file) > 0)
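# (the old-style 7-field tuple written above should be expanded back into a
# full record dict when the pending file is replayed; the missing
# content-type/meta timestamps default to None, as verified below)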
read_items = []
def mock_merge_items(_, item_list, *args):
# capture the items read from the pending file
read_items.extend(item_list)
with mock.patch('swift.container.backend.ContainerBroker.merge_items',
mock_merge_items):
broker._commit_puts()
self.assertEqual(1, len(read_items))
self.assertEqual(record, read_items[0])
self.assertTrue(os.path.getsize(broker.pending_file) == 0)
@with_tempdir
def test_save_and_load_record_from_pending_file(self, tempdir):
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(time(), 0)
record = {'name': 'obj',
'created_at': '1234567890.12345',
'size': 42,
'content_type': 'text/plain',
'etag': 'hash_test',
'deleted': '1',
'storage_policy_index': '2',
'ctype_timestamp': '1234567890.44444',
'meta_timestamp': '1234567890.99999'}
# sanity check
self.assertFalse(os.path.isfile(broker.pending_file))
broker.put_record(dict(record))
self.assertTrue(os.path.getsize(broker.pending_file) > 0)
read_items = []
def mock_merge_items(_, item_list, *args):
# capture the items read from the pending file
read_items.extend(item_list)
with mock.patch('swift.container.backend.ContainerBroker.merge_items',
mock_merge_items):
broker._commit_puts()
self.assertEqual(1, len(read_items))
self.assertEqual(record, read_items[0])
self.assertTrue(os.path.getsize(broker.pending_file) == 0)
def _assert_db_row(self, broker, name, timestamp, size, content_type, hash,
deleted=0):
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0], name)
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], size)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
content_type)
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0], hash)
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], deleted)
def _test_put_object_multiple_encoded_timestamps(self, broker):
ts = make_timestamp_iter()
broker.initialize(next(ts).internal, 0)
t = [next(ts) for _ in range(9)]
# Create initial object
broker.put_object('obj_name', t[0].internal, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t[0].internal, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
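# Note: encode_timestamps() from swift.common.utils packs the data,
# content-type and metadata timestamps into a single created_at string;
# the broker compares each component separately when merging an update,
# which is what the remaining assertions in this test exercise.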
# hash and size change with same data timestamp are ignored
t_encoded = encode_timestamps(t[0], t[1], t[1])
broker.put_object('obj_name', t_encoded, 456,
'application/x-test-2',
'1234567890abcdeffedcba0987654321')
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 123,
'application/x-test-2',
'5af83e3196bf99f440f31f2e1a6c9afe')
# content-type change with same timestamp is ignored
t_encoded = encode_timestamps(t[0], t[1], t[2])
broker.put_object('obj_name', t_encoded, 456,
'application/x-test-3',
'1234567890abcdeffedcba0987654321')
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 123,
'application/x-test-2',
'5af83e3196bf99f440f31f2e1a6c9afe')
# update with differing newer timestamps
t_encoded = encode_timestamps(t[4], t[6], t[8])
broker.put_object('obj_name', t_encoded, 789,
'application/x-test-3',
'abcdef1234567890abcdef1234567890')
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 789,
'application/x-test-3',
'abcdef1234567890abcdef1234567890')
# update with differing older timestamps should be ignored
t_encoded_older = encode_timestamps(t[3], t[5], t[7])
self.assertEqual(1, len(broker.get_items_since(0, 100)))
broker.put_object('obj_name', t_encoded_older, 9999,
'application/x-test-ignored',
'ignored_hash')
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 789,
'application/x-test-3',
'abcdef1234567890abcdef1234567890')
def test_put_object_multiple_encoded_timestamps_using_memory(self):
# Test ContainerBroker.put_object with differing data, content-type
# and metadata timestamps
broker = ContainerBroker(':memory:', account='a', container='c')
self._test_put_object_multiple_encoded_timestamps(broker)
@with_tempdir
def test_get_db_state(self, tempdir):
acct = 'account'
cont = 'container'
hsh = hash_path(acct, cont)
db_file = "%s.db" % hsh
epoch = Timestamp.now()
fresh_db_file = "%s_%s.db" % (hsh, epoch.normal)
db_path = os.path.join(tempdir, db_file)
fresh_db_path = os.path.join(tempdir, fresh_db_file)
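# Naming convention used below: the retiring DB lives at <hash>.db and the
# fresh DB created for sharding at <hash>_<epoch>.db; get_db_state() is
# derived from which of these files exist and, once only the fresh DB
# remains, from the shard ranges it contains.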
ts = Timestamp.now()
# First test NOTFOUND state
broker = ContainerBroker(db_path, account=acct, container=cont)
self.assertEqual(broker.get_db_state(), 'not_found')
# Test UNSHARDED state, that is when db_file exists and fresh_db_file
# doesn't
broker.initialize(ts.internal, 0)
self.assertEqual(broker.get_db_state(), 'unsharded')
# Test the SHARDING state, this is the period when both the db_file and
# the fresh_db_file exist
fresh_broker = ContainerBroker(fresh_db_path, account=acct,
container=cont, force_db_file=True)
fresh_broker.initialize(ts.internal, 0)
own_shard_range = fresh_broker.get_own_shard_range()
own_shard_range.update_state(ShardRange.SHARDING)
own_shard_range.epoch = epoch
shard_range = ShardRange(
'.shards_%s/%s' % (acct, cont), Timestamp.now())
fresh_broker.merge_shard_ranges([own_shard_range, shard_range])
self.assertEqual(fresh_broker.get_db_state(), 'sharding')
# old broker will also change state if we reload its db files
broker.reload_db_files()
self.assertEqual(broker.get_db_state(), 'sharding')
# Test the SHARDED state, this is when only fresh_db_file exists.
os.unlink(db_path)
fresh_broker.reload_db_files()
self.assertEqual(fresh_broker.get_db_state(), 'sharded')
# Test the COLLAPSED state, this is when only fresh_db_file exists but
# there are no other (undeleted) shard ranges.
shard_range.deleted = 1
shard_range.timestamp = Timestamp.now()
fresh_broker.merge_shard_ranges([shard_range])
self.assertEqual(fresh_broker.get_db_state(), 'collapsed')
# back to UNSHARDED if the desired epoch changes
own_shard_range.update_state(ShardRange.SHRINKING,
state_timestamp=Timestamp.now())
own_shard_range.epoch = Timestamp.now()
fresh_broker.merge_shard_ranges([own_shard_range])
self.assertEqual(fresh_broker.get_db_state(), 'unsharded')
@with_tempdir
def test_db_file(self, tempdir):
acct = 'account'
cont = 'container'
hsh = hash_path(acct, cont)
db_file = "%s.db" % hsh
ts_epoch = Timestamp.now()
fresh_db_file = "%s_%s.db" % (hsh, ts_epoch.normal)
db_path = os.path.join(tempdir, db_file)
fresh_db_path = os.path.join(tempdir, fresh_db_file)
ts = Timestamp.now()
# First test NOTFOUND state, this will return the db_file passed
# in the constructor
def check_unfound_db_files(broker, init_db_file):
self.assertEqual(init_db_file, broker.db_file)
self.assertEqual(broker._db_file, db_path)
self.assertFalse(os.path.exists(db_path))
self.assertFalse(os.path.exists(fresh_db_path))
self.assertEqual([], broker.db_files)
broker = ContainerBroker(db_path, account=acct, container=cont)
check_unfound_db_files(broker, db_path)
broker = ContainerBroker(fresh_db_path, account=acct, container=cont)
check_unfound_db_files(broker, fresh_db_path)
# Test UNSHARDED state, that is when db_file exists and fresh_db_file
# doesn't, so it should return the db_path
def check_unsharded_db_files(broker):
self.assertEqual(broker.db_file, db_path)
self.assertEqual(broker._db_file, db_path)
self.assertTrue(os.path.exists(db_path))
self.assertFalse(os.path.exists(fresh_db_path))
self.assertEqual([db_path], broker.db_files)
broker = ContainerBroker(db_path, account=acct, container=cont)
broker.initialize(ts.internal, 0)
check_unsharded_db_files(broker)
broker = ContainerBroker(fresh_db_path, account=acct, container=cont)
check_unsharded_db_files(broker)
# while UNSHARDED, db_path is still used even though fresh_db_path was
# given to the constructor, so this broker cannot be initialized again
with self.assertRaises(DatabaseAlreadyExists):
broker.initialize(ts.internal, 0)
# Test the SHARDING state, this is the period when both the db_file and
# the fresh_db_file exist, in this case it should return the
# fresh_db_path.
def check_sharding_db_files(broker):
self.assertEqual(broker.db_file, fresh_db_path)
self.assertEqual(broker._db_file, db_path)
self.assertTrue(os.path.exists(db_path))
self.assertTrue(os.path.exists(fresh_db_path))
self.assertEqual([db_path, fresh_db_path], broker.db_files)
# Use force_db_file to have fresh_db_path created when initializing
broker = ContainerBroker(fresh_db_path, account=acct,
container=cont, force_db_file=True)
self.assertEqual([db_path], broker.db_files)
broker.initialize(ts.internal, 0)
check_sharding_db_files(broker)
broker = ContainerBroker(db_path, account=acct, container=cont)
check_sharding_db_files(broker)
broker = ContainerBroker(fresh_db_path, account=acct, container=cont)
check_sharding_db_files(broker)
# -*- coding: utf-8 -*-
# ===========================================================================
# Copyright 2016-2017 TrungNT
# ===========================================================================
from __future__ import print_function, division, absolute_import
import os
import sys
import inspect
import marshal
import warnings
from array import array
from six.moves import builtins
from collections import OrderedDict, defaultdict
from collections.abc import MutableMapping, Mapping
from functools import wraps, partial
from six import string_types
from six.moves import zip, zip_longest, cPickle
import types
import numpy as np
__all__ = [
'typecheck',
'autoattr',
'abstractstatic',
'functionable',
'singleton'
]
# ===========================================================================
# Type enforcement
# ===========================================================================
def _info(fname, expected, actual, flag):
'''Convenience function outputs nicely formatted error/warning msg.'''
def to_str(t):
s = []
for i in t:
if not isinstance(i, (tuple, list)):
s.append(str(i).split("'")[1])
else:
s.append('(' + ', '.join([str(j).split("'")[1] for j in i]) + ')')
return ', '.join(s)
expected, actual = to_str(expected), to_str(actual)
ftype = 'method'
msg = "'{}' {} ".format(fname, ftype) \
+ ("expects inputs", "expects outputs")[flag] + " ({}), but ".format(expected) \
+ ("was given", "the result is")[flag] + " ({})".format(actual)
return msg
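# Illustrative output (not from the original source): with the wording above,
# _info('function', (int, str, float), (int, str, int), 0) returns
# "'function' method expects inputs (int, str, float), but was given (int, str, int)"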
def _compares_types(argtype, force_types):
# True if types is satisfied the force_types
for i, j in zip(argtype, force_types):
if isinstance(j, (tuple, list)):
if i not in j:
return False
elif i != j:
return False
return True
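# A quick illustration (not part of the original module): a tuple/list entry
# in force_types acts as an "any of these" alternative, and comparison stops
# at the shorter of the two sequences because zip() does.
# _compares_types((int, str), (int, (str, bytes)))   # -> True
# _compares_types((int, int), (int, (str, bytes)))   # -> False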
def typecheck(inputs=None, outputs=None, debug=2):
'''Function/Method decorator. Checks decorated function's arguments are
of the expected types.
Parameters
----------
inputs : types
The expected types of the inputs to the decorated function.
Must specify a type for each parameter.
outputs : types
The expected type of the decorated function's return value.
Must specify a type for each returned value.
debug : int, str
Optional specification of 'debug' level:
0:'ignore', 1:'warn', 2:'raise'
Examples
--------
>>> # Function typecheck
>>> @typecheck(inputs=(int, str, float), outputs=(str))
>>> def function(a, b, c):
... return b
>>> function(1, '1', 1.) # no error
>>> function(1, '1', 1) # error, final argument must be float
...
>>> # method typecheck
>>> class ClassName(object):
... @typecheck(inputs=(str, int), outputs=int)
... def method(self, a, b):
... return b
>>> x = ClassName()
>>> x.method('1', 1) # no error
>>> x.method(1, '1') # error
'''
if inspect.ismethod(inputs) or inspect.isfunction(inputs):
raise ValueError('You must specify either [inputs] types or [outputs]'
' types arguments.')
# ====== parse debug ====== #
if isinstance(debug, str):
debug_str = debug.lower()
if 'raise' in debug_str:
debug = 2
elif 'warn' in debug_str:
debug = 1
else:
debug = 0
elif debug not in (0, 1, 2):
debug = 2
# ====== check types ====== #
if inputs is not None and not isinstance(inputs, (tuple, list)):
inputs = (inputs,)
if outputs is not None and not isinstance(outputs, (tuple, list)):
outputs = (outputs,)
def wrap_function(func):
# ====== fetch arguments order ====== #
sign = inspect.signature(func)
args_name = []
args_defaults = OrderedDict()
for n, p in sign.parameters.items():
if p.kind in (inspect.Parameter.VAR_POSITIONAL,
inspect.Parameter.VAR_KEYWORD):
continue
args_name.append(n)
if p.default != inspect.Parameter.empty:
args_defaults[n] = p.default
@wraps(func)
def wrapper(*args, **kwargs):
input_args = list(args)
excluded = {i: j for i, j in zip(args_name, input_args)}
# check default kwargs
for i, j in args_defaults.items():
if i in excluded: # already input as positional argument
continue
if i in kwargs: # specified value
input_args.append(kwargs[i])
else: # default value
input_args.append(j)
### main logic
if debug == 0: # ignore
return func(*args, **kwargs)
### Check inputs
if inputs is not None:
# main logic
length = int(min(len(input_args), len(inputs)))
argtypes = tuple(map(type, input_args))
# TODO: smarter way to check argtypes for methods
if not _compares_types(argtypes[:length], inputs[:length]) and\
not _compares_types(argtypes[1:length + 1], inputs[:length]): # wrong types
msg = _info(func.__name__, inputs, argtypes, 0)
if debug == 1:
print('TypeWarning:', msg)
elif debug == 2:
raise TypeError(msg)
### get results
results = func(*args, **kwargs)
### Check outputs
if outputs is not None:
res_types = ((type(results),)
if not isinstance(results, (tuple, list))
else tuple(map(type, results)))
length = min(len(res_types), len(outputs))
if len(outputs) > len(res_types) or \
not _compares_types(res_types[:length], outputs[:length]):
msg = _info(func.__name__, outputs, res_types, 1)
if debug == 1:
print('TypeWarning: ', msg)
elif debug == 2:
raise TypeError(msg)
### finally everything ok
return results
return wrapper
return wrap_function
# ===========================================================================
# Auto set attributes
# ===========================================================================
def autoattr(*args, **kwargs):
'''
Example
-------
>>> class ClassName(object):
..... def __init__(self):
......... super(ClassName, self).__init__()
......... self.arg1 = 1
......... self.arg2 = False
...... @autoattr('arg1', arg1=lambda x: x + 1)
...... def test1(self):
......... print(self.arg1)
...... @autoattr('arg2')
...... def test2(self):
......... print(self.arg2)
>>> c = ClassName()
>>> c.test1() # arg1 = 2
>>> c.test2() # arg2 = True
'''
if len(args) > 0 and (inspect.ismethod(args[0]) or inspect.isfunction(args[0])):
raise ValueError('You must specify at least 1 *args or **kwargs; all '
'attributes in *args will be set to True, and all '
'attributes in **kwargs will be set to the given '
'value.')
attrs = {i: True for i in args}
attrs.update(kwargs)
def wrap_function(func):
@wraps(func)
def wrapper(*args, **kwargs):
results = func(*args, **kwargs)
if len(args) > 0:
for i, j in attrs.items():
if hasattr(args[0], i):
if hasattr(j, '__call__'):
setattr(args[0], str(i), j(getattr(args[0], i)))
else:
setattr(args[0], str(i), j)
return results
return wrapper
return wrap_function
# ===========================================================================
# Abstract static
# ===========================================================================
class abstractstatic(staticmethod):
__slots__ = ()
def __init__(self, function):
super(abstractstatic, self).__init__(function)
function.__isabstractmethod__ = True
__isabstractmethod__ = True
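# Usage sketch (assumed typical pattern, not taken from this module): pair it
# with an ABCMeta-based class so instantiation is blocked until the static
# method is overridden.
# >>> import abc
# >>> class Base(metaclass=abc.ABCMeta):
# ...     @abstractstatic
# ...     def build():
# ...         raise NotImplementedError
# >>> class Impl(Base):
# ...     @staticmethod
# ...     def build():
# ...         return 'built'
# >>> Impl.build()   # 'built'; Base() would raise TypeError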
# ===========================================================================
# Python utilities
# ===========================================================================
_primitives = (bool, int, float, str,
tuple, list, dict, type, types.ModuleType, types.FunctionType,
type(None), type(type), np.ndarray)
def func_to_str(func):
# marshal the code object to bytes, wrap it in an array and pickle it
code = cPickle.dumps(array("B", marshal.dumps(func.__code__)),
protocol=cPickle.HIGHEST_PROTOCOL)
closure = None
if func.__closure__ is not None:
print("[WARNING] function: %s contains closure, which cannot be "
"serialized." % str(func))
closure = tuple([c.cell_contents for c in func.__closure__])
defaults = func.__defaults__
return (code, closure, defaults)
def str_to_func(s, sandbox=None):
if isinstance(s, (tuple, list)):
code, closure, defaults = s
elif isinstance(s, string_types): # path to file
if os.path.isfile(s):
with open(s, 'rb') as f:
code, closure, defaults = cPickle.load(f)
else: # pickled string
code, closure, defaults = cPickle.loads(s)
else:
raise ValueError("Unsupport str_to_func for type:%s" % type(s))
code = marshal.loads(cPickle.loads(code).tobytes())
func = types.FunctionType(code=code, name=code.co_name,
globals=sandbox if isinstance(sandbox, Mapping) else globals(),
closure=closure, argdefs=defaults)
return func
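# Round-trip sketch (illustrative only; the example function is made up):
# func_to_str() returns a (code, closure, defaults) tuple that str_to_func()
# can turn back into a callable, provided the function has no closure.
# >>> def add(a, b=1):
# ...     return a + b
# >>> payload = func_to_str(add)                 # marshalled code + defaults
# >>> restored = str_to_func(payload, sandbox=globals())
# >>> restored(2)                                # -> 3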
def _serialize_function_sandbox(function, source):
'''Serialize a function's global environment (e.g. globals()) into a
picklable sandbox dictionary.
Parameters
----------
function : FunctionType
the function whose environment is captured
source : str
source code of the function
Returns
-------
dictionary : cPickle dumps-able dictionary to store as text
'''
import re
sys_module = re.compile(r"__\w+__")
environment = function.__globals__
func_module = function.__module__
sandbox = OrderedDict()
# ====== serialize primitive type ====== #
seen_main_function = False
for name, val in environment.items():
typ = None
# ignore system modules
if sys_module.match(name) is not None:
continue
# support primitive type
if builtins.any(isinstance(val, i) for i in _primitives):
typ = type(val)
if isinstance(val, np.ndarray):
val = (val.tostring(), val.dtype)
typ = 'ndarray'
# special case: import module
elif isinstance(val, types.ModuleType):
val = val.__name__
typ = 'module'
# edward distribution
elif isinstance(val, type) and str(val.__module__) == 'abc' and \
str(type(val).__module__) == "tensorflow.contrib.distributions.python.ops.distribution":
val = val.__name__
typ = 'edward_distribution'
# the FunctionType itself cannot be pickled (weird!)
elif val is types.FunctionType:
val = None
typ = 'function_type'
# tag None with a plain string because pickle cannot serialize the
# NoneType class (note: None also matches the primitives check above,
# which takes precedence over this branch)
elif val is None:
val = None
typ = 'None'
elif isinstance(val, Mapping):
val = cPickle.dumps(val, protocol=cPickle.HIGHEST_PROTOCOL)
typ = 'Mapping'
elif inspect.isfunction(val): # special case: function
# the function might be nested, so it may not be found in globals()
if val == function:
seen_main_function = True
# suffix marks the main function so it can be recognized on deserialization
suffix = '_main' if val == function else ''
# imported function
if val.__module__ != func_module:
typ = 'imported_function'
val = (val.__name__, val.__module__)
# defined function in the same script file
else:
typ = 'defined_function'
val = func_to_str(val)
typ += suffix
# finally add to sandbox valid type
if typ is not None:
sandbox[name] = (typ, val)
# ====== not seen the main function ====== #
if not seen_main_function: # mark the main function with "_main"
sandbox['random_name_1234'] = ('defined_function_main',
func_to_str(function))
return sandbox
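# Illustrative shape of the returned sandbox (hypothetical globals that hold
# "import numpy as np" and "CONST = 1", with the target function not defined
# at module level):
# OrderedDict([('np', ('module', 'numpy')),
#              ('CONST', (int, 1)),
#              ('random_name_1234', ('defined_function_main', func_to_str(function)))])
# i.e. every entry is a (type_tag, serialized_value) pair keyed by the name of
# the global; the '_main' suffix marks the serialized target function itself.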
def _deserialize_function_sandbox(sandbox):
'''Rebuild the environment dictionary created by
`_serialize_function_sandbox`.
'''
import marshal
with warnings.catch_warnings():
warnings.filterwarnings(action='ignore', category=ImportWarning)
import importlib
environment = {}
defined_function = []
main_func = None
# first pass we deserialize all type except function type
for name, (typ, val) in sandbox.items():
if isinstance(typ, string_types):
if typ == 'None':
val = None
elif typ == 'edward_distribution':
try:
import edward
val = getattr(edward.models, val)
except ImportError:
raise ImportError("Cannot import 'edward' library to deserialize "
"the function.")
# exec("from edward.models import |