repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
caterinaurban/Lyra | src/lyra/frontend/cfg_generator.py | 1 | 53675 | import ast
import optparse
import sys
from lyra.core.cfg import *
from lyra.core.expressions import Literal
from lyra.core.statements import *
from lyra.core.types import IntegerLyraType, BooleanLyraType, resolve_type_annotation, \
FloatLyraType, ListLyraType, TupleLyraType, StringLyraType, DictLyraType, SetLyraType
from lyra.visualization.graph_renderer import CFGRenderer
class LooseControlFlowGraph:
class SpecialEdgeType(Enum):
BREAK = 1
CONTINUE = 2
def __init__(self, nodes: Set[Node] = None, in_node: Node = None, out_node: Node = None,
edges: Set[Edge] = None,
loose_in_edges=None, loose_out_edges=None, both_loose_edges=None):
"""Loose control flow graph representation.
This representation uses a complete (non-loose) control flow graph via aggregation
and adds loose edges and some transformation methods to combine, prepend and
append loose control flow graphs. This class intentionally does not provide
access to the linked CFG. The completed CFG can finally be retrieved with
`eject()`.
:param nodes: optional set of nodes of the control flow graph
:param in_node: optional entry node of the control flow graph
:param out_node: optional exit node of the control flow graph
:param edges: optional set of edges of the control flow graph
:param loose_in_edges: optional set of loose edges
that have no start yet and end inside this CFG
:param loose_out_edges: optional set of loose edges
that start inside this CFG and have no end yet
:param both_loose_edges: optional set of loose edges, loose on both ends
"""
assert not in_node or not (loose_in_edges or both_loose_edges)
assert not out_node or not (loose_out_edges or both_loose_edges)
assert all([e.source is None for e in loose_in_edges or []])
assert all([e.target is None for e in loose_out_edges or []])
assert all([e.source is None and e.target is None for e in both_loose_edges or []])
self._cfg = ControlFlowGraph(nodes or set(), in_node, out_node, edges or set())
self._loose_in_edges = loose_in_edges or set()
self._loose_out_edges = loose_out_edges or set()
self._both_loose_edges = both_loose_edges or set()
self._special_edges = []
@property
def nodes(self) -> Dict[int, Node]:
return self._cfg.nodes
@property
def in_node(self) -> Node:
return self._cfg.in_node
@in_node.setter
def in_node(self, node):
self._cfg._in_node = node
@property
def out_node(self) -> Node:
return self._cfg.out_node
@out_node.setter
def out_node(self, node):
self._cfg._out_node = node
@property
def edges(self) -> Dict[Tuple[Node, Node], Edge]:
return self._cfg.edges
@property
def loose_in_edges(self) -> Set[Edge]:
return self._loose_in_edges
@property
def loose_out_edges(self) -> Set[Edge]:
return self._loose_out_edges
@property
def both_loose_edges(self) -> Set[Edge]:
return self._both_loose_edges
@property
def special_edges(self) -> List[Tuple[Edge, SpecialEdgeType]]:
return self._special_edges
@property
def cfgs(self):
return self._cfgs
def loose(self):
loose = len(self.loose_in_edges) or len(self.loose_out_edges) or len(self.both_loose_edges)
return loose or len(self.special_edges)
def add_node(self, node):
self.nodes[node.identifier] = node
def add_edge(self, edge):
"""Add a (loose/normal) edge to this loose CFG.
"""
if not edge.source and not edge.target:
self.both_loose_edges.add(edge)
self._cfg._in_node = None
self._cfg._out_node = None
elif not edge.source:
self.loose_in_edges.add(edge)
self._cfg._in_node = None
elif not edge.target:
self.loose_out_edges.add(edge)
self._cfg._out_node = None
else:
self.edges[edge.source, edge.target] = edge
def remove_edge(self, edge):
del self.edges[(edge.source, edge.target)]
def remove_node(self, node):
"""Remove a node and all its out edges from the CFG.
"""
edges_to_be_removed = self.get_edges_with_source(node)
del self.nodes[node.identifier]
nodes_to_be_removed = []
for edge_to_be_removed in edges_to_be_removed:
target = edge_to_be_removed.target
self.remove_edge(edge_to_be_removed)
if target is not self.out_node and len(self.get_edges_with_target(target)) == 0:
nodes_to_be_removed.append(target)
for node_to_be_removed in nodes_to_be_removed:
self.remove_node(node_to_be_removed)
def get_edges_with_source(self, source):
return [edge for edge in self.edges.values() if edge.source is source]
def get_edges_with_target(self, target):
return [edge for edge in self.edges.values() if edge.target is target]
def combine(self, other):
assert not (self.in_node and other.in_node)
assert not (self.out_node and other.out_node)
self.nodes.update(other.nodes)
self.edges.update(other.edges)
self.loose_in_edges.update(other.loose_in_edges)
self.loose_out_edges.update(other.loose_out_edges)
self.both_loose_edges.update(other.both_loose_edges)
self.special_edges.extend(other.special_edges)
self._cfg._in_node = other.in_node or self.in_node # agree on in_node
self._cfg._out_node = other.out_node or self.out_node # agree on out_node
return self
def prepend(self, other):
other.append(self)
self.replace(other)
def append(self, other):
assert not (self.loose_out_edges and other.loose_in_edges)
assert not self.both_loose_edges or (
not other.loose_in_edges and not other.both_loose_edges)
self.nodes.update(other.nodes)
self.edges.update(other.edges)
edge_added = False
if self.loose_out_edges:
edge_added = True
for e in self.loose_out_edges:
e._target = other.in_node
# updated/created edge is not yet in edge dict -> add
self.edges[(e.source, e.target)] = e
# clear loose edge sets
self._loose_out_edges = set()
elif other.loose_in_edges:
edge_added = True
for e in other.loose_in_edges:
e._source = self.out_node
# updated/created edge is not yet in edge dict -> add
self.edges[(e.source, e.target)] = e
# clear loose edge set
other._loose_in_edges = set()
if self.both_loose_edges:
edge_added = True
for e in self.both_loose_edges:
e._target = other.in_node
self.add_edge(e) # updated/created edge is not yet in edge dict -> add
# clear loose edge set
self._both_loose_edges = set()
elif other.both_loose_edges:
edge_added = True
for e in other.both_loose_edges:
e._source = self.out_node
self.add_edge(e) # updated/created edge is not yet in edge dict -> add
# clear loose edge set
other._both_loose_edges = set()
if not edge_added:
# neither of the CFGs has loose ends -> add unconditional edge
e = Unconditional(self.out_node, other.in_node)
# updated/created edge is not yet in edge dict -> add
self.edges[(e.source, e.target)] = e
# in any case, transfer loose_out_edges of other to self
self.loose_out_edges.update(other.loose_out_edges)
self.special_edges.extend(other.special_edges)
self._cfg._out_node = other.out_node
return self
def eject(self) -> ControlFlowGraph:
if self.loose():
error = 'This control flow graph is still loose'
error += ' and cannot eject a complete control flow graph!'
raise TypeError(error)
return self._cfg
def replace(self, other):
self.__dict__.update(other.__dict__)
def _dummy_node(id_gen):
return Basic(id_gen.next)
def _dummy_cfg(id_gen):
dummy = _dummy_node(id_gen)
return LooseControlFlowGraph({dummy}, dummy, dummy, set())
class CFGFactory:
"""
A helper class that encapsulates a partial CFG and possibly some statements
not yet attached to it. Whenever the method `complete_basic_block()` is called,
all unattached statements are properly attached to the partial CFG.
The partial CFG can be retrieved at any time via the `cfg` property.
"""
def __init__(self, id_gen):
self._stmts = []
self._cfg = None
self._id_gen = id_gen
@property
def cfg(self):
return self._cfg
def prepend_cfg(self, other):
if self._cfg is not None:
self._cfg.prepend(other)
else:
self._cfg = other
return self._cfg
def append_cfg(self, other):
if self._cfg is not None:
if self._cfg.loose_out_edges and other.loose_in_edges:
self._cfg.append(_dummy_cfg(self._id_gen))
self._cfg.append(other)
else:
self._cfg = other
return self._cfg
def add_stmts(self, stmts):
"""
Adds statements to the currently open block.
:param stmts: a single statement or an iterable of statements
:return:
"""
if isinstance(stmts, List):
self._stmts.extend(stmts)
else:
self._stmts.append(stmts)
def complete_basic_block(self):
if self._stmts:
block = Basic(self._id_gen.next, self._stmts)
self.append_cfg(LooseControlFlowGraph({block}, block, block, set()))
self._stmts = []
def incomplete_block(self):
return len(self._stmts) > 0
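# Added note (not part of the original module): a minimal sketch of how a
# CFGFactory is typically driven by the visitor below. The names `id_gen` and
# `stmt` are hypothetical placeholders.
#
#   factory = CFGFactory(id_gen)
#   factory.add_stmts(stmt)           # buffer a statement (or a list of them)
#   factory.complete_basic_block()    # wrap the buffer into a Basic node
#   partial_cfg = factory.cfg         # partial CFG, retrievable at any time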
# noinspection PyPep8Naming
class CFGVisitor(ast.NodeVisitor):
"""AST visitor that generates a CFG for each function."""
class NodeIdentifierGenerator:
"""Helper class that generates an increasing sequence of node identifiers."""
def __init__(self):
self._next = 0
@property
def next(self):
self._next += 1
return self._next
def __init__(self):
super().__init__()
self._id_gen = CFGVisitor.NodeIdentifierGenerator()
self._cfgs = {}
self._fdefs = dict()
def visit(self, node, *args, **kwargs):
"""Visit an AST node.
:param node: node to be visited
:return: either a statement or a partial CFG, depending on the visited node
:keyword arguments:
* *types* -- dictionary mapping (variable) names to their corresponding (lyra) type
* *typ* -- type of the current node
"""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
print(type(node).__name__)
raise NotImplementedError(f"Visit of {node.__class__.__name__} is unsupported!")
# Literals
# noinspection PyUnusedLocal
def visit_Num(self, node, types=None, typ=None, fname=''):
"""Visitor function for a number (integer, float, or complex).
The n attribute stores the value, already converted to the relevant type."""
pp = ProgramPoint(node.lineno, node.col_offset)
if isinstance(node.n, int):
expr = Literal(IntegerLyraType(), str(node.n))
return LiteralEvaluation(pp, expr)
elif isinstance(node.n, float):
expr = Literal(FloatLyraType(), str(node.n))
return LiteralEvaluation(pp, expr)
raise NotImplementedError(f"Num of type {node.n.__class__.__name__} is unsupported!")
# noinspection PyMethodMayBeStatic, PyUnusedLocal
def visit_Str(self, node, types=None, typ=None, fname=''):
"""Visitor function for a string. The s attribute stores the value."""
pp = ProgramPoint(node.lineno, node.col_offset)
expr = Literal(StringLyraType(), node.s)
return LiteralEvaluation(pp, expr)
def visit_List(self, node, types=None, typ=None, fname=''):
"""Visitor function for a list.
The elts attribute stores a list of nodes representing the elements.
The ctx attribute is Store if the container is an assignment target, and Load otherwise."""
pp = ProgramPoint(node.lineno, node.col_offset)
if isinstance(typ, list): # this comes from a subscript
items = [self.visit(item, types, typ[1], fname=fname) for item in node.elts]
itm_typ = items[0].typ if isinstance(typ[1], list) else typ[1]
return ListDisplayAccess(pp, ListLyraType(itm_typ), items)
if isinstance(typ, ListLyraType):
items = [self.visit(item, types, typ.typ, fname=fname) for item in node.elts]
return ListDisplayAccess(pp, typ, items)
else:
items = [self.visit(item, types, None, fname=fname) for item in node.elts]
if isinstance(items[0], LiteralEvaluation):
itm_typ = items[0].literal.typ
else:
itm_typ = items[0].typ
return ListDisplayAccess(pp, ListLyraType(itm_typ), items)
def visit_Tuple(self, node, types=None, typ=None, fname=''):
"""Visitor function for a tuple.
The elts attribute stores a list of nodes representing the elements.
The ctx attribute is Store if the container is an assignment target, and Load otherwise."""
pp = ProgramPoint(node.lineno, node.col_offset)
if isinstance(typ, list): # this comes from a subscript
items = [self.visit(item, types, None, fname=fname) for item in node.elts]
return TupleDisplayAccess(pp, TupleLyraType([]), items) # TODO: fix me
if isinstance(typ, TupleLyraType):
zipped = zip(node.elts, typ.typs)
items = [self.visit(item, types, i_typ, fname=fname) for item, i_typ in zipped]
return TupleDisplayAccess(pp, typ, items)
else:
items = [self.visit(item, types, None, fname=fname) for item in node.elts]
typs = list()
for item in items:
if isinstance(item, LiteralEvaluation):
typs.append(item.literal.typ)
else:
typs.append(item.typ)
return TupleDisplayAccess(pp, TupleLyraType(typs), items)
def visit_Set(self, node, types=None, typ=None, fname=''):
"""Visitor function for a set.
The elts attribute stores a list of nodes representing the elements."""
pp = ProgramPoint(node.lineno, node.col_offset)
if isinstance(typ, list): # this comes from a subscript
items = [self.visit(item, types, typ[1], fname=fname) for item in node.elts]
itm_typ = items[0].typ if isinstance(typ[1], list) else typ[1]
return SetDisplayAccess(pp, SetLyraType(itm_typ), items)
if isinstance(typ, SetLyraType):
items = [self.visit(item, types, typ.typ, fname=fname) for item in node.elts]
return SetDisplayAccess(pp, typ, items)
else:
items = [self.visit(item, types, None, fname=fname) for item in node.elts]
if isinstance(items[0], LiteralEvaluation):
itm_typ = items[0].literal.typ
else:
itm_typ = items[0].typ
return SetDisplayAccess(pp, SetLyraType(itm_typ), items)
def visit_Dict(self, node, types=None, typ=None, fname=''):
"""Visitor function for a dictionary.
The attributes keys and values store lists of nodes with matching order
representing the keys and the values, respectively."""
pp = ProgramPoint(node.lineno, node.col_offset)
if isinstance(typ, list): # this comes from a subscript
keys = [self.visit(key, types, typ[0], fname=fname) for key in node.keys]
values = [self.visit(value, types, typ[1], fname=fname) for value in node.values]
val_typ = values[0].typ if isinstance(typ[1], list) else typ[1]
return DictDisplayAccess(pp, DictLyraType(typ[0], val_typ), keys, values)
if isinstance(typ, DictLyraType):
keys = [self.visit(key, types, typ.key_typ, fname=fname) for key in node.keys]
values = [self.visit(value, types, typ.val_typ, fname=fname) for value in node.values]
return DictDisplayAccess(pp, typ, keys, values)
else:
keys = [self.visit(key, types, None, fname=fname) for key in node.keys]
if isinstance(keys[0], LiteralEvaluation):
key_typ = keys[0].literal.typ
else:
key_typ = keys[0].typ
values = [self.visit(value, types, None, fname=fname) for value in node.values]
if isinstance(values[0], LiteralEvaluation):
val_typ = values[0].literal.typ
else:
val_typ = values[0].typ
return DictDisplayAccess(pp, DictLyraType(key_typ, val_typ), keys, values)
# noinspection PyUnusedLocal
def visit_NameConstant(self, node, types=None, typ=None, fname=''):
"""Visitor function for True, False or None.
The value attribute stores the constant."""
if isinstance(node.value, bool):
pp = ProgramPoint(node.lineno, node.col_offset)
expr = Literal(BooleanLyraType(), str(node.value))
return LiteralEvaluation(pp, expr)
raise NotImplementedError(f"Constant {node.value.__class__.__name__} is unsupported!")
# Variables
def visit_Name(self, node, types=None, typ=None, fname=''):
"""Visitor function for a variable name.
The attribute id stores the name as a string.
The attribute ctx is Store (to assign a new value to the variable),
Load (to load the value of the variable), or Del (to delete the variable)."""
name = fname + "#" + node.id if fname else node.id
pp = ProgramPoint(node.lineno, node.col_offset)
if isinstance(node.ctx, ast.Store):
if name not in types:
if typ:
types[name] = typ
else:
raise ValueError(f"Missing type annotation for variable {name}!")
expr = VariableIdentifier(types[name], name)
return VariableAccess(pp, types[name], expr)
if isinstance(node.ctx, ast.Load):
if name in types:
_name = name
else:
_name = name.replace(fname + '#', '')
assert _name in types
# assert types[name] == typ or typ is None
expr = VariableIdentifier(types[_name], _name)
return VariableAccess(pp, types[_name], expr)
assert isinstance(node.ctx, ast.Del)
raise NotImplementedError(f"Name deletion is unsupported!")
# Expressions
# noinspection PyUnusedLocal
def visit_Expr(self, node, types=None, typ=None, fname=''):
"""Visitor function for an expression statement (whose return value is unused).
The attribute value stores another AST node."""
return self.visit(node.value, types, fname=fname)
def visit_UnaryOp(self, node, types=None, typ=None, fname=''):
"""Visitor function for a unary operation.
The attributes op and operand store the operator
and any expression node, respectively."""
pp = ProgramPoint(node.lineno, node.col_offset)
name = type(node.op).__name__.lower()
argument = self.visit(node.operand, types, typ, fname=fname)
return Call(pp, name, [argument], typ)
def visit_BinOp(self, node, types=None, typ=None, fname=''):
"""Visitor function for a binary operation.
The attributes op, left, and right store the operator
and any expression nodes, respectively."""
pp = ProgramPoint(node.lineno, node.col_offset)
name = type(node.op).__name__.lower()
left = self.visit(node.left, types, typ, fname=fname)
right = self.visit(node.right, types, typ, fname=fname)
return Call(pp, name, [left, right], typ)
def visit_BoolOp(self, node, types=None, typ=None, fname=''):
"""Visitor function for a boolean operation.
The attributes op and values store the operator
and a list of expression nodes representing the operands involved, respectively."""
pp = ProgramPoint(node.lineno, node.col_offset)
name = type(node.op).__name__.lower()
arguments = [self.visit(val, types, typ, fname=fname) for val in node.values]
return Call(pp, name, arguments, typ)
def visit_Compare(self, node, types=None, typ=None, fname=''):
"""Visitor function for a comparison operation.
The attributes left, ops, and comparators store the first value in the comparison,
the list of operators, and the list of compared values after the first."""
pp = ProgramPoint(node.lineno, node.col_offset)
assert isinstance(typ, BooleanLyraType) # we expect typ to be a BooleanLyraType
left = self.visit(node.left, types, None, fname=fname)
name = type(node.ops[0]).__name__.lower()
second = self.visit(node.comparators[0], types, None, fname=fname)
result = Call(pp, name, [left, second], typ)
for op, comparator in zip(node.ops[1:], node.comparators[1:]):
name = type(op).__name__.lower()
right = self.visit(comparator, types, None, fname=fname)
current = Call(pp, name, [second, right], typ)
result = Call(pp, 'and', [result, current], typ)
second = right
return result
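# Illustration (added comment, not original code): a chained comparison such as
#   a < b <= c
# is decomposed by visit_Compare above into nested calls, conceptually
#   Call('and', [Call('lt', [a, b]), Call('lte', [b, c])])
# where each operator name is derived via type(op).__name__.lower().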
def visit_Call(self, node, types=None, typ=None, fname=''):
"""Visitor function for a call.
The attribute func stores the function being called (often a Name or Attribute object).
The attribute args stores a list of the arguments passed by position."""
pp = ProgramPoint(node.lineno, node.col_offset)
if isinstance(node.func, ast.Name):
name: str = node.func.id
if name == 'bool' or name == 'int' or name == 'str':
arguments = [self.visit(arg, types, typ, fname=fname) for arg in node.args]
return Call(pp, name, arguments, typ)
if name == 'input':
typ = StringLyraType
arguments = [self.visit(arg, types, typ(), fname=fname) for arg in node.args]
return Call(pp, name, arguments, typ())
if name == 'range':
typ = IntegerLyraType
arguments = [self.visit(arg, types, typ(), fname=fname) for arg in node.args]
return Call(pp, name, arguments, ListLyraType(typ()))
arguments = [self.visit(arg, types, None, fname=fname) for arg in node.args]
return Call(pp, name, arguments, typ)
elif isinstance(node.func, ast.Attribute):
name: str = node.func.attr
if name == 'append':
arguments = [self.visit(node.func.value, types, None, fname=fname)] # target
assert isinstance(arguments[0].typ, ListLyraType)
args = [self.visit(arg, types, arguments[0].typ.typ, fname=fname) for arg in node.args]
arguments.extend(args)
return Call(pp, name, arguments, arguments[0].typ)
if name == 'count': # str.count(sub, start= 0,end=len(string))
arguments = [self.visit(node.func.value, types, None, fname=fname)] # target
args = [self.visit(arg, types, None, fname=fname) for arg in node.args]
arguments.extend(args)
assert isinstance(arguments[0].typ, (StringLyraType, ListLyraType))
return Call(pp, name, arguments, IntegerLyraType())
if name == 'find': # str.find(str, beg=0, end=len(string))
arguments = [self.visit(node.func.value, types, None, fname=fname)] # target
args = [self.visit(arg, types, None, fname=fname) for arg in node.args]
arguments.extend(args)
assert isinstance(arguments[0].typ, StringLyraType)
return Call(pp, name, arguments, IntegerLyraType())
if name == 'get': # dict.get(key[, value])
arguments = [self.visit(node.func.value, types, None, fname=fname)] # target
args = [self.visit(arg, types, None, fname=fname) for arg in node.args]
arguments.extend(args)
assert isinstance(arguments[0].typ, DictLyraType)
return Call(pp, name, arguments, arguments[0].typ.val_typ)
if name == 'items':
arguments = [self.visit(node.func.value, types, None, fname=fname)] # target
args = [self.visit(arg, types, None, fname=fname) for arg in node.args]
arguments.extend(args)
assert isinstance(arguments[0].typ, DictLyraType)
tuple_typ = TupleLyraType([arguments[0].typ.key_typ, arguments[0].typ.val_typ])
return Call(pp, name, arguments, SetLyraType(tuple_typ))
if name == 'keys':
arguments = [self.visit(node.func.value, types, None, fname=fname)] # target
args = [self.visit(arg, types, None, fname=fname) for arg in node.args]
arguments.extend(args)
assert isinstance(arguments[0].typ, DictLyraType)
return Call(pp, name, arguments, SetLyraType(arguments[0].typ.key_typ))
if name == 'lstrip' or name == 'strip' or name == 'rstrip':
arguments = [self.visit(node.func.value, types, None, fname=fname)] # target
args = [self.visit(arg, types, None, fname=fname) for arg in node.args]
arguments.extend(args)
assert isinstance(arguments[0].typ, StringLyraType)
return Call(pp, name, arguments, arguments[0].typ)
if name == 'split': # str.split([sep[, maxsplit]])
if isinstance(typ, list): # this comes from a subscript
_typ = ListLyraType(typ[0])
else:
_typ = typ
assert isinstance(_typ, ListLyraType) # we expect type to be a ListLyraType
arguments = [self.visit(node.func.value, types, _typ.typ, fname=fname)] # target
args_typs = zip(node.args, [_typ.typ, IntegerLyraType()])
args = [self.visit(arg, types, arg_typ, fname=fname) for arg, arg_typ in args_typs]
arguments.extend(args)
return Call(pp, name, arguments, typ)
if name == 'values':
arguments = [self.visit(node.func.value, types, None, fname=fname)] # target
args = [self.visit(arg, types, None, fname=fname) for arg in node.args]
arguments.extend(args)
assert isinstance(arguments[0].typ, DictLyraType)
return Call(pp, name, arguments, SetLyraType(arguments[0].typ.val_typ))
if name == 'update':
arguments = [self.visit(node.func.value, types, None, fname=fname)] # target
args = [self.visit(arg, types, None, fname=fname) for arg in node.args]
arguments.extend(args)
assert isinstance(arguments[0].typ, SetLyraType)
return Call(pp, name, arguments, arguments[0].typ)
arguments = [self.visit(node.func.value, types, None, fname=fname)] # target
arguments.extend([self.visit(arg, types, None, fname=fname) for arg in node.args])
return Call(pp, name, arguments, typ)
def visit_IfExp(self, node, targets, op=None, types=None, typ=None, fname=''):
"""Visitor function for an if expression.
The components of the expression are stored in the attributes test, body, and orelse."""
pp = ProgramPoint(node.lineno, node.col_offset)
_pp = ProgramPoint(-node.lineno, -node.col_offset)
then = CFGFactory(self._id_gen)
body = self.visit(node.body, types, typ, fname=fname)
assignments = list()
for target in targets:
left = self.visit(target, types, typ, fname=fname)
if op:
name = type(op).__name__.lower()
value = Call(pp, name, [left, body], left.typ)
assignments.append(Assignment(pp, left, value))
else:
assignments.append(Assignment(pp, left, body))
then.add_stmts(assignments)
then.complete_basic_block()
then = then.cfg
test = self.visit(node.test, types, BooleanLyraType(), fname=fname)
then.add_edge(Conditional(None, test, then.in_node, Edge.Kind.IF_IN))
then.add_edge(Unconditional(then.out_node, None, Edge.Kind.IF_OUT))
orelse = CFGFactory(self._id_gen)
body = self.visit(node.orelse, types, typ, fname=fname)
assignments = list()
for target in targets:
left = self.visit(target, types, typ, fname=fname)
if op:
name = type(op).__name__.lower()
value = Call(pp, name, [left, body], left.typ)
assignments.append(Assignment(pp, left, value))
else:
assignments.append(Assignment(pp, left, body))
orelse.add_stmts(assignments)
orelse.complete_basic_block()
orelse = orelse.cfg
not_test = Call(_pp, 'not', [test], BooleanLyraType())
orelse.add_edge(Conditional(None, not_test, orelse.in_node, Edge.Kind.IF_IN))
orelse.add_edge(Unconditional(orelse.out_node, None, Edge.Kind.IF_OUT))
return then.combine(orelse)
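# Added summary of the lowering above (descriptive comment only): an assignment
# whose value is a conditional expression, e.g.
#   x: int = a if cond else b
# is turned into two single-block CFGs guarded by `cond` and `not cond`, each
# performing the corresponding assignment, and the two branches are combined.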
# Subscripting
def visit_Subscript(self, node: ast.Subscript, types=None, typ=None, fname=''):
"""Visitor function for a subscript.
The attribute value stores the target of the subscript (often a Name).
The attribute slice is one of Index, Slice, or ExtSlice.
The attribute ctx is Load, Store, or Del."""
pp = ProgramPoint(node.lineno, node.col_offset)
if isinstance(node.slice, ast.Index):
key = self.visit(node.slice.value, types, None, fname=fname)
if isinstance(key, LiteralEvaluation):
_typ = key.literal.typ
else:
_typ = key.typ
target = self.visit(node.value, types, [_typ, typ], fname=fname)
if isinstance(target.typ, DictLyraType):
return SubscriptionAccess(pp, target.typ.val_typ, target, key)
elif isinstance(target.typ, ListLyraType):
return SubscriptionAccess(pp, target.typ.typ, target, key)
else: # String
return SubscriptionAccess(pp, target.typ, target, key)
elif isinstance(node.slice, ast.Slice):
value = self.visit(node.value, types, None, fname=fname)
lower = None
if node.slice.lower:
lower = self.visit(node.slice.lower, types, None, fname=fname)
upper = None
if node.slice.upper:
upper = self.visit(node.slice.upper, types, None, fname=fname)
step = None
if node.slice.step:
step = self.visit(node.slice.step, types, None, fname=fname)
return SlicingAccess(pp, typ, value, lower, upper, step)
raise NotImplementedError(f"Subscription {node.slice.__class__.__name__} is unsupported!")
# Statements
def visit_Assign(self, node, types=None, typ=None, fname=''):
"""Visitor function for an assignment.
The attribute targets stores a list of targets of the assignment.
The attribute value stores the assigned value."""
pp = ProgramPoint(node.lineno, node.col_offset)
assert typ is None # we expect typ to be None
assert len(node.targets) == 1
target = self.visit(node.targets[0], types=types, typ=None, fname=fname)
value = self.visit(node.value, types=types, typ=target.typ, fname=fname)
return Assignment(pp, target, value)
def visit_AnnAssign(self, node, types=None, typ=None, fname=''):
"""Visitor function for an assignment with a type annotation.
The attribute target stores the target of the assignment (a Name, Attribute, or Subscript).
The attribute annotation stores the type annotation (a Str or Name).
The attribute value optionally stores the assigned value."""
pp = ProgramPoint(node.lineno, node.col_offset)
assert typ is None # we expect typ to be None
annotated = resolve_type_annotation(node.annotation)
target = self.visit(node.target, types=types, typ=annotated, fname=fname)
value = self.visit(node.value, types=types, typ=annotated, fname=fname)
return Assignment(pp, target, value)
def visit_AugAssign(self, node, types=None, typ=None, fname=''):
"""Visitor function for an augmented assignment.
The attribute target stores the target of the assignment (a Name, Attribute, or Subscript).
The attributes op and value store the operation and the assigned value, respectively."""
pp = ProgramPoint(node.lineno, node.col_offset)
assert typ is None # we expect typ to be None
target = self.visit(node.target, types=types, typ=None, fname=fname)
name = type(node.op).__name__.lower()
right = self.visit(node.value, types=types, typ=None, fname=fname)
value = Call(pp, name, [target, right], target.typ)
return Assignment(pp, target, value)
# noinspection PyMethodMayBeStatic, PyUnusedLocal
def visit_Raise(self, node, types=None, typ=None, fname=''):
"""Visitor function for an exception raise.
The attribute exc stores the exception object to be raised
(normally a Call or Name, or None for a standalone raise)."""
return Raise(ProgramPoint(node.lineno, node.col_offset))
# Control Flow
def visit_If(self, node, types=None, typ=None, fname=''):
"""Visitor function for an if statement.
The attribute test stores a single AST node.
The attributes body and orelse each store a list of AST nodes to be executed."""
pp = ProgramPoint(node.test.lineno, node.test.col_offset)
_pp = ProgramPoint(-node.test.lineno, -node.test.col_offset)
body = self._visit_body(node.body, types, fname=fname)
test = self.visit(node.test, types=types, typ=BooleanLyraType(), fname=fname)
body.add_edge(Conditional(None, test, body.in_node, Edge.Kind.IF_IN))
if body.out_node: # control flow can exit the body
# add an unconditional IF_OUT edge
body.add_edge(Unconditional(body.out_node, None, Edge.Kind.IF_OUT))
if node.orelse: # there is an else branch
orelse = self._visit_body(node.orelse, types, fname=fname)
not_test = Call(_pp, 'not', [test], BooleanLyraType())
orelse.add_edge(Conditional(None, not_test, orelse.in_node, Edge.Kind.IF_IN))
if orelse.out_node: # control flow can exit the else
# add an unconditional IF_OUT edge
orelse.add_edge(Unconditional(orelse.out_node, None, Edge.Kind.IF_OUT))
# handle special edges
for special, _ in orelse.special_edges:
# create dummy node
dummy = _dummy_node(self._id_gen)
orelse.add_node(dummy)
# add an unconditional IF_OUT edge to the newly created dummy node
orelse.add_edge(Unconditional(special.source, dummy, Edge.Kind.IF_OUT))
# move the special edge after the dummy node
special._source = dummy
else: # there is no else branch
orelse = LooseControlFlowGraph()
not_test = Call(_pp, 'not', [test], BooleanLyraType())
orelse.add_edge(Conditional(None, not_test, None, Edge.Kind.DEFAULT))
# handle special edges
for special, edge_type in body.special_edges:
# create dummy node
dummy = _dummy_node(self._id_gen)
body.add_node(dummy)
# add an unconditional IF_OUT edge to the newly created dummy node
body.add_edge(Unconditional(special.source, dummy, Edge.Kind.IF_OUT))
# move the special edge after the dummy node
special._source = dummy
cfg = body.combine(orelse)
return cfg
def visit_While(self, node, types=None, typ=None, fname=''):
"""Visitor function for an while statement.
The attribute test stores a single AST node.
The attributes body and orelse each store a list of AST nodes to be executed."""
pp = ProgramPoint(node.test.lineno, node.test.col_offset)
_pp = ProgramPoint(-node.test.lineno, -node.test.col_offset)
body = self._visit_body(node.body, types, fname=fname)
test = self.visit(node.test, types=types, typ=BooleanLyraType(), fname=fname)
header = Loop(self._id_gen.next)
body_in_node = body.in_node
body_out_node = body.out_node
body.add_node(header)
body.in_node = header
body.add_edge(Conditional(header, test, body_in_node, Edge.Kind.LOOP_IN))
not_test = Call(_pp, 'not', [test], BooleanLyraType())
body.add_edge(Conditional(header, not_test, None))
if body_out_node: # control flow can exit the body
# add an unconditional LOOP_OUT edge
body.add_edge(Unconditional(body_out_node, header, Edge.Kind.LOOP_OUT))
if node.orelse: # there is an else branch
orelse = self._visit_body(node.orelse, types, fname=fname)
if orelse.out_node: # control flow can exit the else
# add an unconditional DEFAULT edge
orelse.add_edge(Unconditional(orelse.out_node, None, Edge.Kind.DEFAULT))
body.append(orelse)
# handle special edges
for special, kind in body.special_edges:
if kind == LooseControlFlowGraph.SpecialEdgeType.CONTINUE:
body.add_edge(Unconditional(special.source, header, Edge.Kind.LOOP_OUT))
elif kind == LooseControlFlowGraph.SpecialEdgeType.BREAK:
body.add_edge(Unconditional(special.source, None, Edge.Kind.LOOP_OUT))
body.special_edges.clear()
return body
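# Added descriptive comment: the loop CFG built above has a fresh Loop header as
# its entry node, a LOOP_IN edge guarded by `test` into the body, a loose edge
# guarded by `not test` leaving the loop, and (if the body can fall through) an
# unconditional LOOP_OUT edge from the body's exit back to the header.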
def visit_For(self, node, types=None, typ=None, fname=''):
"""Visitor function for a for statement.
The attribute target stores the variable(s) the loop assigns to
(as a single Name, Tuple, or List node).
The attribute iter stores a single AST node representing the item to be looped over.
The attributes body and orelse each store a list of AST nodes to be executed."""
pp = ProgramPoint(node.target.lineno, node.target.col_offset)
_pp = ProgramPoint(-node.target.lineno, -node.target.col_offset)
iterated = self.visit(node.iter, types=types, typ=None, fname=fname)
target_typ = None
if isinstance(iterated, VariableAccess):
if isinstance(iterated.typ, ListLyraType): # iteration over list items
target_typ = iterated.typ.typ
elif isinstance(iterated.typ, SetLyraType): # iteration over set items
target_typ = iterated.typ.typ
elif isinstance(iterated.typ, DictLyraType): # iteration over dictionary keys
iterated = Call(iterated.pp, 'keys', [iterated], SetLyraType(iterated.typ.key_typ))
target_typ = iterated.typ.typ
elif isinstance(iterated, Call):
if iterated.name == 'range':
assert isinstance(iterated.typ, ListLyraType)
target_typ = iterated.typ.typ
elif iterated.name == 'items' or iterated.name == 'keys' or iterated.name == 'values':
assert isinstance(iterated.typ, SetLyraType)
target_typ = iterated.typ.typ
elif iterated.name == 'list':
assert len(iterated.arguments) == 1
if isinstance(iterated.arguments[0].typ, ListLyraType):
target_typ = iterated.arguments[0].typ.typ
else:
error = "The type of the target {} is not yet determinable!".format(iterated)
raise NotImplementedError(error)
else:
error = "The iteration attribute {} is not yet translatable to CFG!".format(iterated)
raise NotImplementedError(error)
target = self.visit(node.target, types=types, typ=target_typ, fname=fname)
body = self._visit_body(node.body, types, fname=fname)
test = Call(pp, 'in', [target, iterated], BooleanLyraType(), forloop=True)
header = Loop(self._id_gen.next)
body_in_node = body.in_node
body_out_node = body.out_node
body.add_node(header)
body.in_node = header
body.add_edge(Conditional(header, test, body_in_node, Edge.Kind.LOOP_IN))
not_test = Call(_pp, 'notin', [target, iterated], BooleanLyraType(), forloop=True)
body.add_edge(Conditional(header, not_test, None))
if body_out_node: # control flow can exit the body
# add an unconditional LOOP_OUT edge
body.add_edge(Unconditional(body_out_node, header, Edge.Kind.LOOP_OUT))
if node.orelse: # there is an else branch
orelse = self._visit_body(node.orelse, types=types, fname=fname)
if orelse.out_node: # control flow can exit the else
# add an unconditional DEFAULT edge
orelse.add_edge(Unconditional(orelse.out_node, None, Edge.Kind.DEFAULT))
body.append(orelse)
# handle special edges
for special, kind in body.special_edges:
if kind == LooseControlFlowGraph.SpecialEdgeType.CONTINUE:
body.add_edge(Unconditional(special.source, header, Edge.Kind.LOOP_OUT))
elif kind == LooseControlFlowGraph.SpecialEdgeType.BREAK:
body.add_edge(Unconditional(special.source, None, Edge.Kind.LOOP_OUT))
body.special_edges.clear()
return body
# noinspection PyUnusedLocal
def visit_Break(self, _, types=None, typ=None, fname=''):
"""Visitor function for a break statement."""
dummy = _dummy_node(self._id_gen)
cfg = LooseControlFlowGraph({dummy}, dummy, None)
# the type of the special edge is not yet known, set to DEFAULT for now
edge = Unconditional(dummy, None, Edge.Kind.DEFAULT)
cfg.special_edges.append((edge, LooseControlFlowGraph.SpecialEdgeType.BREAK))
return cfg
# noinspection PyUnusedLocal
def visit_Continue(self, _, types=None, typ=None, fname=''):
"""Visitor function for a continue statement."""
dummy = _dummy_node(self._id_gen)
cfg = LooseControlFlowGraph({dummy}, dummy, None)
# the type of the special edge is not yet known, set to DEFAULT for now
edge = Unconditional(dummy, None, Edge.Kind.DEFAULT)
cfg.special_edges.append((edge, LooseControlFlowGraph.SpecialEdgeType.CONTINUE))
return cfg
def visit_FunctionDef(self, node: ast.FunctionDef, types, fname):
"""Visit function for a function definition.
class FunctionDef(name, args, body, decorator_list, returns)
name is a raw string of the function name.
args is an arguments node.
body is the list of nodes inside the function.
decorator_list is the list of decorators to be applied.
returns is the return annotation.
"""
for arg in node.args.args:
annotated = resolve_type_annotation(arg.annotation)
arg.arg = fname + "#" + arg.arg
types[arg.arg] = annotated
types[fname + "#return"] = resolve_type_annotation(node.returns)
start = _dummy_cfg(self._id_gen)
body = self._visit_body(node.body, types, True, True, fname)
end = _dummy_cfg(self._id_gen)
fun_cfg = start.append(body).append(end) if body else start.append(end)
# fun_cfg = self._restructure_return_and_raise_edges(fun_cfg)
self._cfgs[fname] = fun_cfg
return fun_cfg
def visit_Return(self, node, types=None, fname=''):
"""Visitor function for a return statement."""
typ = types[fname + "#return"]
expressions = self.visit(node.value, typ=typ, types=types, fname=fname)
return Return(ProgramPoint(node.lineno, node.col_offset), [expressions])
def _visit_body(self, body, types, loose_in_edges=False, loose_out_edges=False, fname=''):
factory = CFGFactory(self._id_gen)
for child in body:
if isinstance(child, ast.Assign):
if isinstance(child.value, ast.IfExp): # the value is a conditional expression
factory.complete_basic_block()
targets = child.targets
if_cfg = self.visit(child.value, targets, op=None, types=types, fname=fname)
factory.append_cfg(if_cfg)
else: # normal assignment
factory.add_stmts(self.visit(child, types=types, fname=fname))
elif isinstance(child, ast.AnnAssign):
if child.value is None: # only a type annotation
annotation = resolve_type_annotation(child.annotation)
if isinstance(child.target, ast.Name):
types[child.target.id] = annotation
elif isinstance(child.target, (ast.Attribute, ast.Subscript)):
types[child.target.value] = annotation
elif isinstance(child.value, ast.IfExp): # the value is a conditional expression
factory.complete_basic_block()
annotation = resolve_type_annotation(child.annotation)
targets = [child.target]
if_cfg = self.visit(child.value, targets, None, types, annotation, fname)
factory.append_cfg(if_cfg)
else: # normal annotated assignment
factory.add_stmts(self.visit(child, types, fname=fname))
elif isinstance(child, ast.AugAssign):
if isinstance(child.value, ast.IfExp): # the value is a conditional expression
factory.complete_basic_block()
targets = [child.target]
if_cfg = self.visit(child.value, targets, child.op, types=types, fname=fname)
factory.append_cfg(if_cfg)
else: # normal augmented assignment
factory.add_stmts(self.visit(child, types=types, fname=fname))
elif isinstance(child, ast.Expr):
# check other options for AnnAssign (empty value, or IfExp as value)
factory.add_stmts(self.visit(child, types=types, fname=fname))
elif isinstance(child, (ast.Raise, ast.Return)):
factory.add_stmts(self.visit(child, types=types, fname=fname))
factory.complete_basic_block()
elif isinstance(child, ast.If):
factory.complete_basic_block()
if_cfg = self.visit(child, types=types, fname=fname)
factory.append_cfg(if_cfg)
elif isinstance(child, ast.While):
factory.complete_basic_block()
while_cfg = self.visit(child, types=types, fname=fname)
factory.append_cfg(while_cfg)
elif isinstance(child, ast.For):
factory.complete_basic_block()
for_cfg = self.visit(child, types=types, fname=fname)
factory.append_cfg(for_cfg)
elif isinstance(child, ast.Break):
factory.complete_basic_block()
break_cfg = self.visit(child, types=types, fname=fname)
factory.append_cfg(break_cfg)
elif isinstance(child, ast.Continue):
factory.complete_basic_block()
cont_cfg = self.visit(child, types=types, fname=fname)
factory.append_cfg(cont_cfg)
elif isinstance(child, ast.Pass) and factory.incomplete_block():
pass
elif isinstance(child, ast.Pass):
factory.append_cfg(_dummy_cfg(self._id_gen))
elif isinstance(child, ast.FunctionDef):
self._fdefs[child.name] = child
else:
error = "The statement {} is not yet translatable to CFG!".format(child)
raise NotImplementedError(error)
factory.complete_basic_block()
if not loose_in_edges and factory.cfg and factory.cfg.loose_in_edges:
factory.prepend_cfg(_dummy_cfg(self._id_gen))
if not loose_out_edges and factory.cfg and factory.cfg.loose_out_edges:
factory.append_cfg(_dummy_cfg(self._id_gen))
return factory.cfg
# noinspection PyUnusedLocal
def visit_Module(self, node, types=None, typ=None):
"""Visitor function for a Python module."""
start = _dummy_cfg(self._id_gen)
body = self._visit_body(node.body, types, loose_in_edges=True, loose_out_edges=True)
end = _dummy_cfg(self._id_gen)
main_cfg = start.append(body).append(end) if body else start.append(end)
# main_cfg = self._restructure_return_and_raise_edges(main_cfg)
self._cfgs[''] = main_cfg
for fdef, child in self._fdefs.items():
fun_factory = CFGFactory(self._id_gen)
fun_cfg = self.visit_FunctionDef(child, types, child.name)
fun_factory.append_cfg(fun_cfg)
return self._cfgs
# def _restructure_return_and_raise_edges(self, cfg):
# nodes_to_be_removed = []
# for node in cfg.nodes.values():
# if any(isinstance(stmt, (Raise, Return)) for stmt in node.stmts):
# edges_to_be_removed = cfg.get_edges_with_source(node)
# for edge_to_be_removed in edges_to_be_removed:
# target = edge_to_be_removed.target
# if len(cfg.get_edges_with_target(target)) == 1: # there is no other edge
# nodes_to_be_removed.append(edge_to_be_removed.target)
# cfg.remove_edge(edge_to_be_removed)
# cfg.add_edge(Unconditional(node, cfg.out_node)) # connect the node to the exit node
# for node_to_be_removed in nodes_to_be_removed:
# cfg.remove_node(node_to_be_removed)
# return cfg
def ast_to_cfgs(root):
"""Generate a CFG for each user-defined function from an AST.
:param root: root node of the AST
:return: mapping of function names to the corresponding CFG generated from the given AST
"""
loose_cfgs = CFGVisitor().visit(root, dict())
cfgs = {name: loose_cfg.eject() for name, loose_cfg in loose_cfgs.items()}
return cfgs
def ast_to_fargs(root):
fargs = {'': None}
for child in root.body:
if isinstance(child, ast.FunctionDef):
fname = child.name
args = []
for arg in child.args.args:
annotated = resolve_type_annotation(arg.annotation)
args.append(VariableIdentifier(annotated, arg.arg))
fargs[fname] = args
return fargs
def source_to_cfg(code: str, fname=''):
"""Generate a CFG from a Python program.
:param code: Python program
:param fname: the function whose CFG will be generated
:return: the CFG generated from the given Python program for the function fname
"""
return source_to_cfgs(code)[fname]
def source_to_cfgs(code: str):
"""Generate a CFG for each user-defined function from a Python program.
:param code: Python program
:return: the CFG generated for each user-defined function from the given Python program
"""
root_node = ast.parse(code)
return ast_to_cfgs(root_node)
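# Hypothetical usage example (added, assuming the module is importable as is):
#
#   code = "x: int = 0\nwhile x < 3:\n    x = x + 1\n"
#   cfg = source_to_cfg(code)    # CFG of the module body (fname='')
#   blocks = len(cfg.nodes)      # number of nodes, including dummy blocks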
def main(args):
optparser = optparse.OptionParser(usage="python3 -m frontend.cfg_generator [options] [string]")
optparser.add_option("-f", "--file", help="Read a code snippet from the specified file")
optparser.add_option("-l", "--label", help="The label for the visualization")
options, args = optparser.parse_args(args)
if options.file:
with open(options.file) as instream:
code = instream.read()
label = options.file
elif len(args) == 2:
code = args[1] + "\n"
label = "<code read from command line parameter>"
else:
print("Expecting Python code on stdin...")
code = sys.stdin.read()
label = "<code read from stdin>"
if options.label:
label = options.label
cfg = source_to_cfg(code)
CFGRenderer().render(cfg, label=label)
if __name__ == '__main__':
main(sys.argv)
| mpl-2.0 | 5,630,675,610,013,542,000 | 46.416078 | 103 | 0.605701 | false |
denys-duchier/Scolar | ZopeProducts/exUserFolder/__init__.py | 1 | 3400 | #
# Extensible User Folder
#
# (C) Copyright 2000-2005 The Internet (Aust) Pty Ltd
# ACN: 082 081 472 ABN: 83 082 081 472
# All Rights Reserved
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Andrew Milton <[email protected]>
# $Id: __init__.py,v 1.18 2004/11/10 14:15:33 akm Exp $
import exUserFolder
import CryptoSources
import AuthSources
import PropSources
import MembershipSources
import GroupSources
from GroupSource import GroupSource
from App.ImageFile import ImageFile
import OFS
#
# Install a dummy ZBabel setup if we don't have ZBabel installed.
#
import dummyZBabelTag
# Methods we need access to from any ObjectManager context
legacy_methods = (
('manage_addexUserFolderForm', exUserFolder.manage_addexUserFolderForm),
('manage_addexUserFolder', exUserFolder.manage_addexUserFolder),
('getAuthSources', exUserFolder.getAuthSources),
#('getPropSources', exUserFolder.getPropSources),
('getCryptoSources', exUserFolder.getCryptoSources),
('getMembershipSources', exUserFolder.getMembershipSources),
('getGroupSources', exUserFolder.getGroupSources),
('doAuthSourceForm', exUserFolder.doAuthSourceForm),
#('doPropSourceForm', exUserFolder.doPropSourceForm),
('doMembershipSourceForm', exUserFolder.doMembershipSourceForm),
# ('doGroupSourceForm', exUserFolder.doGroupSourceForm),
('getVariableType', exUserFolder.getVariableType),
('DialogHeader', exUserFolder.exUserFolder.DialogHeader),
('DialogFooter', exUserFolder.exUserFolder.DialogFooter),
#('MailHostIDs', exUserFolder.MailHostIDs),
)
# Image files to place in the misc_ object so they are accessible from misc_/exUserFolder
misc_={'exUserFolder.gif': ImageFile('exUserFolder.gif', globals()),
'exUserFolderPlugin.gif': ImageFile('exUserFolderPlugin.gif', globals()),
'exUser.gif': ImageFile('exUser.gif', globals()),
}
def initialize(context):
"""
Register base classes
"""
context.registerClass(exUserFolder.exUserFolder,
meta_type="ex User Folder",
permission="Add exUser Folder",
constructors=(exUserFolder.manage_addexUserFolderForm,
exUserFolder.manage_addexUserFolder,),
legacy=legacy_methods,
icon="exUserFolder.gif")
context.registerClass(GroupSource.GroupSource,
meta_type="ex User Folder Group Source",
permission="Add exUser Folder",
constructors=(GroupSource.manage_addGroupSourceForm,
GroupSource.manage_addGroupSource,),
icon="exUserFolderPlugin.gif")
| gpl-2.0 | -6,918,314,890,740,817,000 | 39 | 88 | 0.717353 | false |
mobify/dj-saml-idp | saml2idp/views.py | 1 | 5988 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
from django.contrib import auth
from django.core.validators import URLValidator
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ImproperlyConfigured, ValidationError
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.utils.datastructures import MultiValueDictKeyError
from django.shortcuts import render, redirect
from django.http import HttpResponseBadRequest, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from . import (
saml2idp_metadata,
exceptions,
metadata,
registry,
xml_signing
)
from .logging import get_saml_logger
logger = get_saml_logger()
# The 'schemes' argument for the URLValidator was introduced in Django 1.6. This
# ensures that URL validation works in 1.5 as well.
try:
URL_VALIDATOR = URLValidator(schemes=('http', 'https'))
except TypeError:
URL_VALIDATOR = URLValidator()
BASE_TEMPLATE_DIR = 'saml2idp'
def _get_template_names(filename, processor=None):
"""
Create a list of template names to use based on the processor name. This
makes it possible to have processor-specific templates.
"""
specific_templates = []
if processor and processor.name:
specific_templates = [
os.path.join(BASE_TEMPLATE_DIR, processor.name, filename)]
return specific_templates + [os.path.join(BASE_TEMPLATE_DIR, filename)]
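# Illustrative example (added comment): for a processor named 'demo', a call to
# _get_template_names('login.html', processor) yields
#   ['saml2idp/demo/login.html', 'saml2idp/login.html']
# so processor-specific templates take precedence over the generic ones.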
def _generate_response(request, processor):
"""
Generate a SAML response using the given processor and return it in the proper Django
response.
"""
try:
tv = processor.generate_response()
except exceptions.UserNotAuthorized:
template_names = _get_template_names('invalid_user.html', processor)
return render(
request,
template_names
)
template_names = _get_template_names('login.html', processor)
return render(
request,
template_names,
tv
)
def xml_response(request, template, tv):
return render(request, template, tv, content_type="application/xml")
@csrf_exempt
def login_begin(request, *args, **kwargs):
"""
Receives a SAML 2.0 AuthnRequest from a Service Provider and
stores it in the session prior to enforcing login.
"""
if request.method == 'POST':
source = request.POST
else:
source = request.GET
# Store these values now, because Django's login cycle won't preserve them.
try:
request.session['SAMLRequest'] = source['SAMLRequest']
except (KeyError, MultiValueDictKeyError):
return HttpResponseBadRequest('the SAML request payload is missing')
request.session['RelayState'] = source.get('RelayState', '')
return redirect('saml_login_process')
@login_required
def login_init(request, resource, **kwargs):
"""
Initiates an IdP-initiated link to a simple SP resource/target URL.
"""
name, sp_config = metadata.get_config_for_resource(resource)
proc = registry.get_processor(name, sp_config)
try:
linkdict = dict(metadata.get_links(sp_config))
pattern = linkdict[resource]
except KeyError:
raise ImproperlyConfigured(
'Cannot find link resource in SAML2IDP_REMOTE setting: "%s"' % resource
)
is_simple_link = ('/' not in resource)
if is_simple_link:
simple_target = kwargs['target']
url = pattern % simple_target
else:
url = pattern % kwargs
proc.init_deep_link(request, sp_config, url)
return _generate_response(request, proc)
@login_required
def login_process(request):
"""
Processor-based login continuation.
Presents a SAML 2.0 Assertion for POSTing back to the Service Provider.
"""
logger.debug("Request: %s" % request)
proc = registry.find_processor(request)
return _generate_response(request, proc)
@csrf_exempt
def logout(request):
"""
Allows a non-SAML 2.0 URL to log out the user and
returns a standard logged-out page. (SalesForce and others use this method,
though it's technically not SAML 2.0).
"""
auth.logout(request)
redirect_url = request.GET.get('redirect_to', '')
try:
URL_VALIDATOR(redirect_url)
except ValidationError:
pass
else:
return HttpResponseRedirect(redirect_url)
return render(
request,
_get_template_names('logged_out.html')
)
@login_required
@csrf_exempt
def slo_logout(request):
"""
Receives a SAML 2.0 LogoutRequest from a Service Provider,
logs out the user and returns a standard logged-out page.
"""
request.session['SAMLRequest'] = request.POST['SAMLRequest']
#TODO: Parse SAML LogoutRequest from POST data, similar to login_process().
#TODO: Add a URL dispatch for this view.
#TODO: Modify the base processor to handle logouts?
#TODO: Combine this with login_process(), since they are so very similar?
#TODO: Format a LogoutResponse and return it to the browser.
#XXX: For now, simply log out without validating the request.
auth.logout(request)
tv = {}
return render(
request,
_get_template_names('logged_out.html'),
tv
)
def descriptor(request):
"""
Replies with the XML Metadata IDSSODescriptor.
"""
idp_config = saml2idp_metadata.SAML2IDP_CONFIG
entity_id = idp_config['issuer']
slo_url = request.build_absolute_uri(reverse('saml_logout'))
sso_url = request.build_absolute_uri(reverse('saml_login_begin'))
pubkey = xml_signing.load_certificate(idp_config)
tv = {
'entity_id': entity_id,
'cert_public_key': pubkey,
'slo_url': slo_url,
'sso_url': sso_url
}
return xml_response(
request,
os.path.join(BASE_TEMPLATE_DIR, 'idpssodescriptor.xml'),
tv
)
| mit | -1,988,701,014,868,627,700 | 28.643564 | 83 | 0.675685 | false |
openworm/PyOpenWorm | post_install.py | 1 | 1749 | from __future__ import absolute_import
from __future__ import print_function
import os, shutil, sys
from sysconfig import get_path
from glob import glob
from pkgutil import get_loader
from subprocess import call
def get_library_location(package):
# get abs path of a package in the library, rather than locally
library_package_paths = glob(os.path.join(get_path('platlib'), '*'))
sys.path = library_package_paths + sys.path
package_path = os.path.dirname(get_loader(package).get_filename())
sys.path = sys.path[len(library_package_paths):]
return package_path
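# Illustrative note (added): get_library_location('owmeta') resolves the package
# as installed under site-packages (platlib), not the local checkout, so the
# database files below are copied next to the installed copy of the package.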
package_location = get_library_location('owmeta')
pwd = os.path.dirname(os.path.realpath(__file__))
user_id = os.stat(pwd).st_uid # this is the user who cloned the repo
script_location = os.path.join(pwd, 'OpenWormData', 'scripts')
user_script = 'insert_worm.py' # script(s) we want to be run as non-root
print('Running {} as UID {}'.format(user_script, user_id))
pid = os.fork()
if pid == 0:
#child process
db_files = glob(os.path.join(script_location, 'worm.db*'))
for x in db_files:
os.unlink(x)
try:
os.seteuid(user_id)
call([sys.executable, user_script], cwd = script_location)
finally:
os._exit(0)
os.waitpid(pid, 0)
# move created database files to your library's package directory
db_files = glob(os.path.join(script_location, 'worm.db*'))
for db_file in db_files:
print(('copying {} to {}'.format(db_file, package_location)))
new_location = os.path.join(package_location, os.path.basename(db_file))
shutil.copy(db_file, package_location)
os.chmod(new_location, 0o777)
# change directory owner to allow writing and reading from db in that dir
os.chown(package_location, user_id, -1)
| mit | 6,506,810,623,881,900,000 | 38.75 | 76 | 0.699257 | false |
2013Commons/HUE-SHARK | apps/beeswax/src/beeswax/urls.py | 1 | 3280 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('beeswax.views',
url(r'^$', 'index', name='index'),
url(r'^execute/(?P<design_id>\d+)?$', 'execute_query', name='execute_query'),
url(r'^explain_parameterized/(?P<design_id>\d+)$', 'explain_parameterized_query', name='explain_parameterized_query'),
url(r'^execute_parameterized/(?P<design_id>\d+)$', 'execute_parameterized_query', name='execute_parameterized_query'),
url(r'^watch/(?P<id>\d+)$', 'watch_query', name='watch_query'),
url(r'^watch/json/(?P<id>\d+)$', 'watch_query_refresh_json', name='watch_query_refresh_json'),
url(r'^cancel_operation/(?P<query_id>\d+)?$', 'cancel_operation', name='cancel_operation'),
url(r'^results/(?P<id>\d+)/(?P<first_row>\d+)$', 'view_results', name='view_results'),
url(r'^download/(?P<id>\d+)/(?P<format>\w+)$', 'download', name='download'),
url(r'^save_results/(?P<id>\d+)$', 'save_results', name='save_results'),
url(r'^save_design_properties$', 'save_design_properties', name='save_design_properties'), # Ajax
url(r'^autocomplete/$', 'autocomplete', name='autocomplete'),
url(r'^autocomplete/(?P<database>\w+)/$', 'autocomplete', name='autocomplete'),
url(r'^autocomplete/(?P<database>\w+)/(?P<table>\w+)$', 'autocomplete', name='autocomplete'),
url(r'^my_queries$', 'my_queries', name='my_queries'),
url(r'^list_designs$', 'list_designs', name='list_designs'),
url(r'^list_trashed_designs$', 'list_trashed_designs', name='list_trashed_designs'),
url(r'^delete_designs$', 'delete_design', name='delete_design'),
url(r'^restore_designs$', 'restore_design', name='restore_design'),
url(r'^clone_design/(?P<design_id>\d+)$', 'clone_design', name='clone_design'),
url(r'^query_history$', 'list_query_history', name='list_query_history'),
url(r'^configuration$', 'configuration', name='configuration'),
url(r'^install_examples$', 'install_examples', name='install_examples'),
url(r'^query_cb/done/(?P<server_id>\S+)$', 'query_done_cb', name='query_done_cb'),
)
urlpatterns += patterns(
'beeswax.create_database',
url(r'^create/database$', 'create_database', name='create_database'),
)
urlpatterns += patterns(
'beeswax.create_table',
url(r'^create/create_table/(?P<database>\w+)$', 'create_table', name='create_table'),
url(r'^create/import_wizard/(?P<database>\w+)$', 'import_wizard', name='import_wizard'),
url(r'^create/auto_load/(?P<database>\w+)$', 'load_after_create', name='load_after_create'),
)
| apache-2.0 | 5,420,627,907,366,661,000 | 50.25 | 120 | 0.685671 | false |
cuckoo5/soap | Soap_know/handler/formula.py | 1 | 1592 | #coding=utf-8
import tornado.web
import util.config as config
import util.constants as constants
from db.manager import FormulaManager, OilManager
from handler.base import BaseHandler
class FormulaHandler(BaseHandler):
def initialize(self):
self.all_use = constants.use
self.all_difficult_degree = constants.difficult_degree
self.oil_manager = OilManager()
self.formula_manager = FormulaManager()
self.all_skin_types = self.formula_manager.get_all_skin_types()
@tornado.web.authenticated
def get(self, param):
print "param = %s" %param
url = 'formula/%s.html' %param
cur_user = self.get_current_user()
switch = {'index' : self.index, 'new' : self.new}
switch[param](url, cur_user)
# self.render(url, title=TITLE, cur_user=cur_user)
def index(self, url, cur_user):
skin_type = self.get_argument("skin_type", None)
use = self.get_argument("use", None)
difficult_degree = self.get_argument("difficult_degree", None)
print 'skin_type = ', skin_type
print 'use = ', use
print 'difficult_degree = ', difficult_degree
formulas = self.formula_manager.get_formulas()
self.render(url, title=config.title, cur_user=cur_user, use=self.all_use, skin_types=self.all_skin_types,
difficult_degree=self.all_difficult_degree, formulas=formulas)
def new(self, url, cur_user):
oils = self.oil_manager.get_oils()
self.render(url, title=config.title, cur_user=cur_user, oils=oils)
| gpl-3.0 | 3,997,204,989,561,479,000 | 36.904762 | 114 | 0.645101 | false |
taolei87/sru | DrQA/drqa/model.py | 1 | 5679 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import logging
from torch.autograd import Variable
from .utils import AverageMeter
from .rnn_reader import RnnDocReader
# Modification:
# - change the logger name
# - save & load optimizer state dict
# - change the dimension of inputs (for POS and NER features)
# Origin: https://github.com/facebookresearch/ParlAI/tree/master/parlai/agents/drqa
logger = logging.getLogger(__name__)
class DocReaderModel(object):
"""High level model that handles intializing the underlying network
architecture, saving, updating examples, and predicting examples.
"""
def __init__(self, opt, embedding=None, state_dict=None):
# Book-keeping.
self.opt = opt
self.updates = state_dict['updates'] if state_dict else 0
self.train_loss = AverageMeter()
# Building network.
self.network = RnnDocReader(opt, embedding=embedding)
if state_dict:
new_state = set(self.network.state_dict().keys())
for k in list(state_dict['network'].keys()):
if k not in new_state:
del state_dict['network'][k]
self.network.load_state_dict(state_dict['network'])
# Building optimizer.
parameters = [p for p in self.network.parameters() if p.requires_grad]
if opt['optimizer'] == 'sgd':
self.optimizer = optim.SGD(parameters, opt['learning_rate'],
momentum=opt['momentum'],
weight_decay=opt['weight_decay'])
elif opt['optimizer'] == 'adamax':
self.optimizer = optim.Adamax(parameters, opt['learning_rate'],
weight_decay=opt['weight_decay'])
else:
raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])
if state_dict:
self.optimizer.load_state_dict(state_dict['optimizer'])
num_params = sum(p.data.numel() for p in parameters
if p.data.data_ptr() != self.network.embedding.weight.data.data_ptr())
print ("{} parameters".format(num_params))
def update(self, ex):
# Train mode
self.network.train()
# Transfer to GPU
if self.opt['cuda']:
inputs = [Variable(e.cuda(async=True)) for e in ex[:7]]
target_s = Variable(ex[7].cuda(async=True))
target_e = Variable(ex[8].cuda(async=True))
else:
inputs = [Variable(e) for e in ex[:7]]
target_s = Variable(ex[7])
target_e = Variable(ex[8])
# Run forward
score_s, score_e = self.network(*inputs)
# Compute loss and accuracies
loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
self.train_loss.update(loss.data[0], ex[0].size(0))
# Clear gradients and run backward
self.optimizer.zero_grad()
loss.backward()
# Clip gradients
torch.nn.utils.clip_grad_norm(self.network.parameters(),
self.opt['grad_clipping'])
# Update parameters
self.optimizer.step()
self.updates += 1
# Reset any partially fixed parameters (e.g. rare words)
self.reset_parameters()
def predict(self, ex):
# Eval mode
self.network.eval()
# Transfer to GPU
if self.opt['cuda']:
inputs = [Variable(e.cuda(async=True), volatile=True)
for e in ex[:7]]
else:
inputs = [Variable(e, volatile=True) for e in ex[:7]]
# Run forward
score_s, score_e = self.network(*inputs)
# Transfer to CPU/normal tensors for numpy ops
score_s = score_s.data.cpu()
score_e = score_e.data.cpu()
# Get argmax text spans
text = ex[-2]
spans = ex[-1]
predictions = []
max_len = self.opt['max_len'] or score_s.size(1)
for i in range(score_s.size(0)):
scores = torch.ger(score_s[i], score_e[i])
scores.triu_().tril_(max_len - 1)
scores = scores.numpy()
s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)
s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]
predictions.append(text[i][s_offset:e_offset])
return predictions
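    # Illustrative note (added commentary, not part of the original code):
    # torch.ger(score_s[i], score_e[i]) builds the outer product of start/end
    # span scores, and triu_().tril_(max_len - 1) keeps only spans whose end
    # index is >= the start index and whose length is at most max_len, so the
    # argmax above always picks a valid answer span.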
def reset_parameters(self):
# Reset fixed embeddings to original value
if self.opt['tune_partial'] > 0:
offset = self.opt['tune_partial'] + 2
if offset < self.network.embedding.weight.data.size(0):
self.network.embedding.weight.data[offset:] \
= self.network.fixed_embedding
def save(self, filename, epoch):
params = {
'state_dict': {
'network': self.network.state_dict(),
'optimizer': self.optimizer.state_dict(),
'updates': self.updates
},
'config': self.opt,
'epoch': epoch
}
try:
torch.save(params, filename)
logger.info('model saved to {}'.format(filename))
except BaseException:
logger.warn('[ WARN: Saving failed... continuing anyway. ]')
def cuda(self):
self.network.cuda()
| mit | -4,923,884,610,044,888,000 | 35.171975 | 83 | 0.573693 | false |
anirudhSK/chromium | tools/perf/measurements/timeline_based_measurement.py | 1 | 4448 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from metrics import timeline as timeline_module
from metrics import timeline_interaction_record as tir_module
from telemetry.page import page_measurement
from telemetry.core.timeline import model as model_module
# TimelineBasedMeasurement considers all instrumentation as producing a single
# timeline. But, depending on the amount of instrumentation that is enabled,
# overhead increases. The user of the measurement must therefore choose between
# a few levels of instrumentation.
NO_OVERHEAD_LEVEL = 'no-overhead'
MINIMAL_OVERHEAD_LEVEL = 'minimal-overhead'
DEBUG_OVERHEAD_LEVEL = 'debug-overhead'
ALL_OVERHEAD_LEVELS = [
NO_OVERHEAD_LEVEL,
MINIMAL_OVERHEAD_LEVEL,
DEBUG_OVERHEAD_LEVEL
]
class _TimelineBasedMetrics(object):
def __init__(self, model, renderer_thread):
self._model = model
self._renderer_thread = renderer_thread
def FindTimelineInteractionRecords(self):
# TODO(nduca): Add support for page-load interaction record.
return [tir_module.TimelineInteractionRecord(event) for
event in self._renderer_thread.IterAllAsyncSlices()
if tir_module.IsTimelineInteractionRecord(event.name)]
def CreateMetricsForTimelineInteractionRecord(self, interaction):
res = []
if interaction.is_smooth:
pass # TODO(nduca): res.append smoothness metric instance.
return res
def AddResults(self, results):
interactions = self.FindTimelineInteractionRecords()
if len(interactions) == 0:
raise Exception('Expected at least one Interaction on the page')
for interaction in interactions:
metrics = self.CreateMetricsForTimelineInteractionRecord(interaction)
for m in metrics:
m.AddResults(self._model, self._renderer_thread,
interaction, results)
class TimelineBasedMeasurement(page_measurement.PageMeasurement):
"""Collects multiple metrics pages based on their interaction records.
A timeline measurement shifts the burden of what metrics to collect onto the
page under test, or the pageset running that page. Instead of the measurement
having a fixed set of values it collects about the page, the page being tested
  issues (via javascript) an Interaction record into the user timing API,
describing what the page is doing at that time, as well as a standardized set
of flags describing the semantics of the work being done. The
TimelineBasedMeasurement object collects a trace that includes both these
  interaction records, and a user-chosen amount of performance data using
Telemetry's various timeline-producing APIs, tracing especially.
It then passes the recorded timeline to different TimelineBasedMetrics based
on those flags. This allows a single run through a page to produce load timing
data, smoothness data, critical jank information and overall cpu usage
information.
For information on how to mark up a page to work with
TimelineBasedMeasurement, refer to the
perf.metrics.timeline_interaction_record module.
"""
def __init__(self):
super(TimelineBasedMeasurement, self).__init__('smoothness')
def AddCommandLineOptions(self, parser):
parser.add_option(
'--overhead-level', type='choice',
choices=ALL_OVERHEAD_LEVELS,
default=NO_OVERHEAD_LEVEL,
help='How much overhead to incur during the measurement.')
def CanRunForPage(self, page):
return hasattr(page, 'smoothness')
def WillNavigateToPage(self, page, tab):
if not tab.browser.supports_tracing:
raise Exception('Not supported')
assert self.options.overhead_level in ALL_OVERHEAD_LEVELS
if self.options.overhead_level == NO_OVERHEAD_LEVEL:
categories = timeline_module.MINIMAL_TRACE_CATEGORIES
elif self.options.overhead_level == \
MINIMAL_OVERHEAD_LEVEL:
categories = ''
else:
categories = '*,disabled-by-default-cc.debug'
tab.browser.StartTracing(categories)
def MeasurePage(self, page, tab, results):
""" Collect all possible metrics and added them to results. """
trace_result = tab.browser.StopTracing()
model = model_module.TimelineModel(trace_result)
renderer_thread = model.GetRendererThreadFromTab(tab)
meta_metrics = _TimelineBasedMetrics(model, renderer_thread)
meta_metrics.AddResults(results)
| bsd-3-clause | 5,216,105,921,857,188,000 | 39.436364 | 80 | 0.74955 | false |
Sh4kE/fun-with-wiki-science | arxiv_import.py | 1 | 1272 | from bs4 import BeautifulSoup
import urllib
import config as c
from pymongo import MongoClient
client = MongoClient()
db = client.articles.entries
def gen_index(seed=db.count()):
i = seed
while True:
i +=1
yield i
index = gen_index()
def generate_filename(entry, directory = c.ARTICLE_DIR):
authors = [a.split()[-1] for a in entry['authors']]
authors = authors[0]+'_et.al' if len(authors) > 1 else authors[0]
title = entry['title'].replace(' ', '_')
return ''.join([directory, authors,'-',title , '.pdf'])
def fetch(url):
html_doc = urllib.urlopen(url).read()
s = BeautifulSoup(html_doc)
entries = [{
'pdf' : e.findAll('link',attrs={'type': 'application/pdf'})[0]['href'],
'url' : e.findAll('link',attrs={'type': 'text/html'})[0]['href'],
'authors': [a.text.strip() for a in e.findAll('author')],
'title': str(e.title.next),
'id': str.split(str(e.id.next),'/')[-1],
'index': next(index)
} for e in s.findAll('entry')]
entries = filter(lambda e: db.find_one({'id': e['id']}) == None, entries)
for entry in entries:
entry['path'] = generate_filename(entry)
map(lambda e: urllib.urlretrieve(e['pdf'], e['path']), entries)
if entries:
db.insert(entries)
return [e['index'] for e in entries]
| mit | 7,861,832,377,089,369,000 | 30.8 | 75 | 0.623428 | false |
sophron/wifiphisher | wifiphisher/extensions/roguehostapdinfo.py | 1 | 4097 | """
Extension that interacts with roguehostapd to print relevant information. For example,
information regarding automatic association attacks.
"""
from collections import defaultdict
import wifiphisher.common.constants as constants
class Roguehostapdinfo(object):
"""
    Handles printing KARMA attack information
"""
def __init__(self, data):
"""
Setup the class with all the given arguments.
:param self: A roguehostapdinfo object.
:param data: Shared data from main engine
:type self: roguehostapdinfo
:type data: dictionary
:return: None
:rtype: None
"""
self._data = data
self._packets_to_send = defaultdict(list)
self._mac2ssid_dict = defaultdict()
self._known_beacon_ssids = self._get_known_beacon_ssids()
def get_packet(self, packet):
"""
:param self: A roguehostapdinfo object
:param packet: A scapy.layers.RadioTap object
:type self: roguehostapdinfo
:type packet: scapy.layers.RadioTap
:return: empty list
:rtype: list
"""
return self._packets_to_send
def _get_known_beacon_ssids(self):
"""
:param self: A roguehostapdinfo object
:type self: roguehostapdinfo
:return: None
:rtype: None
"""
known_beacons_ssids = set()
# locate the known WLANS file
if self._data.args.known_beacons:
area_file = constants.KNOWN_WLANS_FILE
with open(area_file) as _file:
for line in _file:
if line.startswith("!"):
continue
essid = line.rstrip()
known_beacons_ssids.add(essid)
return known_beacons_ssids
def send_output(self):
"""
Send the output the extension manager
:param self: A roguehostapdinfo object.
:type self: roguehostapdinfo
:return: A list with the password checking information
:rtype: list
..note: In each packet we ask roguehostapd whether there are victims
associated to rogue AP
"""
info = []
ssid_mac_list = self._data.roguehostapd.get_karma_data()
try:
mac_list, ssid_list = zip(*ssid_mac_list)
except ValueError:
# incase ssid_mac_list is still empty
mac_list = []
ssid_list = []
# remove the one not in the current associated list
pop_macs = []
for mac in self._mac2ssid_dict:
if mac not in mac_list:
pop_macs.append(mac)
for key in pop_macs:
self._mac2ssid_dict.pop(key)
# add new associated victims to the dictionary
for idx, mac in enumerate(mac_list):
if mac not in self._mac2ssid_dict:
self._mac2ssid_dict[mac] = ssid_list[idx]
macssid_pairs = self._mac2ssid_dict.items()
for mac, ssid in macssid_pairs:
if ssid == self._data.target_ap_essid:
outputstr = "Victim " + mac + " probed for WLAN with ESSID: '" + ssid + "' (Evil Twin)"
elif ssid not in self._known_beacon_ssids:
outputstr = "Victim " + mac + " probed for WLAN with ESSID: '" + ssid + "' (KARMA)"
else:
outputstr = "Victim " + mac + " probed for WLAN with ESSID: '" + ssid + "' (Known Beacons)"
info.append(outputstr)
return info
def send_channels(self):
"""
Send channels to subscribe
:param self: A roguehostapdinfo object.
:type self: roguehostapdinfo
:return: empty list
:rtype: list
..note: we don't need to send frames in this extension
"""
return [self._data.target_ap_channel]
def on_exit(self):
"""
        Free all the resources related to this module
:param self: A roguehostapdinfo object.
:type self: roguehostapdinfo
:return: None
:rtype: None
"""
pass
| gpl-3.0 | -7,268,679,541,904,218,000 | 32.040323 | 107 | 0.569929 | false |
ruzhytskyi/Koans | python2/koans/about_sets.py | 1 | 1706 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutSets(Koan):
def test_sets_make_keep_lists_unique(self):
highlanders = ['MacLeod', 'Ramirez', 'MacLeod', 'Matunas',
'MacLeod', 'Malcolm', 'MacLeod']
there_can_only_be_only_one = set(highlanders)
self.assertEqual(set(['MacLeod', 'Ramirez', 'Matunas', 'Malcolm']), there_can_only_be_only_one)
def test_sets_are_unordered(self):
self.assertEqual(set(['5', '4', '3', '2', '1']), set('12345'))
def test_convert_the_set_into_a_list_to_sort_it(self):
self.assertEqual(['1', '2', '3', '4', '5'], sorted(set('13245')))
# ------------------------------------------------------------------
def test_set_have_arithmetic_operators(self):
scotsmen = set(['MacLeod', 'Wallace', 'Willie'])
warriors = set(['MacLeod', 'Wallace', 'Leonidas'])
self.assertEqual(set(['Willie']), scotsmen - warriors)
self.assertEqual(set(['MacLeod', 'Wallace', 'Willie', 'Leonidas']), scotsmen | warriors)
self.assertEqual(set(['MacLeod', 'Wallace']), scotsmen & warriors)
self.assertEqual(set(['Willie', 'Leonidas']), scotsmen ^ warriors)
# ------------------------------------------------------------------
def test_we_can_query_set_membership(self):
self.assertEqual(True, 127 in set([127, 0, 0, 1]))
self.assertEqual(True, 'cow' not in set('apocalypse now'))
def test_we_can_compare_subsets(self):
self.assertEqual(True, set('cake') <= set('cherry cake'))
self.assertEqual(True, set('cake').issubset(set('cherry cake')))
self.assertEqual(False, set('cake') > set('pie'))
| mit | 7,430,903,744,952,290,000 | 38.674419 | 103 | 0.556858 | false |
abutcher/cf-diff | cfdiff/cfdiff.py | 1 | 4597 | # -*- coding: utf-8 -*-
# cf-diff Cloud Formations Diff Tool
# Copyright © 2014, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
cfdiff cloudformation stack comparison
"""
import boto.cloudformation
import ConfigParser
import difflib
import glob
import os
import tempfile
from termcolor import colored
import sys
import json
import types
def _purify_json(json_text):
""" Ensure that 2 JSON objects are indented and formatted
in exactly the same way for unified diff-ing.
`json_text` - A string containing JSON-formatted data
"""
assert(isinstance(json_text, types.StringTypes))
json_data = json.loads(json_text)
return json.dumps(json_data, sort_keys=True,
separators=(",", ":"), indent=4)
class cfdiff(object):
def __init__(self, args):
self.args = args
self.config = self.load_config(args.stackname)
def compare(self, stackname=None):
"""
Compare a remote stack template with your local stack template.
`stackname` - The name of the stack to compare.
"""
remote_template = _purify_json(self.load_remote_template(stackname))
local_template = _purify_json(self.load_local_template())
for line in difflib.unified_diff(remote_template.splitlines(),
local_template.splitlines(),
fromfile='remote', tofile='local'):
if line.startswith('-'):
print colored(line, 'red')
elif line.startswith('+'):
print colored(line, 'green')
else:
print line
def load_config(self, stackname=None):
"""
Load configuration for a specific stack.
`stackname`: The name of the stack to load configuration for.
"""
config_path = os.path.expanduser('~/.config/cfdiff/config')
config = ConfigParser.SafeConfigParser()
configs = []
cfg = {}
if not os.path.exists(config_path):
print "No config located at %s check the README" % config_path
sys.exit(1)
configs.append(config_path)
config.read(configs)
for section in config.sections():
cfg[section] = dict(config.items(section))
if stackname not in cfg.keys():
print "%s isn't a stack in your configs" % stackname
sys.exit(1)
return cfg[stackname]
def load_local_template(self):
"""
Load local template file.
"""
if os.path.exists(self.config['location']):
if os.path.isdir(self.config['location']):
read_files = sorted(glob.glob(self.config['location'] + '*'))
local_output = tempfile.NamedTemporaryFile(mode='w+')
for f in read_files:
with open(f, 'r') as infile:
local_output.write(infile.read())
local_output.seek(0)
data = local_output.read()
local_output.close()
return data
else:
return open(self.config['location']).read()
else:
print "%s file does not exist" % self.config['location']
sys.exit(1)
def load_remote_template(self, stackname=None):
"""
Load remote template file.
"""
conn = boto.cloudformation.connect_to_region(self.config['region'],
aws_access_key_id=self.config['access_key'],
aws_secret_access_key=self.config['secret_key'])
stack = conn.describe_stacks(stack_name_or_id=stackname)
template = stack[0].get_template()['GetTemplateResponse']['GetTemplateResult']['TemplateBody']
remote_output = tempfile.NamedTemporaryFile(mode='w+')
remote_output.write(template)
remote_output.seek(0)
data = remote_output.read()
remote_output.close()
return data
| gpl-3.0 | 270,328,807,369,022,750 | 36.365854 | 102 | 0.601828 | false |
gdreid/job-salary-prediction | code/vectorization/text_extractor.py | 1 | 6819 | '''
Created on 2016
@author: Graham Reid
Builds a 2-gram vectorizer using scikit-learn's CountVectorizer. The only really
interesting thing here is that I didn't concatenate all of the fields together,
which helps to preserve context.
'''
import random
import pickle
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from scipy.sparse import hstack
import csv
import string
trainPath = '../../data/data_raw.csv'
dataFile = open(trainPath)
reader = csv.reader(dataFile)
headers = reader.next()
target_index = headers.index('SalaryNormalized')
title_index = headers.index('Title')
description_index = headers.index('FullDescription')
location_index = headers.index('LocationRaw')
contract_type_index = headers.index('ContractType')
contract_time_index = headers.index('ContractTime')
company_index = headers.index('Company')
category_index = headers.index('Category')
source_index = headers.index('SourceName')
file_len = 0
for line in reader:
file_len = file_len +1
dataFile.seek(0)
reader.next()
salary_array = []
title_array = []
location_array = []
description_array = []
contract_type_array = []
contract_time_array = []
company_array = []
category_array = []
source_array = []
title_train_array = []
location_train_array = []
description_train_array = []
contract_type_train_array = []
contract_time_train_array = []
company_train_array = []
category_train_array = []
source_train_array = []
def format_string(field) :
return field.lower().translate(string.maketrans("",""), string.punctuation)
read_fraction = 1.0
training_indices = np.random.randint(0, file_len, int(file_len*read_fraction))
print 'reading data'
index = 0
for line in reader:
salary_array.append(np.log(float(line[target_index])))
title_array.append(format_string(line[title_index]))
description_array.append(format_string(line[description_index]))
location_array.append(format_string(line[location_index]))
contract_type_array.append(format_string(line[contract_type_index]))
contract_time_array.append(format_string(line[contract_time_index]))
company_array.append(format_string(line[company_index]))
category_array.append(format_string(line[category_index]))
source_array.append(format_string(line[source_index]))
index = index + 1
'''
for anything larger than unigrams, descriptions might be too large to be loaded
into memory all at once. Need to use some smaller read_fraction of documents
'''
for i in training_indices:
title_train_array.append(title_array[i])
description_train_array.append(description_array[i])
location_train_array.append(location_array[i])
contract_time_train_array.append(contract_time_array[i])
contract_type_train_array.append(contract_type_array[i])
company_train_array.append(company_array[i])
category_train_array.append(category_array[i])
source_train_array.append(source_array[i])
print 'creating vectorizers'
'''
word must be present in at least this fraction of the docments to be
vectorized (removes one-time mispellings, etc)
'''
fraction = 1.0/10000.0
title_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len), ngram_range = (1,2))
description_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len*read_fraction), ngram_range = (1,2))
location_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len), ngram_range = (1,2))
contract_time_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len), ngram_range = (1,2))
contract_type_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len), ngram_range = (1,2))
company_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len), ngram_range = (1,2))
category_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len), ngram_range = (1,2))
source_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len))
title_vectorizer.fit(title_array)
title_count_array = title_vectorizer.transform(title_array)
print 'title fit, shape: ', title_count_array.shape
description_vectorizer.fit(description_train_array)
description_count_array = description_vectorizer.transform(description_array)
print 'description fit, shape: ', description_count_array.shape
location_vectorizer.fit(location_array)
location_count_array = location_vectorizer.transform(location_array)
print 'location fit, shape: ', location_count_array.shape
contract_time_vectorizer.fit(contract_time_array)
contract_time_count_array = contract_time_vectorizer.transform(contract_time_array)
print 'contract time fit, shape: ', contract_time_count_array.shape
contract_type_vectorizer.fit(contract_type_array)
contract_type_count_array = contract_type_vectorizer.transform(contract_type_array)
print 'contract type fit, shape: ', contract_type_count_array.shape
company_vectorizer.fit(company_array)
company_count_array = company_vectorizer.transform(company_array)
print 'company fit, shape: ', company_count_array.shape
category_vectorizer.fit(category_array)
category_count_array = category_vectorizer.transform(category_array)
print 'category fit, shape: ', category_count_array.shape
source_vectorizer.fit(source_array)
source_count_array = source_vectorizer.transform(source_array)
print 'source fit, shape: ', source_count_array.shape
data_array = hstack([title_count_array, description_count_array,
location_count_array, contract_time_count_array, contract_type_count_array,
company_count_array, category_count_array, source_count_array])
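# Illustrative note (added commentary): data_array keeps one row per job posting;
# its column count is the sum of the vocabulary sizes of the eight field
# vectorizers stacked above, stored as a sparse matrix.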
print 'data stacked'
with open('../../data/data_arrays/data_binary_bigram.pk', 'wb') as output:
pickle.dump(data_array, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(salary_array, output, pickle.HIGHEST_PROTOCOL)
with open('../../data/vectorizers/vectorizers_binary_bigram.pk', 'wb') as output:
pickle.dump(title_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(description_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(location_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(contract_time_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(contract_type_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(company_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(category_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(source_vectorizer, output, pickle.HIGHEST_PROTOCOL)
print 'data_array read and written'
print 'data_array shape: ', data_array.shape
| gpl-3.0 | 5,507,033,565,940,186,000 | 35.859459 | 83 | 0.751723 | false |
baseride/pyZabbixSender | pyZabbixSender/pyZabbixSenderBase.py | 1 | 7074 | # -*- coding: utf-8
# Copyright 2015 Kurt Momberg <kurtqm (at) yahoo(dot)com(dot)ar>
# > Based on work by Klimenko Artyem <aklim007(at)gmail(dot)com>
# >> Based on work by Rob Cherry <zsend(at)lxrb(dot)com>
# >>> Based on work by Enrico Tröger <enrico(dot)troeger(at)uvena(dot)de>
# License: GNU GPLv2
import struct
import time
import sys
import re
# If you're using an old version of python that don't have json available,
# you can use simplejson instead: https://simplejson.readthedocs.org/en/latest/
try:
import json
except ImportError:
import simplejson as json
class InvalidResponse(Exception):
pass
class pyZabbixSenderBase:
'''
This class creates network-agnostic data structures to send data to a Zabbix server
'''
ZABBIX_SERVER = "127.0.0.1"
ZABBIX_PORT = 10051
def __init__(self, server=ZABBIX_SERVER, port=ZABBIX_PORT, verbose=False):
'''
#####Description:
This is the constructor, to obtain an object of type pyZabbixSender, linked to work with a specific server/port.
#####Parameters:
* **server**: [in] [string] [optional] This is the server domain name or IP. *Default value: "127.0.0.1"*
* **port**: [in] [integer] [optional] This is the port open in the server to receive zabbix traps. *Default value: 10051*
* **verbose**: [in] [boolean] [optional] This is to allow the library to write some output to stderr when finds an error. *Default value: False*
**Note: The "verbose" parameter will be revisited and could be removed/replaced in the future**
#####Return:
It returns a pyZabbixSender object.
'''
self.zserver = server
self.zport = port
self.verbose = verbose
self.timeout = 5 # Socket connection timeout.
self._data = [] # This is to store data to be sent later.
def __str__(self):
'''
This allows you to obtain a string representation of the internal data
'''
return str(self._data)
def _createDataPoint(self, host, key, value, clock=None):
'''
Creates a dictionary using provided parameters, as needed for sending this data.
'''
obj = {
'host': host,
'key': key,
'value': value,
}
if clock:
obj['clock'] = clock
return obj
def addData(self, host, key, value, clock=None):
'''
#####Description:
Adds host, key, value and optionally clock to the internal list of data to be sent later, when calling one of the methods to actually send the data to the server.
#####Parameters:
* **host**: [in] [string] [mandatory] The host which the data is associated to.
* **key**: [in] [string] [mandatory] The name of the trap associated to the host in the Zabbix server.
* **value**: [in] [any] [mandatory] The value you want to send. Please note that you need to take care about the type, as it needs to match key definition in the Zabbix server. Numeric types can be specified as number (for example: 12) or text (for example: "12").
* **clock**: [in] [integer] [optional] Here you can specify the Unix timestamp associated to your measurement. For example, you can process a log or a data file produced an hour ago, and you want to send the data with the timestamp when the data was produced, not when it was processed by you. If you don't specify this parameter, zabbix server will assign a timestamp when it receives the data.
You can create a timestamp compatible with "clock" parameter using this code:
int(round(time.time()))
*Default value: None*
#####Return:
This method doesn't have a return.
'''
obj = self._createDataPoint(host, key, value, clock)
self._data.append(obj)
def clearData(self):
'''
#####Description:
        This method removes all data from internal storage. You need to call it explicitly, as it's not done automatically after a data send operation.
#####Parameters:
None
#####Return:
None
'''
self._data = []
def getData(self):
'''
#####Description:
This method is used to obtain a copy of the internal data stored in the object.
Please note you will **NOT** get the internal data object, but a copy of it, so no matter what you do with your copy, internal data will remain safe.
#####Parameters:
None
#####Return:
A copy of the internal data you added using the method *addData* (an array of dicts).
'''
copy_of_data = []
for data_point in self._data:
copy_of_data.append(data_point.copy())
return copy_of_data
def printData(self):
'''
#####Description:
Print stored data (to stdout), so you can see what will be sent if "sendData" is called. This is useful for debugging purposes.
#####Parameters:
None
#####Return:
None
'''
for elem in self._data:
print str(elem)
print 'Count: %d' % len(self._data)
def removeDataPoint(self, data_point):
'''
#####Description:
        This method deletes one data point from the internally stored data.
        Its main purpose is to narrow the internal data down to those failed data points (those that were not received/processed by the server) so you can identify/retry them. Data points can be obtained from *sendDataOneByOne* return, or from *getData* return.
#####Parameters:
* **data_point**: [in] [dict] [mandatory] This is a dictionary as returned by *sendDataOneByOne()* or *getData* methods.
#####Return:
It returns True if data_point was found and deleted, and False if not.
'''
if data_point in self._data:
self._data.remove(data_point)
return True
return False
def recognize_response_raw(response_raw):
return recognize_response(json.loads(response_raw))
FAILED_COUNTER = re.compile('^.*failed.+?(\d+).*$')
PROCESSED_COUNTER = re.compile('^.*processed.+?(\d+).*$')
SECONDS_SPENT = re.compile('^.*seconds spent.+?((-|\+|\d|\.|e|E)+).*$')
def recognize_response(response):
failed = FAILED_COUNTER.match(response['info'].lower() if 'info' in response else '')
processed = PROCESSED_COUNTER.match(response['info'].lower() if 'info' in response else '')
seconds_spent = SECONDS_SPENT.match(response['info'].lower() if 'info' in response else '')
if failed is None or processed is None:
        raise InvalidResponse('Unable to parse server response', response)
failed = int(failed.group(1))
processed = int(processed.group(1))
seconds_spent = float(seconds_spent.group(1)) if seconds_spent else None
response['info'] = {
'failed':failed,
'processed':processed,
'seconds spent':seconds_spent
}
return response
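# Hedged usage sketch (added for illustration; the counts below are invented,
# and the 'info' string follows the usual Zabbix trapper reply format):
#
#   reply = {'response': 'success',
#            'info': 'processed: 2; failed: 1; total: 3; seconds spent: 0.000070'}
#   parsed = recognize_response(reply)
#   # parsed['info'] -> {'failed': 1, 'processed': 2, 'seconds spent': 7e-05}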
| gpl-2.0 | -1,269,972,157,071,972,000 | 37.237838 | 403 | 0.625954 | false |
tanchao/algo | interviews/zenefits/three_sum.py | 1 | 1745 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'tanchao'
class Solution:
# @param {integer[]} nums
# @return {integer[][]}
def threeSum(self, nums):
res = []
if len(nums) < 3:
return res
nums.sort() # sorted array for value judgement
for i in range(0, len(nums) - 2):
if i == 0 or nums[i] > nums[i - 1]: # note skip duplication on every step
left = i + 1
right = len(nums) - 1
while right > left:
if nums[i] + nums[left] + nums[right] == 0:
res.append([nums[i], nums[left], nums[right]])
''' this is for less than
tmp = []
tmp.append(nums[i])
tmp.append(nums[left])
while right > left:
tmp.append(right)
res.append(tmp)
tmp.pop()
right -= 1'''
right -= 1
left += 1
while right > left and nums[left] == nums[left - 1]: left += 1
while right > left and nums[right] == nums[right + 1]: right -= 1
elif nums[i] + nums[left] + nums[right] > 0:
while right > left:
right -= 1
if nums[right] < nums[right + 1]: break
else:
while right > left:
left += 1
if nums[left] > nums[left - 1]: break
return res
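# Hedged usage sketch (input list invented for illustration):
#   Solution().threeSum([-1, 0, 1, 2, -1, -4])  ->  [[-1, -1, 2], [-1, 0, 1]]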
if __name__ == '__main__':
pass | mit | -540,101,211,688,594,900 | 36.148936 | 89 | 0.373066 | false |
IPIDataLab/Mongo-IRD-load | python/excell_parse.py | 1 | 9770 | #!/usr/bin/python
from xlrd import cellname
import re
from datetime import datetime
from utils import na_check, split_str_array, get_cell, geocode
#############################
#############################
# This file parses incoming
# excell workbooks to convert
# to JSON object array for input
# into MongoDB.
#############################
#############################
def parse(sheet, data):
# creat object where key = header name and value = columnm number
labels = sheet.row(0)
lkey = { str(labels[i]).replace("text:u","").replace("'","").lower(): i for i in range(0, len(labels)) }
# test by iterating over one sheet
nrows = sheet.nrows
for row_index in range(1, nrows):
# english names
a = get_cell(sheet,'MainNameEn',row_index,lkey)
if not a:
continue
# create document for each non-empty row
data.append({})
### ADD FIELDS
###
data[-1]['name_en'] = a
# UID
a = get_cell(sheet,'UID',row_index,lkey)
if a:
data[-1]['uid'] = a
# acronym
a = get_cell(sheet,'Acronym',row_index,lkey)
if a:
data[-1]['acronym'] = a
# main names
a = get_cell(sheet,'MainName',row_index,lkey,str_split=True)
if a:
data[-1]['main_names'] = a
# old names
a = get_cell(sheet,'MainOldNames',row_index,lkey,str_split=True)
if a:
data[-1]['old_alias'] = a
# parent organization
a = get_cell(sheet,'Organization',row_index,lkey,str_split=True)
if a:
data[-1]['parent_org'] = a
### ADD CONTACT DETAILS
###
# website
a = get_cell(sheet,'Web',row_index,lkey)
if a:
data[-1]['website'] = a
# email
a = get_cell(sheet,'Email',row_index,lkey)
if a:
data[-1]['email_gen'] = a
# contact established
a = get_cell(sheet,'ContactEstablished',row_index,lkey)
if a:
if a == 'Yes':
a = True
elif a == 'No':
a = False
data[-1]['contacted'] = a
# contact person
a = get_cell(sheet,'ContactPerson',row_index,lkey)
if a:
data[-1]['contact_person'] = (
a,
get_cell(sheet,'EmailContactPerson',row_index,lkey)
)
### ADD CHARACTERISTICS
###
# international
a = get_cell(sheet,'International',row_index,lkey)
if a:
if a == 'Yes':
a = True
elif a == 'No':
a = False
data[-1]['international'] = a
# type
org_type = get_cell(sheet,'Type',row_index,lkey)
org_type_array = []
if not org_type:
pass
elif org_type == 'No information':
pass
else:
for char in org_type:
org_type_array.append(char)
data[-1]["org_type"] = org_type_array
# thematic area of focus
a = get_cell(sheet,'Subject',row_index,lkey,str_split="([:;, ']|\. |\.$)")
if a:
# input control
b = []
for j in a:
i = j.lower()
if i == '3.':
i = '3'
if i == '1.h':
i = '1h'
# unspecified 2 and 6 are 2.9 and 6.9
i = re.sub(r'^([26])([a-m]?)$', r'\1.9\2', i)
i = re.sub(r'q', 'g', i)
## strict matrix check
if not re.match(r"^(1|2\.9|2\.1|2\.2|2\.3|3|4|5|6.9|6\.1|6\.2|6\.3|7|8|9|10|11|12)[a-m]?$", i):
print "please correct subject: '%s' in %s" % (i, get_cell(sheet,'MainNameEn',row_index,lkey))
else:
b.append(i)
if i != j :
print "autocorrect '%s' => '%s'" % (j,i)
data[-1]['subject'] = b
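        # Illustrative note (added commentary; example codes invented): subject
        # codes are lower-cased and normalized before the strict matrix check
        # above -- e.g. '3.' -> '3', '1.h' -> '1h', a bare '2a' -> '2.9a'
        # (unspecified 2/6 map onto 2.9/6.9) and a stray 'q' is corrected to 'g'.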
# structure
a = get_cell(sheet,'Structure',row_index,lkey)
if a:
data[-1]['structure'] = a
# to create array you would need to differentiate between delimiter and sub list in ()
# data[-1]["structure"] = split_str_array(structure, '; ',')
# finances
a = get_cell(sheet,'Finances',row_index,lkey)
if a:
data[-1]['finances'] = a
# Foundation
a = get_cell(sheet,'Foundation',row_index,lkey)
if a:
data[-1]['foundation'] = re.sub(r'\.0', '', a) ## get_cell gives... 1998.0
# history
a = get_cell(sheet,'History',row_index,lkey)
if a:
data[-1]['history'] = a
# aim
a = get_cell(sheet,'Aim',row_index,lkey)
if a:
data[-1]['aim'] = a
# aimURL
a = get_cell(sheet,'AimURL',row_index,lkey)
if a:
data[-1]['aim_URL'] = a
# IRD definition
a = get_cell(sheet,'IRDdefinition',row_index,lkey)
if a:
data[-1]['IRD_def'] = a
# IRD definition URL
a = get_cell(sheet,'IRDdefinitionURL',row_index,lkey)
if a:
data[-1]['IRD_def_URL'] = a
# religious affiliation
a = get_cell(sheet,'ReligiousAffiliation',row_index,lkey,str_split=True)
if a:
data[-1]['religious_affiliation'] = a
# languages
a = get_cell(sheet,'Languages',row_index,lkey,str_split=True)
if a:
data[-1]['languages'] = a
# Staff
a = get_cell(sheet,'Staff',row_index,lkey,str_split=True)
if a:
data[-1]['staff'] = a
# Region Focus
a = get_cell(sheet,'RegionFocus',row_index,lkey,str_split='[;,]')
if a:
data[-1]['regionfocus'] = a
### ADD ACTIVITIES
###
# General activities
a = get_cell(sheet,'Activities',row_index,lkey)
if a:
if a == 'No information':
data[-1]['general_activities'] =['No information']
elif a == 'See IRDActivities':
data[-1]['general_activities'] =['See IRDActivities']
else:
# regex to match pattern of <number>. <text>: to create true key values
activities = re.split('([0-9]{1,}\. [a-zA-Z \'\-!\0-9{1,}+,&]+:)',a)
activity_array = []
# activities = re.split('([0-9]{1,}\. [a-zA-Z ]+:)',a)
activity_name_array = []
activity_description_array = []
for activity in activities:
if activity == "":
pass
elif re.match('[0-9]\.',activity):
activity = re.sub('[0-9]\. ','',activity)
activity = re.sub(':','',activity)
activity_name_array.append(activity)
else:
activity = activity.strip()
activity_description_array.append(activity)
for x in xrange(1,len(activity_name_array)):
try:
activity_array.append({'activity_name':activity_name_array[x],'activity_description':activity_description_array[x]})
except IndexError:
print "!!ERROR: not as many activities descriptions as names in '%s' (%s)" % (get_cell(sheet,'Acronym',row_index,lkey), get_cell(sheet,'MainNameEn',row_index,lkey))
data[-1]['general_activities'] = activity_array
# IRD activities -- need to apply above model to separate activity name and activity description
a = get_cell(sheet,'IRDActivities',row_index,lkey)
if a:
if a == 'No information':
data[-1]['IRD_activities'] =['No information']
else:
IRD_activities_reg = re.split('[0-9]{1,2}\. ',get_cell(sheet,'IRDALocation',row_index,lkey))
IRD_activities = re.split('[0-9]{1,2}\. ',a)
IRD_activities_array = []
del IRD_activities[0]
del IRD_activities_reg[0]
## turn on to look for ragged array match
# if len(IRD_activities_reg) != len(IRD_activities):
# print name_en
# print IRD_activities_reg
# print IRD_activities
try:
for x in xrange(1,len(IRD_activities)):
region = re.sub('Lebanon, Syria, Egypt and Jordan', 'Lebanon; Syria; Egypt; Jordan', IRD_activities_reg[x])
region = re.sub('Bosnia and Herzegovina', 'Bosnia-Herzegovina', region)
region = re.sub('India, Pakistan, Bangladesh, Sri Lanka', 'India; Pakistan; Bangladesh; Sri Lanka', region)
region = re.sub('St. Jean Cap', 'St Jean Cap', region)
region = re.split('[;\.]| and ', region)
region = [ i.strip() for i in region if i.strip() ]
IRD_activity_obj = {
'activity' : IRD_activities[x],
'region' : region
}
IRD_activities_array.append(IRD_activity_obj)
except IndexError:
print "!!ERROR: non-matching number of activities and regions in '%s' (%s)" % (get_cell(sheet,'Acronym',row_index,lkey), get_cell(sheet,'MainNameEn',row_index,lkey))
data[-1]['IRD_activities'] = IRD_activities_array
# events
a = get_cell(sheet,'Events',row_index,lkey,str_split=True)
if a:
data[-1]['events'] = a
# publications
a = get_cell(sheet,'Publications',row_index,lkey,str_split=True)
if a:
data[-1]['publications'] = a
### RELATIONSHIPS
###
# IO relationships
a = get_cell(sheet,'RelationsIO',row_index,lkey,str_split=True)
if a:
data[-1]['IO_relations'] = a
# Other relationships
a = get_cell(sheet,'RelationsOther',row_index,lkey,str_split=True)
if a:
data[-1]['other_relations'] = a
# geocoding
addr = {}
geo = {}
for i in 'AddressMain/Address1/Address 1/Address2/Address 2/Address3/Address 3/Address4/Address 4/Address5/Address 5/Address3AndMore'.split('/'):
try:
a = get_cell(sheet, i, row_index,lkey)
#import ipdb; ipdb.set_trace()#
if a and a != 'No information':
geo = geocode(a)
geo['address'] = a
if i == 'AddressMain':
i = 'Address1'
if i == 'Address 1':
i = 'Address1'
if i == 'Address 2':
i = 'Address2'
if i == 'Address 3':
i = 'Address3'
if i == 'Address 4':
i = 'Address4'
if i == 'Address 5':
i = 'Address5'
addr[i] = geo
except KeyError:
pass
if addr:
data[-1]['adresses'] = addr
try:
country = re.sub(r', *\d+$', '', addr['Address1']['normalized'])
country = re.sub(r'^.*, ', '', country)
country = re.sub(r'(Al Jubail | *\d+ *)', '', country)
data[-1]['country'] = country
except KeyError:
pass
### ADD ENTRY STAMP DETAILs
###
a = get_cell(sheet,'Entry',row_index,lkey)
if not a:
pass
else:
try:
entry_value_array = split_str_array(a, ', ')
entry_date = datetime.strptime(entry_value_array[1], "%d.%m.%Y").date()
data[-1]["entry"] = {'author' : entry_value_array[0], 'date' : str(entry_date.year)+str(entry_date.month).zfill(2)+str(entry_date.day).zfill(2)}
except Exception:
# we don't care about this data format
#print "!!ERROR: bad format for entry date in '%s'" % a
data[-1]["entry"] = a;
return data
if __name__ == '__main__':
parse(sheet, data)
| gpl-2.0 | 8,808,311,664,993,507,000 | 26.521127 | 170 | 0.59304 | false |
mzhr/snakepig_engine | src/game.py | 1 | 1551 | import pyglet
from pyglet.window import key
from src import world
class GameStates:
MAIN_MENU = 0
GAME_LOAD = 1
GAME_PLAY = 2
GAME_MENU = 3
class Window(pyglet.window.Window):
def __init__(self, *args, **kwargs):
# Initialize window.
super(Window, self).__init__(800, 600, *args, **kwargs)
        # Initialize window icon.
self.icon = pyglet.image.load("data/icon.png")
self.set_icon(self.icon)
# Initialize initial game state.
        # Currently set to GAME_LOAD for testing purposes.
        # Should start on MAIN_MENU later on.
self.current_state = GameStates.GAME_LOAD
        # Initialize batch for image drawing.
self.batch = pyglet.graphics.Batch()
self.group_background = pyglet.graphics.OrderedGroup(0)
self.group_tile = pyglet.graphics.OrderedGroup(1)
self.group_character = pyglet.graphics.OrderedGroup(2)
self.group_text = pyglet.graphics.OrderedGroup(3)
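        # Illustrative note (added commentary): pyglet draws lower-numbered
        # OrderedGroups first, so backgrounds render below tiles, characters
        # and text within the single batch.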
self.backgrounds = []
self.tiles = []
self.characters = []
self.texts = []
        # Initialize input buffer.
self.keys = pyglet.window.key.KeyStateHandler()
self.push_handlers(self.keys)
# Initilize fps and update functions.
self.fps_display = pyglet.clock.ClockDisplay()
pyglet.clock.schedule_interval(self.update, 1/60.0)
def on_draw(self):
self.clear()
self.batch.draw()
self.fps_display.draw()
def update(self, dt):
if self.current_state == GameStates.GAME_LOAD:
self.game_world = world.World(self, "data/world.txt")
self.current_state = GameStates.GAME_PLAY
if self.current_state == GameStates.GAME_PLAY:
self.game_world.update()
| mit | 319,767,813,804,100,160 | 26.210526 | 57 | 0.711154 | false |
sheagcraig/python-jss | jss/jssobjectlist.py | 1 | 1229 | #!/usr/bin/env python
# Copyright (C) 2014-2017 Shea G Craig
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""jssobjectlist.py
Deprecated in favor of QuerySet.
"""
from __future__ import absolute_import
import warnings
from .queryset import QuerySet
class JSSObjectList(QuerySet):
"""Deprecated in favor of QuerySet."""
def __init__(self, factory, obj_class, objects=None):
super(JSSObjectList, self).__init__(objects)
warnings.warn(
'JSSObjectList is deprecated and will be removed in the future. '
'Please update code to use jss.QuerySet.', FutureWarning,
stacklevel=2)
| gpl-3.0 | 5,142,232,485,675,961,000 | 33.138889 | 77 | 0.719284 | false |
voidpp/vcp | vcp/system_package_manager_handlers.py | 1 | 3348 | import os
import re
import logging
from subprocess import check_call, check_output, CalledProcessError, PIPE
from abc import ABCMeta, abstractmethod, abstractproperty
import platform
from .exceptions import SystemPackageManagerHandlerException
logger = logging.getLogger(__name__)
def register(name, determiner):
def wrapper(cls):
cls.name = name
cls.determiner = determiner
SystemPackageManagerHandlerFactory.types[name] = cls
return cls
return wrapper
class SystemPackageManagerHandlerFactory(object):
types = {}
def create(self):
for name, cls in list(self.types.items()):
if cls.determiner.test():
return self.types[name]()
raise SystemPackageManagerHandlerException("Cannot determine the current system distro name.")
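# Hedged usage sketch of the registration pattern above (the 'project' object is
# assumed to be supplied by the caller, as elsewhere in vcp):
#
#   handler = SystemPackageManagerHandlerFactory().create()
#   missing = handler.get_not_installed_packages(project)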
class SystemPackageManagerHandlerHandlerBase(object, metaclass=ABCMeta):
def get_system_dependencies(self, project):
if self.name not in project.system_dependencies:
return []
return project.system_dependencies[self.name]
def get_not_installed_packages(self, project):
names = self.get_system_dependencies(project)
return [name for name in names if not self.is_package_installed(name)]
@abstractmethod
def is_package_installed(self, name):
pass
class DeterminerBase(object, metaclass=ABCMeta):
@abstractmethod
def test(self):
pass
class LinuxDeterminer(DeterminerBase):
def __init__(self, *distro_names):
self._names = distro_names
def test(self):
distro_name = platform.linux_distribution()[0].lower()
return distro_name in self._names
class MaxOSDeterminer(DeterminerBase):
def __init__(self, pkg_mgr):
self._pkg_mgr = pkg_mgr
def test(self):
mac_ver = platform.mac_ver()
# not mac
if len(mac_ver[0]) < 2:
return False
try:
check_call(['which', self._pkg_mgr], stdout = PIPE, stderr = PIPE)
return True
except CalledProcessError:
return False
@register('brew', MaxOSDeterminer('brew'))
class BrewHandler(SystemPackageManagerHandlerHandlerBase):
def is_package_installed(self, name):
try:
check_call(['brew', 'ls', '--versions', name], stdout = PIPE, stderr = PIPE)
return True
except CalledProcessError as e:
if e.returncode == 1:
return False
else:
raise
@register('dpkg', LinuxDeterminer('debian', 'ubuntu', 'linuxmint'))
class DPKGHandler(SystemPackageManagerHandlerHandlerBase):
def is_package_installed(self, name):
try:
check_call(['dpkg', '-s', name], stdout = PIPE, stderr = PIPE)
return True
except CalledProcessError as e:
if e.returncode == 1:
return False
else:
raise
@register('pacman', LinuxDeterminer('arch'))
class PacManHandler(SystemPackageManagerHandlerHandlerBase):
def is_package_installed(self, name):
try:
check_call(['pacman', '-Qi', name], stdout = PIPE, stderr = PIPE)
return True
except CalledProcessError as e:
if e.returncode == 1:
return False
else:
raise
| mit | -888,727,097,514,618,100 | 28.368421 | 102 | 0.629331 | false |
SylvainCecchetto/plugin.video.catchuptvandmore | plugin.video.catchuptvandmore/resources/lib/skeletons/pl_live.py | 1 | 2824 | # -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2016 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals as unicode like in Python 3
from __future__ import unicode_literals
from codequick import Script, utils
"""
The following dictionaries describe
the addon's tree architecture.
* Key: item id
* Value: item infos
- route (folder)/resolver (playable URL): Callback function to run once this item is selected
- thumb: Item thumb path relative to "media" folder
    - fanart: Item fanart path relative to "media" folder
"""
menu = {
'tvp3': {
'resolver': '/resources/lib/channels/pl/tvp:get_live_url',
'label': 'TVP 3 (' + utils.ensure_unicode(Script.setting['tvp3.language']) + ')',
'thumb':
'channels/pl/tvp3.png',
'fanart':
'channels/pl/tvp3_fanart.jpg',
'available_languages': [
"Białystok", "Bydgoszcz", "Gdańsk", "Gorzów Wielkopolski",
"Katowice", "Kielce", "Kraków", "Lublin", "Łódź", "Olsztyn",
"Opole", "Poznań", "Rzeszów", "Szczecin", "Warszawa", "Wrocław"
],
'enabled': True,
'order': 2
},
'tvpinfo': {
'resolver': '/resources/lib/channels/pl/tvp:get_live_url',
'label': 'TVP Info',
'thumb': 'channels/pl/tvpinfo.png',
'fanart': 'channels/pl/tvpinfo_fanart.jpg',
'enabled': True,
'order': 3
},
'tvppolonia': {
'resolver': '/resources/lib/channels/pl/tvp:get_live_url',
'label': 'TVP Polonia',
'thumb': 'channels/pl/tvppolonia.png',
'fanart': 'channels/pl/tvppolonia_fanart.jpg',
'enabled': True,
'order': 4
},
'tvppolandin': {
'resolver': '/resources/lib/channels/pl/tvp:get_live_url',
'label': 'TVP Poland IN',
'thumb': 'channels/pl/tvppolandin.png',
'fanart': 'channels/pl/tvppolandin_fanart.jpg',
'enabled': True,
'order': 5
}
}
| gpl-2.0 | -4,897,177,479,738,291,000 | 35.076923 | 97 | 0.625444 | false |
srusskih/SublimeBicycleRepair | bike/refactor/inlineVariable.py | 1 | 3533 | from bike.query.findDefinition import findAllPossibleDefinitionsByCoords
from bike.query.findReferences import findReferences
from bike.parsing.parserutils import maskStringsAndRemoveComments, linecontinueRE
from bike.transformer.undo import getUndoStack
from bike.transformer.save import queueFileToSave
from parser import ParserError
from bike.parsing.load import getSourceNode
import compiler
import re
def inlineLocalVariable(filename, lineno,col):
sourceobj = getSourceNode(filename)
return inlineLocalVariable_old(sourceobj, lineno,col)
def inlineLocalVariable_old(sourcenode,lineno,col):
definition, region, regionlinecount = getLocalVariableInfo(sourcenode, lineno, col)
addUndo(sourcenode)
replaceReferences(sourcenode, findReferences(sourcenode.filename, definition.lineno, definition.colno), region)
delLines(sourcenode, definition.lineno-1, regionlinecount)
updateSource(sourcenode)
def getLocalVariableInfo(sourcenode, lineno, col):
definition = findDefinition(sourcenode, lineno, col)
region, linecount = getRegionToInline(sourcenode, definition)
return definition, region, linecount
def findDefinition(sourcenode, lineno, col):
definition = findAllPossibleDefinitionsByCoords(sourcenode.filename,
lineno,col).next()
assert definition.confidence == 100
return definition
def getRegionToInline(sourcenode, defn):
line, linecount = getLineAndContinues(sourcenode, defn.lineno)
start, end = findRegionToInline(maskStringsAndRemoveComments(line))
return line[start:end], linecount
def findRegionToInline(maskedline):
match = re.compile("[^=]+=\s*(.+)$\n", re.DOTALL).match(maskedline)
assert match
return match.start(1), match.end(1)
# Possible refactoring: move to class of sourcenode
def getLineAndContinues(sourcenode, lineno):
line = sourcenode.getLine(lineno)
linecount = 1
while linecontinueRE.search(line):
line += sourcenode.getLine(lineno + linecount)
linecount += 1
return line, linecount
def addUndo(sourcenode):
getUndoStack().addSource(sourcenode.filename,sourcenode.getSource())
def replaceReferences(sourcenode, references, replacement):
for reference in safeReplaceOrder( references ):
replaceReference(sourcenode, reference, replacement)
def safeReplaceOrder( references ):
"""
When inlining a variable, if multiple instances occur on the line, then the
last reference must be replaced first. Otherwise the remaining intra-line
references will be incorrect.
"""
def safeReplaceOrderCmp(self, other):
return -cmp(self.colno, other.colno)
result = list(references)
result.sort(safeReplaceOrderCmp)
return result
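# Illustrative note (added commentary; example line invented): for a line such as
# "y = x + x", replacing the left-most reference first would shift the column
# offsets of the later reference, so references are replaced right-to-left.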
def replaceReference(sourcenode, ref, replacement):
""" sourcenode.getLines()[ref.lineno-1][ref.colno:ref.colend] = replacement
But strings don't support slice assignment as they are immutable. :(
"""
sourcenode.getLines()[ref.lineno-1] = \
replaceSubStr(sourcenode.getLines()[ref.lineno-1],
ref.colno, ref.colend, replacement)
def replaceSubStr(str, start, end, replacement):
return str[:start] + replacement + str[end:]
# Possible refactoring: move to class of sourcenode
def delLines(sourcenode, lineno, linecount=1):
del sourcenode.getLines()[lineno:lineno+linecount]
def updateSource(sourcenode):
queueFileToSave(sourcenode.filename,"".join(sourcenode.getLines()))
| mit | 3,738,141,442,008,807,000 | 36.585106 | 115 | 0.741296 | false |
dayatz/taiga-back | tests/unit/conftest.py | 1 | 1094 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2017 Jesús Espino <[email protected]>
# Copyright (C) 2014-2017 David Barragán <[email protected]>
# Copyright (C) 2014-2017 Alejandro Alonso <[email protected]>
# Copyright (C) 2014-2017 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ..utils import disconnect_signals
def pytest_runtest_setup(item):
disconnect_signals()
| agpl-3.0 | -6,924,775,376,019,081,000 | 44.458333 | 74 | 0.756187 | false |
jkandasa/integration_tests | cfme/tests/cli/test_appliance_console.py | 1 | 14121 | import pytest
from collections import namedtuple
from wait_for import wait_for
from cfme.utils import os
from cfme.utils.log_validator import LogValidator
from cfme.utils.log import logger
from cfme.utils.conf import hidden
import tempfile
import lxml.etree
import yaml
TimedCommand = namedtuple('TimedCommand', ['command', 'timeout'])
LoginOption = namedtuple('LoginOption', ['name', 'option', 'index'])
TZ = namedtuple('TimeZone', ['name', 'option'])
tzs = [
TZ('Africa/Abidjan', ('1', '1')),
TZ('America/Argentina/Buenos_Aires', ('2', '6', '1')),
TZ('Antarctica/Casey', ('3', 'q', '1')),
TZ('Arctic/Longyearbyen', ('4', 'q', '1')),
TZ('Asia/Aden', ('5', '1')),
TZ('Atlantic/Azores', ('6', 'q', '1')),
TZ('Australia/Adelaide', ('7', 'q', '1')),
TZ('Europe/Amsterdam', ('8', '1')),
TZ('Indian/Antananarivo', ('9', 'q', '1')),
TZ('Pacific/Apia', ('10', '1')),
TZ('UTC', ('11',))
]
@pytest.mark.smoke
def test_black_console(appliance):
"""'ap | tee /tmp/opt.txt)' saves stdout to file, 'ap' launch appliance_console."""
command_set = ('ap | tee -a /tmp/opt.txt', 'ap')
appliance.appliance_console.run_commands(command_set)
assert appliance.ssh_client.run_command("cat /tmp/opt.txt | grep '{} Virtual Appliance'"
.format(appliance.product_name))
assert appliance.ssh_client.run_command("cat /tmp/opt.txt | grep '{} Database:'"
.format(appliance.product_name))
assert appliance.ssh_client.run_command("cat /tmp/opt.txt | grep '{} Version:'"
.format(appliance.product_name))
def test_black_console_set_hostname(appliance):
"""'ap' launch appliance_console, '' clear info screen, '1' loads network settings, '4/5' gives
access to set hostname, 'hostname' sets new hostname."""
hostname = 'test.example.com'
opt = ('1', '5') if appliance.version >= "5.8" else ('4',)
command_set = ('ap', '',) + opt + (hostname,)
appliance.appliance_console.run_commands(command_set)
def is_hostname_set(appliance):
assert appliance.ssh_client.run_command("hostname -f | grep {hostname}"
.format(hostname=hostname))
wait_for(is_hostname_set, func_args=[appliance])
return_code, output = appliance.ssh_client.run_command("hostname -f")
assert output.strip() == hostname
assert return_code == 0
@pytest.mark.parametrize('timezone', tzs, ids=[tz.name for tz in tzs])
def test_black_console_set_timezone(timezone, temp_appliance_preconfig_modscope):
"""'ap' launch appliance_console, '' clear info screen, '2/5' set timezone, 'opt' select
    region, 'timezone' selects zone, 'y' confirm selection, '' finish."""
opt = '2' if temp_appliance_preconfig_modscope.version >= "5.8" else '5'
command_set = ('ap', '', opt) + timezone[1] + ('y', '')
temp_appliance_preconfig_modscope.appliance_console.run_commands(command_set)
temp_appliance_preconfig_modscope.appliance_console.timezone_check(timezone)
def test_black_console_internal_db(app_creds, temp_appliance_unconfig_funcscope):
"""'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '1' Creates v2_key,
'1' selects internal db, 'y' continue, '1' use partition, 'n' don't create dedicated db, '0'
db region number, 'pwd' db password, 'pwd' confirm db password + wait 360 secs and '' finish."""
pwd = app_creds['password']
opt = '5' if temp_appliance_unconfig_funcscope.version >= "5.8" else '8'
command_set = ('ap', '', opt, '1', '1', 'y', '1', 'n', '0', pwd, TimedCommand(pwd, 360), '')
temp_appliance_unconfig_funcscope.appliance_console.run_commands(command_set)
temp_appliance_unconfig_funcscope.wait_for_evm_service()
temp_appliance_unconfig_funcscope.wait_for_web_ui()
def test_black_console_internal_db_reset(temp_appliance_preconfig_funcscope):
"""'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '4' reset db, 'y'
confirm db reset, '1' db region number + wait 360 secs, '' continue"""
opt = '5' if temp_appliance_preconfig_funcscope.version >= "5.8" else '8'
temp_appliance_preconfig_funcscope.ssh_client.run_command('systemctl stop evmserverd')
command_set = ('ap', '', opt, '4', 'y', TimedCommand('1', 360), '')
temp_appliance_preconfig_funcscope.appliance_console.run_commands(command_set)
temp_appliance_preconfig_funcscope.ssh_client.run_command('systemctl start evmserverd')
temp_appliance_preconfig_funcscope.wait_for_evm_service()
temp_appliance_preconfig_funcscope.wait_for_web_ui()
def test_black_console_dedicated_db(temp_appliance_unconfig_funcscope, app_creds):
"""'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '1' Creates v2_key,
'1' selects internal db, 'y' continue, '1' use partition, 'y' create dedicated db, 'pwd'
db password, 'pwd' confirm db password + wait 360 secs and '' finish."""
pwd = app_creds['password']
opt = '5' if temp_appliance_unconfig_funcscope.version >= "5.8" else '8'
command_set = ('ap', '', opt, '1', '1', 'y', '1', 'y', pwd, TimedCommand(pwd, 360), '')
temp_appliance_unconfig_funcscope.appliance_console.run_commands(command_set)
wait_for(lambda: temp_appliance_unconfig_funcscope.db.is_dedicated_active)
def test_black_console_external_db(temp_appliance_unconfig_funcscope, app_creds, appliance):
"""'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '2' fetch v2_key,
'ip' address to fetch from, '' default username, 'pwd' db password, '' default v2_key location,
    '3' join external region, 'port' ip and port of joining region, '' use default db name, ''
default username, 'pwd' db password, 'pwd' confirm db password + wait 360 secs and '' finish."""
ip = appliance.hostname
pwd = app_creds['password']
opt = '5' if temp_appliance_unconfig_funcscope.version >= "5.8" else '8'
port = (ip, '') if temp_appliance_unconfig_funcscope.version >= "5.8" else (ip,)
command_set = ('ap', '', opt, '2', ip, '', pwd, '', '3') + port + ('', '',
pwd, TimedCommand(pwd, 360), '')
temp_appliance_unconfig_funcscope.appliance_console.run_commands(command_set)
temp_appliance_unconfig_funcscope.wait_for_evm_service()
temp_appliance_unconfig_funcscope.wait_for_web_ui()
def test_black_console_external_db_create(app_creds, dedicated_db_appliance,
temp_appliance_unconfig_funcscope):
"""'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '1' create v2_key,
'2' create region in external db, '0' db region number, 'y' confirm create region in external db
    'port' ip and port for dedicated db, '' use default db name, '' default username, 'pwd' db
password, 'pwd' confirm db password + wait 360 secs and '' finish."""
ip = dedicated_db_appliance.hostname
pwd = app_creds['password']
opt = '5' if temp_appliance_unconfig_funcscope.version >= "5.8" else '8'
port = (ip, '') if temp_appliance_unconfig_funcscope.version >= "5.8" else (ip,)
command_set = ('ap', '', opt, '1', '2', '0', 'y') + port + ('', '', pwd,
TimedCommand(pwd, 360), '')
temp_appliance_unconfig_funcscope.appliance_console.run_commands(command_set)
temp_appliance_unconfig_funcscope.wait_for_evm_service()
temp_appliance_unconfig_funcscope.wait_for_web_ui()
def test_black_console_extend_storage(unconfigured_appliance):
"""'ap' launches appliance_console, '' clears info screen, '10/13' extend storage, '1' select
disk, 'y' confirm configuration and '' complete."""
opt = '10' if unconfigured_appliance.version >= "5.8" else '13'
command_set = ('ap', '', opt, '1', 'y', '')
unconfigured_appliance.appliance_console.run_commands(command_set)
def is_storage_extended(unconfigured_appliance):
assert unconfigured_appliance.ssh_client.run_command("df -h | grep /var/www/miq_tmp")
wait_for(is_storage_extended, func_args=[unconfigured_appliance])
@pytest.mark.skip('No IPA servers currently available')
def test_black_console_ipa(ipa_creds, configured_appliance):
"""'ap' launches appliance_console, '' clears info screen, '11/14' setup IPA, 'y' confirm setup
+ wait 40 secs and '' finish."""
opt = '11' if configured_appliance.version >= "5.8" else '14'
command_set = ('ap', '', opt, ipa_creds['hostname'], ipa_creds['domain'], '',
ipa_creds['username'], ipa_creds['password'], TimedCommand('y', 40), '')
configured_appliance.appliance_console.run_commands(command_set)
def is_sssd_running(configured_appliance):
assert configured_appliance.ssh_client.run_command("systemctl status sssd | grep running")
wait_for(is_sssd_running, func_args=[configured_appliance])
return_code, output = configured_appliance.ssh_client.run_command(
"cat /etc/ipa/default.conf | grep 'enable_ra = True'")
assert return_code == 0
@pytest.mark.skip('No IPA servers currently available')
@pytest.mark.parametrize('auth_type', [
LoginOption('sso', 'sso_enabled', '1'),
LoginOption('saml', 'saml_enabled', '2'),
LoginOption('local_login', 'local_login_disabled', '3')
], ids=['sso', 'saml', 'local_login'])
def test_black_console_external_auth(auth_type, app_creds, ipa_crud):
"""'ap' launches appliance_console, '' clears info screen, '12/15' change ext auth options,
'auth_type' auth type to change, '4' apply changes."""
evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
matched_patterns=['.*{} to true.*'.format(auth_type.option)],
hostname=ipa_crud.hostname,
username=app_creds['sshlogin'],
password=app_creds['password'])
evm_tail.fix_before_start()
opt = '12' if ipa_crud.version >= "5.8" else '15'
command_set = ('ap', '', opt, auth_type.index, '4')
ipa_crud.appliance_console.run_commands(command_set)
evm_tail.validate_logs()
evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
matched_patterns=['.*{} to false.*'.format(auth_type.option)],
hostname=ipa_crud.hostname,
username=app_creds['sshlogin'],
password=app_creds['password'])
evm_tail.fix_before_start()
opt = '12' if ipa_crud.version >= "5.8" else '15'
command_set = ('ap', '', opt, auth_type.index, '4')
ipa_crud.appliance_console.run_commands(command_set)
evm_tail.validate_logs()
@pytest.mark.skip('No IPA servers currently available')
def test_black_console_external_auth_all(app_creds, ipa_crud):
"""'ap' launches appliance_console, '' clears info screen, '12/15' change ext auth options,
'auth_type' auth type to change, '4' apply changes."""
evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
matched_patterns=['.*sso_enabled to true.*', '.*saml_enabled to true.*',
'.*local_login_disabled to true.*'],
hostname=ipa_crud.hostname,
username=app_creds['sshlogin'],
password=app_creds['password'])
evm_tail.fix_before_start()
opt = '12' if ipa_crud.version >= "5.8" else '15'
command_set = ('ap', '', opt, '1', '2', '3', '4')
ipa_crud.appliance_console.run_commands(command_set)
evm_tail.validate_logs()
evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
matched_patterns=['.*sso_enabled to false.*',
'.*saml_enabled to false.*', '.*local_login_disabled to false.*'],
hostname=ipa_crud.hostname,
username=app_creds['sshlogin'],
password=app_creds['password'])
evm_tail.fix_before_start()
opt = '12' if ipa_crud.version >= "5.8" else '15'
command_set = ('ap', '', opt, '1', '2', '3', '4')
ipa_crud.appliance_console.run_commands(command_set)
evm_tail.validate_logs()
def test_black_console_scap(temp_appliance_preconfig, soft_assert):
"""'ap' launches appliance_console, '' clears info screen, '14/17' Hardens appliance using SCAP
configuration, '' complete."""
opt = '14' if temp_appliance_preconfig.version >= "5.8" else '17'
command_set = ('ap', '', opt, '')
temp_appliance_preconfig.appliance_console.run_commands(command_set)
with tempfile.NamedTemporaryFile('w') as f:
f.write(hidden['scap.rb'])
f.flush()
os.fsync(f.fileno())
temp_appliance_preconfig.ssh_client.put_file(
f.name, '/tmp/scap.rb')
if temp_appliance_preconfig.version >= "5.8":
rules = '/var/www/miq/vmdb/productization/appliance_console/config/scap_rules.yml'
else:
rules = '/var/www/miq/vmdb/gems/pending/appliance_console/config/scap_rules.yml'
temp_appliance_preconfig.ssh_client.run_command('cd /tmp/ && ruby scap.rb '
'--rulesfile={rules}'.format(rules=rules))
temp_appliance_preconfig.ssh_client.get_file(
'/tmp/scap-results.xccdf.xml', '/tmp/scap-results.xccdf.xml')
temp_appliance_preconfig.ssh_client.get_file(
'{rules}'.format(rules=rules), '/tmp/scap_rules.yml') # Get the scap rules
with open('/tmp/scap_rules.yml') as f:
yml = yaml.load(f.read())
rules = yml['rules']
tree = lxml.etree.parse('/tmp/scap-results.xccdf.xml')
root = tree.getroot()
for rule in rules:
elements = root.findall(
'.//{{http://checklists.nist.gov/xccdf/1.1}}rule-result[@idref="{}"]'.format(rule))
if elements:
result = elements[0].findall('./{http://checklists.nist.gov/xccdf/1.1}result')
if result:
soft_assert(result[0].text == 'pass')
logger.info("{}: {}".format(rule, result[0].text))
else:
logger.info("{}: no result".format(rule))
else:
logger.info("{}: rule not found".format(rule))
| gpl-2.0 | -3,463,984,314,061,797,400 | 48.547368 | 100 | 0.629913 | false |
MLR-au/esrc-cnex | service/app/config.py | 1 | 3159 |
import os
import sys
import os.path
import ConfigParser
import collections
import traceback
import ast
from pyramid.httpexceptions import HTTPBadRequest
import logging
log = logging.getLogger(__name__)
class ConfigBase:
def __init__(self):
pass
def get(self, section, param, aslist=False):
data = self.cfg.get(section, param) if (self.cfg.has_section(section) and self.cfg.has_option(section, param)) else None
if data == None:
log.error("Missing parameter %s in section %s" % (param, section))
if aslist:
return [ d.strip() for d in data.split(',') ]
return data
class Config(ConfigBase):
def __init__(self, conf):
"""
Expects to be called with a pyramid request object.
The path to the configs will be extracted from the pyramid
configuration and a config object will be returned.
The params from the config will be available as instance
variables.
@params:
request: a pyramid request object
"""
self.cfg = ConfigParser.SafeConfigParser()
try:
self.cfg.read(conf)
except ConfigParser.ParsingError:
log.error('Config file parsing errors')
log.error(sys.exc_info()[1])
sys.exit()
self.app_config = {
'general': {
'token': self.get('GENERAL', 'token'),
'data_age': self.get('GENERAL', 'data_age'),
'sites': self.get('GENERAL', 'sites'),
'disable_auth': ast.literal_eval(self.get('GENERAL', 'disable_auth')),
'share_path': self.get('GENERAL', 'share_path'),
'share_url': self.get('GENERAL', 'share_url'),
},
'mongodb': {
'nodes': self.get('MONGODB', 'nodes', aslist=True),
'user': self.get('MONGODB', 'user'),
'pass': self.get('MONGODB', 'pass'),
'db': self.get('MONGODB', 'db'),
'replica_set': self.get('MONGODB', 'replica.set'),
'write_concern': self.get('MONGODB', 'write.concern')
}
}
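# A minimal config file matching the keys read above might look like this
# (all values are placeholders):
#
#   [GENERAL]
#   token = some-secret-token
#   data_age = 30
#   sites = sites.d
#   disable_auth = False
#   share_path = /srv/share
#   share_url = https://example.org/share
#
#   [MONGODB]
#   nodes = localhost:27017
#   user = mongo-user
#   pass = mongo-pass
#   db = esrc
#   replica.set = rs0
#   write.concern = 1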
class SiteConfig(ConfigBase):
def __init__(self, conf):
self.cfg = ConfigParser.SafeConfigParser()
try:
self.cfg.read(conf)
except ConfigParser.ParsingError:
log.error('Config file parsing errors')
log.error(sys.exc_info()[1])
sys.exit()
def load(self, site):
conf = {}
conf['code'] = self.get('GENERAL', 'code')
conf['name'] = self.get('GENERAL', 'name')
conf['url'] = self.get('GENERAL', 'url')
conf['eac'] = self.get('GENERAL', 'eac')
datamap = self.get('GENERAL', 'map', aslist=True)
conf['map'] = {}
conf['map']['source'] = datamap[0]
conf['map']['localpath'] = datamap[1]
conf['public'] = ast.literal_eval(self.get('GENERAL', 'public'))
conf['allow_groups'] = self.get('GENERAL', 'allow_groups', aslist=True)
conf['allow_users'] = self.get('GENERAL', 'allow_users', aslist=True)
return conf
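# A per-site config consumed by load() carries the corresponding keys
# (placeholder values; 'map' is "source, localpath"):
#
#   [GENERAL]
#   code = SITEA
#   name = Example Site
#   url = https://example.org/sitea
#   eac = /srv/sitea/eac
#   map = https://example.org/sitea/data.xml, /srv/sitea/data.xml
#   public = True
#   allow_groups = staff, admins
#   allow_users = alice, bob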
| bsd-3-clause | -5,075,897,506,453,778,000 | 32.606383 | 128 | 0.552073 | false |
wagnerpeer/gitexplorer | gitexplorer/visualizations/punchcard.py | 1 | 3117 | '''
Created on 28.08.2017
@author: Peer
'''
from collections import defaultdict
import datetime
from itertools import chain
import matplotlib.pyplot as plt
from gitexplorer.basics import GitExplorerBase
def draw_punchcard(infos,
xaxis_range=24,
yaxis_range=7,
xaxis_ticks=range(24),
yaxis_ticks=['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'],
xaxis_label='Hour',
yaxis_label='Day'):
# build the array which contains the values
data = [[0.0] * xaxis_range for _ in range(yaxis_range)]
for key, value in infos.items():
data[key[0]][key[1]] = value
max_value = float(max(chain.from_iterable(data)))
# Draw the punchcard (create one circle per element)
# Ugly normalisation allows to obtain perfect circles instead of ovals....
for x in range(xaxis_range):
for y in range(yaxis_range):
circle = plt.Circle((x, y),
data[y][x] / 2 / max_value)
plt.gca().add_artist(circle)
plt.xlim(0, xaxis_range)
plt.ylim(0, yaxis_range)
plt.xticks(range(xaxis_range), xaxis_ticks)
plt.yticks(range(yaxis_range), yaxis_ticks)
plt.xlabel(xaxis_label)
plt.ylabel(yaxis_label)
plt.gca().invert_yaxis()
# make sure the axes are equal, and resize the canvas to fit the plot
plt.axis('scaled')
margin = 0.7
plt.axis([-margin, 23 + margin, 6 + margin, -margin])
scale = 0.5
plt.gcf().set_size_inches(xaxis_range * scale, yaxis_range * scale, forward=True)
plt.tight_layout()
def collect_data(commits):
'''
'''
information = defaultdict(int)
for commit in commits:
information[(commit['date'].isoweekday() - 1, commit['date'].hour)] += 1
return information
def find_commits(reference_day=datetime.datetime.today(),
days_before_reference=30,
number_of_commits=None):
'''Load commits from database meeting certain conditions.
Parameters
----------
days_before_reference: int (>=0), optional
Limit commits to number of days before reference_day
number_of_commits: int (>=0), optional
Limit the number of commits. If given it takes precedence before days_before_today.
Returns
-------
Documents meeting criteria defined through parameters
'''
criteria = {}
if(number_of_commits is None):
datetime_limit = reference_day - datetime.timedelta(days=days_before_reference)
criteria = {'date': {'$lte': reference_day, '$gte': datetime_limit}}
gitexplorer_database = GitExplorerBase.get_gitexplorer_database()
cursor = gitexplorer_database['commit_collection'].find(criteria)
if(number_of_commits is not None):
cursor = cursor.limit(number_of_commits)
return cursor
if(__name__ == '__main__'):
infos = collect_data(find_commits(days_before_reference=90,
number_of_commits=None))
draw_punchcard(infos)
plt.show()
| mit | 8,640,303,030,174,708,000 | 27.861111 | 110 | 0.616619 | false |
jaked122/MUF-highlighting | MufLoad.py | 1 | 17718 | #!/bin/python3
from telnetlib import Telnet
import re
from hashlib import sha512
from typing import *
from time import sleep
from os import stat, path
import yaml
import argparse
import datetime
prognameMatch = re.compile("\(\(\( filename: (.+) \)\)\)")
progDependencyMatch = re.compile("\(\(\( dependsOn: (.+) \)\)\)")
progIncludeMatch = re.compile(
'\(\(\( includes: (.+) as (\.\.[a-zA-Z0-9-]+) \)\)\)')
programFinder = "@find {}\n"
programFinderRegex = re.compile(b"(.+)([0-9]+):.+")
programFinderTerminator = re.compile(b'\*\*\*End of List\*\*\*')
ProgramId = re.compile(b"Program .+ created with number ([0-9]+)")
ProgramId2 = re.compile(
b'Entering editor for .+\(#([0-9]+).+\)\.')
# Command to list content of a program, showing line numbers
programListCommand = "@dlist {}\n"
programListMatch = re.compile(b"\s*([0-9]+):(.+)\r\n")
programListTerminator = re.compile(b"[0-9]+ lines displayed\.")
editorInsertExitMatch = [re.compile(b"Exiting insert mode\.")]
editorCompilerStringMatch = [re.compile(
b"Compiler done\."), re.compile(b"^Error in line")]
editorExitStringMatch = [re.compile(b"Editor exited\.")]
objectModifiedStringFieldMatch = \
[
re.compile(b"Modified: (.+) by (.+)$"),
re.compile(b"I don't see that there\.$")
]
objectModificationCommand = "ex {}\n"
functionListCommand = "@listfunc {}\n"
functionListRegex = re.compile("\x1b\[[^m]*m")
# Goals:
# Manage Dependencies:
# Upload changed files in necessary order
# Replacing special tokens with the correct program reference
# Send minimal line-by-line diff in format accepted by @edit
# and @prog
# Provide Server-Client code synchronization when requested
# (This will be hard to do properly in a very noisy server)
# Provide cleanup functionality for things that totally fuck up the system
# Current stuff that needs doing
# [x] Determine if file needs to be updated due to the program being modified
# since it was last retrieved.
# [ ] Better latency handling for the editor commands.
# a. expect/error loop until match is found
# b. Maybe the telnet class could do with a wrapper class
# for handling this automatically.
# 3.
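# The header directives matched by the regexes above look like this inside a
# .muf source file (names below are illustrative only):
#
#   ((( filename: cmd-channel )))
#   ((( dependsOn: Channel/ChannelLib.muf )))
#   ((( includes: Channel/ChannelLib.muf as ..chanlib )))
#
# "filename" sets the in-game program name, "dependsOn" adds an edge to the
# dependency graph, and "includes" associates the macro token "..chanlib"
# with the included file.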
class SyncException(Exception):
def __init__(self, filename, remoteid):
super(Exception, self).__init__(filename, remoteid)
self.message = "The object with id {} associated with {}".\
format(remoteid, filename) + \
" could not be found"
def because_I_cant_understand_strptime(s: str):
months = {
"Jan": 1,
"Feb": 2,
"Mar": 3,
"Apr": 4,
"May": 5,
"Jun": 6,
"Jul": 7,
"Aug": 8,
"Sep": 9,
"Oct": 10,
"Nov": 11,
"Dec": 12
}
m = re.compile("(Sat|Sun|Mon|Tue|Wed|Thu|Fri) " +
"(Jan|Feb|Mar|Apr|May|Jun|Jul" +
"|Aug|Sep|Oct|Nov|Dec) " +
"([123 ][0-9]) " +
"([012 ][0-9]):" +
"([0-5][0-9]):" +
"([0-5][0-9]) " +
"(CST|CDT) " +
"([0-9]+)").match(s)
month = months[m.group(2)]
monthday = int(m.group(3))
hour = int(m.group(4))
minute = int(m.group(5))
second = int(m.group(6))
year = int(m.group(7))
    dt = datetime.datetime(year, month, monthday, hour, minute, second)
return dt
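# Example: the parser accepts asctime-style stamps as reported by the MUCK,
# e.g. because_I_cant_understand_strptime("Fri Jan 15 09:15:42 CST 2016")
# returns datetime.datetime(2016, 1, 15, 9, 15, 42).  The day of month must
# be two characters ("15", " 7" -- never "07"), and only CST/CDT zones match.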
class MufFile():
def __init__(self, filename, depth=0, parent=None, send_method="name", id=None,
regname=None):
self.dependencies = []
self.transformedname = ""
self.filename = filename
self.hash = sha512()
self.length = 0
self.parent = parent
self.includes = {}
self.id = id
self.regname = regname
self.send_method = send_method
with open(filename) as file:
for z in file.readlines():
pnmatch = prognameMatch.match(z)
if pnmatch is not None:
self.transformedname = pnmatch.group(1)
continue
pdepmatch = progDependencyMatch.match(z)
if pdepmatch is not None:
self.dependencies.append(pdepmatch.group(1))
continue
pincMatch = progIncludeMatch.match(z)
if pincMatch is not None:
self.includes[pincMatch.group(2)] = pincMatch.group(1)
self.hash.update(z.encode())
self.length += 1
self.hash = self.hash.hexdigest()
def send(self, tc: Telnet):
let_be = False
while True:
if self.send_method == "name":
tc.write("@prog {}\n".format(self.transformedname).encode())
elif self.send_method == "id":
tc.write("@prog {}\n".format(self.id).encode())
elif self.send_method == "regname":
print("Using regname:{0}".format(self.regname))
tc.write("@prog {}\n".format(self.regname).encode())
mindex, match, _ = tc.expect([ProgramId, ProgramId2], timeout=3)
if match is not None:
self.id = int(match.group(1))
break
tc.write("1 {} delete\n".format(self.length * 10).encode())
tc.write("i\n".encode())
counter = 0
with open(self.filename) as fi:
lines = fi.readlines()
if len(lines[-1]) > 0:
lines.append('')
for i in lines:
tc.write("{}".format(i).encode())
# sleep(0.05)
counter += 1
print("{: =4.2}%".format(100 * counter / len(lines)),
end='\r', flush=True)
print("\n", end="", flush=True)
print("finished sending")
while True:
tc.write('.\n'.encode())
index, m, _ = tc.expect(editorInsertExitMatch,
timeout=5)
if m is not None:
break
print("compiling program")
while True:
tc.write("c\n".encode())
index, m, line = tc.expect(editorCompilerStringMatch,
timeout=7)
if index != None and index != 1:
print("Message Recieved")
print(line.decode("ascii"))
let_be = True
if m is not None:
break
print("quitting")
while True:
if let_be:
tc.write("q\n".encode())
else:
tc.write("x\n".encode())
index, m, _ = tc.expect(editorExitStringMatch,
timeout=7)
if m is not None:
break
@staticmethod
def check_last_modified(filename, remoteid, tc: Telnet):
        tc.write(objectModificationCommand.format(remoteid).encode())
idx, match, _ = tc.expect(objectModifiedStringFieldMatch)
if idx == 1:
raise SyncException(filename, remoteid)
# mod_date = datetime.datetime.strptime(match.group(1),
# "%a %b %d %H:%M:%S %Z %Y")
mod_date = because_I_cant_understand_strptime(match.group(1))
        local_stuff = datetime.datetime.fromtimestamp(path.getmtime(filename))
return mod_date >= local_stuff
@staticmethod
def sync(filename, remoteid, tc: Telnet):
tc.read_very_eager()
# tc.write(b"@set me=H\n")
# tc.write(b"pub #alloff\n")
sleep(2)
tc.read_very_eager()
tc.write(programListCommand.format(remoteid).encode())
print(programListCommand.format(remoteid))
with open(filename, 'w') as output:
lines = tc.read_until(b" lines displayed.").decode().split('\r\n')
for i in lines[:-1]:
if i[0:4] == "LOG>":
continue
if i[0:5] == "PROG>":
i = i[5:]
else:
continue
output.write(i + '\n')
# tc.write(b"@set me=!H\n")
# tc.write(b"pub #allon\n")
tc.read_very_eager()
# mindex = 0
# while mindex < 1:
# mindex, match, _ = tc.expect([programListMatch,
# programListTerminator])
# if mindex >= 1 \
# or match is None:
# break
# output.write(match.group(2).decode()+'\n')
# Keep track of whether or not files are up to date on the server.
class Cache():
def __init__(self, path):
import pickle
self.newfiles = {}
self.oldfiles = {}
try:
self = pickle.load(path + ".cache")
except IOError:
# probably doesn't exist
pass
def addFile(self, file: MufFile):
fname = file.filename
if fname in self.oldfiles.keys():
if self.newfiles[fname].hash != file.hash:
self.oldfiles[fname] = self.newfiles[fname]
self.newfiles[fname] = file
def syncOld(self, file: MufFile, tc: Telnet):
tc.write(programFinder.format(file.filename))
mindex, match, _ = tc.expect([programFinderRegex,
programFinderTerminator])
fn = None
while match is not None and mindex != 1:
if match.group(1) == file.transformedname:
fn = match.group(1)
break
else:
mindex, match, _ = tc.expect([programFinderRegex,
programFinderTerminator])
tc.write(programListCommand.format(fn))
mindex = 0
lines = []
lastindex = 0
while mindex != 1:
mindex, match, _ = tc.expect([programListMatch,
programListTerminator])
if mindex != 1:
                if int(match.group(1)) != lastindex + 1:
print("Hmm. There might be a problem.")
else:
lastindex = int(match.group(1))
lines.append(match.group(2))
class DepGraph():
def __init__(self):
self.nodes = {}
self.edges = {}
self.depths = {}
self.validstarts = set()
def addFile(self, file: MufFile, depth=0):
self.nodes[file.filename] = file
if file.filename not in self.edges.keys():
self.edges[file.filename] = set()
self.depths[file.filename] = depth
if depth == 0:
self.validstarts.add(file.filename)
for fn in file.dependencies:
self.edges[file.filename].add(fn)
if fn not in self.nodes.keys():
self.addFile(MufFile(fn, depth=depth + 1), depth + 1)
def send(self, tc: Telnet):
stack = list()
path = []
sent = set()
for i in self.validstarts:
stack.append(i)
while len(stack) > 0:
cn = stack.pop()
if cn not in path:
path.append(cn)
else:
continue
for n in self.edges[cn]:
path.append(n)
stack.append(n)
for n in reversed(path):
print("Updating program {}".format(n))
self.nodes[n].send(tc)
# TODO: Use a cache to check if the file needs to be uploaded again,
# I.E. it's hash has changed.
# TODO: define a macro on the copy sent to the server such that each
# program refers to the correct id at runtime.
# argInterpret = argparse.ArgumentParser()
# argInterpret.add_argument()
# tc = Telnet(host="localhost", port=2001)
# tc.write(b"connect one potrzebie\n")
# dg = DepGraph()
# dg.addFile(MufFile("Channel/Channel.muf"))
# dg.send(tc)
parser = argparse.ArgumentParser("Manage files on the MUCK")
parser.add_argument("--send", dest='files', action='append',
help='Files to send', default=[])
parser.add_argument('--sync', dest='sync', action='store_const',
help='Sync files?', const=True, default=False)
parser.add_argument('--force-sync', default=[],
dest='needsync',
action='append', help='Force a file to be synced')
parser.add_argument('--send-all', dest='send_all', action='store_const',
help='send all files', const=True, default=False)
parser.add_argument('--spaz', const=True,default=False,action='store_const')
parser.add_argument('--primary',const=True,default=False,action='store_const')
parser.add_argument('--host',default=[],action='append',dest='host')
args = parser.parse_args()
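# The project.yaml read below is expected to have roughly this shape
# (derived from the keys accessed; all values are placeholders):
#
#   project:
#     connections:
#       - connect:
#           host: muck.example.org
#           port: 2001
#           username: wizard
#           password: hunter2
#           primary: true     # optional: allows '#dbref'-style targets
#           sync: true        # optional: lets --sync act on this connection
#     sync:
#       - file: {name: Channel/Channel.muf, id: 123, no_exist: true}
#     send:
#       - file:
#           name: Channel/Channel.muf
#           gamename: cmd-channel
#           send_method: regname   # one of: name / id / regname (optional)
#           regname: cmd-channel   # used when send_method is regname
#           id: "123"              # used when send_method is id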
with open('project.yaml') as projfile:
project = yaml.load(projfile)
print(project)
project = project['project']
for conn in project['connections']:
conn=conn['connect']
if args.primary and \
(not 'primary' in conn.keys()):
continue
if len(args.host)>0\
and conn['host'] not in args.host:
continue
print(conn)
tc = Telnet(host=conn['host'],
port=int(conn['port']))
tc.read_some()
tc.write("connect {} {}\n".format(conn['username'],
conn['password']).encode())
print("connect {} {}".format(conn['username'],
conn['password']))
sleep(2)
if args.spaz:
while True:
tc.close()
            tc = Telnet(host=conn['host'],
                        port=int(conn['port']))
            tc.read_some()
            tc.write("connect {} {}\n".format(conn['username'],
                                              conn['password']).encode())
sleep(0.1)
tc.read_some()
if args.sync and conn['sync']:
for i in project['sync']:
if 'no_exist' in i['file'].keys() and i['file']['no_exist']:
try:
stat(i['file']['name'])
print('skipping {}'.format(i['file']['name']))
continue
except FileNotFoundError:
print('need to get {}'.format(i['file']['name']))
MufFile.sync(i['file']['name'], i['file']['id'], tc)
for i in project['sync']:
if i['file']['name'] in args.needsync \
and 'sync' in conn.keys()\
and (not args.primary or\
args.primary and 'primary' in conn.keys()):
MufFile.sync(i['file']['name'], i['file']['id'], tc)
if args.send_all:
for i in project['send']:
f = None
should_send=True
if 'send_method' in i['file'].keys():
id = None
regname = None
print("Send method:" + i['file']['send_method'])
if 'id' in i['file'].keys():
id = i['file']['id']
if '#' in id and 'primary' not in conn.keys():
should_send=False
if 'regname' in i['file'].keys():
regname = i['file']['regname']
f = MufFile(i['file']['name'],
send_method=i['file']['send_method'],
id=id, regname=regname)
else:
print("No send method found")
f = MufFile(i['file']['name'])
f.transformedname = i['file']['gamename']
if '#' in i['file']['gamename'] and 'primary' not in conn.keys():
should_send=False
if not should_send:
                print('File', f.transformedname, 'is not encoded in a cross-game manner')
continue
print("Sending " + f.transformedname)
f.send(tc)
sleep(1)
print("\a")
else:
for i in project['send']:
if i['file']['name'] not in args.files:
continue
send_with_id = False
should_send = True
f = None
if 'send_method' in i['file'].keys():
id = None
regname = None
print("Send method:" + i['file']['send_method'])
if 'id' in i['file'].keys():
id = i['file']['id']
if '#' in id and 'primary' not in conn.keys():
should_send=False
if 'regname' in i['file'].keys():
regname = i['file']['regname']
f = MufFile(i['file']['name'],
send_method=i['file']['send_method'],
id=id, regname=regname)
else:
f = MufFile(i['file']['name'])
f.transformedname = i['file']['gamename']
if '#' in f.transformedname and 'primary' not in conn.keys():
should_send=False
if not should_send:
                print(f.transformedname, "is not kept with game-independent identification. Skipping")
continue
print("Sending " + f.transformedname)
f.send(tc)
sleep(1)
tc.close()
| mit | -9,212,332,200,114,590,000 | 38.112583 | 122 | 0.4943 | false |
ablifedev/ABLIRC | ABLIRC/install/external_lib/HTSeq/__init__.py | 1 | 39255 | """HTSeq is a package to process high-throughput sequencing data.
See http://www-huber.embl.de/users/anders/HTSeq for documentation.
"""
import itertools, warnings, os, shlex, sys, re, csv, gzip
try:
from _HTSeq import *
except ImportError:
if os.path.isfile( "setup.py" ):
raise ImportError( "Cannot import 'HTSeq' when working directory is HTSeq's own build directory.")
else:
raise
from _version import __version__
#from vcf_reader import *
#########################
## Utils
#########################
class FileOrSequence( object ):
""" The construcutor takes one argument, which may either be a string,
which is interpreted as a file name (possibly with path), or a
connection, by which we mean a text file opened for reading, or
any other object that can provide an iterator over strings
(lines of the file).
The advantage of passing a file name instead of an already opened file
is that if an iterator is requested several times, the file will be
re-opened each time. If the file is already open, its lines can be read
only once, and then, the iterator stays exhausted.
Furthermore, if a file name is passed that end in ".gz" or ".gzip"
(case insensitive), it is transparently gunzipped.
"""
def __init__( self, filename_or_sequence ):
self.fos = filename_or_sequence
self.line_no = None
def __iter__( self ):
self.line_no = 1
if isinstance( self.fos, str ):
if self.fos.lower().endswith( ( ".gz" , ".gzip" ) ):
lines = gzip.open( self.fos )
else:
lines = open( self.fos )
else:
lines = self.fos
for line in lines:
yield line
self.line_no += 1
if isinstance( self.fos, str ):
lines.close()
self.line_no = None
def __repr__( self ):
if isinstance( self.fos, str ):
return "<%s object, connected to file name '%s'>" % (
self.__class__.__name__, self.fos )
else:
return "<%s object, connected to %s >" % (
self.__class__.__name__, repr( self.fos ) )
def get_line_number_string( self ):
if self.line_no is None:
if isinstance( self.fos, str ):
return "file %s closed" % self.fos
else:
return "file closed"
if isinstance( self.fos, str ):
return "line %d of file %s" % ( self.line_no, self.fos )
else:
return "line %d" % self.line_no
#########################
## Features
#########################
class GenomicFeature( object ):
"""A genomic feature, i.e., an interval on a genome with metadata.
At minimum, the following information should be provided by slots:
name: a string identifying the feature (e.g., a gene symbol)
type: a string giving the feature type (e.g., "gene", "exon")
iv: a GenomicInterval object specifying the feature locus
"""
def __init__( self, name, type_, interval ):
self.name = name
self.type = intern( type_ )
self.iv = interval
def __repr__( self ):
return "<%s: %s '%s' at %s: %d -> %d (strand '%s')>" % \
( self.__class__.__name__, self.type, self.name,
self.iv.chrom, self.iv.start_d, self.iv.end_d, self.iv.strand )
def __eq__( self, other ):
if not isinstance( other, GenomicFeature ):
return False
return self.name == other.name and self.type == other.type and \
self.iv == other.iv
   def __ne__( self, other ):
if not isinstance( other, GenomicFeature ):
return True
return not self.__eq__( other )
def get_gff_line( self, with_equal_sign=False ):
try:
source = self.source
except AttributeError:
source = "."
try:
score = self.score
except AttributeError:
score = "."
try:
frame = self.frame
except AttributeError:
frame = "."
try:
attr = self.attr
except AttributeError:
attr = { 'ID': self.name }
if with_equal_sign:
sep = "="
else:
sep = " "
attr_str = '; '.join( [ '%s%s\"%s\"' % ( ak, sep, attr[ak] ) for ak in attr ] )
return "\t".join( str(a) for a in ( self.iv.chrom, source,
self.type, self.iv.start+1, self.iv.end, score,
self.iv.strand, frame, attr_str ) ) + "\n"
_re_attr_main = re.compile( "\s*([^\s\=]+)[\s=]+(.*)" )
_re_attr_empty = re.compile( "^\s*$" )
def parse_GFF_attribute_string( attrStr, extra_return_first_value=False ):
"""Parses a GFF attribute string and returns it as a dictionary.
If 'extra_return_first_value' is set, a pair is returned: the dictionary
and the value of the first attribute. This might be useful if this is the ID.
"""
if attrStr.endswith( "\n" ):
attrStr = attrStr[:-1]
d = {}
first_val = "_unnamed_"
for (i, attr) in itertools.izip( itertools.count(), _HTSeq.quotesafe_split( attrStr ) ):
if _re_attr_empty.match( attr ):
continue
if attr.count( '"' ) not in ( 0, 2 ):
raise ValueError, "The attribute string seems to contain mismatched quotes."
mo = _re_attr_main.match( attr )
if not mo:
raise ValueError, "Failure parsing GFF attribute line"
val = mo.group(2)
if val.startswith( '"' ) and val.endswith( '"' ):
val = val[1:-1]
#val = urllib.unquote( val )
d[ intern(mo.group(1)) ] = intern(val)
if extra_return_first_value and i == 0:
first_val = val
if extra_return_first_value:
return ( d, first_val )
else:
return d
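# Example:
#    parse_GFF_attribute_string( 'gene_id "ENSG01"; gene_name "ABC1"' )
# returns { 'gene_id': 'ENSG01', 'gene_name': 'ABC1' }; with
# extra_return_first_value=True the pair ( dict, 'ENSG01' ) is returned,
# which is handy when the first attribute is the feature ID.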
_re_gff_meta_comment = re.compile( "##\s*(\S+)\s+(\S*)" )
class GFF_Reader( FileOrSequence ):
"""Parse a GFF file
Pass the constructor either a file name or an iterator of lines of a
GFF files. If a file name is specified, it may refer to a gzip compressed
file.
Iterating over the object then yields GenomicFeature objects.
"""
def __init__( self, filename_or_sequence, end_included=True ):
FileOrSequence.__init__( self, filename_or_sequence )
self.end_included = end_included
self.metadata = {}
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
if line == "\n":
continue
if line.startswith( '#' ):
if line.startswith( "##" ):
mo = _re_gff_meta_comment.match( line )
if mo:
self.metadata[ mo.group(1) ] = mo.group(2)
continue
( seqname, source, feature, start, end, score,
strand, frame, attributeStr ) = line.split( "\t", 8 )
( attr, name ) = parse_GFF_attribute_string( attributeStr, True )
if self.end_included:
iv = GenomicInterval( seqname, int(start)-1, int(end), strand )
else:
iv = GenomicInterval( seqname, int(start)-1, int(end)-1, strand )
f = GenomicFeature( name, feature, iv )
if score != ".":
score = float( score )
if frame != ".":
frame = int( frame )
f.source = source
f.score = score
f.frame = frame
f.attr = attr
yield f
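# Usage sketch (file name hypothetical):
#    gff = GFF_Reader( "Homo_sapiens.GRCh37.gtf.gz" )
#    for feature in gff:
#       if feature.type == "exon":
#          print feature.name, feature.iv
# Header pragmas such as "##gff-version" end up in gff.metadata.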
def make_feature_dict( feature_sequence ):
"""A feature dict is a convenient way to organize a sequence of Feature
object (which you have got, e.g., from parse_GFF).
The function returns a dict with all the feature types as keys. Each value
of this dict is again a dict, now of feature names. The values of this dict
is a list of feature.
An example makes this clear. Let's say you load the C. elegans GTF file
from Ensemble and make a feature dict:
>>> worm_features_dict = HTSeq.make_feature_dict( HTSeq.parse_GFF(
... "test_data/Caenorhabditis_elegans.WS200.55.gtf.gz" ) )
(This command may take a few minutes to deal with the 430,000 features
in the GTF file. Note that you may need a lot of RAM if you have millions
of features.)
Then, you can simply access, say, exon 0 of gene "F08E10.4" as follows:
>>> worm_features_dict[ 'exon' ][ 'F08E10.4' ][ 0 ]
<GenomicFeature: exon 'F08E10.4' at V: 17479353 -> 17479001 (strand '-')>
"""
res = {}
for f in feature_sequence:
if f.type not in res:
res[ f.type ] = {}
res_ftype = res[ f.type ]
if f.name not in res_ftype:
res_ftype[ f.name ] = [ f ]
else:
res_ftype[ f.name ].append( f )
return res
#########################
## GenomicArray
#########################
def read_chrom_lens( filename, delimiter="\t" ):
return dict( ( ( chrom, int(len) )
for chrom, len in csv.reader( open(filename), delimiter=delimiter ) ) )
#########################
## Sequence readers
#########################
_re_fasta_header_line = re.compile( r'>\s*(\S+)\s*(.*)' )
class FastaReader( FileOrSequence ):
"""A Fasta_Reader is associated with a FASTA file or an open connection
to a file-like object with content in FASTA format.
It can generate an iterator over the sequences.
"""
def __iter__( self ):
seq = None
for line in FileOrSequence.__iter__( self ):
if line.startswith( ">" ):
if seq:
s = Sequence( seq, name )
s.descr = descr
yield s
mo = _re_fasta_header_line.match( line )
name = mo.group(1)
descr = mo.group(2)
seq = ""
else:
assert seq is not None, "FASTA file does not start with '>'."
seq += line[:-1]
if seq is not None:
s = Sequence( seq, name )
s.descr = descr
yield s
def get_sequence_lengths( self ):
seqname = None
seqlengths = {}
for line in FileOrSequence.__iter__( self ):
if line.startswith( ">" ):
if seqname is not None:
seqlengths[ seqname ] = length
mo = _re_fasta_header_line.match( line )
seqname = mo.group(1)
length = 0
else:
assert seqname is not None, "FASTA file does not start with '>'."
length += len( line.rstrip() )
if seqname is not None:
seqlengths[ seqname ] = length
return seqlengths
@staticmethod
def _import_pysam():
global pysam
try:
import pysam
except ImportError:
sys.stderr.write( "Please install the 'pysam' package to be able to use the Fasta indexing functionality." )
raise
def build_index( self, force = False ):
self._import_pysam()
if not isinstance( self.fos, str ):
raise TypeError, "This function only works with FastaReader objects " + \
"connected to a fasta file via file name"
index_filename = self.fos + ".fai"
if os.access( index_filename, os.R_OK ):
         if (not force) and os.stat( self.fos ).st_mtime <= \
os.stat( index_filename ).st_mtime:
# index is up to date
return
pysam.faidx( self.fos )
if not os.access( index_filename, os.R_OK ):
raise SystemError, "Building of Fasta index failed due to unknown error."
def __getitem__( self, iv ):
if not isinstance( iv, GenomicInterval ):
raise TypeError, "GenomicInterval expected as key."
if not isinstance( self.fos, str ):
raise TypeError, "This function only works with FastaReader objects " + \
"connected to a fasta file via file name"
self._import_pysam()
fasta = pysam.faidx( self.fos, "%s:%d-%d" % ( iv.chrom, iv.start, iv.end-1 ) )
ans = list( FastaReader( fasta ) )
assert len( ans ) == 1
ans[0].name = str(iv)
if iv.strand != "-":
return ans[0]
else:
return ans[0].get_reverse_complement()
class FastqReader( FileOrSequence ):
"""A Fastq object is associated with a FASTQ self.file. When an iterator
is requested from the object, the FASTQ file is read.
qual_scale is one of "phred", "solexa", "solexa-old".
"""
def __init__( self, file_, qual_scale = "phred" ):
FileOrSequence.__init__( self, file_ )
self.qual_scale = qual_scale
if qual_scale not in ( "phred", "solexa", "solexa-old" ):
raise ValueError, "Illegal quality scale."
def __iter__( self ):
fin = FileOrSequence.__iter__( self )
while True:
id1 = fin.next()
seq = fin.next()
id2 = fin.next()
qual = fin.next()
if qual == "":
if id1 != "":
warnings.warn( "Number of lines in FASTQ file is not "
"a multiple of 4. Discarding the last, "
"incomplete record" )
break
if not qual.endswith( "\n" ):
qual += "\n"
if not id1.startswith( "@" ):
raise ValueError( "Primary ID line in FASTQ file does"
"not start with '@'. Either this is not FASTQ data or the parser got out of sync." )
if not id2.startswith( "+" ):
raise ValueError( "Secondary ID line in FASTQ file does"
"not start with '+'. Maybe got out of sync." )
if len( id2 ) > 2 and id1[1:] != id2[1:]:
raise ValueError( "Primary and secondary ID line in FASTQ"
"disagree." )
yield SequenceWithQualities( seq[:-1], id1[1:-1], qual[:-1],
self.qual_scale )
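# Usage sketch (file name hypothetical): iterating yields SequenceWithQualities
# objects, with qualities decoded according to qual_scale.
#    for read in FastqReader( "sample.fastq", qual_scale="phred" ):
#       print read.name, read.seq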
class BowtieReader( FileOrSequence ):
"""A BowtieFile object is associated with a Bowtie output file that
contains short read alignments. It can generate an iterator of Alignment
objects."""
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
try:
algnt = BowtieAlignment( line )
except ValueError:
if line.startswith( "Reported " ):
continue
warnings.warn( "BowtieReader: Ignoring the following line, which could not be parsed:\n%s\n" % line,
RuntimeWarning )
yield algnt
def bundle_multiple_alignments( sequence_of_alignments ):
"""Some alignment programs, e.g., Bowtie, can output multiple alignments,
i.e., the same read is reported consecutively with different alignments.
This function takes an iterator over alignments and bundles consecutive
alignments regarding the same read to a list of Alignment objects and
returns an iterator over these.
"""
alignment_iter = iter( sequence_of_alignments )
algnt = alignment_iter.next()
ma = [ algnt ]
for algnt in alignment_iter:
if algnt.read.name != ma[0].read.name:
yield ma
ma = [ algnt ]
else:
ma.append( algnt )
yield ma
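# Usage sketch (file name hypothetical): Bowtie reports multiple alignments of
# one read on consecutive lines, so they can be regrouped per read:
#    for bundle in bundle_multiple_alignments( BowtieReader( "hits.bwtout" ) ):
#       print bundle[0].read.name, len( bundle )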
class SolexaExportAlignment( Alignment ):
"""Iterating over SolexaExportReader objects will yield SoelxaExportRecord
objects. These have four fields:
read - a SequenceWithQualities object
aligned - a boolean, indicating whether the object was aligned
iv - a GenomicInterval giving the alignment (or None, if not aligned)
passed_filter - a boolean, indicating whether the object passed the filter
nomatch_code - a code indicating why no match was found (or None, if the
read was aligned)
As long as 'aligned' is True, a SolexaExportRecord can be treated as an
Alignment object.
"""
def __init__( self ):
# Data is filled in by SolexaExportRecord
pass
def __repr__( self ):
if self.aligned:
return "< %s object: Read '%s', aligned to %s >" % (
self.__class__.__name__, self.read.name, self.iv )
else:
return "< %s object: Non-aligned read '%s' >" % (
self.__class__.__name__, self.read.name )
class SolexaExportReader( FileOrSequence ):
"""Parser for *_export.txt files from the SolexaPipeline software.
Iterating over a SolexaExportReader yields SolexaExportRecord objects.
"""
def __init__( self, filename_or_sequence, solexa_old = False ):
FileOrSequence.__init__( self, filename_or_sequence)
if solexa_old:
self.qualscale = "solexa-old"
else:
self.qualscale = "solexa"
@classmethod
def parse_line_bare( dummy, line ):
if line[-1] == "\n":
line = line[:-1]
res = {}
( res['machine'], res['run_number'], res['lane'], res['tile'], res['x_coord'],
res['y_coord'], res['index_string'], res['read_nbr'], res['read_seq'],
res['qual_str'], res['chrom'], res['contig'], res['pos'], res['strand'],
res['match_descr'], res['single_read_algnt_score'],
res['paired_read_algnt_score'], res['partner_chrom'], res['partner_contig'],
res['partner_offset'], res['partner_strand'], res['passed_filtering'] ) \
= line.split( "\t" )
return res
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
record = SolexaExportAlignment()
fields = SolexaExportReader.parse_line_bare( line )
if fields['read_nbr'] != "1":
warnings.warn( "Paired-end read encountered. PE is so far supported only for " +
"SAM files, not yet for SolexaExport. All PE-related fields are ignored. " )
record.read = SequenceWithQualities(
fields['read_seq'],
"%s:%s:%s:%s:%s#0" % (fields['machine'], fields['lane'], fields['tile'],
fields['x_coord'], fields['y_coord'] ),
fields['qual_str'], self.qualscale )
if fields['passed_filtering'] == 'Y':
record.passed_filter = True
elif fields['passed_filtering'] == 'N':
record.passed_filter = False
else:
raise ValueError, "Illegal 'passed filter' value in Solexa export data: '%s'." % fields['passed_filtering']
record.index_string = fields['index_string']
if fields['pos'] == '':
record.iv = None
record.nomatch_code = fields['chrom']
else:
if fields['strand'] == 'F':
strand = '+'
elif fields['strand'] == 'R':
strand = '-'
else:
raise ValueError, "Illegal strand value in Solexa export data."
start = int( fields['pos'] )
chrom = fields['chrom']
if fields['chrom'] == "":
chrom = fields['contig']
record.iv = GenomicInterval( chrom, start,
start + len( fields['read_seq'] ), strand )
yield record
class SAM_Reader( FileOrSequence ):
"""A SAM_Reader object is associated with a SAM file that
contains short read alignments. It can generate an iterator of Alignment
objects."""
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
if line.startswith( "@" ):
# do something with the header line
continue
try:
algnt = SAM_Alignment.from_SAM_line( line )
except ValueError, e:
e.args = e.args + ( self.get_line_number_string(), )
raise
yield algnt
class GenomicArrayOfSets( GenomicArray ):
"""A GenomicArrayOfSets is a specialization of GenomicArray that allows to store
sets of objects. On construction, the step vectors are initialized with empty sets.
By using the 'add_value' method, objects can be added to intervals. If an object
is already present in the set(s) at this interval, an the new object is added to
the present set, and the set is split if necessary.
"""
def __init__( self, chroms, stranded=True, storage='step', memmap_dir = "" ):
GenomicArray.__init__( self, chroms, stranded, 'O', storage, memmap_dir )
def add_chrom( self, chrom, length = sys.maxint, start_index = 0 ):
GenomicArray.add_chrom( self, chrom, length, start_index )
for cv in self.chrom_vectors[ chrom ].values():
cv[:] = set()
cv.is_vector_of_sets = True
###########################
## paired-end handling
###########################
def pair_SAM_alignments( alignments, bundle=False ):
mate_missing_count = [0]
def process_list( almnt_list ):
while len( almnt_list ) > 0:
a1 = almnt_list.pop( 0 )
# Find its mate
for a2 in almnt_list:
if a1.pe_which == a2.pe_which:
continue
if a1.aligned != a2.mate_aligned or a1.mate_aligned != a2.aligned:
continue
if not (a1.aligned and a2.aligned):
break
if a1.iv.chrom == a2.mate_start.chrom and a1.iv.start == a2.mate_start.pos and \
a2.iv.chrom == a1.mate_start.chrom and a2.iv.start == a1.mate_start.pos:
break
else:
if a1.mate_aligned:
mate_missing_count[0] += 1
if mate_missing_count[0] == 1:
warnings.warn( "Read " + a1.read.name + " claims to have an aligned mate " +
"which could not be found in an adjacent line." )
a2 = None
if a2 is not None:
almnt_list.remove( a2 )
if a1.pe_which == "first":
yield ( a1, a2 )
else:
assert a1.pe_which == "second"
yield ( a2, a1 )
almnt_list = []
current_name = None
for almnt in alignments:
if not almnt.paired_end:
raise ValueError, "'pair_alignments' needs a sequence of paired-end alignments"
if almnt.pe_which == "unknown":
raise ValueError, "Paired-end read found with 'unknown' 'pe_which' status."
if almnt.read.name == current_name:
almnt_list.append( almnt )
else:
if bundle:
yield list( process_list( almnt_list ) )
else:
for p in process_list( almnt_list ):
yield p
current_name = almnt.read.name
almnt_list = [ almnt ]
if bundle:
yield list( process_list( almnt_list ) )
else:
for p in process_list( almnt_list ):
yield p
if mate_missing_count[0] > 1:
warnings.warn( "%d reads with missing mate encountered." % mate_missing_count[0] )
def pair_SAM_alignments_with_buffer( alignments, max_buffer_size=3000000 ):
almnt_buffer = {}
ambiguous_pairing_counter = 0
for almnt in alignments:
if not almnt.paired_end:
raise ValueError, "Sequence of paired-end alignments expected, but got single-end alignment."
if almnt.pe_which == "unknown":
raise ValueError, "Cannot process paired-end alignment found with 'unknown' 'pe_which' status."
matekey = (
almnt.read.name,
"second" if almnt.pe_which == "first" else "first",
almnt.mate_start.chrom if almnt.mate_aligned else None,
almnt.mate_start.pos if almnt.mate_aligned else None,
almnt.iv.chrom if almnt.aligned else None,
almnt.iv.start if almnt.aligned else None,
-almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None )
if matekey in almnt_buffer:
if len( almnt_buffer[ matekey ] ) == 1:
mate = almnt_buffer[ matekey ][ 0 ]
del almnt_buffer[ matekey ]
else:
mate = almnt_buffer[ matekey ].pop( 0 )
if ambiguous_pairing_counter == 0:
ambiguous_pairing_first_occurance = matekey
ambiguous_pairing_counter += 1
if almnt.pe_which == "first":
yield ( almnt, mate )
else:
yield ( mate, almnt )
else:
almntkey = (
almnt.read.name, almnt.pe_which,
almnt.iv.chrom if almnt.aligned else None,
almnt.iv.start if almnt.aligned else None,
almnt.mate_start.chrom if almnt.mate_aligned else None,
almnt.mate_start.pos if almnt.mate_aligned else None,
almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None )
if almntkey not in almnt_buffer:
almnt_buffer[ almntkey ] = [ almnt ]
else:
almnt_buffer[ almntkey ].append( almnt )
if len(almnt_buffer) > max_buffer_size:
raise ValueError, "Maximum alignment buffer size exceeded while pairing SAM alignments."
if len(almnt_buffer) > 0:
warnings.warn( "Mate records missing for %d records; first such record: %s." %
( len(almnt_buffer), str( almnt_buffer.values()[0][0] ) ) )
for almnt_list in almnt_buffer.values():
for almnt in almnt_list:
if almnt.pe_which == "first":
yield ( almnt, None )
else:
yield ( None, almnt )
if ambiguous_pairing_counter > 0:
warnings.warn( "Mate pairing was ambiguous for %d records; mate key for first such record: %s." %
( ambiguous_pairing_counter, str( ambiguous_pairing_first_occurance ) ) )
###########################
## variant calls
###########################
_re_vcf_meta_comment = re.compile( "^##([a-zA-Z]+)\=(.*)$" )
_re_vcf_meta_descr = re.compile('ID=[^,]+,?|Number=[^,]+,?|Type=[^,]+,?|Description="[^"]+",?')
_re_vcf_meta_types = re.compile( "[INFO|FILTER|FORMAT]" )
_vcf_typemap = {
"Integer":int,
"Float":float,
"String":str,
"Flag":bool
}
class VariantCall( object ):
def __init__( self, chrom = None, pos = None, identifier = None, ref = None, alt = None, qual = None, filtr = None, info = None ):
self.chrom = chrom
self.pos = pos
self.id = identifier
self.ref = ref
self.alt = alt
self.qual = qual
self.filter = filtr
self.info = info
self._original_line = None
@classmethod
def fromdict( cls, dictionary ):
ret = cls()
ret.chrom = dictionary["chrom"]
ret.pos = dictionary["pos"]
ret.id = dictionary["id"]
ret.ref = dictionary["ref"]
ret.alt = dictionary["alt"]
ret.qual = dictionary["qual"]
ret.filter = dictionary["filter"]
ret.info = dictionary["info"]
      ret._original_line = None
      return ret
@classmethod
def fromline( cls, line, nsamples = 0, sampleids = [] ):
ret = cls()
if nsamples == 0:
ret.format = None
ret.chrom, ret.pos, ret.id, ret.ref, ret.alt, ret.qual, ret.filter, ret.info = line.rstrip("\n").split("\t", 7)
else:
lsplit = line.rstrip("\n").split("\t")
ret.chrom, ret.pos, ret.id, ret.ref, ret.alt, ret.qual, ret.filter, ret.info = lsplit[:8]
ret.format = lsplit[8].split(":")
ret.samples = {}
spos=9
for sid in sampleids:
ret.samples[ sid ] = dict( ( name, value ) for (name, value) in itertools.izip( ret.format, lsplit[spos].split(":") ) )
spos += 1
ret.pos = GenomicPosition( ret.chrom, int(ret.pos) )
ret.alt = ret.alt.split(",")
ret._original_line = line
return ret
def infoline( self ):
if self.info.__class__ == dict:
return ";".join(map((lambda key: str(key) + "=" + str(self.info[key])), self.info ))
else:
return self.info
def get_original_line( self ):
warnings.warn( "Original line is empty, probably this object was created from scratch and not from a line in a .vcf file!" )
return self._original_line
def sampleline( self ):
if self.format == None:
print >> sys.stderr, "No samples in this variant call!"
return ""
keys = self.format
ret = [ ":".join( keys ) ]
for sid in self.samples:
tmp = []
for k in keys:
if k in self.samples[sid]:
tmp.append( self.samples[sid][k] )
ret.append( ":".join(tmp) )
return "\t".join( ret )
def to_line( self ):
if self.format == None:
return "\t".join( map( str, [ self.pos.chrom, self.pos.pos, self.id, self.ref, ",".join( self.alt ), self.qual, self.filter, self.infoline() ] ) ) + "\n"
else:
return "\t".join( map( str, [ self.pos.chrom, self.pos.pos, self.id, self.ref, ",".join( self.alt ), self.qual, self.filter, self.infoline(), self.sampleline() ] ) ) + "\n"
def __descr__( self ):
return "<VariantCall at %s, ref '%s', alt %s >" % (str(self.pos).rstrip("/."), self.ref, str(self.alt).strip("[]"))
def __str__( self ):
return "%s:'%s'->%s" % (str(self.pos).rstrip("/."), self.ref, str(self.alt).strip("[]"))
def unpack_info( self, infodict ):
tmp = {}
for token in self.info.strip(";").split(";"):
if re.compile("=").search(token):
token = token.split("=")
if infodict.has_key( token[0] ):
tmp[token[0]] = map( infodict[token[0]], token[1].split(",") )
else:
tmp[token[0]] = token[1].split(",")
if len( tmp[ token[0] ] ) == 1:
tmp[token[0]] = tmp[token[0]][0]
else: #Flag attribute found
tmp[token] = True
diff = set( infodict.keys() ).difference( set( tmp.keys() ) )
for key in diff:
if infodict[key] == bool:
tmp[key] = False
self.info = tmp
class VCF_Reader( FileOrSequence ):
def __init__( self, filename_or_sequence ):
FileOrSequence.__init__( self, filename_or_sequence )
self.metadata = {}
self.info = {}
self.filters = {}
self.formats = {}
self.nsamples = 0
self.sampleids = []
def make_info_dict( self ):
self.infodict = dict( ( key, _vcf_typemap[self.info[key]["Type"]] ) for key in self.info.keys() )
def parse_meta( self, header_filename = None ):
if header_filename == None:
the_iter = FileOrSequence.__iter__( self )
else:
the_iter = open( header_filename, "r" )
for line in the_iter:
if line.startswith( '#' ):
if line.startswith( "##" ):
mo = _re_vcf_meta_comment.match( line )
if mo:
value = mo.group(2)
if mo.group(1) == "INFO":
value = dict( e.rstrip(",").split("=",1) for e in _re_vcf_meta_descr.findall(value) )
key = value["ID"]
del value["ID"]
self.info[ key ] = value
elif mo.group(1) == "FILTER":
value = dict( e.rstrip(",").split("=",1) for e in _re_vcf_meta_descr.findall(value) )
key = value["ID"]
del value["ID"]
self.filters[ key ] = value
elif mo.group(1) == "FORMAT":
value = dict( e.rstrip(",").split("=",1) for e in _re_vcf_meta_descr.findall(value) )
key = value["ID"]
del value["ID"]
self.formats[ key ] = value
else:
self.metadata[ mo.group(1) ] = mo.group(2)
else:
self.sampleids = line.rstrip("\t\n").split("\t")[9:]
self.nsamples = len( self.sampleids )
continue
else:
break
def meta_info( self, header_filename = None ):
ret = []
if header_filename == None:
the_iter = FileOrSequence.__iter__( self )
else:
the_iter = open( header_filename, "r" )
for line in the_iter:
if line.startswith( '#' ):
ret.append( line )
else:
break
return ret
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
if line == "\n" or line.startswith( '#' ):
continue
vc = VariantCall.fromline( line, self.nsamples, self.sampleids )
yield vc
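# A minimal usage sketch for VCF_Reader (illustrative only; "example.vcf" is a
# placeholder path, not a file shipped with this code):
#
#   vcfr = VCF_Reader( "example.vcf" )
#   vcfr.parse_meta()       # read the ## header lines into .info/.filters/.formats
#   vcfr.make_info_dict()   # build the type map used by unpack_info()
#   for vc in vcfr:
#       vc.unpack_info( vcfr.infodict )
#       print vc.pos, vc.ref, vc.alt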
class WiggleReader( FileOrSequence ):
def __init__( self, filename_or_sequence, verbose = True ):
FileOrSequence.__init__( self, filename_or_sequence )
self.attributes = {}
self.stepType = 'none'
self.verbose = verbose
def __iter__( self ):
span = 1
pos = None
step = None
chrom = None
for line in FileOrSequence.__iter__( self ):
if line.startswith( 'track' ):
fields = shlex.split(line)[1:]
self.attributes = dict([(p[0], p[1].strip('"')) for p in [x.split("=") for x in fields]])
elif line.startswith( 'fixedStep' ): # do fixed step stuff
self.stepType = 'fixed'
fields = shlex.split(line)[1:]
declarations = dict([(p[0], p[1].strip('"')) for p in [x.split("=") for x in fields]])
pos = int(declarations['start'])
step = int(declarations['step'])
chrom = declarations['chrom']
if 'span' in declarations:
span = int(declarations['span'])
else:
span = 1
elif line.startswith( 'variableStep' ): # do variable step stuff
self.stepType = 'variable'
fields = shlex.split(line)[1:]
declarations = dict([(p[0], p[1].strip('"')) for p in [x.split("=") for x in fields]])
chrom = declarations['chrom']
if 'span' in declarations:
span = int(declarations['span'])
else:
span = 1
elif line.startswith( 'browser' ) or line.startswith( '#' ): #Comment or ignored
if self.verbose:
print "Ignored line:", line
continue
else:
if self.stepType == 'fixed':
yield ( GenomicInterval( chrom, pos, pos + span, '.' ), float(line.strip()) )
pos += step
elif self.stepType == 'variable':
tmp = line.strip().split(" ")
pos = int(tmp[0])
yield ( GenomicInterval( chrom, pos, pos + span, '.' ), float(tmp[1]) )
class BAM_Reader( object ):
def __init__( self, filename ):
global pysam
self.filename = filename
self.sf = None # This one is only used by __getitem__
self.record_no = -1
try:
import pysam
except ImportError:
sys.stderr.write( "Please Install PySam to use the BAM_Reader Class (http://code.google.com/p/pysam/)" )
raise
def __iter__( self ):
sf = pysam.Samfile(self.filename, "rb")
self.record_no = 0
for pa in sf:
yield SAM_Alignment.from_pysam_AlignedRead( pa, sf )
self.record_no += 1
def fetch( self, reference = None, start = None, end = None, region = None ):
sf = pysam.Samfile(self.filename, "rb")
self.record_no = 0
try:
for pa in sf.fetch( reference, start, end, region ):
yield SAM_Alignment.from_pysam_AlignedRead( pa, sf )
self.record_no += 1
except ValueError as e:
if e.message == "fetch called on bamfile without index":
print "Error: ", e.message
print "Your bam index file is missing or wrongly named, convention is that file 'x.bam' has index file 'x.bam.bai'!"
else:
raise
except:
raise
def get_line_number_string( self ):
if self.record_no == -1:
return "unopened file %s" % ( self.filename )
else:
return "record #%d in file %s" % ( self.record_no, self.filename )
def __getitem__( self, iv ):
if not isinstance( iv, GenomicInterval ):
raise TypeError, "Use a HTSeq.GenomicInterval to access regions within .bam-file!"
if self.sf is None:
self.sf = pysam.Samfile( self.filename, "rb" )
if not self.sf._hasIndex():
raise ValueError, "The .bam-file has no index, random-access is disabled!"
for pa in self.sf.fetch( iv.chrom, iv.start+1, iv.end ):
yield SAM_Alignment.from_pysam_AlignedRead( pa, self.sf )
def get_header_dict( self ):
sf = pysam.Samfile(self.filename, "rb")
return sf.header
class BAM_Writer( object ):
def __init__( self, filename, template = None, referencenames = None, referencelengths = None, text = None, header = None ):
try:
import pysam
except ImportError:
sys.stderr.write( "Please Install PySam to use the BAM_Writer Class (http://code.google.com/p/pysam/)" )
raise
self.filename = filename
self.template = template
self.referencenames = referencenames
self.referencelengths = referencelengths
self.text = text
self.header = header
self.sf = pysam.Samfile( self.filename, mode="wb", template = self.template, referencenames = self.referencenames, referencelengths = self.referencelengths, text = self.text, header = self.header )
@classmethod
def from_BAM_Reader( cls, fn, br ):
return BAM_Writer( filename = fn, header = br.get_header_dict() )
def write( self, alnmt):
self.sf.write( alnmt.to_pysam_AlignedRead( self.sf ) )
def close( self ):
self.sf.close()
class BED_Reader( FileOrSequence ):
def __init__( self, filename_or_sequence ):
FileOrSequence.__init__( self, filename_or_sequence )
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
if line.startswith( "track" ) or line.startswith( "#" ):
continue
fields = line.split()
if len(fields) < 3:
raise ValueError, "BED file line contains less than 3 fields"
#2015-7-28 modified by ChengChao
#if len(fields) > 9:
# raise ValueError, "BED file line contains more than 9 fields"
iv = GenomicInterval( fields[0], int(fields[1]), int(fields[2]), fields[5] if len(fields) > 5 else "." )
f = GenomicFeature( fields[3] if len(fields) > 3 else "unnamed", "BED line", iv )
f.score = float( fields[4] ) if len(fields) > 4 else None
#f.thick = GenomicInterval( iv.chrom, int( fields[6] ), int( fields[7] ), iv.strand ) if len(fields) > 7 else None
#f.itemRgb = [ int(a) for a in fields[8].split(",") ] if len(fields) > 8 else None
f.line = line.strip()
yield(f)
| mit | 5,902,803,634,480,403,000 | 36.890927 | 203 | 0.543472 | false |
jceipek/Mind-Rush | biofeedback.py | 1 | 7491 | #
# biofeedback.py
#
# Copyright (C)2011 Julian Ceipek and Patrick Varin
#
# Redistribution is permitted under the BSD license. See LICENSE for details.
#
try:
import multiprocessing
except:
raise Exception("Unable to load multiprocessing Python module.")
try:
import serial
except:
raise Exception("""Unable to load serial Python module.
Do you have pyserial installed?""")
import time
from engine.trueProcess import TrueProcess
from engine.altInput import AltInput
class Arduino:
def __init__(self):
self.active = multiprocessing.Value('i',1)
self.eventReader = None
self.proc = None
def listen(self, deviceID, mindFlexActive=True, eyeCircuitActive=True):
self.mindflexQueue = multiprocessing.Queue(11)
self.eyeCircuitQueue = multiprocessing.Queue(5)
self.proc = TrueProcess(self.mindflexReader, deviceID,
mindFlexActive, eyeCircuitActive)
def mindflexReader(self, deviceID,
mindFlexActive=True, eyeCircuitActive=True):
self.quality = -1
self.attention = -1
self.meditation = -1
self.delta = -1
self.theta = -1
self.lowAlpha = -1
self.highAlpha = -1
self.lowBeta = -1
self.highBeta = -1
self.lowGamma = -1
self.highGamma = -1
self.eyeSignal = -1
connected = False
count = 0
while not connected and count <= 5:
try:
ser = serial.Serial(deviceID, 9600)
connected = True
except Exception as e:
count += 1
print e
if count >= 5:
raise Exception("Unable to communicate with Arduino")
while self.active.value == 1 and (mindFlexActive or eyeCircuitActive):
try:
line = ser.readline().strip()
except Exception as e:
line = ""
print "Reading from Arduino Failed: ",e
if mindFlexActive and ('EEG:' in line):
line = line.split(':')
line = line[1].split(',')
try:
if not len(line) == 11:
raise ValueError
newQuality = (200.0-int(line[0]))/200.0
newAttention = int(line[1])/100.0
newMeditation = int(line[2])/100.0
newDelta = int(line[3])
newTheta = int(line[4])
newLowAlpha = int(line[5])
newHighAlpha = int(line[6])
newLowBeta = int(line[7])
newHighBeta = int(line[8])
newLowGamma = int(line[9])
newHighGamma = int(line[10])
if self.quality != newQuality:
self.quality = newQuality
self.putMindflexMessage(('Arduino_quality',self.quality))
if self.attention != newAttention:
self.attention = newAttention
self.putMindflexMessage(('Arduino_attention',self.attention))
if self.meditation != newMeditation:
self.meditation = newMeditation
                        self.putMindflexMessage(('Arduino_meditation',self.meditation))
if self.delta != newDelta:
self.delta = newDelta
self.putMindflexMessage(('Arduino_delta',self.delta))
if self.theta != newTheta:
self.theta = newTheta
self.putMindflexMessage(('Arduino_theta',self.theta))
if self.lowAlpha != newLowAlpha:
self.lowAlpha = newLowAlpha
self.putMindflexMessage(('Arduino_lowAlpha',self.lowAlpha))
if self.highAlpha != newHighAlpha:
self.highAlpha = newHighAlpha
self.putMindflexMessage(('Arduino_highAlpha',self.highAlpha))
if self.lowBeta != newLowBeta:
self.lowBeta = newLowBeta
self.putMindflexMessage(('Arduino_lowBeta',self.lowBeta))
if self.highBeta != newHighBeta:
self.highBeta = newHighBeta
self.putMindflexMessage(('Arduino_highBeta',self.highBeta))
if self.lowGamma != newLowGamma:
self.lowGamma = newLowGamma
self.putMindflexMessage(('Arduino_lowGamma',self.lowGamma))
if self.highGamma != newHighGamma:
self.highGamma = newHighGamma
self.putMindflexMessage(('Arduino_highGamma',self.highGamma))
except:
print line
print "Caught Mindflex serial error!"
elif eyeCircuitActive and ('EMG:' in line):
line = line.split(':')
line = line[1].split(',')
try:
if not len(line) == 1:
raise ValueError
newEyeSignal = int(line[0])
if self.eyeSignal != newEyeSignal:
self.eyeSignal = newEyeSignal
self.putEyeCircuitMessage(('Arduino_eyeValue',
self.eyeSignal))
except Exception as e:
print e
print "Caught EMG circuit serial error!",line
try:
ser.close()
print "Arduino Serial Connection Closed"
except:
print "Unable to close serial connection to Arduino!"
def putEyeCircuitMessage(self, message):
while self.eyeCircuitQueue.full():
self.eyeCircuitQueue.get()
self.eyeCircuitQueue.put(message)
def putMindflexMessage(self, message):
while self.mindflexQueue.full():
self.mindflexQueue.get()
self.mindflexQueue.put(message)
def deactivate(self):
self.active.value = 0
print("Closed Arduino Process")
class Biofeedback(AltInput):
def __init__(self, deviceID, mindFlexActive=True, eyeCircuitActive=True):
self.arduino = Arduino()
self.arduino.listen(deviceID, mindFlexActive, eyeCircuitActive)
def poll(self):
return (not self.arduino.mindflexQueue.empty() or
not self.arduino.eyeCircuitQueue.empty())
def getEvents(self):
events = []
if not self.arduino.mindflexQueue.empty():
reading = self.arduino.mindflexQueue.get()
identifier = reading[0]
value = reading[1]
discrete = False #All of the bio-feedback events we use are continuous values
mindflexReading = self.makeEvent(identifier, value, discrete)
events.append(mindflexReading)
if not self.arduino.eyeCircuitQueue.empty():
reading = self.arduino.eyeCircuitQueue.get()
identifier = reading[0]
value = reading[1]
discrete = False #All of the bio-feedback events we use are continuous values
eyeCircuitReading = self.makeEvent(identifier, value, discrete)
events.append(eyeCircuitReading)
return events
def stop(self):
self.arduino.deactivate()
| bsd-3-clause | -5,254,349,475,841,835,000 | 37.813472 | 89 | 0.536243 | false |
kebrister/lscat-blcontrols | pmac/mk_pgpmac_redis.py | 1 | 36959 | #! /usr/bin/python
# coding=utf-8
import sys
import iniParser
import datetime
if len(sys.argv) <= 1:
    print >> sys.stderr, "Usage: %s headOfRedisVariableNames [prefIniFileName [hardIniFileName]]" % (sys.argv[0],)
sys.exit(-1)
if len(sys.argv) > 1:
head = sys.argv[1]
if len(sys.argv) > 2:
pref_ini = sys.argv[2]
else:
pref_ini = None
if len(sys.argv) > 3:
hard_ini = sys.argv[3]
else:
hard_ini = None
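#
# Example invocation (illustrative only -- the ini file names and the use of
# redis-cli are assumptions, not something this script checks):
#
#   python mk_pgpmac_redis.py stns.2 microdiff_pref.ini microdiff_hard.ini | redis-cli
#
# The script only prints redis commands to stdout; loading them into redis
# (for example by piping into redis-cli) is left to the caller.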
configs = {
"orange-2" : { "re" : "redis\.kvseq|stns\.2\.(.+)", "head" : "stns.2", "pub" : "MD2-21-ID-E", "pg" : "1", "autoscint" : "1"},
"orange-2.ls-cat.org" : { "re" : "redis\.kvseq|stns\.2\.(.+)", "head" : "stns.2", "pub" : "MD2-21-ID-E", "pg" : "1", "autoscint" : "1"},
"venison.ls-cat.org" : { "re" : "redis\.kvseq|stns\.2\.(.+)", "head" : "stns.2", "pub" : "MD2-21-ID-E", "pg" : "1", "autoscint" : "1"},
"mung-2" : { "re" : "redis\.kvseq|stns\.1\.(.+)", "head" : "stns.1", "pub" : "MD2-21-ID-D", "pg" : "1", "autoscint" : "1"},
"mung-2.ls-cat.org" : { "re" : "redis\.kvseq|stns\.1\.(.+)", "head" : "stns.1", "pub" : "MD2-21-ID-D", "pg" : "1", "autoscint" : "1"},
"vidalia.ls-cat.org" : { "re" : "redis\.kvseq|stns\.1\.(.+)", "head" : "stns.1", "pub" : "MD2-21-ID-D", "pg" : "1", "autoscint" : "1"},
}
plcc2_dict = {
"omega" : { "status1" : "M5001", "status2" : "M5021", "position" : "M5041"},
"align.x" : { "status1" : "M5002", "status2" : "M5022", "position" : "M5042"},
"align.y" : { "status1" : "M5003", "status2" : "M5023", "position" : "M5043"},
"align.z" : { "status1" : "M5004", "status2" : "M5024", "position" : "M5044"},
"lightPolar" : { "status1" : "M5005", "status2" : "M5025", "position" : "M5045"},
"cam.zoom" : { "status1" : "M5006", "status2" : "M5026", "position" : "M5046"},
"appy" : { "status1" : "M5007", "status2" : "M5027", "position" : "M5047"},
"appz" : { "status1" : "M5008", "status2" : "M5028", "position" : "M5048"},
"capy" : { "status1" : "M5009", "status2" : "M5029", "position" : "M5049"},
"capz" : { "status1" : "M5010", "status2" : "M5030", "position" : "M5050"},
"scint" : { "status1" : "M5011", "status2" : "M5031", "position" : "M5051"},
"centering.x" : { "status1" : "M5012", "status2" : "M5032", "position" : "M5052"},
"centering.y" : { "status1" : "M5013", "status2" : "M5033", "position" : "M5053"},
"kappa" : { "status1" : "M5014", "status2" : "M5034", "position" : "M5054"},
"phi" : { "status1" : "M5015", "status2" : "M5035", "position" : "M5055"}
}
# M5001=M1 ; Omega
# M5002=M2 ; Alignment Table X
# M5003=M3 ; Alignment Table Y
# M5004=M4 ; Alignment Table Z
# M5005=M5 ; Analyser
# M5006=M6 ; Zoom
# M5007=M7 ; Aperture Y
# M5008=M8 ; Aperture Z
# M5009=M9 ; Capillary Y
# M5010=M10 ; Capillary Z
# M5011=M11 ; Scintillator Z
# M5012=M17 ; Center X
# M5013=M18 ; Center Y
# M5014=M19 ; Kappa
# M5015=M20 ; Phi
#
# M5021=M91 ; Omega
# M5022=M92 ; Alignment Table X
# M5023=M93 ; Alignment Table Y
# M5024=M94 ; Alignment Table Z
# M5025=M95 ; Analyser
# M5026=M96 ; Zoom
# M5027=M97 ; Aperture Y
# M5028=M98 ; Aperture Z
# M5029=M99 ; Capillary Y
# M5030=M100 ; Capillary Z
# M5031=M101 ; Scintillator Z
# M5032=M107 ; Center X
# M5033=M108 ; Center Y
# M5034=M109 ; Kappa
# M5035=M110 ; Phi
#
#
# ; Motor actual position
# M5041=(M181/(I108*32)) ; Phi
# M5042=(M182/(I208*32)) ; Table XYZ : X
# M5043=(M183/(I308*32)) ; Table XYZ : Y
# M5044=(M184/(I408*32)) ; Table XYZ : Z
# M5045=(M185/(I508*32)) ; Analyser
# M5046=(M186/(I608*32)) ; Zoom camera
# M5047=(M187/(I708*32)) ; Aperture Y
# M5048=(M188/(I808*32)) ; Aperture Z
# M5049=(M189/(I908*32)) ; Capillary Y
# M5050=(M190/(I1008*32)) ; Capillary Z
# M5051=(M191/(I1108*32)) ; Scintillator Z
# M5052=(M197/(I1708*32)) ; Centring #17
# M5053=(M198/(I1808*32)) ; Centring #18
# M5054=(M199/(I1908*32)) ; Mini Kappa 1
# M5055=(M200/(I2008*32)) ; Mini Kappa 2
#
# M5060=M6000 ; 11C byte 1
# M5061=M6001 ; 11C byte 2
# M5062=M6002 ; 11C byte 3
# M5063=M6003 ; 11C byte 5
# M5064=M6004 ; 11C byte 6
# M5065=M1200 ; Front Light DAC
# M5066=M1201 ; Back Light DAC
# M5067=M1203 ; Scintillator Piezo
# ;***************** Motor Status 1,Limits,Open loop *****************************
# ;PMAC side
# M1->X:$0B0,24 ; Phi
# M2->X:$130,24 ; Table XYZ : X
# M3->X:$1B0,24 ; Table XYZ : Y
# M4->X:$230,24 ; Table XYZ : Z
# M5->X:$2B0,24 ; Analyser
# M6->X:$330,24 ; Zoom DC Camera
# M7->X:$3B0,24 ; Aperture Y
# M8->X:$430,24 ; Aperture Z
# M9->X:$4B0,24 ; Capillary Y
# M10->X:$530,24 ; Capillary Z
# M11->X:$5B0,24 ; Scintillator Z
# M12->X:$630,24 ; Unused
# M13->X:$6B0,24 ; Unused
# M14->X:$730,24 ; Unused
# M15->X:$7B0,24 ; Unused
# M16->X:$830,24 ; Unused
# M17->X:$8B0,24 ; Centring Table Motor #17
# M18->X:$930,24 ; Centring Table Motor #18
# M19->X:$9B0,24 ; Mini Kappa 1
# M20->X:$A30,24 ; Mini Kappa 2
# M21->X:$AB0,24 ; Unused
# M22->X:$B30,24 ; Unused
# M23->X:$BB0,24 ; Unused
# M24->X:$C30,24 ; Unused
#
# ;open loop status
# M61->x:$0B0,18,1 ; Phi
# M62->x:$130,18,1 ; Table XYZ : X
# M63->x:$1B0,18,1 ; Table XYZ : Y
# M64->x:$230,18,1 ; Table XYZ : Z
# M65->x:$2B0,18,1 ; Analyser
# M66->x:$330,18,1 ; Zoom DC Camera
# M67->x:$3B0,18,1 ; Aperture Y
# M68->x:$430,18,1 ; Aperture Z
# M69->x:$4B0,18,1 ; Capillary Y
# M70->x:$530,18,1 ; Capillary Z
# M71->x:$5B0,18,1 ; Scintillator Z
# M72->x:$630,18,1 ; Unused
# M73->x:$6B0,18,1 ; Unused
# M74->x:$730,18,1 ; Unused
# M75->x:$7B0,18,1 ; Unused
# M76->x:$830,18,1 ; Unused
# M77->x:$8B0,18,1 ; Centring Table Motor X #17
# M78->x:$930,18,1 ; Centring Table Motor Y #18
# M79->x:$9B0,18,1 ; Mini Kappa 1
# M80->x:$A30,18,1 ; Mini Kappa 2
# ; M81->x:$AB0,18,1 ; Unused
# ; M82->x:$B30,18,1 ; Unused
# ; M83->X:$BB0,18,1 ; Unused
# ; M84->X:$C30,18,1 ; Unused
#
# ;*************** Motor Status 2,I2T,Fatal following error **********************
# ;PMAC side
# M91->Y:$0C0,24 ; Phi
# M92->Y:$140,24 ; Table XYZ : X
# M93->Y:$1C0,24 ; Table XYZ : Y
# M94->Y:$240,24 ; Table XYZ : Z
# M95->Y:$2C0,24 ; Analyser
# M96->Y:$340,24 ; Zoom DC Camera
# M97->Y:$3C0,24 ; Aperture Y
# M98->Y:$440,24 ; Aperture Z
# M99->Y:$4C0,24 ; Capillary Y
# M100->Y:$540,24 ; Capillary Z
# M101->Y:$5C0,24 ; Scintillator Z
# M102->Y:$640,24 ; Unused
# M103->Y:$6C0,24 ; Unused
# M104->Y:$740,24 ; Unused
# M105->Y:$7C0,24 ; Unused
# M106->Y:$840,24 ; Unused
# M107->Y:$8C0,24 ; Centring Table Motor #17
# M108->Y:$940,24 ; Centring Table Motor #18
# M109->Y:$9C0,24 ; Mini Kappa 1
# M110->Y:$A40,24 ; Mini Kappa 2
# M111->Y:$AC0,24 ; Unused
# M112->Y:$B40,24 ; Unused
# M113->Y:$BC0,24 ; Unused
# M114->Y:$C40,24 ; Unused
#
# ;**************************** In position status *******************************
# M121->Y:$0C0,0,1 ; Phi
# M122->Y:$140,0,1 ; Table XYZ : X
# M123->Y:$1C0,0,1 ; Table XYZ : Y
# M124->Y:$240,0,1 ; Table XYZ : Z
# M125->Y:$2C0,0,1 ; Analyser
# ; ;M125=1 Patch when Analyser goes really wrong !
# M126->Y:$340,0,1 ; Zoom DC Camera
# M127->Y:$3C0,0,1 ; Aperture Y
# M128->Y:$440,0,1 ; Aperture Z
# M129->Y:$4C0,0,1 ; Capillary Y
# M130->Y:$540,0,1 ; Capillary Z
# M131->Y:$5C0,0,1 ; Scintillator Z
# M132->Y:$640,0,1 ; Unused
# M133->Y:$6C0,0,1 ; Unused
# M134->Y:$740,0,1 ; Unused
# M135->Y:$7C0,0,1 ; Unused
# M136->Y:$840,0,1 ; Unused
# M137->Y:$8C0,0,1 ; Centring Table Motor #17
# M138->Y:$940,0,1 ; Centring Table Motor #18
# M139->Y:$9C0,0,1 ; Mini Kappa 1
# M140->Y:$A40,0,1 ; Mini Kappa 2
# M141->Y:$AC0,0,1 ; Unused
# M142->Y:$B40,0,1 ; Unused
# M143->Y:$BC0,0,1 ; Unused
# M144->Y:$C40,0,1 ; Unused
#
# Bug/Feature: only fields listed in motor_dict will be searched for in the ini file.
#
# Also see the comments for the motor_field_lists list below
#
# motor_dict keys
# motor_num: The pmac motor number between 1 and 32 inclusive. Leave undefined or set to -1 for motor for DAC and Binary Output motor like objects
# coord_num: The coordinate system the said motor finds itself in between 1 and 16 inclusive. Leave undefined or 0 for DAC and Binary Output motor like objects.
# max_accel: counts/msec/msec
# max_speed: counts/msec
# u2c: The conversion between counts and user units: Multiply user units by u2c to get counts. Should never be zero.
# active: 1 if the motor should be set up and used, 0 otherwise
# hard_ini: The section name for this motor in the microdiff_hard.ini file
# moveMode: freeRotation, rotation, or translation (default) used for the LS-CAT GUI
# reference: (omega only) The angle for which centering.y is up and centering.x is positive downstream
# axis: The axis letter for the PMAC in the specified coordinate system (X, Y, Z, etc)
# neutralPosition: The offset in user units between the home position and what we want to call zero
# printf: The printf format string for the position in the ncurses interface (uses a field width specifier *)
# format: The printf format string to update the redis value
# maxPosition: The software upper limit in user units relative to the home position
# minPosition: The software lower limit in user units relative to the home position
# smallStep: Recommened small step value for a user interface
# largeStep: Recommened large step value for a user interface
# update_resolution: Don't update redis until the position has changed by this amount in user units
#
# NOTE: active_init, home, and inactive_init should only be specified if the default string will not serve the purposes such as
# for omega and the polarizer
#
# active_init: A comma separated list of strings (double quoted if spaces present) enclosed in braces to send to the PMAC when the motor is active.
# home:            A comma separated list of strings (double quoted if spaces present) enclosed in braces to send to the PMAC to home the motor
# inactive_init: A comma separated list of strings (double quoted if spaces present) enclosed in braces to send to the PMAC when the motor is inactive.
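#
# A minimal sketch of a single entry (hypothetical motor, for illustration
# only; the real entries are in motor_dict just below):
#
#   "example.z" : { "motor_num" : "21", "coord_num" : "6", "axis" : "Z",
#                   "u2c" : "10000", "max_accel" : "0.5", "max_speed" : "100",
#                   "minPosition" : "0.0", "maxPosition" : "10.0",
#                   "format" : "%.3f", "active" : "0" },
#
# When home, active_init, and inactive_init are omitted like this, mk_home(),
# mk_active_init(), and mk_inactive_init() below fill them in from motor_num,
# coord_num, and axis.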
motor_dict = {
"omega" : { "motor_num" : "1", "max_accel" : "2", "max_speed" : "1664", "coord_num" : "1", "u2c" : "12800",
"home" : '{"M401=1 M1115=1 #1$",&1E,#1&1B1R}',"active_init" : '{M31=1,&1#1->X,"M700=(M700 | $000001) ^ $000001", M1115=1}',
"inactive_init" : '{M31=0,&1#1->0,"M700=M700 | $000001",M1115=0}',"moveMode" : "freeRotation",
"reference" : "228.5", "format" : "%.3f", "printf" : "%*.4f deg", "axis" : "X",
"hard_ini" : "PHIRotationAxis.PHIMotor", "neutralPosition" : "0", "active" : "1"
},
"align.x" : { "motor_num" : "2", "max_accel" : "2", "max_speed" : "121", "coord_num" : "3", "u2c" : "60620.8",
"smallStep" : "0.001",
"axis" : "X", "format" : "%.3f",
"minPosition" : "0.1", "maxPosition" : "4.0",
"hard_ini" : "PHIAxisXYZTable.PHIXMotor", "neutralPosition" : "0", "active" : "1"
},
"align.y" : { "motor_num" : "3", "max_accel" : "0.5", "max_speed" : "121", "coord_num" : "3", "u2c" : "60620.8",
"smallStep" : "0.001",
"axis" : "Y", "format" : "%.3f",
"minPosition" : "0.16", "maxPosition" : "16.15",
"hard_ini" : "PHIAxisXYZTable.PHIYMotor", "neutralPosition" : "0", "active" : "1"
},
"align.z" : { "motor_num" : "4", "max_accel" : "0.5", "max_speed" : "121", "coord_num" : "3", "u2c" : "60620.8",
"smallStep" : "0.001",
"axis" : "Z", "format" : "%.3f",
"minPosition" : "0.45", "maxPosition" : "5.85",
"hard_ini" : "PHIAxisXYZTable.PHIZMotor", "neutralPosition" : "0", "active" : "1"
},
"lightPolar" : { "motor_num" : "5", "max_accel" : "0.2", "max_speed" : "3", "u2c" : "142", "coord_num" : "0",
"home" : '{#5$,#5HMZ}', "active_init" : '{}', "inactive_init" : '{}',
"largeStep" : "45", "smallStep" : "10", "format" : "%.1f",
"printf" : "%*.1f deg", "update_resolution" : "1",
"hard_ini" : "Analyser.AnalyserMotor", "neutralPosition" : "0", "active" : "1"
},
"cam.zoom" : { "motor_num" : "6","max_accel" : "0.2", "max_speed" : "10", "coord_num" : "4", "u2c" : "1.0",
"smallStep" : "1",
"axis" : "Z","format" : "%.0f",
"minPosition" : "1","update_resolution" : "1",
"hard_ini" : "CoaxZoom.ZoomMotor", "neutralPosition" : "0", "in_position_band" : "1600", "active" : "1"
},
"appy" : { "motor_num" : "7","max_accel" : "1", "max_speed" : "201", "coord_num" : "5", "u2c" : "121241.6",
"smallStep" : "0.002",
"axis" : "Y","format" : "%.3f",
"minPosition" : "0.2","maxPosition" : "3.25",
"hard_ini" : "ApertureYZTable.ApertureYMotor", "neutralPosition" : "0", "active" : "1"
},
"appz" : { "motor_num" : "8","max_accel" : "1", "max_speed" : "201", "coord_num" : "5", "u2c" : "60620.8",
"smallStep" : "0.002",
"axis" : "Z","format" : "%.3f",
"minPosition" : "0.3","maxPosition" : "82.5",
"hard_ini" : "ApertureYZTable.ApertureZMotor", "neutralPosition" : "0", "active" : "1"
},
"capy" : { "motor_num" : "9","max_accel" : "1", "max_speed" : "201", "coord_num" : "5", "u2c" : "121241.6",
"smallStep" : "0.002",
"axis" : "U","format" : "%.3f",
"minPosition" : "0.05","maxPosition" : "3.19",
"hard_ini" : "CapillaryBSYZtable.CapillaryBSYMotor", "neutralPosition" : "0", "active" : "1"
},
"capz" : { "motor_num" : "10","max_accel" : "0.5", "max_speed" : "201", "coord_num" : "5", "u2c" : "19865.6",
"smallStep" : "0.002",
"axis" : "V","format" : "%.3f",
"minPosition" : "0.57","maxPosition" : "81.49",
"hard_ini" : "CapillaryBSYZtable.CapillaryBSZMotor", "neutralPosition" : "0", "active" : "1"
},
"scint" : { "motor_num" : "11","max_accel" : "0.5", "max_speed" : "151", "coord_num" : "5", "u2c" : "19865.6",
"smallStep" : "0.002",
"axis" : "W","format" : "%.3f",
"minPosition" : "0.2","maxPosition" : "86.1",
"hard_ini" : "ScintillatorPhotodiode.Zmotor", "neutralPosition" : "0", "active" : "1"
},
"centering.x" : { "motor_num" : "17","max_accel" : "0.5", "max_speed" : "150", "coord_num" : "2", "u2c" : "182400",
"smallStep" : "0.001",
"axis" : "X","format" : "%.3f",
"minPosition" : "-2.56","maxPosition" : "2.496",
"hard_ini" : "CentringXYTable.XCentringMotor", "neutralPosition" : "0", "active" : "1"
},
"centering.y" : {"motor_num" : "18","max_accel" : "0.5", "max_speed" : "150", "coord_num" : "2", "u2c" : "182400",
"smallStep" : "0.001",
"axis" : "Y","format" : "%.3f",
"minPosition" : "-2.58","maxPosition" : "2.4",
"hard_ini" : "CentringXYTable.YCentringMotor", "neutralPosition" : "0", "active" : "1"
},
"kappa" : { "motor_num" : "19","max_accel" : "0.2", "max_speed" : "50", "coord_num" : "7", "u2c" : "2844.444",
"moveMode" : "rotation",
"axis" : "X","format" : "%.2f",
"minPosition" : "-5","update_resolution" : "1.0",
"hard_ini" : "MiniKappa.Kappa1", "neutralPosition" : "0", "active" : "1"
},
"phi" : { "motor_num" : "20","max_accel" : "0.2", "max_speed" : "50", "coord_num" : "7", "u2c" : "711.111",
"moveMode" : "freeRotation",
"axis" : "Y","format" : "%.2f",
"update_resolution" : "1.0",
"hard_ini" : "MiniKappa.Kappa2", "neutralPosition" : "0", "active" : "1"
},
"fastShutter" : { "canHome" : "false","type" : "BO",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"frontLight.intensity" : { "canHome" : "false","type" : "DAC",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"backLight.intensity" : { "canHome" : "false","type" : "DAC",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"scint.focus" : { "canHome" : "false","type" : "DAC",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"backLight" : { "canHome" : "false","type" : "BO",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"cryo" : { "canHome" : "false","type" : "BO",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"dryer" : { "canHome" : "false","type" : "BO",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"fluo" : { "canHome" : "false","type" : "BO",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"frontLight" : { "canHome" : "false","type" : "BO",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"backLight.factor" : { "canHome" : "false","type" : "DAC",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"frontLight.factor" : { "canHome" : "false","type" : "DAC",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"smartMagnet" : { "canHome" : "false","type" : "BO", "active_init" : '{m1100=0,m1106=1}', "inactive_init" : '{m1100=1,m1106=0}',
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
}
}
def mk_home( mname, d):
if not d.has_key("motor_num") or not d.has_key("coord_num"):
return ""
motor_num = int(d["motor_num"])
coord_num = int(d["coord_num"])
if motor_num < 1 or motor_num > 32:
return ""
if mname == "kappa":
prog_num = 119
else:
prog_num = motor_num
return '{#%d$,M%d=1,&%dE,#%d&%dB%dR}' % (motor_num, motor_num+400, coord_num, motor_num, coord_num, prog_num)
def mk_active_init( d):
if not d.has_key("motor_num") or not d.has_key("coord_num") or not d.has_key( "axis"):
return ""
motor_num = int(d["motor_num"])
coord_num = int(d["coord_num"])
axis = str(d["axis"])
mask = 1 << (motor_num - 1)
if motor_num < 1 or motor_num > 32:
return ""
return '{M%d=1,&%d#%d->%s,"M700=(M700 | $%0x) ^ $%0x"}' % (motor_num + 30, coord_num, motor_num, axis, mask, mask)
def mk_inactive_init( d):
if not d.has_key("motor_num") or not d.has_key("coord_num") or not d.has_key( "axis"):
return ""
motor_num = int(d["motor_num"])
coord_num = int(d["coord_num"])
axis = str(d["axis"])
mask = 1 << (motor_num - 1)
if motor_num < 1 or motor_num > 32:
return ""
return '{M%d=0,&%d#%d->0,"M700=M700 | $%0x"}' % (motor_num + 30, coord_num, motor_num, mask)
def active_simulation( sim):
if str(sim) != "0":
rtn = "0"
else:
rtn = "1"
return rtn
def asis( arg):
return arg
hard_ini_fields = {
"active" : ["Simulation", active_simulation],
"coord_num" : ["CoordinateSystem", asis],
"largeStep" : ["LargeStep", asis],
"maxPosition" : ["MaxPosition", asis],
"minPosition" : ["MinPosition", asis],
"motor_num" : ["MotorNumber", asis],
"neutralPosition" : ["NeutralPosition", asis],
"precision" : ["Precision", asis],
"smallStep" : ["SmallStep", asis],
"u2c" : ["UnitRatio", asis]
}
# DBR TYPES
# 0 String
# 1 Short (16 bit)
# 2 Float (32 bit)
# 3 Enum (not supported as of 121219)
# 4 Char (8 bit)
# 5 Int (32 bit)
# 6 Double (64 bit)
motor_field_lists = [
# name, default, dbrtype
["active", "1", 1], # 1 if the motor is to be enabled and used (not fully supported as of 121219)
["active_init", "", 0], # postgresql style string array of initialization strings to send to PMAC if the motor is active
["axis", "", 4], # PMAC axis (single charater: X,Y,Z, etc)
["canHome", "0", 1], # 1 if a homing routine can be called
["canMove", "true", 0], # "true" if we can move this motor, "false" if we cannot.
["canStop", "true", 0], # "true" if it makes sense to display a stop button, "false" otherwise
["coord_num", "", 1], # PMAC coordinate system number for this motor
["currentPreset", "", 0], # Name of the current preset position
["in_position_band", "160", 1], # Motors within this amount are considered "In Position". UNITS ARE 1/16 OF A COUNT
["format", "%f", 0], # format string for publish position to redis
["hard_ini", None, 0], # Name of section in microdiff_hard.ini
["home", "", 0], # postgresql style string array of strings to send to PMAC to home motor
["inPosition", "true", 0], # "true" if the motor is in position, "false" if it is moving
["inactive_init", "", 0], # postgresql style string array of initialization strings to send to PMAC if the motor is inactive
["largeStep", "1.0", 6], # increment for large step in a UI
["maxPosition", "Infinity", 6], # upper soft limit
["max_accel", "", 0], # maximum motor acceleration, used for motors that are too be scanned (ie, omega)
["max_speed", "", 6], # maximum motor speed, used for motors that are too be scanned (ie, omega)
["minPosition", "-Infinity", 6], # lower soft limit
["motor_num", "-1", 1], # PMAC motor number
["moveMode", "translation",0], # translation, rotation, freeRotation
["name", "", 0], # What we think the motor should be called in a UI
["negLimitSet", "0", 1], # 1 if on the limit, 0 otherwise
["neutralPosition", "0", 6], # Move here after a home and call it zero. Should be called -offset or offset or somehting like that.
["posLimitSet", "0", 1], # 1 if on the limit, 0 otherwise
["position", "", 6], # our position
["precision", "0.001", 6], # precision of the motion: moves of less than this amount are ignored (use in_position_band instead)
["presets.length", "0", 1], # number of presets defined
["printPrecision", "3", 1], # for ui to print out position (see the printf field for another way of doing this)
["printf", "%*.3f", 0], # printf style format string for ncurses interface
["smallStep", "0.1", 6], # step size for UI for a fine movement
["status_str", "", 0], # Explanation of what the motor is doing
["type", "PMAC", 0], # type of motor: PMAC, DAC, BO, SOFT, etc
["u2c", "1.0", 6], # multipy user units times u2c to get motor counts
["unit", "mm", 0], # user units
["update_resolution", "0.001", 4] # update redis when motor is moving only when a change of this magnetude is seen
]
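#
# For a single motor field, the loops further below emit redis commands of
# roughly this shape (illustrative only; "stns.2" is one of the heads defined
# in configs above, and HSETNX becomes HSET when the value comes from the
# hard ini file):
#
#   HSETNX stns.2.omega.u2c VALUE '12800'
#   PUBLISH mk_pgpmac_redis stns.2.omega.u2c
#   HSETNX stns.2.omega.u2c DBRTYPE '6'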
bi_list = ["CryoSwitch"]
motor_presets = {
"align.x" : [
# name value canTune pref_ini section pref_ini option
[ "Beam", "0.0", "1", "PHIAxisXYZTable", "XBeam_X1"],
[ "Back", "-1.8", "1", "PHIAxisXYZTable", "XScintillatorOut_X2"],
[ "Back_Vector", "-1.8", "1", "PHIAxisXYZTable", "XScintillatorOut_X2"]
],
"align.y" : [
# name value canTune pref_ini section pref_ini option
[ "Beam", "0.0", "1", "PHIAxisXYZTable", "YBeam_Y1"],
[ "Back", "1.0", "1", "PHIAxisXYZTable", "YScintillatorOut_Y2"],
[ "Back_Vector", "1.0", "1", "PHIAxisXYZTable", "YScintillatorOut_Y2"]
],
"align.z" : [
# name value canTune pref_ini section pref_ini option
[ "Beam", "0.0", "1", "PHIAxisXYZTable", "ZBeam_Z1"],
[ "Back", "1.9", "1", "PHIAxisXYZTable", "ZScintillatorOut_Z2"],
[ "Back_Vector", "1.9", "1", "PHIAxisXYZTable", "ZScintillatorOut_Z2"]
],
"appy" : [
# name value canTune pref_ini section pref_ini option
[ "In", "0.117", "1", "ApertureYZTable", "BeamHorizontalPosition_Y0"]
],
"appz" : [
[ "In", "80", "1", "ApertureYZTable", "BeamVerticalPosition_Z1"],
[ "Out", "71.777", "0", "ApertureYZTable", "VerticalOffScreenPosition_Z2"],
[ "Cover", "2.0", "0", "ApertureYZTable", "OffVerticalPosition_Z0"]
],
"backLight" : [
[ "On", "1", None, None, None],
[ "Off", "0", None, None, None]
],
"frontLight" : [
[ "On", "1", None, None, None],
[ "Off", "0", None, None, None]
],
"capy" : [
[ "In", "0.082", "1", "CapillaryBSYZtable", "HorizontalBeamPosition_Y0"]
],
"capz" : [
[ "In", "78.2617", "1", "CapillaryBSYZtable", "VerticalBeamPosition_Z1"],
[ "Out", "69.944", "0", "CapillaryBSYZtable", "VerticalOffScreenPosition_Z2"],
[ "Cover", "0.3", "0", "CapillaryBSYZtable", "VeticalOffPosition_Z0"]
],
"fastShutter" : [
[ "Open", "1", None, None, None],
[ "Close", "0", None, None, None]
],
"kappa" : [
[ "manualMount", "180.0", None, "MiniKappa", "Kappa1MountPosition"],
[ "reference", "228.5", None, "CentringXYTable", "PhiReference"]
],
"omega" : [
[ "manualMount", "180.0", None, "PHIRotationAxis", "KappaMountPosition"]
],
"scint.focus" : [
[ "tuner", "53", "1", "ScintillatorPhotodiode", "OnFocusPiezoPosition"]
],
"scint" : [
[ "Photodiode", "53.0", "1", "ScintillatorPhotodiode", "DiodeOnBeamVerticalPosition_Z2"],
[ "Scintillator", "78.788", "1", "ScintillatorPhotodiode", "ScintiOnBeamVerticalPosition_Z1"],
[ "Cover", "2.0", "0", "ScintillatorPhotodiode", "OffVerticalPosition_Z0"]
]
}
zoom_settings = [
#lev front back pos scalex scaley section
[1, 4.0, 8.0, 34100, 2.7083, 3.3442, "CoaxCam.Zoom1"],
[2, 6.0, 8.1, 31440, 2.2487, 2.2776, "CoaxCam.Zoom2"],
[3, 6.5, 8.2, 27460, 1.7520, 1.7550, "CoaxCam.Zoom3"],
[4, 7.0, 8.3, 23480, 1.3360, 1.3400, "CoaxCam.Zoom4"],
[5, 8.0, 10.0, 19500, 1.0140, 1.0110, "CoaxCam.Zoom5"],
[6, 9.0, 12.0, 15520, 0.7710, 0.7760, "CoaxCam.Zoom6"],
[7, 10.0, 17.0, 11540, 0.5880, 0.5920, "CoaxCam.Zoom7"],
[8, 12.0, 25.0, 7560, 0.4460, 0.4480, "CoaxCam.Zoom8"],
[9, 15.0, 37.0, 3580, 0.3410, 0.3460, "CoaxCam.Zoom9"],
[10, 16.0, 42.0, 0, 0.2700, 0.2690, "CoaxCam.Zoom10"]
]
# config
for c in configs.keys():
print "HMSET config.%s HEAD '%s' PUB '%s' RE '%s' PG '%s' AUTOSCINT '%s'" % \
(c.lower(), configs[c]["head"], configs[c]["pub"], configs[c]["re"], configs[c]["pg"], configs[c]["autoscint"])
# motor stuff
if hard_ini:
hi = iniParser.iniParser( hard_ini)
hi.read()
for m in motor_dict.keys():
print "HSETNX %s.%s.name VALUE '%s'" % (head, m, m) # These values are not part of any defaults
print "PUBLISH mk_pgpmac_redis %s.%s.name" % (head, m) #
print "HSETNX %s.%s.name DBRTYPE 0" % (head, m) #
print "HSETNX %s.%s.position VALUE ''" % (head, m) #
print "PUBLISH mk_pgpmac_redis %s.%s.position" % (head, m) #
print "HSETNX %s.%s.position DBRTYPE 6" % (head, m) #
if hard_ini != None and motor_dict[m].has_key("hard_ini"):
motor_dict[m]["motor_num"] = hi.get(motor_dict[m]["hard_ini"], "motornumber")
motor_dict[m]["coord_num"] = hi.get(motor_dict[m]["hard_ini"], "coordinatesystem")
# set home, active_init, and inactive_init based on current motor and coordinate numbers
#
if not motor_dict[m].has_key( "home"):
motor_dict[m]["home"] = mk_home( m, motor_dict[m])
if not motor_dict[m].has_key( "active_init"):
motor_dict[m]["active_init"] = mk_active_init( motor_dict[m])
if not motor_dict[m].has_key( "inactive_init"):
motor_dict[m]["inactive_init"] = mk_inactive_init( motor_dict[m])
for k in motor_dict[m]:
if k == "hard_ini": # this is sort of a meta field
continue
# Use the value from the hard ini file, if it is available
        # Override the current value if it is available
#
if hard_ini == None or \
not motor_dict[m].has_key("hard_ini") or \
motor_dict[m]["hard_ini"] == None or \
not hard_ini_fields.has_key( k) or \
not hi.has_section( motor_dict[m]["hard_ini"]) or \
not hi.has_option( motor_dict[m]["hard_ini"], hard_ini_fields[k][0]):
# Use the hard coded value found in this file
#
v = motor_dict[m][k]
f = "HSETNX"
else:
# Use the ini file value
#
xlate = hard_ini_fields[k][1]
v = xlate(hi.get( motor_dict[m]["hard_ini"], hard_ini_fields[k][0]))
f = "HSET"
print "%s %s.%s.%s VALUE '%s'" % (f, head, m, k, v)
print "PUBLISH mk_pgpmac_redis %s.%s.%s" % (f, head, m)
# Throw out the default default value for fields not found any other way
#
for field, default, dbrtype in motor_field_lists:
print "HSETNX %s.%s.%s VALUE '%s'" % (head, m, field, default)
print "PUBLISH mk_pgpmac_redis %s.%s.%s" % (head, m, field)
print "HSETNX %s.%s.%s DBRTYPE '%s'" % (head, m, field, dbrtype)
# Add the presets
#
if pref_ini:
pi = iniParser.iniParser( pref_ini)
pi.read()
i = 0;
if motor_presets.has_key( m):
for pname, ppos, ptune, section, option in motor_presets[m]:
print "HSETNX %s.%s.presets.%d.name VALUE %s" % (head, m, i, pname)
print "PUBLISH mk_pgpmac_redis %s.%s.presets.%d.name" % (head, m, i)
f = "HSETNX"
if pref_ini and section and option and pi.has_section( section) and pi.has_option( section, option):
ppos = pi.get( section, option)
f = "HSET"
print "%s %s.%s.presets.%d.position VALUE %s" % ( f, head, m, i, ppos)
print "PUBLISH mk_pgpmac_redis %s.%s.presets.%d.position" % (head, m, i)
if ptune != None:
print "HSETNX %s.%s.presets.%d.canTune VALUE %s" % ( head, m, i, ppos)
print "PUBLISH mk_pgpmac_redis %s.%s.presets.%d.canTune" % (head, m, i)
i += 1
print "HSET %s.%s.presets.length VALUE %d" % ( head, m, i)
print "PUBLISH mk_pgpmac_redis %s.%s.presets.length" % (head, m)
# omega reference angle is unique
if m=="omega":
if pref_ini and pi.has_section( "CentringXYTable") and pi.has_option( "CentringXYTable", "PhiReference"):
ppos = pi.get( "CentringXYTable", "PhiReference")
print "HSET %s.omega.reference VALUE %s" % (head, ppos)
print "PUBLISH mk_pgpmac_redis %s.omega.reference" % (head)
# light and zoom settings
for lev, f, b, p, x, y, section in zoom_settings:
fnc = "HSETNX"
if pref_ini != None and pi.has_section( section) and pi.has_option( section, "FrontLightIntensity"):
f = pi.get( section, "FrontLightIntensity")
fnc = "HSET"
print "%s %s.cam.zoom.%d.FrontLightIntensity VALUE %s" % (fnc, head, lev, f)
print "PUBLISH mk_pgpmac_redis %s.cam.zoom.%d.FrontLightIntensity" % (head, lev)
fnc = "HSETNX"
if pref_ini != None and pi.has_section( section) and pi.has_option( section, "LightIntensity"):
b = pi.get( section, "LightIntensity")
fnc = "HSET"
print "%s %s.cam.zoom.%d.LightIntensity VALUE %s" % (fnc, head, lev, b)
print "PUBLISH mk_pgpmac_redis %s.cam.zoom.%d.LightIntensity" % (head, lev)
fnc = "HSETNX"
if pref_ini != None and pi.has_section( section) and pi.has_option( section, "MotorPosition"):
p = pi.get( section, "MotorPosition")
fnc = "HSET"
print "%s %s.cam.zoom.%d.MotorPosition VALUE %s" % (fnc, head, lev, p)
print "PUBLISH mk_pgpmac_redis %s.cam.zoom.%d.MotorPosition" % (head, lev)
fnc = "HSETNX"
if pref_ini != None and pi.has_section( section) and pi.has_option( section, "ScaleX"):
x = pi.get( section, "ScaleX")
fnc = "HSET"
print "%s %s.cam.zoom.%d.ScaleX VALUE %s" % (fnc, head, lev, x)
print "PUBLISH mk_pgpmac_redis %s.cam.zoom.%d.ScaleX" % (head, lev)
fnc = "HSETNX"
if pref_ini != None and pi.has_section( section) and pi.has_option( section, "ScaleY"):
y = pi.get( section, "ScaleY")
fnc = "HSET"
print "%s %s.cam.zoom.%d.ScaleY VALUE %s" % (fnc, head, lev, y)
print "PUBLISH mk_pgpmac_redis %s.cam.zoom.%d.ScaleY" % (head, lev)
plcc2_file = open( "%s-plcc2.pmc" % (head), "w")
plcc2_file.write( "OPEN PLCC2 CLEAR\n")
plcc2_file.write( ";\n")
plcc2_file.write( "; Auto generated by mk_pgpmac_redis.py on %s\n" % datetime.datetime.isoformat(datetime.datetime.now()))
plcc2_file.write( "; Insert into your .pmc file (replacing plcc 2 completely) and reload with the pmac executive program.\n")
plcc2_file.write( ";\n")
plcc2_file.write( "M522=M520; Used for A&B registers set up.\n")
plcc2_file.write( "\n");
for m in plcc2_dict.keys():
if not motor_dict.has_key( m) or not motor_dict[m].has_key( "motor_num"):
continue
motor_num = int( motor_dict[m]["motor_num"])
if motor_num < 1 or motor_num > 32:
continue
plcc2_file.write( "%s=M%d ; %s Status 1\n" % (plcc2_dict[m]["status1"], motor_num, m))
plcc2_file.write( "%s=M%d ; %s Status 2\n" % (plcc2_dict[m]["status2"], motor_num + 90, m))
plcc2_file.write( "%s=(M%d/(I%d*32)) ; %s Position\n" % (plcc2_dict[m]["position"], motor_num+180, motor_num*100 + 8, m))
plcc2_file.write( "M5070=M1048 ; FShutterIsOpen\n")
plcc2_file.write( "M5071=P3002 ; PhiScan\n")
plcc2_file.write( "M5072=P3001 ; FastShutterHasOpened\n")
plcc2_file.write( "M5073=P3005 ; FastShutterHasGloballyOpened\n")
plcc2_file.write( "M5074=P177 ; Number of passes (FShutterIsOpen false and FastShutterHasOpened true and npasses=1 means we can read the detector)\n")
plcc2_file.write( "CLOSE\n")
plcc2_file.close();
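# For the omega entry in plcc2_dict above (assuming the default motor number 1),
# the lines written to the .pmc file look like this (reconstructed from the
# write() calls above, for illustration only):
#
#   M5001=M1 ; omega Status 1
#   M5021=M91 ; omega Status 2
#   M5041=(M181/(I108*32)) ; omega Position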
| gpl-3.0 | 2,189,701,493,260,907,300 | 47.952318 | 172 | 0.516572 | false |
listen-lavender/webcrawl | webcrawl/queue/lib/queue.py | 1 | 8585 | """A multi-producer, multi-consumer queue."""
from time import time as _time
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
from collections import deque
import heapq
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class Full(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue(object):
"""Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
"""
def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = _threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = _threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = _threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = _threading.Condition(self.mutex)
self.unfinished_tasks = 0
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
self.all_tasks_done.acquire()
try:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
finally:
self.all_tasks_done.release()
def join(self):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
self.all_tasks_done.acquire()
try:
while self.unfinished_tasks:
self.all_tasks_done.wait()
finally:
self.all_tasks_done.release()
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
self.mutex.acquire()
n = self._qsize()
self.mutex.release()
return n
def empty(self):
"""Return True if the queue is empty, False otherwise (not reliable!)."""
self.mutex.acquire()
n = not self._qsize()
self.mutex.release()
return n
def full(self):
"""Return True if the queue is full, False otherwise (not reliable!)."""
self.mutex.acquire()
n = 0 < self.maxsize == self._qsize()
self.mutex.release()
return n
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
"""
self.not_full.acquire()
try:
if self.maxsize > 0:
if not block:
if self._qsize() == self.maxsize:
raise Full
elif timeout is None:
while self._qsize() == self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = _time() + timeout
while self._qsize() == self.maxsize:
remaining = endtime - _time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
finally:
self.not_full.release()
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
"""
return self.put(item, False)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
"""
self.not_empty.acquire()
try:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = _time() + timeout
while not self._qsize():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
finally:
self.not_empty.release()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
"""
return self.get(False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.queue = deque()
def _qsize(self, len=len):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
class PriorityQueue(Queue):
'''Variant of Queue that retrieves open entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
'''
def _init(self, maxsize):
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item, heappush=heapq.heappush):
heappush(self.queue, item)
def _get(self, heappop=heapq.heappop):
return heappop(self.queue)
class LifoQueue(Queue):
'''Variant of Queue that retrieves most recently added entries first.'''
def _init(self, maxsize):
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
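# A minimal usage sketch (not part of the original module): two worker threads
# draining a queue, using the task_done()/join() protocol described in the
# docstrings above.
if __name__ == "__main__":
    def _worker(q):
        while True:
            item = q.get()
            # ... a real consumer would process item here ...
            q.task_done()

    _q = Queue(maxsize=10)
    for _ in range(2):
        _t = _threading.Thread(target=_worker, args=(_q,))
        _t.setDaemon(True)   # let the program exit even though workers loop forever
        _t.start()
    for _i in range(20):
        _q.put(_i)
    _q.join()   # returns once every item put() has been matched by task_done()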
| mit | 4,989,844,749,189,693,000 | 34.184426 | 85 | 0.588818 | false |
robmcmullen/peppy | peppy/configprefs.py | 1 | 3479 | # peppy Copyright (c) 2006-2010 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""
Managing user config files and directories.
"""
import os, os.path, types
from ConfigParser import ConfigParser
import cPickle as pickle
import wx
from peppy.lib.userparams import *
from peppy.debug import *
class HomeConfigDir(ClassPrefs, debugmixin):
"""Simple loader for config files in the home directory.
Wrapper around home directory files. Load and save files or
pickled objects in the home directory.
"""
def __init__(self, dirname, create=True, debug=False):
if dirname:
self.dir = os.path.normpath(dirname)
else:
self.dir = wx.StandardPaths.Get().GetUserDataDir()
self.dprint("Configuration dir: %s" % self.dir)
if not self.exists():
if create:
try:
self.create()
except:
d = wx.StandardPaths.Get().GetUserDataDir()
if d != self.dir:
self.dir = d
try:
self.create()
except:
eprint("Can't create configuration directory %s" % self.dir)
self.dir = None
def create(self, name=None):
"""Create the main config directory or a subdirectory within it
@param name: subdirectory within config directory, or None if you want
to create the main config directory.
"""
try:
if name:
path = self.fullpath(name)
else:
path = self.dir
self.dprint("Creating %s" % path)
os.mkdir(path)
return True
except:
import traceback
self.dprint("Failed creating %s" % path)
self.dprint("".join(traceback.format_stack()))
return False
def fullpath(self, name):
return os.path.join(self.dir, name)
def exists(self, name=None):
if name is not None:
d = self.fullpath(name)
else:
d = self.dir
return os.path.exists(d)
def contents(self, subdir):
path = self.fullpath(subdir)
if os.path.exists(path):
return path, os.listdir(path)
return None, []
def open(self, name, mode='r'):
path = self.fullpath(name)
fd = open(path,mode)
return fd
def remove(self, name):
path = self.fullpath(name)
os.remove(path)
def loadObject(self, name):
item = None
if self.exists(name):
fd = self.open(name, 'rb')
item = pickle.load(fd)
fd.close()
return item
def saveObject(self, name, item):
fd = self.open(name, 'wb')
pickle.dump(item, fd)
fd.close()
if __name__=='__main__':
# Testing stuff that creates a directory in the user's homedir.
# Don't perform this in the standard unit tests.
def testHomeDir():
print "for platform %s:" % os.sys.platform
c=HomeConfigDir(".configprefs",debug=True)
print "found home dir=%s" % c.dir
fd=c.open("test.cfg",'w')
fd.write('blah!!!')
fd.close()
nums=[0,1,2,4,6,99]
c.saveObject("stuff.bin",nums)
print c.loadObject("stuff.bin")
testHomeDir()
| gpl-2.0 | 8,596,785,216,098,643,000 | 28.483051 | 88 | 0.538661 | false |
ptressel/sahana-eden-madpub | models/000_config.py | 1 | 14638 | # -*- coding: utf-8 -*-
"""
Deployment settings
All settings which are typically edited for a deployment should be done here
Deployers shouldn't typically need to edit any other files.
"""
# Remind admin to edit this file
FINISHED_EDITING_CONFIG_FILE = True # change to True after you finish editing this file
if not FINISHED_EDITING_CONFIG_FILE:
raise HTTP(501, body="Please edit models/000_config.py first")
s3cfg = local_import("s3cfg")
deployment_settings = s3cfg.S3Config(T)
# Database settings
deployment_settings.database.db_type = "sqlite"
deployment_settings.database.host = "localhost"
deployment_settings.database.port = None # use default
deployment_settings.database.database = "sahana"
deployment_settings.database.username = "sahana"
deployment_settings.database.password = "password"
deployment_settings.database.pool_size = 30
# Authentication settings
# This setting should be changed _before_ registering the 1st user
deployment_settings.auth.hmac_key = "aliceinwonderland"
# These settings should be changed _after_ the 1st (admin) user is
# registered in order to secure the deployment
deployment_settings.auth.registration_requires_verification = False
deployment_settings.auth.registration_requires_approval = False
deployment_settings.auth.openid = False
# Base settings
# Set this to the Public URL of the instance
deployment_settings.base.public_url = "http://127.0.0.1:8000"
# Set this to True to switch to Debug mode
# Debug mode means that uncompressed CSS/JS files are loaded
# JS Debug messages are also available in the Console
# can also load an individual page in debug mode by appending URL with
# ?debug=1
deployment_settings.base.debug = False
# Switch to "False" in Production for a Performance gain
# (need to set to "True" again when Table definitions are changed)
deployment_settings.base.migrate = True
# Enable/disable pre-population of the database.
# Should be True on 1st_run to pre-populate the database
# - unless doing a manual DB migration
# Then set to False in Production (to save 1x DAL hit every page)
# NOTE: the web UI will not be accessible while the DB is empty,
# instead run:
# python web2py.py -N -S eden -M
# to create the db structure, then exit and re-import the data.
deployment_settings.base.prepopulate = True
# Set this to True to use Content Delivery Networks to speed up Internet-facing sites
deployment_settings.base.cdn = False
# Email settings
# Outbound server
deployment_settings.mail.server = "127.0.0.1:25"
# Useful for Windows Laptops:
#deployment_settings.mail.server = "smtp.gmail.com:587"
#deployment_settings.mail.login = "username:password"
# From Address
deployment_settings.mail.sender = "'Sahana' <[email protected]>"
# Address to which mails get sent to approve new users
deployment_settings.mail.approver = "[email protected]"
# Twitter settings:
# Register an app at http://twitter.com/apps
# (select Aplication Type: Client)
# You'll get your consumer_key and consumer_secret from Twitter
# You can keep these empty if you don't need Twitter integration
deployment_settings.twitter.oauth_consumer_key = ""
deployment_settings.twitter.oauth_consumer_secret = ""
# L10n settings
# Uncomment this if the deployment is just in a few countries
# (used in the GIS Location Selector & maybe in future: Messaging)
#deployment_settings.L10n.countries = ["PK"]
# Languages used in the deployment (used for Language Toolbar & GIS Locations)
# http://www.loc.gov/standards/iso639-2/php/code_list.php
deployment_settings.L10n.languages = {
"en":T("English"),
"es":T("Spanish"),
#"fr":T("French"),
#"pa":T("Punjabi"),
#"ps":T("Pashto"),
#"sd":T("Sindhi"),
"ja":T("Japanese"),
"ur":T("Urdu"),
"zh-tw":T("Chinese (Taiwan)"),
}
# Default language for Language Toolbar (& GIS Locations in future)
deployment_settings.L10n.default_language = "en"
# Display the language toolbar
deployment_settings.L10n.display_toolbar = True
# Default timezone for users
deployment_settings.L10n.utc_offset = "UTC +0000"
# Religions used in Person Registry
# @ToDo: find a better code
# http://eden.sahanafoundation.org/ticket/594
deployment_settings.L10n.religions = {
"none":T("none"),
"christian":T("Christian"),
"muslim":T("Muslim"),
"jew":T("Jew"),
"buddhist":T("Buddhist"),
"hindu":T("Hindu"),
"bahai":T("Bahai"),
"other":T("other")
}
# GIS (Map) settings
# Provide a tool to select locations via a map on all forms with location_id
deployment_settings.gis.map_selector = True
# Display Resources recorded to Admin-Level Locations on the map
deployment_settings.gis.display_L0 = False
# Currently unused
#deployment_settings.gis.display_L1 = True
# Allow non-MapAdmins to edit Admin locations?
# (defaults to True, if not set)
deployment_settings.gis.edit_L0 = False
deployment_settings.gis.edit_L1 = True
#deployment_settings.gis.edit_L2 = True
deployment_settings.gis.locations_hierarchy = {
"L0":T("Country"),
"L1":T("Province"),
"L2":T("District"),
"L3":T("Town"),
"L4":T("Village"),
"L5":T("Location"), # Street Address
"XX":T("Imported")
}
# Maximum Marker Size
# (takes effect only on display)
deployment_settings.gis.marker_max_height = 35
deployment_settings.gis.marker_max_width = 30
# Duplicate Features so that they show wrapped across the Date Line?
# Points only for now
# lon<0 have a duplicate at lon+360
# lon>0 have a duplicate at lon-360
deployment_settings.gis.duplicate_features = False
# Mouse Position: 'normal', 'mgrs' or 'off'
deployment_settings.gis.mouse_position = "normal"
# Print Service URL: http://eden.sahanafoundation.org/wiki/BluePrintGISPrinting
#deployment_settings.gis.print_service = "/geoserver/pdf/"
# Do we have a spatial DB available? (currently unused. Will support PostGIS & Spatialite.)
deployment_settings.gis.spatialdb = False
# GeoServer (Currently used by GeoExplorer. Will allow REST control of GeoServer.)
# NB Needs to be a publicly-accessible URL for querying via client JS
#deployment_settings.gis.geoserver_url = "http://localhost/geoserver"
#deployment_settings.gis.geoserver_username = "admin"
#deployment_settings.gis.geoserver_password = "password"
# OpenStreetMap settings:
# Register your app by logging in to www.openstreetmap.org & then selecting 'oauth settings'
deployment_settings.osm.oauth_consumer_key = ""
deployment_settings.osm.oauth_consumer_secret = ""
# Security Policy settings
# Lock-down access to Map Editing
#deployment_settings.security.map = True
# Security Policy (defaults to 1 = Simple)
#deployment_settings.security.policy = 2 # Editor
# Should users be allowed to register themselves?
deployment_settings.security.self_registration = True
# Use 'soft' deletes
deployment_settings.security.archive_not_delete = True
# Audit settings
# We Audit if either the Global or Module asks us to
# (ignore gracefully if module author hasn't implemented this)
# NB Auditing (especially Reads) slows system down & consumes diskspace
#deployment_settings.security.audit_write = False
#deployment_settings.security.audit_read = False
# UI/Workflow options
# Should user be prompted to save before navigating away?
#deployment_settings.ui.navigate_away_confirm = False
# Should potentially large dropdowns be turned into autocompletes?
# (unused currently)
#deployment_settings.ui.autocomplete = True
# Comment/uncomment modules here to disable/enable them
# Modules menu is defined in 01_menu.py
from gluon.storage import Storage
deployment_settings.modules = Storage(
default = Storage(
name_nice = T("Home"),
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = 0 # This item is always 1st in the menu
),
admin = Storage(
name_nice = T("Administration"),
description = T("Site Administration"),
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = 0 # This item is handled separately in the menu
),
gis = Storage(
name_nice = T("Map"),
description = T("Situation Awareness & Geospatial Analysis"),
module_type = 1, # 1st item in the menu
resources = Storage(
gis_location = {"importer" : True}
)
),
doc = Storage(
name_nice = T("Documents and Photos"),
description = T("A library of digital resources, such as photos, documents and reports"),
module_type = 10,
),
msg = Storage(
name_nice = T("Messaging"),
description = T("Sends & Receives Alerts via Email & SMS"),
module_type = 10,
),
pr = Storage(
name_nice = T("Person Registry"),
description = T("Central point to record details on People"),
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10,
resources = Storage(
pr_address = {"importer" : True},
pr_pe_contact = {"importer" : True},
pr_presence = {"importer" : True},
pr_identity = {"importer" : True},
pr_person = {"importer" : True},
pr_group = {"importer" : True},
pr_group_membership = {"importer" : True},
)
),
pf = Storage(
name_nice = T("Person Finder"),
description = T("Helps to report and search for Missing Persons"),
module_type = 10,
),
dvi = Storage(
name_nice = T("Disaster Victim Identification"),
description = T("Disaster Victim Identification"),
module_type = 10,
#access = "|DVI|", # Only users with the DVI role can see this module in the default menu & access the controller
#audit_read = True, # Can enable Audit for just an individual module here
#audit_write = True,
resources = Storage(
dvi_recreq = {"importer" : True},
)
),
#dvr = Storage(
# name_nice = T("Disaster Victim Registry"),
# description = T("Traces internally displaced people (IDPs) and their needs"),
# module_type = 10
# ),
org = Storage(
name_nice = T("Organization Registry"),
description = T('Lists "who is doing what & where". Allows relief agencies to coordinate their activities'),
module_type = 10,
resources = Storage(
org_organisation = {"importer" : True},
org_office = {"importer" : True},
org_staff = {"importer" : True}
)
),
project = Storage(
name_nice = T("Project Tracking"),
description = T("Tracking of Projects, Activities and Tasks"),
module_type = 10
),
# NB Budget module depends on Project Tracking Module
budget = Storage(
name_nice = T("Budgeting Module"),
description = T("Allows a Budget to be drawn up"),
module_type = 10,
resources = Storage(
budget_item = {"importer" : True},
budget_kit = {"importer" : True},
budget_bundle = {"importer" : True},
)
),
logs = Storage(
name_nice = T("Logistics Management"),
description = T("Managing, Storing and Distributing Relief Items"),
module_type = 10
),
rms = Storage(
name_nice = T("Requests"),
description = T("Tracks requests for aid and matches them against donors who have pledged aid"),
module_type = 3,
resources = Storage(
rms_req = {"importer" : True},
)
),
cr = Storage(
name_nice = T("Shelter Registry"),
description = T("Tracks the location, distibution, capacity and breakdown of victims in Shelters"),
module_type = 10,
resources = Storage(
cr_shelter = {"importer" : True }
)
),
hms = Storage(
name_nice = T("Hospitals"),
description = T("Helps to monitor status of hospitals"),
module_type = 10,
resources = Storage(
hms_hospital = {"importer" : True}
)
),
vol = Storage(
name_nice = T("Volunteers"),
description = T("Manage volunteers by capturing their skills, availability and allocation"),
module_type = 10,
),
irs = Storage(
name_nice = T("Incident Reporting"),
description = T("Incident Reporting System"),
module_type = 10
),
assess = Storage(
name_nice = "Assessments",
description = "Rapid Assessments & Flexible Impact Assessments",
module_type = 2,
),
survey = Storage(
name_nice = "Survey Module",
description = "Create, enter, and manage surveys.",
module_type = 10,
),
delphi = Storage(
name_nice = T("Delphi Decision Maker"),
description = T("Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list."),
module_type = 10,
),
importer = Storage(
name_nice = "Spreadsheet Importer",
description = "Used to import data from spreadsheets into the database",
module_type = 10,
),
#flood = Storage(
# name_nice = T("Flood Alerts"),
# description = T("Flood Alerts show water levels in various parts of the country"),
# module_type = 10
# ),
#ticket = Storage(
# name_nice = T("Ticketing Module"),
# description = T("Master Message Log to process incoming reports & requests"),
# module_type = 10,
# ),
#lms = Storage(
# name_nice = T("Logistics Management System"),
# description = T("An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities."),
# module_type = 10
# ),
mad = Storage(
name_nice = "Mobile Assessment of Damage",
description = "Uploads damage information and images from mobile devices",
module_type = 10,
),
)
| mit | 1,233,628,247,905,388,000 | 39.325069 | 192 | 0.646331 | false |
MirkoDziadzka/pyhkdf | src/hkdf.py | 1 | 3000 | """
This is a straight forward implementation of RFC 5869
HMAC-based Extract-and-Expand Key Derivation Function (HKDF)
http://tools.ietf.org/html/rfc5869
"""
import warnings
from Crypto.Hash import SHA512, HMAC
class HKDF:
"""
HMAC-based Extract-and-Expand Key Derivation Function (RFC 5869)
usage:
>> engine = HKDF(b"password", b"salt", digestmod=SHA256)
>> key1 = engine.expand(b"info", length)
This is equivalent to
>> prk = HKDF.rfc_extract(b"password", b"salt", digest=SHA256)
>> key1 = HKDF.rfc_expand(prk, b"info", length, digest=SHA256)
"""
@staticmethod
def rfc_extract(key: bytes, salt: bytes=b"", digest=SHA512) -> bytes:
""" The extract step from RFC 5869
Converts the key and the salt to a pseudorandom key using
the given hash function.
"""
if not salt:
salt = b'\0' * digest.digest_size
return HMAC.new(salt, key, digestmod=digest).digest()
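# Illustrative usage sketch (editor's addition, not part of the original
# module): with the default SHA512 digest the extract step returns a
# digest-sized pseudorandom key, e.g.
#   prk = HKDF.rfc_extract(b"password", b"salt")
#   assert len(prk) == SHA512.digest_size   # 64 bytes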
@staticmethod
def rfc_expand(prk: bytes, info: bytes, length: int, digest=SHA512) -> bytes:
""" The expand step from RFC 5896
Take the result of rfc_extract (given as prk) and
compute a key from this based on info and a requested length.
digest must be the same as in the extract step.
"""
if length < 0:
raise ValueError("Parameter length must be greater or equal 0")
if length > digest.digest_size * 255:
raise ValueError(f"Parameter length must be less or equal {digest.digest_size * 255}")
# generate key stream, stop when we have enough bytes
keystream = []
keystream_length = 0
block_index = 0
key_block = b""
while keystream_length < length:
block_index += 1
data = key_block + info + bytes([block_index % 256])
key_block = HMAC.new(prk, data, digestmod=digest).digest()
keystream.append(key_block)
keystream_length += len(key_block)
return b"".join(keystream)[:length]
def __init__(self, key: bytes, salt: bytes=b"", digestmod=SHA512):
self.__digest = digestmod
self.__prk = self.rfc_extract(key, salt, digestmod)
@property
def digest_length(self):
""" return the digest_length of the hash module """
return self.__digest.digest_size
@property
def _prk(self):
""" the pseudorandom key, computed from the input key and the salt
"""
return self.__prk
def expand(self, info: bytes, length: int) -> bytes:
""" expand a key for the given context (info) in the given length
"""
return self.rfc_expand(self.__prk, info, length, digest=self.__digest)
def extract_key(self, info: bytes, length: int) -> bytes:
""" Deprecated: use expand() instead """
warnings.warn("deprecated, use expand() instead", DeprecationWarning)
return self.expand(info, length)
| mit | -7,948,960,739,531,117,000 | 29.927835 | 98 | 0.609 | false |
blindsightcorp/rigor | test/constants.py | 1 | 2408 | import datetime
import os.path
kDirName, filename = os.path.split(os.path.abspath(__file__))
kFixtureFile = os.path.join(kDirName, 'types.db')
kTestFile = os.path.join(kDirName, 'test.db')
kTestDirectory = os.path.join(kDirName, 'tempdir', 'child')
kConfigFile = os.path.join(kDirName, 'testing.ini')
kConfigFile2 = os.path.join(kDirName, 'testing2.ini')
kLockFile = os.path.join(kDirName, 'lockfile')
kAwsBucket = 'orion.aws.testing'
kImportFile = os.path.join(kDirName, 'import.json')
kImportDirectory = os.path.join(kDirName, 'to_import')
kRepoDirectory = os.path.join(kDirName, 'imported')
kImportDatabase = os.path.join(kDirName, 'imported.db')
kExampleTextFile = os.path.join(kDirName, 'example_text_file.txt')
kExampleImageFile = os.path.join(kDirName, 'example_image.png')
kExampleTemporaryImageFile = os.path.join(kDirName, 'example_image_temp.png')
kExampleDownloadedFile = os.path.join(kDirName, 'fetched.dat')
kExampleCheckpointFile = os.path.join(kDirName, 'example_checkpoint.dat')
kExampleNewCheckpointFile = os.path.join(kDirName, 'example_new_checkpoint.dat')
kS3HostName = 's3.amazonaws.com'
kExampleBucket = 'rigor-test-bucket'
kExampleCredentials = 'test_credentials'
kExampleImageDimensions = (1080, 3840, 3)
kNonexistentFile = '/xxxzzfooxxx'
kExamplePercept = {
'annotations': [
{'boundary': ((1, 10), (3, 6), (1, 10), (10, 3)), 'confidence': 4, 'domain': u'test', 'model': u'e', 'properties': {u'prop': u'value'}, 'stamp': datetime.datetime(2015, 2, 3, 20, 16, 7, 252667), 'tags': [ u'test_tag', ]},
{'boundary': ((10, 4), (4, 8), (3, 8), (6, 3)), 'confidence': 5, 'domain': u'test', 'model': u'e', 'stamp': datetime.datetime(2015, 2, 3, 20, 16, 7, 252787)},
{'boundary': ((1, 7), (1, 9), (7, 1), (3, 5)), 'confidence': 4, 'domain': u'test', 'model': u'd', 'stamp': datetime.datetime(2015, 2, 3, 20, 16, 7, 252969)}
],
'device_id': u'device_1938401',
'format': u'image/jpeg',
'hash': u'edd66afcf0eb4f5ef392fd8e94ff0ff2139ddc01',
'locator': u'example://mybucket/182828291',
'properties': {u'val1': u'val2'},
'sensors': {'acceleration_x': 0.1, 'acceleration_y': 0.2, 'acceleration_z': 0.3, 'altitude': 123.0, 'altitude_accuracy': 2.34, 'bearing': 180.1, 'bearing_accuracy': 1.23, 'location': (34.56, -120.2), 'location_accuracy': 0.1, 'location_provider': u'gps', 'speed': 60.1},
'stamp': datetime.datetime(2015, 2, 3, 20, 16, 7, 252487),
'x_size': 800, 'y_size': 600
}
| bsd-2-clause | -5,517,956,473,567,648,000 | 57.731707 | 271 | 0.685631 | false |
ninjin/contra | gtbtokenize.py | 1 | 12308 | #!/usr/bin/env python
# Implements a GENIA Treebank - like tokenization.
# This is a python translation of my GTB-tokenize.pl, which in turn
# draws in part on Robert MacIntyre's 1995 PTB tokenizer,
# (http://www.cis.upenn.edu/~treebank/tokenizer.sed) and Yoshimasa
# Tsuruoka's GENIA tagger tokenization (tokenize.cpp;
# www-tsujii.is.s.u-tokyo.ac.jp/GENIA/tagger)
# by Sampo Pyysalo, 2011. Licensed under the MIT license.
# http://www.opensource.org/licenses/mit-license.php
# NOTE: intended differences to GTB tokenization:
# - Does not break "protein(s)" -> "protein ( s )"
from __future__ import with_statement
import re
import sys  # used by the DEBUG_GTB_TOKENIZATION branch in tokenize()
INPUT_ENCODING = "UTF-8"
OUTPUT_ENCODING = "UTF-8"
DEBUG_GTB_TOKENIZATION = False
# Penn treebank bracket escapes (others excluded)
PTB_ESCAPES = [('(', '-LRB-'),
(')', '-RRB-'),
('[', '-LSB-'),
(']', '-RSB-'),
('{', '-LCB-'),
('}', '-RCB-'),
]
def PTB_escape(s):
for u, e in PTB_ESCAPES:
s = s.replace(u, e)
return s
def PTB_unescape(s):
for u, e in PTB_ESCAPES:
s = s.replace(e, u)
return s
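# Illustrative round-trip example (editor's addition, not part of the
# original file), based on the PTB_ESCAPES table above:
#   PTB_escape("beta-(1,3)-glucan")  -> "beta--LRB-1,3-RRB--glucan"
#   PTB_unescape("-LSB-IL-6-RSB-")   -> "[IL-6]"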
# processing in three stages: "initial" regexs run first, then
# "repeated" run as long as there are changes, and then "final"
# run. As the tokenize() function itself is trivial, comments relating
# to regexes given with the re.compiles.
__initial, __repeated, __final = [], [], []
# separate but do not break ellipsis
__initial.append((re.compile(r'\.\.\.'), r' ... '))
# To avoid breaking names of chemicals, protein complexes and similar,
# only add space to related special chars if there's already space on
# at least one side.
__initial.append((re.compile(r'([,;:@#]) '), r' \1 '))
__initial.append((re.compile(r' ([,;:@#])'), r' \1 '))
# always separated
__initial.append((re.compile(r'\$'), r' $ '))
__initial.append((re.compile(r'\%'), r' % '))
__initial.append((re.compile(r'\&'), r' & '))
# separate punctuation followed by space even if there's closing
# brackets or quotes in between, but only sentence-final for
# periods (don't break e.g. "E. coli").
__initial.append((re.compile(r'([,:;])([\[\]\)\}\>\"\']* +)'), r' \1\2'))
__initial.append((re.compile(r'(\.+)([\[\]\)\}\>\"\']* +)$'), r' \1\2'))
# these always
__initial.append((re.compile(r'\?'), ' ? '))
__initial.append((re.compile(r'\!'), ' ! '))
# separate greater than and less than signs, avoiding breaking
# "arrows" (e.g. "-->", ">>") and compound operators (e.g. "</=")
__initial.append((re.compile(r'((?:=\/)?<+(?:\/=|--+>?)?)'), r' \1 '))
__initial.append((re.compile(r'((?:<?--+|=\/)?>+(?:\/=)?)'), r' \1 '))
# separate dashes, not breaking up "arrows"
__initial.append((re.compile(r'(<?--+\>?)'), r' \1 '))
# Parens only separated when there's space around a balanced
# bracketing. This aims to avoid splitting e.g. beta-(1,3)-glucan,
# CD34(+), CD8(-)CD3(-).
# Previously had a proper recursive implementation for this, but it
# was much too slow for large-scale use. The following is
# comparatively fast but a bit of a hack:
# First "protect" token-internal brackets by replacing them with
# their PTB escapes. "Token-internal" brackets are defined as
# matching brackets of which at least one has no space on either
# side. To match GTB tokenization for cases like "interleukin
# (IL)-mediated", and "p65(RelA)/p50", treat following dashes and
# slashes as space. Nested brackets are resolved inside-out;
# to get this right, add a heuristic considering boundary
# brackets as "space".
# (First a special case (rareish): "protect" cases with dashes after
# paranthesized expressions that cannot be abbreviations to avoid
# breaking up e.g. "(+)-pentazocine". Here, "cannot be abbreviations"
# is taken as "contains no uppercase character".)
__initial.append((re.compile(r'\(([^ A-Z()\[\]{}]+)\)-'), r'-LRB-\1-RRB--'))
# These are repeated until there's no more change (per above comment)
__repeated.append((re.compile(r'(?<![ (\[{])\(([^ ()\[\]{}]*)\)'), r'-LRB-\1-RRB-'))
__repeated.append((re.compile(r'\(([^ ()\[\]{}]*)\)(?![ )\]}\/-])'), r'-LRB-\1-RRB-'))
__repeated.append((re.compile(r'(?<![ (\[{])\[([^ ()\[\]{}]*)\]'), r'-LSB-\1-RSB-'))
__repeated.append((re.compile(r'\[([^ ()\[\]{}]*)\](?![ )\]}\/-])'), r'-LSB-\1-RSB-'))
__repeated.append((re.compile(r'(?<![ (\[{])\{([^ ()\[\]{}]*)\}'), r'-LCB-\1-RCB-'))
__repeated.append((re.compile(r'\{([^ ()\[\]{}]*)\}(?![ )\]}\/-])'), r'-LCB-\1-RCB-'))
# Remaining brackets are not token-internal and should be
# separated.
__final.append((re.compile(r'\('), r' -LRB- '))
__final.append((re.compile(r'\)'), r' -RRB- '))
__final.append((re.compile(r'\['), r' -LSB- '))
__final.append((re.compile(r'\]'), r' -RSB- '))
__final.append((re.compile(r'\{'), r' -LCB- '))
__final.append((re.compile(r'\}'), r' -RCB- '))
# initial single quotes always separated
__final.append((re.compile(r' (\'+)'), r' \1 '))
# final with the exception of 3' and 5' (rough heuristic)
__final.append((re.compile(r'(?<![35\'])(\'+) '), r' \1 '))
# This more frequently disagreed than agreed with GTB
# # Separate slashes preceded by space (can arise from
# # e.g. splitting "p65(RelA)/p50"
# __final.append((re.compile(r' \/'), r' \/ '))
# Standard from PTB (TODO: pack)
__final.append((re.compile(r'\'s '), ' \'s '))
__final.append((re.compile(r'\'S '), ' \'S '))
__final.append((re.compile(r'\'m '), ' \'m '))
__final.append((re.compile(r'\'M '), ' \'M '))
__final.append((re.compile(r'\'d '), ' \'d '))
__final.append((re.compile(r'\'D '), ' \'D '))
__final.append((re.compile(r'\'ll '), ' \'ll '))
__final.append((re.compile(r'\'re '), ' \'re '))
__final.append((re.compile(r'\'ve '), ' \'ve '))
__final.append((re.compile(r'n\'t '), ' n\'t '))
__final.append((re.compile(r'\'LL '), ' \'LL '))
__final.append((re.compile(r'\'RE '), ' \'RE '))
__final.append((re.compile(r'\'VE '), ' \'VE '))
__final.append((re.compile(r'N\'T '), ' N\'T '))
__final.append((re.compile(r' Cannot '), ' Can not '))
__final.append((re.compile(r' cannot '), ' can not '))
__final.append((re.compile(r' D\'ye '), ' D\' ye '))
__final.append((re.compile(r' d\'ye '), ' d\' ye '))
__final.append((re.compile(r' Gimme '), ' Gim me '))
__final.append((re.compile(r' gimme '), ' gim me '))
__final.append((re.compile(r' Gonna '), ' Gon na '))
__final.append((re.compile(r' gonna '), ' gon na '))
__final.append((re.compile(r' Gotta '), ' Got ta '))
__final.append((re.compile(r' gotta '), ' got ta '))
__final.append((re.compile(r' Lemme '), ' Lem me '))
__final.append((re.compile(r' lemme '), ' lem me '))
__final.append((re.compile(r' More\'n '), ' More \'n '))
__final.append((re.compile(r' more\'n '), ' more \'n '))
__final.append((re.compile(r'\'Tis '), ' \'T is '))
__final.append((re.compile(r'\'tis '), ' \'t is '))
__final.append((re.compile(r'\'Twas '), ' \'T was '))
__final.append((re.compile(r'\'twas '), ' \'t was '))
__final.append((re.compile(r' Wanna '), ' Wan na '))
__final.append((re.compile(r' wanna '), ' wan na '))
# clean up possible extra space
__final.append((re.compile(r' +'), r' '))
def _tokenize(s):
"""
Tokenizer core. Performs GTB-like tokenization, using PTB escapes
for brackets (but not quotes). Assumes given string has initial
and terminating space. You probably want to use tokenize() instead
of this function.
"""
# see the re.compile calls above for comments
for r, t in __initial:
s = r.sub(t, s)
while True:
o = s
for r, t in __repeated:
s = r.sub(t, s)
if o == s: break
for r, t in __final:
s = r.sub(t, s)
return s
def tokenize(s, ptb_escaping=False, use_single_quotes_only=False,
escape_token_internal_parens=False):
"""
Tokenizes the given string with a GTB-like tokenization. Input
will be adjusted by removing surrounding space, if any. Arguments
hopefully self-explanatory.
"""
if DEBUG_GTB_TOKENIZATION:
orig = s
# Core tokenization needs starting and ending space and no newline;
# store to return string ending similarly
# TODO: this isn't this difficult ... rewrite nicely
s = re.sub(r'^', ' ', s)
m = re.match(r'^((?:.+|\n)*?) *(\n*)$', s)
assert m, "INTERNAL ERROR on '%s'" % s # should always match
s, s_end = m.groups()
s = re.sub(r'$', ' ', s)
if ptb_escaping:
if use_single_quotes_only:
# special case for McCCJ: escape into single quotes.
s = re.sub(r'([ \(\[\{\<])\"', r'\1 '+"' ", s)
else:
# standard PTB quote escaping
s = re.sub(r'([ \(\[\{\<])\"', r'\1 `` ', s)
else:
# no escaping, just separate
s = re.sub(r'([ \(\[\{\<])\"', r'\1 " ', s)
s = _tokenize(s)
# as above (not quite sure why this is after primary tokenization...)
if ptb_escaping:
if use_single_quotes_only:
s = s.replace('"', " ' ")
else:
s = s.replace('"', " '' ")
else:
s = s.replace('"', ' " ')
if not ptb_escaping:
if not escape_token_internal_parens:
# standard unescape for PTB escapes introduced in core
# tokenization
s = PTB_unescape(s)
else:
# only unescape if a space can be matched on both
# sides of the bracket.
s = re.sub(r'(?<= )-LRB-(?= )', '(', s)
s = re.sub(r'(?<= )-RRB-(?= )', ')', s)
s = re.sub(r'(?<= )-LSB-(?= )', '[', s)
s = re.sub(r'(?<= )-RSB-(?= )', ']', s)
s = re.sub(r'(?<= )-LCB-(?= )', '{', s)
s = re.sub(r'(?<= )-RCB-(?= )', '}', s)
# Clean up added space (well, maybe other also)
s = re.sub(r' +', ' ', s)
s = re.sub(r'^ +', '', s)
s = re.sub(r' +$', '', s)
# Only do final comparison in debug mode.
if DEBUG_GTB_TOKENIZATION:
# revised must match original when whitespace, quotes (etc.)
# and escapes are ignored
# TODO: clean this up
r1 = PTB_unescape(orig.replace(' ', '').replace('\n','').replace("'",'').replace('"','').replace('``',''))
r2 = PTB_unescape(s.replace(' ', '').replace('\n','').replace("'",'').replace('"','').replace('``',''))
if r1 != r2:
print >> sys.stderr, "tokenize(): error: text mismatch (returning original):\nORIG: '%s'\nNEW: '%s'" % (orig, s)
s = orig
return s+s_end
def __argparser():
import argparse
ap=argparse.ArgumentParser(description="Perform GENIA Treebank-like text tokenization.")
ap.add_argument("-ptb", default=False, action="store_true", help="Use Penn Treebank escapes")
ap.add_argument("-mccc", default=False, action="store_true", help="Special processing for McClosky-Charniak-Johnson parser input")
ap.add_argument("-sp", default=False, action="store_true", help="Special processing for Stanford parser+PTBEscapingProcessor input. (not necessary for Stanford Parser version 1.6.5 and newer)")
ap.add_argument("files", metavar="FILE", nargs="*", help="Files to tokenize.")
return ap
def main(argv):
import sys
import codecs
arg = __argparser().parse_args(argv[1:])
# sorry, the special cases are a bit of a mess
ptb_escaping, use_single_quotes_only, escape_token_internal_parens = False, False, False
if arg.ptb:
ptb_escaping = True
if arg.mccc:
ptb_escaping = True
# current version of McCCJ has trouble with double quotes
use_single_quotes_only = True
if arg.sp:
# current version of Stanford parser PTBEscapingProcessor
# doesn't correctly escape word-internal parentheses
escape_token_internal_parens = True
# for testing, read stdin if no args
if len(arg.files) == 0:
arg.files.append('/dev/stdin')
for fn in arg.files:
try:
with codecs.open(fn, encoding=INPUT_ENCODING) as f:
for l in f:
t = tokenize(l, ptb_escaping=ptb_escaping,
use_single_quotes_only=use_single_quotes_only,
escape_token_internal_parens=escape_token_internal_parens)
sys.stdout.write(t.encode(OUTPUT_ENCODING))
except Exception, e:
print >> sys.stderr, "Failed to read", fn, ":", e
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv))
| isc | 1,279,164,132,318,494,200 | 37.704403 | 197 | 0.576129 | false |
sgibbes/carbon-budget | analyses/hadoop_analysis.py | 1 | 3108 | ### Code for running Hadoop clusters on the model endpoints
### then using the cumulative summing script to sum the endpoint and area by tcd threshold.
### Also, sample code for copying results from spot machine to s3 for two endpoints.
### git clone https://github.com/wri/gfw-annual-loss-processing
'''
For annual gain rate:
python annual_update.py --analysis-type annualGain --points-folder s3://gfw2-data/climate/carbon_model/model_output_tsv/20181116/annualGain_tcd2000/ --output-folder s3://gfw2-data/climate/carbon_model/model_output_Hadoop/raw/annualGain_tcd2000/20181120/ --polygons-folder s3://gfw2-data/alerts-tsv/country-pages/climate/tsvs/ --iterate-by points
python cumsum_hadoop_output.py --input s3://gfw2-data/climate/carbon_model/model_output_Hadoop/raw/annualGain_tcd2000/20181120/ --no-years --analysis-name annualGain
For cumulative gain rate:
python annual_update.py --analysis-type cumulGain --points-folder s3://gfw2-data/climate/carbon_model/model_output_tsv/20181116/cumulGain_tcd2000/ --output-folder s3://gfw2-data/climate/carbon_model/model_output_Hadoop/raw/cumulGain_tcd2000/20181120/ --polygons-folder s3://gfw2-data/alerts-tsv/country-pages/climate/tsvs/ --iterate-by points
python cumsum_hadoop_output.py --input s3://gfw2-data/climate/carbon_model/model_output_Hadoop/raw/cumulGain_tcd2000/20181120/ --no-years --analysis-name cumulGain
aws s3 cp /home/ubuntu/gfw-annual-loss-processing/2_Cumulate-Results-and-Create-API-Datasets/processing/XXXXXXXXXXXXXXXXXXXXXXXX/output.csv s3://gfw2-data/climate/carbon_model/model_output_Hadoop/processed/cumulGain_tcd2000/20181121/cumulGain_t_carbon_2001_15.csv
For net emissions:
python annual_update.py --analysis-type netEmis --points-folder s3://gfw2-data/climate/carbon_model/model_output_tsv/20181116/netEmis_tcd2000/ --output-folder s3://gfw2-data/climate/carbon_model/model_output_Hadoop/raw/netEmis_tcd2000/20181120/ --polygons-folder s3://gfw2-data/alerts-tsv/country-pages/climate/tsvs/ --iterate-by points
python cumsum_hadoop_output.py --input s3://gfw2-data/climate/carbon_model/model_output_Hadoop/raw/netEmis_tcd2000/20181120/ --no-years --analysis-name netEmis
For gross emissions:
python annual_update.py --analysis-type grossEmis --points-folder s3://gfw2-data/climate/carbon_model/model_output_tsv/20181119/grossEmis_tcd2000_tcl/ --output-folder s3://gfw2-data/climate/carbon_model/model_output_Hadoop/raw/grossEmis_tcd2000_tcl/20181120/ --polygons-folder s3://gfw2-data/alerts-tsv/country-pages/climate/tsvs/ --iterate-by points --dryrun
python cumsum_hadoop_output.py --input s3://gfw2-data/climate/carbon_model/model_output_Hadoop/raw/grossEmis_tcd2000_tcl/20181120/ --max-year 2015 --analysis-name grossEmis
# The cumsum for gross emissions takes a few minutes because there are so many more rows in it (for each year)
aws s3 cp /home/ubuntu/gfw-annual-loss-processing/2_Cumulate-Results-and-Create-API-Datasets/processing/357b9433-185e-4c2f-8659-ec613eb58250/output.csv s3://gfw2-data/climate/carbon_model/model_output_Hadoop/processed/grossEmis_tcd2000_tcl/20181121/grossEmis_t_CO2_2001_15.csv
'''
| apache-2.0 | 1,434,611,297,971,112,400 | 99.258065 | 359 | 0.79601 | false |
openmips/stbgui | lib/python/Components/Harddisk.py | 1 | 27772 | import os
import time
from Tools.CList import CList
from SystemInfo import SystemInfo
from Components.Console import Console
from boxbranding import getBoxType, getMachineBuild
import Task
def readFile(filename):
file = open(filename)
data = file.read().strip()
file.close()
return data
def getProcMounts():
try:
mounts = open("/proc/mounts", 'r')
except IOError, ex:
print "[Harddisk] Failed to open /proc/mounts", ex
return []
result = [line.strip().split(' ') for line in mounts]
for item in result:
# Spaces are encoded as \040 in mounts
item[1] = item[1].replace('\\040', ' ')
return result
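# Editor's note (illustrative, not from the original source): a typical
# /proc/mounts line such as
#   /dev/sda1 /media/hdd ext4 rw,relatime 0 0
# is returned as ['/dev/sda1', '/media/hdd', 'ext4', 'rw,relatime', '0', '0'];
# the '\040' replacement above restores literal spaces in mount points.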
def isFileSystemSupported(filesystem):
try:
for fs in open('/proc/filesystems', 'r'):
if fs.strip().endswith(filesystem):
return True
return False
except Exception, ex:
print "[Harddisk] Failed to read /proc/filesystems:", ex
def findMountPoint(path):
'Example: findMountPoint("/media/hdd/some/file") returns "/media/hdd"'
path = os.path.abspath(path)
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
DEVTYPE_UDEV = 0
DEVTYPE_DEVFS = 1
class Harddisk:
def __init__(self, device, removable=False):
self.device = device
if os.access("/dev/.udev", 0) or os.access("/run/udev/data", 0):
self.type = DEVTYPE_UDEV
elif os.access("/dev/.devfsd", 0):
self.type = DEVTYPE_DEVFS
else:
print "[Harddisk] Unable to determine structure of /dev"
self.card = False
self.max_idle_time = 0
self.idle_running = False
self.last_access = time.time()
self.last_stat = 0
self.timer = None
self.is_sleeping = False
self.dev_path = ''
self.disk_path = ''
self.mount_path = None
self.mount_device = None
self.phys_path = os.path.realpath(self.sysfsPath('device'))
self.removable = removable
self.internal = "pci" in self.phys_path or "ahci" in self.phys_path or "sata" in self.phys_path
try:
data = open("/sys/block/%s/queue/rotational" % device, "r").read().strip()
self.rotational = int(data)
except:
self.rotational = True
if self.type == DEVTYPE_UDEV:
self.dev_path = '/dev/' + self.device
self.disk_path = self.dev_path
self.card = "sdhci" in self.phys_path
elif self.type == DEVTYPE_DEVFS:
tmp = readFile(self.sysfsPath('dev')).split(':')
s_major = int(tmp[0])
s_minor = int(tmp[1])
for disc in os.listdir("/dev/discs"):
dev_path = os.path.realpath('/dev/discs/' + disc)
disk_path = dev_path + '/disc'
try:
rdev = os.stat(disk_path).st_rdev
except OSError:
continue
if s_major == os.major(rdev) and s_minor == os.minor(rdev):
self.dev_path = dev_path
self.disk_path = disk_path
break
self.card = self.device[:2] == "hd" and "host0" not in self.dev_path
print "[Harddisk] new device", self.device, '->', self.dev_path, '->', self.disk_path
if not removable and not self.card:
self.startIdle()
def __lt__(self, ob):
return self.device < ob.device
def partitionPath(self, n):
if self.type == DEVTYPE_UDEV:
if self.dev_path.startswith('/dev/mmcblk0'):
return self.dev_path + "p" + n
else:
return self.dev_path + n
elif self.type == DEVTYPE_DEVFS:
return self.dev_path + '/part' + n
def sysfsPath(self, filename):
return os.path.join('/sys/block/', self.device, filename)
def stop(self):
if self.timer:
self.timer.stop()
self.timer.callback.remove(self.runIdle)
def bus(self):
ret = _("External")
# SD/MMC(F1 specific)
if self.type == DEVTYPE_UDEV:
type_name = " (SD/MMC)"
# CF(7025 specific)
elif self.type == DEVTYPE_DEVFS:
type_name = " (CF)"
if self.card:
ret += type_name
else:
if self.internal:
ret = _("Internal")
if not self.rotational:
ret += " (SSD)"
return ret
def diskSize(self):
cap = 0
try:
line = readFile(self.sysfsPath('size'))
cap = int(line)
return cap / 1000 * 512 / 1000
except:
dev = self.findMount()
if dev:
try:
stat = os.statvfs(dev)
cap = int(stat.f_blocks * stat.f_bsize)
return cap / 1000 / 1000
except:
pass
return cap
def capacity(self):
cap = self.diskSize()
if cap == 0:
return ""
if cap < 1000:
return _("%03d MB") % cap
return _("%d.%03d GB") % (cap/1000, cap%1000)
def model(self):
try:
if self.device[:2] == "hd":
return readFile('/proc/ide/' + self.device + '/model')
elif self.device[:2] == "sd":
vendor = readFile(self.sysfsPath('device/vendor'))
model = readFile(self.sysfsPath('device/model'))
return vendor + '(' + model + ')'
elif self.device.startswith('mmcblk0'):
return readFile(self.sysfsPath('device/name'))
else:
raise Exception, "[Harddisk] no hdX or sdX or mmcX"
except Exception, e:
print "[Harddisk] Failed to get model:", e
return "-?-"
def free(self):
dev = self.findMount()
if dev:
stat = os.statvfs(dev)
return (stat.f_bfree/1000) * (stat.f_bsize/1024)
return -1
def numPartitions(self):
numPart = -1
if self.type == DEVTYPE_UDEV:
try:
devdir = os.listdir('/dev')
except OSError:
return -1
for filename in devdir:
if filename.startswith(self.device):
numPart += 1
elif self.type == DEVTYPE_DEVFS:
try:
idedir = os.listdir(self.dev_path)
except OSError:
return -1
for filename in idedir:
if filename.startswith("disc"):
numPart += 1
if filename.startswith("part"):
numPart += 1
return numPart
def mountDevice(self):
for parts in getProcMounts():
if os.path.realpath(parts[0]).startswith(self.dev_path):
self.mount_device = parts[0]
self.mount_path = parts[1]
return parts[1]
return None
def enumMountDevices(self):
for parts in getProcMounts():
if os.path.realpath(parts[0]).startswith(self.dev_path):
yield parts[1]
def findMount(self):
if self.mount_path is None:
return self.mountDevice()
return self.mount_path
def unmount(self):
dev = self.mountDevice()
if dev is None:
# not mounted, return OK
return 0
cmd = 'umount ' + dev
print "[Harddisk] ", cmd
res = os.system(cmd)
return (res >> 8)
def createPartition(self):
cmd = 'printf "8,\n;0,0\n;0,0\n;0,0\ny\n" | sfdisk -f -uS ' + self.disk_path
res = os.system(cmd)
return (res >> 8)
def mkfs(self):
# No longer supported, use createInitializeJob instead
return 1
def mount(self):
# try mounting through fstab first
if self.mount_device is None:
dev = self.partitionPath("1")
else:
# if previously mounted, use the same spot
dev = self.mount_device
try:
fstab = open("/etc/fstab")
lines = fstab.readlines()
fstab.close()
except IOError:
return -1
for line in lines:
parts = line.strip().split(" ")
fspath = os.path.realpath(parts[0])
if fspath == dev:
print "[Harddisk] mounting:", fspath
cmd = "mount -t auto " + fspath
res = os.system(cmd)
return (res >> 8)
# device is not in fstab
res = -1
if self.type == DEVTYPE_UDEV:
# we can let udev do the job, re-read the partition table
res = os.system('hdparm -z ' + self.disk_path)
# give udev some time to make the mount, which it will do asynchronously
from time import sleep
sleep(3)
return (res >> 8)
def fsck(self):
# No longer supported, use createCheckJob instead
return 1
def killPartitionTable(self):
zero = 512 * '\0'
h = open(self.dev_path, 'wb')
# delete first 9 sectors, which will likely kill the first partition too
for i in range(9):
h.write(zero)
h.close()
def killPartition(self, n):
zero = 512 * '\0'
part = self.partitionPath(n)
h = open(part, 'wb')
for i in range(3):
h.write(zero)
h.close()
def createInitializeJob(self):
job = Task.Job(_("Initializing storage device..."))
size = self.diskSize()
print "[Harddisk] size: %s MB" % size
task = UnmountTask(job, self)
task = Task.PythonTask(job, _("Removing partition table"))
task.work = self.killPartitionTable
task.weighting = 1
task = Task.LoggingTask(job, _("Rereading partition table"))
task.weighting = 1
task.setTool('hdparm')
task.args.append('-z')
task.args.append(self.disk_path)
task = Task.ConditionTask(job, _("Waiting for partition"), timeoutCount=20)
task.check = lambda: not os.path.exists(self.partitionPath("1"))
task.weighting = 1
if os.path.exists('/usr/sbin/parted'):
use_parted = True
else:
if size > 2097151:
addInstallTask(job, 'parted')
use_parted = True
else:
use_parted = False
task = Task.LoggingTask(job, _("Creating partition"))
task.weighting = 5
if use_parted:
task.setTool('parted')
if size < 1024:
# On very small devices, align to block only
alignment = 'min'
else:
# Prefer optimal alignment for performance
alignment = 'opt'
if size > 2097151:
parttype = 'gpt'
else:
parttype = 'msdos'
task.args += ['-a', alignment, '-s', self.disk_path, 'mklabel', parttype, 'mkpart', 'primary', '0%', '100%']
else:
task.setTool('sfdisk')
task.args.append('-f')
task.args.append('-uS')
task.args.append(self.disk_path)
if size > 128000:
# Start at sector 8 to better support 4k aligned disks
print "[Harddisk] Detected >128GB disk, using 4k alignment"
task.initial_input = "8,,L\n;0,0\n;0,0\n;0,0\ny\n"
else:
# Smaller disks (CF cards, sticks etc) don't need that
task.initial_input = ",,L\n;\n;\n;\ny\n"
task = Task.ConditionTask(job, _("Waiting for partition"))
task.check = lambda: os.path.exists(self.partitionPath("1"))
task.weighting = 1
task = MkfsTask(job, _("Creating filesystem"))
big_o_options = ["dir_index"]
if isFileSystemSupported("ext4"):
task.setTool("mkfs.ext4")
else:
task.setTool("mkfs.ext3")
if size > 250000:
# No more than 256k i-nodes (prevent problems with fsck memory requirements)
task.args += ["-T", "largefile", "-N", "262144"]
big_o_options.append("sparse_super")
elif size > 16384:
# between 16GB and 250GB: 1 i-node per megabyte
task.args += ["-T", "largefile"]
big_o_options.append("sparse_super")
elif size > 2048:
# Over 2GB: 32 i-nodes per megabyte
task.args += ["-T", "largefile", "-N", str(size * 32)]
task.args += ["-m0", "-O", ",".join(big_o_options), self.partitionPath("1")]
task = MountTask(job, self)
task.weighting = 3
task = Task.ConditionTask(job, _("Waiting for mount"), timeoutCount=20)
task.check = self.mountDevice
task.weighting = 1
return job
def initialize(self):
# no longer supported
return -5
def check(self):
# no longer supported
return -5
def createCheckJob(self):
job = Task.Job(_("Checking filesystem..."))
if self.findMount():
# Create unmount task if it was not mounted
UnmountTask(job, self)
dev = self.mount_device
else:
# otherwise, assume there is one partition
dev = self.partitionPath("1")
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext3')
task.args.append('-f')
task.args.append('-p')
task.args.append(dev)
MountTask(job, self)
task = Task.ConditionTask(job, _("Waiting for mount"))
task.check = self.mountDevice
return job
def createExt4ConversionJob(self):
if not isFileSystemSupported('ext4'):
raise Exception, _("[Harddisk] You system does not support ext4")
job = Task.Job(_("Converting ext3 to ext4..."))
if not os.path.exists('/sbin/tune2fs'):
addInstallTask(job, 'e2fsprogs-tune2fs')
if self.findMount():
# Create unmount task if it was not mounted
UnmountTask(job, self)
dev = self.mount_device
else:
# otherwise, assume there is one partition
dev = self.partitionPath("1")
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext3')
task.args.append('-p')
task.args.append(dev)
task = Task.LoggingTask(job, "tune2fs")
task.setTool('tune2fs')
task.args.append('-O')
task.args.append('extents,uninit_bg,dir_index')
task.args.append('-o')
task.args.append('journal_data_writeback')
task.args.append(dev)
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext4')
task.postconditions = [] # ignore result, it will always "fail"
task.args.append('-f')
task.args.append('-p')
task.args.append('-D')
task.args.append(dev)
MountTask(job, self)
task = Task.ConditionTask(job, _("Waiting for mount"))
task.check = self.mountDevice
return job
def getDeviceDir(self):
return self.dev_path
def getDeviceName(self):
return self.disk_path
# the HDD idle poll daemon.
# as some harddrives have a buggy standby timer, we are doing this by hand here.
# first, we disable the hardware timer. then, we check every now and then if
# any access has been made to the disc. If there has been no access over a specified time,
# we set the hdd into standby.
def readStats(self):
try:
l = open("/sys/block/%s/stat" % self.device).read()
except IOError:
return -1,-1
data = l.split(None,5)
return (int(data[0]), int(data[4]))
def startIdle(self):
from enigma import eTimer
# disable HDD standby timer
if self.bus() == _("External"):
Console().ePopen(("sdparm", "sdparm", "--set=SCT=0", self.disk_path))
else:
Console().ePopen(("hdparm", "hdparm", "-S0", self.disk_path))
self.timer = eTimer()
self.timer.callback.append(self.runIdle)
self.idle_running = True
self.setIdleTime(self.max_idle_time) # kick the idle polling loop
def runIdle(self):
if not self.max_idle_time:
return
t = time.time()
idle_time = t - self.last_access
stats = self.readStats()
l = sum(stats)
if l != self.last_stat and l >= 0: # access
self.last_stat = l
self.last_access = t
idle_time = 0
self.is_sleeping = False
if idle_time >= self.max_idle_time and not self.is_sleeping:
self.setSleep()
self.is_sleeping = True
def setSleep(self):
if self.bus() == _("External"):
Console().ePopen(("sdparm", "sdparm", "--flexible", "--readonly", "--command=stop", self.disk_path))
else:
Console().ePopen(("hdparm", "hdparm", "-y", self.disk_path))
def setIdleTime(self, idle):
self.max_idle_time = idle
if self.idle_running:
if not idle:
self.timer.stop()
else:
self.timer.start(idle * 100, False) # poll 10 times per period.
def isSleeping(self):
return self.is_sleeping
class Partition:
# for backward compatibility, force_mounted actually means "hotplug"
def __init__(self, mountpoint, device = None, description = "", force_mounted = False):
self.mountpoint = mountpoint
self.description = description
self.force_mounted = mountpoint and force_mounted
self.is_hotplug = force_mounted # so far; this might change.
self.device = device
def __str__(self):
return "Partition(mountpoint=%s,description=%s,device=%s)" % (self.mountpoint,self.description,self.device)
def stat(self):
if self.mountpoint:
return os.statvfs(self.mountpoint)
else:
raise OSError, "Device %s is not mounted" % self.device
def free(self):
try:
s = self.stat()
return s.f_bavail * s.f_bsize
except OSError:
return None
def total(self):
try:
s = self.stat()
return s.f_blocks * s.f_bsize
except OSError:
return None
def tabbedDescription(self):
if self.mountpoint.startswith('/media/net') or self.mountpoint.startswith('/media/autofs'):
# Network devices have a user defined name
return self.description
return self.description + '\t' + self.mountpoint
def mounted(self, mounts = None):
# THANK YOU PYTHON FOR STRIPPING AWAY f_fsid.
# TODO: can os.path.ismount be used?
if self.force_mounted:
return True
if self.mountpoint:
if mounts is None:
mounts = getProcMounts()
for parts in mounts:
if self.mountpoint.startswith(parts[1]): # use startswith so a mount not ending with '/' is also detected.
return True
return False
def filesystem(self, mounts = None):
if self.mountpoint:
if mounts is None:
mounts = getProcMounts()
for fields in mounts:
if self.mountpoint.endswith('/') and not self.mountpoint == '/':
if fields[1] + '/' == self.mountpoint:
return fields[2]
else:
if fields[1] == self.mountpoint:
return fields[2]
return ''
def addInstallTask(job, package):
task = Task.LoggingTask(job, "update packages")
task.setTool('opkg')
task.args.append('update')
task = Task.LoggingTask(job, "Install " + package)
task.setTool('opkg')
task.args.append('install')
task.args.append(package)
class HarddiskManager:
def __init__(self):
self.hdd = [ ]
self.cd = ""
self.partitions = [ ]
self.devices_scanned_on_init = [ ]
self.on_partition_list_change = CList()
self.enumerateBlockDevices()
# Find stuff not detected by the enumeration
p = (
("/media/hdd", _("Hard disk")),
("/media/card", _("Card")),
("/media/cf", _("Compact flash")),
("/media/mmc1", _("MMC card")),
("/media/net", _("Network mount")),
("/media/net1", _("Network mount %s") % ("1")),
("/media/net2", _("Network mount %s") % ("2")),
("/media/net3", _("Network mount %s") % ("3")),
("/media/ram", _("Ram disk")),
("/media/usb", _("USB stick")),
("/", _("Internal flash"))
)
known = set([os.path.normpath(a.mountpoint) for a in self.partitions if a.mountpoint])
for m,d in p:
if (m not in known) and os.path.ismount(m):
self.partitions.append(Partition(mountpoint=m, description=d))
def getBlockDevInfo(self, blockdev):
devpath = "/sys/block/" + blockdev
error = False
removable = False
blacklisted = False
is_cdrom = False
partitions = []
try:
if os.path.exists(devpath + "/removable"):
removable = bool(int(readFile(devpath + "/removable")))
if os.path.exists(devpath + "/dev"):
dev = int(readFile(devpath + "/dev").split(':')[0])
else:
dev = None
blacklisted = dev in [1, 7, 31, 253, 254] + (SystemInfo["HasMMC"] and [179] or []) #ram, loop, mtdblock, romblock, ramzswap, mmc
if blockdev[0:2] == 'sr':
is_cdrom = True
if blockdev[0:2] == 'hd':
try:
media = readFile("/proc/ide/%s/media" % blockdev)
if "cdrom" in media:
is_cdrom = True
except IOError:
error = True
# check for partitions
if not is_cdrom and os.path.exists(devpath):
for partition in os.listdir(devpath):
if partition[0:len(blockdev)] != blockdev:
continue
partitions.append(partition)
else:
self.cd = blockdev
except IOError:
error = True
# check for medium
medium_found = True
try:
open("/dev/" + blockdev).close()
except IOError, err:
if err.errno == 159: # no medium present
medium_found = False
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def enumerateBlockDevices(self):
print "[Harddisk] enumerating block devices..."
for blockdev in os.listdir("/sys/block"):
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.addHotplugPartition(blockdev)
if not error and not blacklisted and medium_found:
for part in partitions:
self.addHotplugPartition(part)
self.devices_scanned_on_init.append((blockdev, removable, is_cdrom, medium_found))
def getAutofsMountpoint(self, device):
r = self.getMountpoint(device)
if r is None:
return "/media/" + device
return r
def getMountpoint(self, device):
dev = "/dev/%s" % device
for item in getProcMounts():
if item[0] == dev:
return item[1]
return None
def addHotplugPartition(self, device, physdev = None):
# device is the device name, without /dev
# physdev is the physical device path, which we (might) use to determine the userfriendly name
if not physdev:
dev, part = self.splitDeviceName(device)
try:
physdev = os.path.realpath('/sys/block/' + dev + '/device')[4:]
except OSError:
physdev = dev
print "[Harddisk] couldn't determine blockdev physdev for device", device
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.getBlockDevInfo(device)
if not blacklisted and medium_found:
description = self.getUserfriendlyDeviceName(device, physdev)
p = Partition(mountpoint = self.getMountpoint(device), description = description, force_mounted = True, device = device)
self.partitions.append(p)
if p.mountpoint: # Plugins won't expect unmounted devices
self.on_partition_list_change("add", p)
# see if this is a harddrive
l = len(device)
if l and (not device[l-1].isdigit() or device == 'mmcblk0'):
self.hdd.append(Harddisk(device, removable))
self.hdd.sort()
SystemInfo["Harddisk"] = True
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def addHotplugAudiocd(self, device, physdev = None):
# device is the device name, without /dev
# physdev is the physical device path, which we (might) use to determine the userfriendly name
if not physdev:
dev, part = self.splitDeviceName(device)
try:
physdev = os.path.realpath('/sys/block/' + dev + '/device')[4:]
except OSError:
physdev = dev
print "couldn't determine blockdev physdev for device", device
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.getBlockDevInfo(device)
if not blacklisted and medium_found:
description = self.getUserfriendlyDeviceName(device, physdev)
p = Partition(mountpoint = "/media/audiocd", description = description, force_mounted = True, device = device)
self.partitions.append(p)
self.on_partition_list_change("add", p)
SystemInfo["Harddisk"] = False
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def removeHotplugPartition(self, device):
for x in self.partitions[:]:
if x.device == device:
self.partitions.remove(x)
if x.mountpoint: # Plugins won't expect unmounted devices
self.on_partition_list_change("remove", x)
l = len(device)
if l and not device[l-1].isdigit():
for hdd in self.hdd:
if hdd.device == device:
hdd.stop()
self.hdd.remove(hdd)
break
SystemInfo["Harddisk"] = len(self.hdd) > 0
def HDDCount(self):
return len(self.hdd)
def HDDList(self):
list = [ ]
for hd in self.hdd:
hdd = hd.model() + " - " + hd.bus()
cap = hd.capacity()
if cap != "":
hdd += " (" + cap + ")"
list.append((hdd, hd))
return list
def getCD(self):
return self.cd
def getMountedPartitions(self, onlyhotplug = False, mounts=None):
if mounts is None:
mounts = getProcMounts()
parts = [x for x in self.partitions if (x.is_hotplug or not onlyhotplug) and x.mounted(mounts)]
devs = set([x.device for x in parts])
for devname in devs.copy():
if not devname:
continue
dev, part = self.splitDeviceName(devname)
if part and dev in devs: # if this is a partition and we still have the wholedisk, remove wholedisk
devs.remove(dev)
# return all devices which are not removed due to being a wholedisk when a partition exists
return [x for x in parts if not x.device or x.device in devs]
def splitDeviceName(self, devname):
# this works for: sdaX, hdaX, sr0 (which is in fact dev="sr0", part=""). It doesn't work for other names like mtdblock3, but they are blacklisted anyway.
dev = devname[:3]
part = devname[3:]
for p in part:
if not p.isdigit():
return devname, 0
return dev, part and int(part) or 0
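# Editor's note (illustrative, not from the original source), tracing the
# method above:
#   splitDeviceName("sda1")      -> ("sda", 1)
#   splitDeviceName("sr0")       -> ("sr0", 0)
#   splitDeviceName("mtdblock3") -> ("mtdblock3", 0)  # non-digit tail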
def getUserfriendlyDeviceName(self, dev, phys):
print "[Harddisk] device: ", dev
print "[Harddisk] physical: ", phys
dev, part = self.splitDeviceName(dev)
description = _("External Storage %s") % dev
try:
description = readFile("/sys" + phys + "/model")
except IOError, s:
print "[Harddisk] couldn't read model: ", s
# not wholedisk and not partition 1
if part and part != 1:
description += _(" (Partition %d)") % part
print "[Harddisk] description: ", description
return description
def addMountedPartition(self, device, desc):
for x in self.partitions:
if x.mountpoint == device:
#already_mounted
return
self.partitions.append(Partition(mountpoint=device, description=desc))
def removeMountedPartition(self, mountpoint):
for x in self.partitions[:]:
if x.mountpoint == mountpoint:
self.partitions.remove(x)
self.on_partition_list_change("remove", x)
def setDVDSpeed(self, device, speed = 0):
ioctl_flag=int(0x5322)
if not device.startswith('/'):
device = "/dev/" + device
try:
from fcntl import ioctl
cd = open(device)
ioctl(cd.fileno(), ioctl_flag, speed)
cd.close()
except Exception, ex:
print "[Harddisk] Failed to set %s speed to %s" % (device, speed), ex
class UnmountTask(Task.LoggingTask):
def __init__(self, job, hdd):
Task.LoggingTask.__init__(self, job, _("Unmount"))
self.hdd = hdd
self.mountpoints = []
def prepare(self):
try:
dev = self.hdd.disk_path.split('/')[-1]
open('/dev/nomount.%s' % dev, "wb").close()
except Exception, e:
print "[Harddisk] ERROR: Failed to create /dev/nomount file:", e
self.setTool('umount')
self.args.append('-f')
for dev in self.hdd.enumMountDevices():
self.args.append(dev)
self.postconditions.append(Task.ReturncodePostcondition())
self.mountpoints.append(dev)
if not self.mountpoints:
print "[Harddisk] UnmountTask: No mountpoints found?"
self.cmd = 'true'
self.args = [self.cmd]
def afterRun(self):
for path in self.mountpoints:
try:
os.rmdir(path)
except Exception, ex:
print "[Harddisk] Failed to remove path '%s':" % path, ex
class MountTask(Task.LoggingTask):
def __init__(self, job, hdd):
Task.LoggingTask.__init__(self, job, _("Mount"))
self.hdd = hdd
def prepare(self):
try:
dev = self.hdd.disk_path.split('/')[-1]
os.unlink('/dev/nomount.%s' % dev)
except Exception, e:
print "[Harddisk] ERROR: Failed to remove /dev/nomount file:", e
# try mounting through fstab first
if self.hdd.mount_device is None:
dev = self.hdd.partitionPath("1")
else:
# if previously mounted, use the same spot
dev = self.hdd.mount_device
fstab = open("/etc/fstab")
lines = fstab.readlines()
fstab.close()
for line in lines:
parts = line.strip().split(" ")
fspath = os.path.realpath(parts[0])
if os.path.realpath(fspath) == dev:
self.setCmdline("mount -t auto " + fspath)
self.postconditions.append(Task.ReturncodePostcondition())
return
# device is not in fstab
if self.hdd.type == DEVTYPE_UDEV:
# we can let udev do the job, re-read the partition table
# Sorry for the sleep 2 hack...
self.setCmdline('sleep 2; hdparm -z ' + self.hdd.disk_path)
self.postconditions.append(Task.ReturncodePostcondition())
class MkfsTask(Task.LoggingTask):
def prepare(self):
self.fsck_state = None
def processOutput(self, data):
print "[Mkfs]", data
if 'Writing inode tables:' in data:
self.fsck_state = 'inode'
elif 'Creating journal' in data:
self.fsck_state = 'journal'
self.setProgress(80)
elif 'Writing superblocks ' in data:
self.setProgress(95)
elif self.fsck_state == 'inode':
if '/' in data:
try:
d = data.strip(' \x08\r\n').split('/',1)
if '\x08' in d[1]:
d[1] = d[1].split('\x08',1)[0]
self.setProgress(80*int(d[0])/int(d[1]))
except Exception, e:
print "[Mkfs] E:", e
return # don't log the progress
self.log.append(data)
harddiskmanager = HarddiskManager()
def isSleepStateDevice(device):
ret = os.popen("hdparm -C %s" % device).read()
if 'SG_IO' in ret or 'HDIO_DRIVE_CMD' in ret:
return None
if 'drive state is: standby' in ret or 'drive state is: idle' in ret:
return True
elif 'drive state is: active/idle' in ret:
return False
return None
def internalHDDNotSleeping(external=False):
state = False
if harddiskmanager.HDDCount():
for hdd in harddiskmanager.HDDList():
if hdd[1].internal or external:
if hdd[1].idle_running and hdd[1].max_idle_time and not hdd[1].isSleeping():
state = True
return state
SystemInfo["ext4"] = isFileSystemSupported("ext4")
| gpl-2.0 | 3,402,724,838,510,467,000 | 28.67094 | 155 | 0.662214 | false |
localmed/django-assetfiles | assetfiles/filters/sass.py | 1 | 4949 | from __future__ import unicode_literals
import os
from django.conf import settings
from django.contrib.staticfiles.finders import find
from assetfiles.filters import BaseFilter, CommandMixin, ExtensionMixin
import assetfiles.settings
from assetfiles.exceptions import SassFilterError
class SassFilter(ExtensionMixin, CommandMixin, BaseFilter):
"""
Filters Sass files into CSS.
Attributes:
sass_path: The full path to the Sass command. This defaults to a
customized binstub that allows for better Bundler integration.
functions_path: The full path to the Sass extension functions for
Django integration. Set to None or False to bypass adding
these functions.
"""
SCRIPTS_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../scripts'))
input_exts = ('sass', 'scss')
output_ext = 'css'
sass_path = 'sass'
sass_env_path = os.path.join(SCRIPTS_PATH, 'sass_env.rb')
sass_functions_path = os.path.join(SCRIPTS_PATH, 'sass_functions.rb')
def __init__(self, options=None, *args, **kwargs):
super(SassFilter, self).__init__(*args, **kwargs)
if options is None:
options = {}
sass_options = assetfiles.settings.SASS_OPTIONS
self.sass_path = options.get(
'sass_path',
sass_options.get('sass_path', self.sass_path)
)
self.sass_env_path = options.get(
'sass_env_path',
sass_options.get('sass_env_path', self.sass_env_path)
)
self.sass_functions_path = options.get(
'sass_functions_path',
sass_options.get('sass_functions_path', self.sass_functions_path)
)
options['compass'] = options.get(
'compass',
sass_options.get('compass', self._detect_compass())
)
for option in ('style', 'precision', 'quiet', 'debug_info',
'line_numbers', 'cache_location', 'no_cache'):
if option not in options:
options[option] = sass_options.get(option)
options['require'] = (
sass_options.get('require', []) +
options.get('require', [])
)
if self.sass_functions_path:
options['require'].insert(0, self.sass_functions_path)
if self.sass_env_path:
options['require'].insert(0, self.sass_env_path)
options['load_paths'] = (
sass_load_paths +
sass_options.get('load_paths', []) +
options.get('load_paths', [])
)
self.options = options
def filter(self, input):
command = '{command} {args} {input}'.format(
command=self.sass_path,
args=self._build_args(),
input=self.format_option_value(input),
)
return self.run_command(
command,
extra_env={'DJANGO_STATIC_URL': settings.STATIC_URL},
exception_type=SassFilterError
)
def is_filterable(self, output_path):
"""
Skips files prefixed with a '_'. These are Sass dependencies.
"""
_, file_name = os.path.split(output_path)
return not file_name.startswith('_')
def _build_args(self):
"""
Returns the space-joined argument string for the Sass command.
"""
args = []
args += self.format_option_array('require', self.options['require'])
args += self.format_option_array('load_path', self.options['load_paths'])
value_options = ('style', 'precision', 'cache_location')
for option in value_options:
if self.options[option]:
args.append(self.format_option(option, self.options[option]))
bool_options = ('quiet', 'compass', 'debug_info',
'line_numbers', 'no_cache')
for option in bool_options:
if self.options[option]:
args.append(self.format_option_name(option))
return ' '.join(args)
def _detect_compass(self):
"""
Returns true if Compass integration is available.
"""
return os.system('which compass > /dev/null') == 0
def get_static_sass_dirs(dirs=None):
"""
Returns the directories with Sass files within the static directories.
Args:
dirs: A list or tuple of directory names that contain Sass files.
Can be configured with the ASSETFILES_SASS_DIRS setting, which by
default is `('css',)`
Returns:
A list of directory paths containing Sass files.
"""
if not dirs:
dirs = assetfiles.settings.SASS_DIRS
load_paths = []
for dir in dirs:
load_paths += find(dir, all=True) or []
return load_paths
"""
Directories that will be added to the Sass load path.
By default, these are 'css' directories within the static directories.
"""
sass_load_paths = get_static_sass_dirs()
| mit | 3,942,540,000,699,276,300 | 31.136364 | 81 | 0.589614 | false |
greenbender/pynntp | nntp/iodict.py | 1 | 3453 | """
Case-insentitive ordered dictionary (useful for headers).
Copyright (C) 2013-2020 Byron Platt
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from collections import OrderedDict, namedtuple
try:
from collections.abc import MutableMapping, Mapping
except ImportError:
from collections import MutableMapping, Mapping
from .polyfill import cached_property
__all__ = ['IODict']
class IKey(object):
def __init__(self, orig):
self.orig = orig
@classmethod
def _uncase(cls, value):
if hasattr(value, 'casefold'):
return value.casefold()
if hasattr(value, 'lower'):
return value.lower()
if isinstance(value, tuple):
return tuple(cls._uncase(v) for v in value)
return value
@cached_property
def value(self):
return self._uncase(self.orig)
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if not isinstance(other, IKey):
return self == IKey(other)
return self.value == other.value
def __repr__(self):
return repr(self.orig)
def __str__(self):
return str(self.orig)
class IODict(MutableMapping):
"""Case in-sensitive ordered dictionary.
>>> iod = IODict([('ABC', 1), ('DeF', 'A'), (('gHi', 'jkl', 20), 'b')])
>>> iod['ABC'], iod['abc'], iod['aBc']
(1, 1, 1)
>>> iod['DeF'], iod['def'], iod['dEf']
('A', 'A', 'A')
>>> iod[('gHi', 'jkl', 20)], iod[('ghi', 'jKL', 20)]
('b', 'b')
>>> iod == {'aBc': 1, 'deF': 'A', ('Ghi', 'JKL', 20): 'b'}
True
>>> iod.popitem()
(('gHi', 'jkl', 20), 'b')
"""
def __init__(self, *args, **kwargs):
self.__proxy = OrderedDict()
for arg in args:
self.update(arg)
self.update(kwargs)
def __getitem__(self, key):
return self.__proxy[IKey(key)]
def __setitem__(self, key, value):
self.__proxy[IKey(key)] = value
def __delitem__(self, key):
del self.__proxy[IKey(key)]
def __iter__(self):
for key in self.__proxy:
yield key.orig
def __len__(self):
return len(self.__proxy)
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
if not isinstance(other, IODict):
return self == IODict(other)
return self.__proxy == other.__proxy
def __repr__(self):
clsname = type(self).__name__
return '%s(%r)' % (clsname, list(self.__proxy.items()))
def keys(self):
for key in self.__proxy:
yield key.orig
def items(self):
for key in self.__proxy:
yield key.orig, self[key.orig]
def popitem(self):
key, value = self.__proxy.popitem()
return key.orig, value
if __name__ == "__main__":
import doctest
doctest.testmod()
| gpl-3.0 | 4,563,081,144,591,528,400 | 25.976563 | 75 | 0.585578 | false |
InsightSoftwareConsortium/ITKExamples | src/Filtering/MathematicalMorphology/ErodeAGrayscaleImage/Code.py | 1 | 1553 | #!/usr/bin/env python
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import itk
itk.auto_progress(2)
if len(sys.argv) != 4:
print("Usage: " + sys.argv[0] + " <inputImage> <outputImage> <radius>")
sys.exit(1)
inputImage = sys.argv[1]
outputImage = sys.argv[2]
radiusValue = int(sys.argv[3])
PixelType = itk.UC
Dimension = 2
ImageType = itk.Image[PixelType, Dimension]
ReaderType = itk.ImageFileReader[ImageType]
reader = ReaderType.New()
reader.SetFileName(inputImage)
StructuringElementType = itk.FlatStructuringElement[Dimension]
structuringElement = StructuringElementType.Ball(radiusValue)
GrayscaleFilterType = itk.GrayscaleErodeImageFilter[
ImageType, ImageType, StructuringElementType
]
grayscaleFilter = GrayscaleFilterType.New()
grayscaleFilter.SetInput(reader.GetOutput())
grayscaleFilter.SetKernel(structuringElement)
WriterType = itk.ImageFileWriter[ImageType]
writer = WriterType.New()
writer.SetFileName(outputImage)
writer.SetInput(grayscaleFilter.GetOutput())
writer.Update()
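# Illustrative invocation (added annotation, not part of the original
# example); the file names are hypothetical:
#   python Code.py input.png eroded.png 3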
| apache-2.0 | 1,056,718,304,177,390,500 | 27.759259 | 75 | 0.773342 | false |
mganeva/mantid | scripts/Muon/GUI/Common/muon_data_context.py | 1 | 20168 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import Muon.GUI.Common.utilities.load_utils as load_utils
import Muon.GUI.Common.utilities.xml_utils as xml_utils
from Muon.GUI.Common.ADSHandler.muon_workspace_wrapper import MuonWorkspaceWrapper
from Muon.GUI.Common.muon_group import MuonGroup
from Muon.GUI.Common.muon_pair import MuonPair
from Muon.GUI.Common.muon_load_data import MuonLoadData
from Muon.GUI.Common.utilities.run_string_utils import run_list_to_string
from Muon.GUI.Common.ADSHandler.workspace_naming import (get_raw_data_workspace_name, get_group_data_workspace_name,
get_pair_data_workspace_name, get_base_data_directory,
get_raw_data_directory, get_group_data_directory,
get_pair_data_directory, get_group_asymmetry_name)
from Muon.GUI.Common.calculate_pair_and_group import calculate_group_data, calculate_pair_data, estimate_group_asymmetry_data
from Muon.GUI.Common.utilities.muon_file_utils import allowed_instruments
from collections import OrderedDict
from mantid.api import WorkspaceGroup
from mantid.kernel import ConfigServiceImpl, ConfigService
from Muon.GUI.Common.observer_pattern import Observable
def get_default_grouping(workspace, instrument, main_field_direction):
parameter_name = "Default grouping file"
if instrument == "MUSR" or instrument == 'CHRONUS':
parameter_name += " - " + main_field_direction
try:
if isinstance(workspace, WorkspaceGroup):
grouping_file = workspace[0].getInstrument().getStringParameter(parameter_name)[0]
else:
grouping_file = workspace.getInstrument().getStringParameter(parameter_name)[0]
except IndexError:
return [], []
instrument_directory = ConfigServiceImpl.Instance().getInstrumentDirectory()
filename = instrument_directory + grouping_file
new_groups, new_pairs, description = xml_utils.load_grouping_from_XML(filename)
return new_groups, new_pairs
def construct_empty_group(group_names, group_index=0):
"""
Create an empty MuonGroup appropriate for adding to the current grouping table.
"""
new_group_name = "group_" + str(group_index)
while new_group_name in group_names:
# modify the name until it is unique
group_index += 1
new_group_name = "group_" + str(group_index)
return MuonGroup(group_name=new_group_name, detector_ids=[1])
def construct_empty_pair(group_names, pair_names, pair_index=0):
"""
Create an empty MuonPair appropriate for adding to the current pairing table.
"""
new_pair_name = "pair_" + str(pair_index)
while new_pair_name in pair_names:
# modify the name until it is unique
pair_index += 1
new_pair_name = "pair_" + str(pair_index)
if len(group_names) == 1:
group1 = group_names[0]
group2 = group_names[0]
elif len(group_names) >= 2:
group1 = group_names[0]
group2 = group_names[1]
else:
group1 = None
group2 = None
return MuonPair(pair_name=new_pair_name,
forward_group_name=group1, backward_group_name=group2, alpha=1.0)
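# Illustrative sketch (added annotation, not part of the original module) of
# how the two helpers above pick unique default names:
#   construct_empty_group(['group_0', 'group_1']) -> MuonGroup 'group_2'
#   construct_empty_pair(['fwd', 'bwd'], ['pair_0']) -> MuonPair 'pair_1'
#                                                      pairing 'fwd'/'bwd'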
class MuonDataContext(object):
"""
The MuonContext is the core class for the MuonAnalysis 2 interface. It stores all the data and parameters used
in the interface and serves as the model part of the MVP design pattern for every widget in the interface.
By sharing a common instance of this class, the interface remains synchronized by use of the observer pattern to
    notify subscribers of changes, who will then respond by updating their view from this commonly shared model.
The actual processing of data occurs via this class (as it should as the model).
"""
# ADS base directory for all workspaces
base_directory = "Muon Data"
def __init__(self, load_data=MuonLoadData()):
"""
Currently, only a single run is loaded into the Home/Grouping tab at once. This is held in the _current_data
member. The load widget may load multiple runs at once, these are stored in the _loaded_data member.
        Groups and Pairs associated with the current run are stored in _groups and _pairs as ordered dictionaries.
"""
self._groups = OrderedDict()
self._pairs = OrderedDict()
self._loaded_data = load_data
self._gui_variables = {'SummedPeriods': [1], 'SubtractedPeriods': []}
self._current_data = {"workspace": load_utils.empty_loaded_data()} # self.get_result(False)
self._current_runs = []
self._main_field_direction = ''
self._instrument = ConfigService.getInstrument().name() if ConfigService.getInstrument().name()\
in allowed_instruments else 'EMU'
self.instrumentNotifier = MuonDataContext.InstrumentNotifier(self)
self.message_notifier = MuonDataContext.MessageNotifier(self)
self.gui_variables_notifier = MuonDataContext.GuiVariablesNotifier(self)
def is_data_loaded(self):
return self._loaded_data.num_items() > 0
def is_multi_period(self):
return len(self.current_data["OutputWorkspace"]) > 1
@property
def current_data(self):
return self._current_data["workspace"]
@property
def instrument(self):
return self._instrument
@instrument.setter
def instrument(self, value):
ConfigService['default.instrument'] = value
self._instrument = value
self.main_field_direction = ''
self.set_groups_and_pairs_to_default()
self.instrumentNotifier.notify_subscribers(self._instrument)
@property
def current_run(self):
return self._current_data["run"]
@property
def run(self):
try:
# refer to the output of the loading widget (e.g. for co-adding)
runs = run_list_to_string(self.current_run)
except Exception:
# extract from sample logs
run_log = self.get_sample_log("run_number")
if run_log:
runs = run_log.value
else:
runs = 0
return runs
@property
def group_names(self):
return self._groups.keys()
@property
def pair_names(self):
return self._pairs.keys()
@property
def groups(self):
return self._groups
@property
def pairs(self):
return self._pairs
@property
def gui_variables(self):
return self._gui_variables
@property
def current_runs(self):
return self._current_runs
@current_runs.setter
def current_runs(self, value):
if not self.check_run_list_are_all_same_field(value):
self.message_notifier.notify_subscribers(self.create_multiple_field_directions_error_message(value))
self._current_runs = value
@property
def current_filenames(self):
current_filenames = []
for run in self.current_runs:
if self._loaded_data.get_data(run=run, instrument=self.instrument):
current_filenames.append(self._loaded_data.get_data(run=run, instrument=self.instrument)['filename'])
return current_filenames
@property
def current_workspaces(self):
current_workspaces = []
for run in self.current_runs:
current_workspaces.append(self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace'])
return current_workspaces
@property
def first_good_data(self):
if self.gui_variables['FirstGoodDataFromFile']:
return self.loaded_data(self.current_runs[-1])["FirstGoodData"]
else:
return self.gui_variables['FirstGoodData']
def add_group(self, group):
assert isinstance(group, MuonGroup)
if self.check_group_contains_valid_detectors(group):
self._groups[group.name] = group
else:
raise ValueError('Invalid detectors in group {}'.format(group.name))
def add_pair(self, pair):
assert isinstance(pair, MuonPair)
self._pairs[pair.name] = pair
def update_current_data(self):
# Update the current data; resetting the groups and pairs to their default values
if len(self.current_runs) > 0:
self._current_data = self._loaded_data.get_data(run=self.current_runs[0], instrument=self.instrument)
self.main_field_direction = self.current_data['MainFieldDirection']
if not self.groups:
self.set_groups_and_pairs_to_default()
else:
self._current_data = {"workspace": load_utils.empty_loaded_data()}
def loaded_data(self, run):
loaded_dict = self._loaded_data.get_data(run=run, instrument=self.instrument)
if loaded_dict:
return self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace']
else:
return None
@property
def loaded_workspace(self):
return self.current_data["OutputWorkspace"][0].workspace
def loaded_workspace_as_group(self, run):
if self.is_multi_period():
workspace_group = WorkspaceGroup()
for workspace_wrapper in self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace']['OutputWorkspace']:
workspace_group.addWorkspace(workspace_wrapper.workspace)
return workspace_group
else:
return self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace']['OutputWorkspace'][0].workspace
def period_string(self, run=None):
summed_periods = self.gui_variables["SummedPeriods"] if 'SummedPeriods' in self.gui_variables else [1]
subtracted_periods = self.gui_variables["SubtractedPeriods"] if 'SubtractedPeriods' in self.gui_variables else []
if subtracted_periods:
return '+'.join([str(period) for period in summed_periods]) + '-' + '-'.join([str(period) for period in subtracted_periods])
else:
return '+'.join([str(period) for period in summed_periods])
@property
def num_detectors(self):
try:
n_det = self.loaded_workspace.detectorInfo().size()
except AttributeError:
# default to 1
n_det = 1
return n_det
def num_periods(self, run):
return len(self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace']['OutputWorkspace'])
@property
def main_field_direction(self):
return self._main_field_direction
@main_field_direction.setter
def main_field_direction(self, value):
if value and value != self._main_field_direction and self._main_field_direction:
self.message_notifier.notify_subscribers('MainFieldDirection has changed between'
' data sets, click default to reset grouping if required')
self._main_field_direction = value
@property
def dead_time_table(self):
return self.current_data["DeadTimeTable"]
def get_sample_logs(self):
logs = None
try:
logs = self.loaded_workspace.getSampleDetails()
except Exception:
print("Cannot find sample logs")
return logs
def get_sample_log(self, log_name):
logs = self.get_sample_logs()
try:
log = logs.getLogData(log_name)
except Exception:
log = None
return log
# ------------------------------------------------------------------------------------------------------------------
# Clearing data
# ------------------------------------------------------------------------------------------------------------------
def clear_groups(self):
self._groups = OrderedDict()
def clear_pairs(self):
self._pairs = OrderedDict()
def clear(self):
self.clear_groups()
self.clear_pairs()
self._current_data = {"workspace": load_utils.empty_loaded_data()}
def _base_run_name(self, run=None):
""" e.g. EMU0001234 """
if not run:
run = self.run
if isinstance(run, int):
return str(self.instrument) + str(run)
else:
return str(self.instrument) + run
# ------------------------------------------------------------------------------------------------------------------
# Showing workspaces in the ADS
# ------------------------------------------------------------------------------------------------------------------
def show_raw_data(self):
for run in self.current_runs:
run_string = run_list_to_string(run)
loaded_workspace = self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace']['OutputWorkspace']
directory = get_base_data_directory(self, run_string) + get_raw_data_directory(self, run_string)
if len(loaded_workspace) > 1:
# Multi-period data
for i, single_ws in enumerate(loaded_workspace):
name = directory + get_raw_data_workspace_name(self, run_string, period=str(i + 1))
single_ws.show(name)
else:
# Single period data
name = directory + get_raw_data_workspace_name(self, run_string)
loaded_workspace[0].show(name)
def show_all_groups(self):
for group_name in self._groups.keys():
self.show_group_data(group_name)
if self.do_rebin():
for group_name in self._groups.keys():
self.show_group_data(group_name, rebin=True)
def show_group_data(self, group_name, show=True, rebin=False):
for run in self.current_runs:
run_as_string = run_list_to_string(run)
group_workspace = calculate_group_data(self, group_name, run, rebin)
group_asymmetry = estimate_group_asymmetry_data(self, group_name, run, rebin)
directory = get_base_data_directory(self, run_as_string) + get_group_data_directory(self, run_as_string)
name = get_group_data_workspace_name(self, group_name, run_as_string, rebin)
asym_name = get_group_asymmetry_name(self, group_name, run_as_string, rebin)
if not rebin:
self._groups[group_name]._workspace[str(run)] = MuonWorkspaceWrapper(group_workspace)
self._groups[group_name]._asymmetry_estimate[str(run)] = MuonWorkspaceWrapper(group_asymmetry)
if show:
self._groups[group_name].workspace[str(run)].show(directory + name)
self._groups[group_name]._asymmetry_estimate[str(run)].show(directory + asym_name)
else:
self._groups[group_name]._workspace_rebin[str(run)] = MuonWorkspaceWrapper(group_workspace)
self._groups[group_name]._asymmetry_estimate_rebin[str(run)] = MuonWorkspaceWrapper(group_asymmetry)
if show:
self._groups[group_name]._workspace_rebin[str(run)].show(directory + name)
self._groups[group_name]._asymmetry_estimate_rebin[str(run)].show(directory + asym_name)
def show_all_pairs(self):
for pair_name in self._pairs.keys():
self.show_pair_data(pair_name)
if self.do_rebin():
for pair_name in self._pairs.keys():
self.show_pair_data(pair_name, rebin=True)
def show_pair_data(self, pair_name, show=True, rebin=False):
for run in self.current_runs:
run_as_string = run_list_to_string(run)
name = get_pair_data_workspace_name(self, pair_name, run_as_string, rebin)
directory = get_base_data_directory(self, run_as_string) + get_pair_data_directory(self, run_as_string)
pair_workspace = calculate_pair_data(self, pair_name, run, rebin)
if not rebin:
self._pairs[pair_name].workspace[str(run)] = MuonWorkspaceWrapper(pair_workspace)
if show:
self._pairs[pair_name].workspace[str(run)].show(directory + name)
else:
self._pairs[pair_name].workspace_rebin[str(run)] = MuonWorkspaceWrapper(pair_workspace)
if show:
self._pairs[pair_name].workspace_rebin[str(run)].show(directory + name)
def calculate_all_groups(self):
for group_name in self._groups.keys():
calculate_group_data(self, group_name)
def set_groups_and_pairs_to_default(self):
groups, pairs = get_default_grouping(self.loaded_workspace, self.instrument, self.main_field_direction)
self.clear_groups()
for group in groups:
self.add_group(group)
self.clear_pairs()
for pair in pairs:
self.add_pair(pair)
def check_group_contains_valid_detectors(self, group):
if max(group.detectors) > self.num_detectors or min(group.detectors) < 1:
return False
else:
return True
def add_or_replace_gui_variables(self, **kwargs):
if all([key in self._gui_variables and self._gui_variables[key] == kwargs[key] for key in kwargs.keys()]) and kwargs:
return
self._gui_variables.update(kwargs)
self.gui_variables_notifier.notify_subscribers()
def do_rebin(self):
return (self.gui_variables['RebinType'] == 'Fixed' and
'RebinFixed' in self.gui_variables and self.gui_variables['RebinFixed']) or\
(self.gui_variables['RebinType'] == 'Variable' and
'RebinVariable' in self.gui_variables and self.gui_variables['RebinVariable'])
def check_run_list_are_all_same_field(self, run_list):
if not run_list:
return True
first_field = self._loaded_data.get_main_field_direction(run=run_list[0], instrument=self.instrument)
return all(first_field==self._loaded_data.get_main_field_direction(run=run, instrument=self.instrument)
for run in run_list)
def create_multiple_field_directions_error_message(self, run_list):
transverse = []
longitudinal = []
for run in run_list:
field_direction = self._loaded_data.get_main_field_direction(run=run, instrument=self.instrument)
if field_direction.lower() == 'transverse':
transverse += run
elif field_direction.lower() == 'longitudinal':
longitudinal += run
else:
return 'Unrecognised field direction {} for run {}'.format(field_direction, run)
message = 'MainFieldDirection changes within current run set:\n'
message += 'transverse field runs {}\n'.format(run_list_to_string(transverse))
message += 'longitudinal field runs {}\n'.format(run_list_to_string(longitudinal))
return message
class InstrumentNotifier(Observable):
def __init__(self, outer):
Observable.__init__(self)
self.outer = outer # handle to containing class
def notify_subscribers(self, *args, **kwargs):
Observable.notify_subscribers(self, *args)
class MessageNotifier(Observable):
def __init__(self, outer):
Observable.__init__(self)
self.outer = outer # handle to containing class
def notify_subscribers(self, *args, **kwargs):
Observable.notify_subscribers(self, *args)
class GuiVariablesNotifier(Observable):
def __init__(self, outer):
Observable.__init__(self)
self.outer = outer # handle to containing class
def notify_subscribers(self, *args, **kwargs):
Observable.notify_subscribers(self, *args)
| gpl-3.0 | 6,414,852,969,859,135,000 | 40.669421 | 136 | 0.61662 | false |
andrewgailey/robogen | robogen/rgkit/backup bots/SfparI.py | 1 | 19116 | ##################################
## ##
## ____ __ ##
## / ___| / _|_ __ __ _ _ __ ##
## \___ \| |_| '_ \ / _` | '__| ##
## ___) | _| |_) | (_| | | ##
## |____/|_| | .__/ \__,_|_| ##
## |_| ##
## ##
## by Spferical ##
## ##
## Feel free to modify/improve! ##
## ##
##################################
import rg
# global variable to store the future moves of each ally robot
# we can use this to avoid friendly collisions
future_moves = []
future_attacks = []
# this is used to store the current turn considered by the future_moves array
future_moves_turn = 0
def cant_easily_leave_spawn(loc, game):
"""Returns whether a bot would need 2+ moves to exit the spawn area.
(i.e. the bot is in spawn and all of the locations around it are occupied/
obstacle/invalid)"""
if 'spawn' in rg.loc_types(loc):
adjacent_locs = rg.locs_around(loc,
filter_out=['spawn', 'obstacle', 'invalid'])
all_bots = game.get('robots')
for loc in adjacent_locs:
if loc in all_bots:
adjacent_locs.remove(loc)
return (len(adjacent_locs) == 0)
# if the bot is not in spawn, then it can easily leave it
# by standing still, hehe.
return False
def bot_is_in_trouble(bot, game):
"""Returns whether a bot is in trouble.
If a bot could die in the next turn, it is in trouble."""
return could_die_in_loc(bot.hp, bot.location, bot.player_id, game)
def could_die_in_loc(hp, loc, player_id, game):
"""Returns whether or not a bot could die in a given location,
based on its hp and player_id.
Considers the number of enemy bots nearby and whether or not
the robot is standing on a spawn tile just before more will spawn."""
adjacent_bots = get_bots_next_to(loc, game)
adjacent_enemies = [b for b in adjacent_bots if b.player_id != player_id]
# each adjacent enemy can deal up to 10 damage in a turn
possible_hp_loss = len(adjacent_enemies) * 10
if possible_hp_loss >= hp:
# could die if all of the adjacent_enemies attack
return True
if 'spawn' in rg.loc_types(loc):
if game['turn'] % 10 == 0:
# next turn, if we remain on the spawn square, it could die
return True
return False
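# Added annotation (not part of the original bot): worked example of the
# damage arithmetic above - three adjacent enemies can deal up to 3 * 10 = 30
# damage in one turn, so a bot with 30 hp or less "could die" on that square;
# on a spawn tile the same holds whenever game['turn'] % 10 == 0.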
def get_weakest_bot(bots):
"""Returns the weakest bot out of a list of bots."""
assert len(bots) != 0
# bots have 50 hp max
least_hp = 51
weakest_bot = None
for bot in bots:
if bot.hp < least_hp:
weakest_bot = bot
least_hp = bot.hp
return weakest_bot
def get_bots_next_to(location, game):
"""Returns all bots next to a location."""
all_bots = game.get('robots')
bots = []
for loc in all_bots.keys():
if loc in rg.locs_around(location):
bots.append(all_bots[loc])
return bots
def get_bot_in_location(location, game):
"""Returns the bot in the given location."""
bots = game.get('robots')
if location in bots.keys():
return bots[location]
else:
return None
def is_possible_suicider(bot, game):
"""Returns whether a bot is a possible suicider based on a kinda
restrictive algorithm.
    Returns True if the total hp the bot could knock off its adjacent enemies
    (capped at 15 per enemy) is greater than the bot's own hp, there is more
    than one adjacent enemy bot, and at least one adjacent bot would die."""
# get all adjacent enemies of suicider
adjacent_bots = get_bots_next_to(bot.location, game)
for bot2 in adjacent_bots:
if bot2.player_id == bot.player_id:
adjacent_bots.remove(bot2)
# whether the total possible hp hit would outweigh the
# hp lost
if (sum([min(bot2.hp, 15) for bot2 in adjacent_bots]) > bot.hp):
if len(adjacent_bots) > 1:
for bot2 in adjacent_bots:
if bot2.hp <= 15:
return True
return False
class Robot:
def sort_bots_closest_first(self, bots):
"""Sorts a list of bots sorted closest to farthest away."""
return sorted(bots, key=lambda b: rg.wdist(self.location, b.location))
def get_enemy_bots_next_to(self, location, game):
"""Returns the enemy bots next to a location."""
enemies = []
for loc in rg.locs_around(location):
bot = get_bot_in_location(loc, game)
if (bot) and (bot.player_id != self.player_id):
enemies.append(bot)
return enemies
def get_friendlies_next_to(self, location, game):
"""Returns the friendly bots next to a location.
Note: does not return /this/ robot.(filters out any robot whose
location is equal to this robot's location)"""
friendlies = []
for loc in rg.locs_around(location):
bot = get_bot_in_location(loc, game)
if (bot) and (bot.player_id == self.player_id):
if bot.location != self.location:
friendlies.append(bot)
return friendlies
def get_adjacent_enemy_bots(self, game):
"""Returns a list of the adjacent enemy bots."""
return self.get_enemy_bots_next_to(self.location, game)
def is_suiciding_beneficial(self, game):
"""Returns whether or not the bot should suicide on this turn."""
# get the adjacent bots
adjacent_bots = self.get_adjacent_enemy_bots(game)
if (sum([min(bot.hp, 15) for bot in adjacent_bots]) > self.hp):
# see if the bot can escape to any adjacent location
for loc in rg.locs_around(self.location,
filter_out=['invalid', 'obstacle']):
# the bot can't escape to the location if there's an enemy in it
if not could_die_in_loc(self.hp, loc, self.player_id, game):
bot_in_loc = get_bot_in_location(loc, game)
if bot_in_loc and bot_in_loc.player_id != self.player_id:
continue
else:
return False
return True
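    # Added annotation (not part of the original bot): suicide is chosen only
    # when the damage dealt to adjacent enemies (capped at 15 hp each) would
    # exceed this bot's remaining hp and no adjacent square offers a safe,
    # unoccupied escape.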
def get_distance_to_closest_bot(self, game, loc=None,
friendly=False, enemy=False):
"""Returns the distance from the given location (or, by default,
this robot's location) to the nearest enemy."""
if not loc: loc = self.location
bots = game.get('robots')
shortest_distance = 99999
for bot in bots.values():
if bot.location != loc and bot.location != self.location:
if (friendly == enemy == False) or \
(enemy and (bot.player_id != self.player_id)) or \
(friendly and (bot.player_id == self.player_id)):
dist = rg.wdist(loc, bot.location)
shortest_distance = min(dist, shortest_distance)
return shortest_distance
def act(self, game):
"""The function called by game.py itself: returns the action the robot
should take this turn."""
action = []
# update the future_moves array if necessary
# only the first robot will do this
global future_moves_turn, future_moves, future_attacks
if future_moves_turn != game['turn']:
future_moves = []
future_attacks = []
future_moves_turn = game['turn']
#adjacent_bots = self.get_adjacent_enemy_bots(game)
if self.is_suiciding_beneficial(game):
action = ['suicide']
else:
locs = [self.location] + rg.locs_around(self.location,
filter_out=['invalid', 'obstacle'])
target_loc = self.get_best_loc(locs, game)
if target_loc != self.location:
action = ['move', target_loc]
else:
attack_locs = rg.locs_around(self.location,
filter_out=['invalid', 'obstacle'])
action = ['attack', self.get_best_attack_loc(attack_locs, game)]
if action[0] == 'move':
assert not action[1] in future_moves
future_moves.append(action[1])
if action[1] == self.location:
action = ['guard']
else:
pass
elif action[0] != 'suicide':
pass#future_moves.append(self.location)
if action[0] == 'attack':
future_attacks.append(action[1])
return action
def get_best_loc(self, locs, game):
"""Returns the best location out of a list.
The 'goodness' of a tile is determined by get_tile_goodness()."""
best_loc_weight = -9999
best_loc = None
for loc in locs:
loc_weight = self.get_tile_goodness(loc, game)
if loc_weight > best_loc_weight:
best_loc = loc
best_loc_weight = loc_weight
assert best_loc
return best_loc
def get_tile_goodness(self, loc, game):
"""Returns how 'good' a tile is to move to or stay on.
Based on a whole bunch of factors. Fine-tuning necessary."""
types = rg.loc_types(loc)
enemies_next_to_loc = self.get_enemy_bots_next_to(loc, game)
enemies_next_to_loc_fighting_friendlies = []
for enemy in enemies_next_to_loc:
if self.get_friendlies_next_to(enemy.location, game):
enemies_next_to_loc_fighting_friendlies.append(enemy)
enemies_next_to_loc_to_fight_friendlies = []
for enemy in enemies_next_to_loc:
for pos in rg.locs_around(enemy.location):
if pos in future_moves:
enemies_next_to_loc_to_fight_friendlies.append(enemy)
break
friendlies_next_to_loc = self.get_friendlies_next_to(loc, game)
nearby_friendlies_in_spawn = []
nearby_friendlies_in_deep_spawn = []
for friendly in friendlies_next_to_loc:
if 'spawn' in rg.loc_types(friendly.location):
nearby_friendlies_in_spawn.append(friendly)
if cant_easily_leave_spawn(friendly.location, game):
nearby_friendlies_in_deep_spawn.append(friendly)
friendly_in_loc = enemy_in_loc = False
if loc != self.location:
bot_in_location = get_bot_in_location(loc, game)
if bot_in_location:
if bot_in_location.player_id == self.player_id:
friendly_in_loc = True
else:
enemy_in_loc = True
else:
bot_in_location = None
distance_to_closest_enemy = self.get_distance_to_closest_bot(game,
loc=loc, enemy=True)
distance_to_closest_friendly = self.get_distance_to_closest_bot(game,
loc=loc,friendly=True)
nearby_friendlies_in_trouble = []
for friendly in friendlies_next_to_loc:
if bot_is_in_trouble(friendly, game):
nearby_friendlies_in_trouble.append(friendly)
goodness = 0
# get out of spawn areas, especially if things are about to spawn
# highest priority: +20 pts if things are about to spawn
if game['turn'] <= 90:
goodness -= ('spawn' in types) * ((game['turn'] % 10 == 0) * 20 + 1)
# if the bot can't easily leave spawn (e.g. has to move through
# more spawn area or an enemy to get out) in the location, that's bad
# the closer to the spawn timer we are, the worse this is, so
# multiply it by the game turn % 10
if game['turn'] <= 90:
goodness -= cant_easily_leave_spawn(loc, game) * (
game['turn'] % 10) * 0.5
# if enemies next to the location are fighting or will fight
# other friendlies, help them
goodness += len(enemies_next_to_loc_fighting_friendlies) * 2.5
goodness += len(enemies_next_to_loc_to_fight_friendlies) * 0.5
# more enemies next to a location, the worse.
# even worse if a friendly is already in the location
# (so the enemies will target that loc)
# even worse if our hp is low
goodness -= len(enemies_next_to_loc) ** 2 + friendly_in_loc
goodness -= friendly_in_loc * 4
# slight bias towards NOT moving right next to friendlies
        # a sort of lattice, like
# X X X X
# X X X
# X X X X
# is the best shape, I think
#goodness -= len(friendlies_next_to_loc) * 0.05
# nearby friendlies in trouble will definitely want to escape this turn
goodness -= len(nearby_friendlies_in_trouble) * 9
if could_die_in_loc(self.hp, loc, self.player_id, game):
# /try/ not to go where the bot can die
# seriously
goodness -= 20
# all else remaining the same, move towards the center
goodness -= rg.dist(loc, rg.CENTER_POINT) * 0.01
# bias towards remaining in place and attacking
goodness += (loc == self.location) * \
(0.25 + 0.75 * (len(enemies_next_to_loc) == 1))
        # especially if we're only fighting one bot
if self.hp > 15:
# if we are strong enough, move close to (2 squares away) the
#nearest enemy
goodness -= max(distance_to_closest_enemy, 2)
else:
#otherwise, run away from the nearest enemy, up to 2 squares away
goodness += min(distance_to_closest_enemy, 2)
# friendlies should group together
# if a bot is caught alone, bots that actively hunt and surround,
# e.g. Chaos Witch Quelaang, will murder them
# so move up to two tiles from the nearest friendly
goodness -= min(distance_to_closest_friendly, 2) * 0.5
# don't move into an enemy
# it's slightly more ok to move into an enemy that could die in the
# next turn by staying here, cause he's likely to either run or die
# it's perfectly alright, maybe even encouraged, to move into a bot
# that would die from bumping into you anyways (<=5hp)
if enemy_in_loc:
goodness -= enemy_in_loc * (30 - 29 * \
bot_is_in_trouble(bot_in_location, game))
goodness += 3 * (bot_in_location.hp <= 5)
# don't block friendlies trying to move out of spawn!
# only matters when things will still spawn in the future, of course
if game['turn'] <= 90:
# if they can escape through us
if not 'spawn' in types:
goodness -= len(nearby_friendlies_in_spawn) * 2
#especially don't block those who can't easily leave spawn
# (the two lists overlap, so no extra weighting needed)
goodness -= len(nearby_friendlies_in_deep_spawn) * 2
# don't move next to possible suiciders if our hp is low enough to die
# from them
for enemy in enemies_next_to_loc_fighting_friendlies:
if is_possible_suicider(enemy, game) and (self.hp <= 15):
goodness -= 2
# the more enemies that could move next to the loc, the worse
# (the more this bot could be surrounded)
goodness -= min(len(self.get_enemies_that_could_move_next_to(
loc, game)), 1) * 0.5
# don't move into a square if another bot already plans to move there
goodness -= 999 * (loc in future_moves)
#allies attacking the same spot is bad, but not the end of the world..
# e.g. if a robot needs to go through a spot being attacked by an
# ally to leave spawn, he DEFINITELY still needs to move there
goodness -= 9 * (loc in future_attacks)
return goodness
def get_enemies_that_could_move_next_to(self, loc, game):
enemies = []
for bot in game.get('robots').values():
if bot.player_id != self.player_id:
if rg.wdist(bot.location, loc) == 2:
enemies.append(bot)
return enemies
def get_attack_goodness(self, loc, game):
"""Returns how 'good' attacking a certain location is.
Based upon the number of friendlies and enemies next to the location,
any bot that is in the location, etc."""
types = rg.loc_types(loc)
enemies_next_to_loc = self.get_enemy_bots_next_to(loc, game)
friendlies_next_to_loc = self.get_friendlies_next_to(loc, game)
nearby_friendlies_in_trouble = []
for friendly in friendlies_next_to_loc:
if bot_is_in_trouble(friendly, game):
nearby_friendlies_in_trouble.append(friendly)
nearby_enemies_in_trouble = []
for enemy in enemies_next_to_loc:
if bot_is_in_trouble(enemy, game):
nearby_enemies_in_trouble.append(enemy)
robot = get_bot_in_location(loc, game)
goodness = 0
if robot:
if robot.player_id == self.player_id:
# we're attacking a friendly's location
# no enemy's gonna move into them...
goodness -= 5
else:
#attacking an enemy is good
goodness += (100 - robot.hp) / 50.0 * 20
else:
# no bot is at the location
# so base the goodness on how likely it is for bots to move there
#more enemies that can move into the location, the better
# weighted by 3 because even if there are two other friendlies
# next to the loc, we still want to attack if it's the only square
# an enemy is next to
goodness += len(enemies_next_to_loc) * 3
#enemies aren't too likely to move next to a friendly
goodness -= len(friendlies_next_to_loc)
# if there are enemies in trouble nearby, we want to try and catch
# them escaping!
goodness += len(nearby_enemies_in_trouble) * 5
# nearby friendlies in trouble will definitely want to escape this
# turn
# maybe to this square
goodness -= len(nearby_friendlies_in_trouble)
# don't attack where an ally is already moving to
# or attacking, at least not too much
if loc in future_moves:
goodness -= 20
elif loc in future_attacks:
goodness -= 3
return goodness
def get_best_attack_loc(self, locs, game):
"""Determines the best location to attack out of a list of locations.
Uses get_attack_goodness() to weigh the locations."""
best_loc_weight = -9999
best_loc = None
for loc in locs:
loc_weight = self.get_attack_goodness(loc, game)
if loc_weight > best_loc_weight:
best_loc = loc
best_loc_weight = loc_weight
return best_loc
| unlicense | -6,406,700,526,533,794,000 | 38.172131 | 80 | 0.568372 | false |
praekelt/malaria24-django | malaria24/settings/base.py | 1 | 8348 | """
Django settings for base malaria24.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
from os.path import abspath, dirname, join
from os import environ
from django.conf import global_settings
from django.utils.translation import ugettext_lazy as _
from datetime import timedelta
import dj_database_url
from celery.schedules import crontab
# Absolute filesystem path to the Django project directory:
PROJECT_ROOT = dirname(dirname(dirname(abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
DEFAULT_SECRET_KEY = 'please-change-me'
SECRET_KEY = environ.get('SECRET_KEY') or DEFAULT_SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Base URL to use when referring to full URLs within the Wagtail admin
# backend - e.g. in notification emails. Don't include '/admin' or
# a trailing slash
BASE_URL = environ.get("BASE_URL") or 'http://example.com'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
'compressor',
'taggit',
'modelcluster',
'wagtail.wagtailcore',
'wagtail.wagtailadmin',
'wagtail.wagtaildocs',
'wagtail.wagtailsnippets',
'wagtail.wagtailusers',
'wagtail.wagtailsites',
'wagtail.wagtailimages',
'wagtail.wagtailembeds',
'wagtail.wagtailsearch',
'wagtail.wagtailredirects',
'wagtail.wagtailforms',
'molo.core',
'malaria24',
'malaria24.ona',
'djcelery',
'raven.contrib.django.raven_compat',
'rest_framework',
'rest_framework.authtoken',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
ROOT_URLCONF = 'malaria24.urls'
WSGI_APPLICATION = 'malaria24.wsgi.application'
# SITE stuff
SITE_ID = 1
# CELERY stuff
BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERYBEAT_SCHEDULE = {
'poll-ona-fetch-forms': {
'task': 'malaria24.ona.tasks.ona_fetch_forms',
'schedule': timedelta(hours=1),
},
'poll-ona-reported-cases': {
'task': 'malaria24.ona.tasks.ona_fetch_reported_cases',
'schedule': timedelta(minutes=10),
},
'send-weekly-digest': {
'task': 'malaria24.ona.tasks.compile_and_send_digest_email',
'schedule': crontab(hour=8, minute=15, day_of_week='mon'),
},
}
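# Added annotation (not part of the original settings): the two polling tasks
# run on fixed timedeltas, while the weekly digest fires every Monday at
# 08:15 via the crontab rule above.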
DEFAULT_FROM_EMAIL = 'MalariaConnect <[email protected]>'
# JEMBI settings
# Send to them by default
FORWARD_TO_JEMBI = environ.get('FORWARD_TO_JEMBI', 'true').lower() == 'true'
JEMBI_URL = environ.get('JEMBI_URL') or 'http://jembi.org/malaria24'
JEMBI_USERNAME = environ.get('JEMBI_USERNAME') or '[email protected]'
JEMBI_PASSWORD = environ.get('JEMBI_PASSWORD') or 'not_a_real_password'
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': ('raven.contrib.django.raven_compat.'
'handlers.SentryHandler'),
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
},
}
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# SQLite (simplest install)
DATABASES = {'default': dj_database_url.config(
default='sqlite:///%s' % (join(PROJECT_ROOT, 'db.sqlite3'),))}
# PostgreSQL (Recommended, but requires the psycopg2 library and Postgresql
# development headers)
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'base',
# 'USER': 'postgres',
# 'PASSWORD': '',
# 'HOST': '', # Set to empty string for localhost.
# 'PORT': '', # Set to empty string for default.
# # number of seconds database connections should persist for
# 'CONN_MAX_AGE': 600,
# }
# }
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Native South African languages are currently not included in the default
# list of languges in django
# https://github.com/django/django/blob/master/django/conf/global_settings.py#L50
LANGUAGES = global_settings.LANGUAGES + (
('zu', _('Zulu')),
('xh', _('Xhosa')),
('st', _('Sotho')),
('ve', _('Venda')),
('tn', _('Tswana')),
('ts', _('Tsonga')),
('ss', _('Swati')),
('nr', _('Ndebele')),
)
LOCALE_PATHS = (
join(PROJECT_ROOT, "locale"),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
MEDIA_ROOT = join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
# Django compressor settings
# http://django-compressor.readthedocs.org/en/latest/settings/
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
# Template configuration
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
'molo.core.context_processors.locale',
)
# Wagtail settings
LOGIN_URL = 'wagtailadmin_login'
LOGIN_REDIRECT_URL = 'wagtailadmin_home'
WAGTAIL_SITE_NAME = "base"
# Use Elasticsearch as the search backend for extra performance and better
# search results:
# http://wagtail.readthedocs.org/en/latest/howto/performance.html#search
# http://wagtail.readthedocs.org/en/latest/core_components/
# search/backends.html#elasticsearch-backend
#
# WAGTAILSEARCH_BACKENDS = {
# 'default': {
# 'BACKEND': ('wagtail.wagtailsearch.backends.'
# 'elasticsearch.ElasticSearch'),
# 'INDEX': 'base',
# },
# }
# Whether to use face/feature detection to improve image
# cropping - requires OpenCV
WAGTAILIMAGES_FEATURE_DETECTION_ENABLED = False
# REST Framework conf defaults
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
# email host settings
EMAIL_HOST = environ.get('EMAIL_HOST', 'localhost')
EMAIL_PORT = environ.get('EMAIL_PORT', 25)
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', '')
EMAIL_USE_TLS = environ.get('EMAIL_USE_TLS', 'false').lower() == 'true'
| bsd-2-clause | -8,339,247,801,502,718,000 | 26.826667 | 81 | 0.65333 | false |
litui/openparliament | parliament/default_settings.py | 1 | 5763 | import os
DEBUG = True
ADMINS = [
('Michael Mulley', '[email protected]'),
]
MANAGERS = ADMINS
PROJ_ROOT = os.path.dirname(os.path.realpath(__file__))
CACHE_MIDDLEWARE_KEY_PREFIX = 'parl'
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
# Set to True to disable functionality where user-provided data is saved
PARLIAMENT_DB_READONLY = False
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'America/Montreal'
# Language code for this installation.
# MUST BE either 'en' or 'fr'
LANGUAGE_CODE = 'en'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = [os.path.join(PROJ_ROOT, 'locale')]
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.realpath(os.path.join(PROJ_ROOT, '..', '..', 'mediafiles'))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
STATICFILES_DIRS = [os.path.join(PROJ_ROOT, 'static')]
STATIC_ROOT = os.path.realpath(os.path.join(PROJ_ROOT, '..', '..', 'staticfiles'))
STATIC_URL = '/static/'
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
]
COMPRESS_CSS_FILTERS = [
'parliament.core.utils.AutoprefixerFilter',
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.rCSSMinFilter'
]
COMPRESS_JS_FILTERS = []
COMPRESS_OFFLINE = True
COMPRESS_ENABLED = False
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
('es6', 'cat {infile} | ./node_modules/.bin/babel --presets es2015 > {outfile}'),
)
COMPRESS_CACHEABLE_PRECOMPILERS = ['es6']
PARLIAMENT_LANGUAGE_MODEL_PATH = os.path.realpath(os.path.join(PROJ_ROOT, '..', '..', 'language_models'))
PARLIAMENT_GENERATE_TEXT_ANALYSIS = False
APPEND_SLASH = False
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_AGE = 60*60*24*60 # 60 days
SESSION_COOKIE_SECURE = True
PARLIAMENT_API_HOST = 'api.openparliament.ca'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(PROJ_ROOT, 'templates')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'parliament.accounts.context_processors.auth',
'parliament.core.utils.lang_context',
],
},
},
]
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'parliament.accounts.middleware.AuthenticatedEmailMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
#'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'parliament.core.api.FetchFromCacheMiddleware',
]
ROOT_URLCONF = 'parliament.urls'
WSGI_APPLICATION = 'parliament.wsgi.application'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.humanize',
'django.contrib.flatpages',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'django_extensions',
'haystack',
'imagekit',
'compressor',
'parliament.core',
'parliament.accounts',
'parliament.hansards',
'parliament.elections',
'parliament.bills',
'parliament.politicians',
'parliament.activity',
'parliament.alerts',
'parliament.committees',
'parliament.search',
'parliament.text_analysis',
]
LOGGING = {
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(module)s %(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
}
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'parliament': {
'handlers': ['console'],
'level': 'WARNING',
}
},
}
| agpl-3.0 | 7,099,144,173,799,089,000 | 28.860104 | 105 | 0.639597 | false |
matthiask/zivinetz | tests/testapp/test_changes.py | 1 | 1099 | from datetime import date
from django.test import TestCase
from testapp import factories
from zivinetz.models import AssignmentChange
class ChangesTestCase(TestCase):
def test_change_tracking(self):
assignment = factories.AssignmentFactory.create()
self.assertEqual(AssignmentChange.objects.count(), 1)
assignment.status = assignment.ARRANGED
assignment.arranged_on = date.today()
assignment.save()
self.assertEqual(AssignmentChange.objects.count(), 2)
assignment.status = assignment.MOBILIZED
assignment.mobilized_on = date.today()
assignment.save()
self.assertEqual(AssignmentChange.objects.count(), 3)
assignment.delete()
self.assertEqual(AssignmentChange.objects.count(), 4)
# Test the listing view.
admin = factories.UserFactory.create(is_staff=True, is_superuser=True)
self.client.login(username=admin.username, password="test")
self.assertContains(
self.client.get("/zivinetz/reporting/assignmentchanges/"), "by unknown", 4
)
| mit | 6,544,142,790,884,116,000 | 27.921053 | 86 | 0.688808 | false |
kozistr/Awesome-GANs | awesome_gans/magan/magan_train.py | 1 | 7449 | import time
import numpy as np
import tensorflow as tf
import awesome_gans.image_utils as iu
import awesome_gans.magan.magan_model as magan
from awesome_gans.datasets import CelebADataSet as DataSet
from awesome_gans.datasets import DataIterator
results = {'output': './gen_img/', 'model': './model/MAGAN-model.ckpt'}
train_step = {
'epochs': 50,
'batch_size': 64,
'global_step': 200001,
'logging_interval': 1000,
}
def main():
start_time = time.time() # Clocking start
# loading CelebA DataSet
ds = DataSet(
height=64,
width=64,
channel=3,
ds_image_path="D:/DataSet/CelebA/CelebA-64.h5",
ds_label_path="D:/DataSet/CelebA/Anno/list_attr_celeba.txt",
# ds_image_path="D:/DataSet/CelebA/Img/img_align_celeba/",
ds_type="CelebA",
use_save=False,
save_file_name="D:/DataSet/CelebA/CelebA-64.h5",
save_type="to_h5",
use_img_scale=False,
img_scale="-1,1",
)
# saving sample images
test_images = np.reshape(iu.transform(ds.images[:100], inv_type='127'), (100, 64, 64, 3))
iu.save_images(test_images, size=[10, 10], image_path=results['output'] + 'sample.png', inv_type='127')
ds_iter = DataIterator(x=ds.images, y=None, batch_size=train_step['batch_size'], label_off=True)
# GPU configure
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as s:
# MAGAN Model
model = magan.MAGAN(s)
# Initializing
s.run(tf.global_variables_initializer())
# Load model & Graph & Weights
saved_global_step = 0
ckpt = tf.train.get_checkpoint_state('./model/')
if ckpt and ckpt.model_checkpoint_path:
model.saver.restore(s, ckpt.model_checkpoint_path)
saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
print("[+] global step : %s" % saved_global_step, " successfully loaded")
else:
print('[-] No checkpoint file found')
n_steps = ds.num_images // model.batch_size # training set size
# Pre-Train
print("[*] pre-training - getting proper Margin")
margin = 0 # 3.0585415484215974
if margin == 0:
sum_d_loss = 0.0
for i in range(2):
for batch_x in ds_iter.iterate():
batch_x = np.reshape(
iu.transform(batch_x, inv_type='127'),
(model.batch_size, model.height, model.width, model.channel),
)
batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
_, d_real_loss = s.run(
[model.d_op, model.d_real_loss],
feed_dict={
model.x: batch_x,
model.z: batch_z,
model.m: 0.0,
},
)
sum_d_loss += d_real_loss
print("[*] Epoch {:1d} Sum of d_real_loss : {:.8f}".format(i + 1, sum_d_loss))
# Initial margin value
margin = sum_d_loss / n_steps
print("[+] Margin : {0}".format(margin))
old_margin = margin
s_g_0 = np.inf # Sg_0 = infinite
global_step = saved_global_step
start_epoch = global_step // (ds.num_images // model.batch_size) # recover n_epoch
ds_iter.pointer = saved_global_step % (ds.num_images // model.batch_size) # recover n_iter
for epoch in range(start_epoch, train_step['epochs']):
s_d, s_g = 0.0, 0.0
for batch_x in ds_iter.iterate():
batch_x = iu.transform(batch_x, inv_type='127')
batch_x = np.reshape(batch_x, (model.batch_size, model.height, model.width, model.channel))
batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
# Update D network
_, d_loss, d_real_loss = s.run(
[model.d_op, model.d_loss, model.d_real_loss],
feed_dict={
model.x: batch_x,
model.z: batch_z,
model.m: margin,
},
)
# Update D real sample
s_d += np.sum(d_real_loss)
# Update G network
_, g_loss, d_fake_loss = s.run(
[model.g_op, model.g_loss, model.d_fake_loss],
feed_dict={
model.x: batch_x,
model.z: batch_z,
model.m: margin,
},
)
# Update G fake sample
s_g += np.sum(d_fake_loss)
# Logging
if global_step % train_step['logging_interval'] == 0:
summary = s.run(
model.merged,
feed_dict={
model.x: batch_x,
model.z: batch_z,
model.m: margin,
},
)
# Print loss
print(
"[+] Epoch %03d Global Step %05d => " % (epoch, global_step),
" D loss : {:.8f}".format(d_loss),
" G loss : {:.8f}".format(g_loss),
)
# Training G model with sample image and noise
sample_z = np.random.uniform(-1.0, 1.0, [model.sample_num, model.z_dim]).astype(np.float32)
samples = s.run(
model.g,
feed_dict={
model.z: sample_z,
model.m: margin,
},
)
# Summary saver
model.writer.add_summary(summary, global_step)
# Export image generated by model G
sample_image_height = model.sample_size
sample_image_width = model.sample_size
sample_dir = results['output'] + 'train_{:08d}.png'.format(global_step)
# Generated image save
iu.save_images(
samples, size=[sample_image_height, sample_image_width], image_path=sample_dir, inv_type='127'
)
# Model save
model.saver.save(s, results['model'], global_step)
global_step += 1
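            # Added annotation (not in the original script): the block below
            # is the MAGAN margin-adaptation rule - once the mean real-sample
            # loss drops below the current margin, the real loss is lower than
            # the fake loss, and the fake-sample loss has stopped improving,
            # the margin is lowered to the mean real-sample loss.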
# Update margin
if s_d / n_steps < margin and s_d < s_g and s_g_0 <= s_g:
margin = s_d / n_steps
print("[*] Margin updated from {:8f} to {:8f}".format(old_margin, margin))
old_margin = margin
s_g_0 = s_g
# Convergence Measure
e_d = s_d / n_steps
e_g = s_g / n_steps
l_ = e_d + np.abs(e_d - e_g)
print("[+] Epoch %03d " % epoch, " L : {:.8f}".format(l_))
end_time = time.time() - start_time # Clocking end
# Elapsed time
print("[+] Elapsed time {:.8f}s".format(end_time))
# Close tf.Session
s.close()
if __name__ == '__main__':
main()
| mit | 8,328,749,761,969,198,000 | 34.303318 | 118 | 0.470936 | false |
doctori/PythonTDD | functional_tests/test_login.py | 1 | 1831 | import time
from .base import FunctionalTest
TEST_EMAIL = '[email protected]'
class LoginTest(FunctionalTest):
def switch_to_new_window(self, text_in_title):
retries = 60
while retries > 0:
for handle in self.browser.window_handles:
self.browser.switch_to_window(handle)
if text_in_title in self.browser.title:
return
retries -= 1
time.sleep(0.5)
self.fail('could not find window')
def test_login_with_persona(self):
# Edith goes to the awesome superlists site
# and notices a "Sign in" link for the first time.
self.browser.get(self.server_url)
self.browser.find_element_by_id('id_login').click()
# A Persona login box appears
self.switch_to_new_window('Mozilla Persona')
# Edith logs in with her email address
## Use mockmyid.com for test email
self.browser.find_element_by_id(
'authentication_email'
).send_keys(TEST_EMAIL)
self.browser.find_element_by_tag_name('button').click()
# The Persona window closes
self.switch_to_new_window('To-Do')
# She can see that she is logged in
self.wait_to_be_logged_in(email=TEST_EMAIL)
# Refreshing the page, she sees it's a real session login,
# not just a one-off for that page
self.browser.refresh()
self.wait_to_be_logged_in(email=TEST_EMAIL)
# Terrified of this new feature, she reflexively clicks "logout"
self.browser.find_element_by_id('id_logout').click()
        self.wait_to_be_logged_out(email=TEST_EMAIL)
# The "logged out" status also persists after a refresh
self.browser.refresh()
self.wait_to_be_logged_out(email=TEST_EMAIL)
| gpl-2.0 | 630,886,441,368,789,200 | 32.290909 | 72 | 0.619334 | false |
napjon/moocs_solution | introcs-udacity/Search Engine(jonappsearch)/main.py | 1 | 1817 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
form = """
<html>
<head>
<title>Search Engine</title>
</head>
<body>
<h2>Search Engine</h2>
<form method="post">
<textarea name="text">%(text)s</textarea>
<br>
<input type="submit">
<br>
<br>
<br>
%(links)s
</form>
</body>
</html>
"""
import webapp2
import cgi
from search import lucky_search
from crawler import crawl_web, compute_ranks
class MainHandler(webapp2.RequestHandler):
    def render(self, text="", links=""):
        return self.response.write(form % {'text': self.escape_html(text),
                                           'links': self.escape_html(links)})
def get(self):
self.render()
def escape_html(self,s):
return cgi.escape(s, quote = True)
def post(self):
corpus, graph = crawl_web('http://udacity.com/cs101x/urank/index.html')
ranks = compute_ranks(graph)
query = self.request.get('text')
result = lucky_search(corpus, ranks, query)
if not result:
self.render(text = "", links = "try www.google.com")
else:
self.render(text = query, links = result)
app = webapp2.WSGIApplication([
('/', MainHandler)
], debug=True)
| mit | -7,123,328,385,974,137,000 | 23.890411 | 79 | 0.628509 | false |
IPVL/swift_test | swift/proxy/server.py | 1 | 2944 | from swift.ipvl.inspect_custom import whoami, whosdaddy
pass # (WIS) print __name__
required_filters = [
{'name': 'catch_errors'},
{'name': 'gatekeeper',
'after_fn': lambda pipe: (['catch_errors']
if pipe.startswith('catch_errors')
else [])},
{'name': 'dlo', 'after_fn': lambda _junk: [
'staticweb', 'tempauth', 'keystoneauth',
'catch_errors', 'gatekeeper', 'proxy_logging']}]
class Application(object):
"""docstring for Application"""
def __init__(self, arg=None):
pass # (WIS) print "%s %s (%s -> %s)" % (__name__, self.__class__.__name__, whosdaddy(), whoami())
self.arg = arg
def __call__(self, env, start_response):
pass # (WIS) print "%s %s" % (self.__class__.__name__, env)
start_response('200 OK', [('Content-Type', 'text/plain')])
return self.__class__.__name__
def modify_wsgi_pipeline(self, pipe):
"""
Called during WSGI pipeline creation. Modifies the WSGI pipeline
context to ensure that mandatory middleware is present in the pipeline.
:param pipe: A PipelineWrapper object
"""
pipeline_was_modified = False
print 'pipe: %s ' % pipe
for filter_spec in reversed(required_filters):
filter_name = filter_spec['name']
print 'filter name : %s ' % filter_name
if filter_name not in pipe:
afters = filter_spec.get('after_fn', lambda _junk: [])(pipe)
print '%s after : %s ' % (filter_name, afters)
insert_at = 0
for after in afters:
try:
insert_at = max(insert_at, pipe.index(after) + 1)
except ValueError: # not in pipeline; ignore it
pass
# self.logger.info(
# 'Adding required filter %s to pipeline at position %d' %
# (filter_name, insert_at))
print 'Adding required filter %s to pipeline at position %d' % (filter_name, insert_at)
ctx = pipe.create_filter(filter_name)
pipe.insert_filter(ctx, index=insert_at)
pipeline_was_modified = True
if pipeline_was_modified:
# self.logger.info("Pipeline was modified. New pipeline is \"%s\".",
# pipe)
print "Pipeline was modified. New pipeline is \"%s\".", pipe
else:
# self.logger.debug("Pipeline is \"%s\"", pipe)
print "Pipeline is \"%s\"", pipe
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI proxy apps."""
pass # (WIS) print "%s (%s -> %s)" % (__name__, whosdaddy(), whoami())
conf = global_conf.copy()
conf.update(local_conf)
app = Application(conf)
# app.check_config()
return app
| mit | -6,019,711,509,896,874,000 | 39.328767 | 107 | 0.529891 | false |
vnevoa/DiffTrike | SoapBox/sb_joystick.py | 1 | 2438 | #
# Copyright 2011 Vasco Nevoa.
#
# This file is part of DiffTrike.
#
# DiffTrike is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DiffTrike is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DiffTrike. If not, see <http://www.gnu.org/licenses/>.
#
# This module implements the controller's joystick device driver.
# It depends on python-pygame.
#
import pygame, time
from pygame.joystick import *
class Joystick():
"""initializes and gets data from the joystick"""
def __init__(self, joystick_num):
init = pygame.joystick.get_init()
if not init:
print "Initializing Joystick module."
pygame.joystick.init()
count = pygame.joystick.get_count()
self.buttons = 0
if (count > joystick_num):
self.dev = pygame.joystick.Joystick(joystick_num)
print 'Initializing Joystick ' + str(joystick_num) + ': ' + self.dev.get_name()
self.dev.init()
self.buttons = self.dev.get_numbuttons()
self.hats = self.dev.get_numhats()
self.trackballs = self.dev.get_numballs()
print "Joystick has "+ str(self.buttons) + " buttons, " + str(self.hats) + " hats, " + str(self.trackballs) + " trackballs."
self.present = 1
else:
print "Joystick not found."
self.present = 0
def getXY(self):
if self.present:
return ( self.dev.get_axis(0), self.dev.get_axis(1) )
else:
return ( 0.0, 0.0 )
def getButtons(self, highest = 1):
ret = []
for b in range(min(highest, self.buttons)):
ret.append(self.dev.get_button(b))
return ret
# This is a simple test routine that only runs if this module is
# called directly with "python sb_joystick.py"
if __name__ == '__main__':
pygame.init();
joy = Joystick(0);
while True:
t0 = time.time()
pygame.event.pump()
p = joy.getXY()
b = joy.getButtons(4)
t1 = time.time()
print "X=%0.2f Y=%0.2f B0=%d B1=%d B2=%d B3=%d T=%0.1f" % (p[0],p[1],b[0],b[1],b[2],b[3],(t1-t0)*1000)
time.sleep(0.25)
| gpl-3.0 | -457,062,874,747,265,800 | 31.078947 | 127 | 0.653815 | false |
Infinidat/lanister | manage.py | 1 | 7158 | #! /usr/bin/python
from __future__ import print_function
import os
import sys
import time
import random
import string
import subprocess
from _lib.bootstrapping import bootstrap_env, from_project_root, requires_env, from_env_bin
from _lib.ansible import ensure_ansible
bootstrap_env(["base"])
from _lib.params import APP_NAME
from _lib.source_package import prepare_source_package
from _lib.db import db
from _lib.celery import celery
from _lib.utils import interact
from _lib.deployment import run_gunicorn
import click
import requests
import logbook
##### ACTUAL CODE ONLY BENEATH THIS POINT ######
@click.group()
def cli():
pass
cli.add_command(run_gunicorn)
cli.add_command(db)
cli.add_command(celery)
@cli.command('ensure-secret')
@click.argument("conf_file")
def ensure_secret(conf_file):
dirname = os.path.dirname(conf_file)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if os.path.exists(conf_file):
return
with open(conf_file, "w") as f:
print('SECRET_KEY: "{0}"'.format(_generate_secret()), file=f)
print('SECURITY_PASSWORD_SALT: "{0}"'.format(_generate_secret()), file=f)
def _generate_secret(length=50):
return "".join([random.choice(string.ascii_letters) for i in range(length)])
@cli.command()
@click.option("--develop", is_flag=True)
@click.option("--app", is_flag=True)
def bootstrap(develop, app):
deps = ["base"]
if develop:
deps.append("develop")
if app:
deps.append("app")
bootstrap_env(deps)
click.echo(click.style("Environment up to date", fg='green'))
@cli.command()
@click.option('--livereload/--no-livereload', is_flag=True, default=True)
@click.option('-p', '--port', default=8000, envvar='TESTSERVER_PORT')
@click.option('--tmux/--no-tmux', is_flag=True, default=True)
@requires_env("app", "develop")
def testserver(tmux, livereload, port):
if tmux:
return _run_tmux_frontend(port=port)
from flask_app.app import create_app
app = create_app({'DEBUG': True, 'TESTING': True, 'SECRET_KEY': 'dummy', 'SECURITY_PASSWORD_SALT': 'dummy'})
extra_files=[
from_project_root("flask_app", "app.yml")
]
if livereload:
from livereload import Server
s = Server(app)
for filename in extra_files:
s.watch(filename)
s.watch('flask_app')
logbook.StreamHandler(sys.stderr, level='DEBUG').push_application()
s.serve(port=port, liveport=35729)
else:
app.run(port=port, extra_files=extra_files)
def _run_tmux_frontend(port):
tmuxp = from_env_bin('tmuxp')
os.execve(tmuxp, [tmuxp, 'load', from_project_root('_lib', 'frontend_tmux.yml')], dict(os.environ, TESTSERVER_PORT=str(port), CONFIG_DIRECTORY=from_project_root("conf.d")))
@cli.command()
@click.option("--dest", type=click.Choice(["production", "staging", "localhost", "vagrant", "custom"]), help="Deployment target", required=True)
@click.option("-i", "--inventory", type=str, default=None, help="Path to an inventory file. Should be specified only when \"--dest custom\" is set")
@click.option("--vagrant-machine", type=str, default="", help="Vagrant machine to provision")
@click.option("--sudo/--no-sudo", default=False)
@click.option("--ask-sudo-pass/--no-ask-sudo-pass", default=False)
def deploy(dest, sudo, ask_sudo_pass, vagrant_machine, inventory):
prepare_source_package()
ansible = ensure_ansible()
if dest == "vagrant":
# Vagrant will invoke ansible
environ = os.environ.copy()
environ["PATH"] = "{}:{}".format(os.path.dirname(ansible), environ["PATH"])
# "vagrant up --provision" doesn't call provision if the virtual machine is already up,
# so we have to call vagrant provision explicitly
click.echo(click.style("Running deployment on Vagrant. This may take a while...", fg='magenta'))
subprocess.check_call('vagrant up ' + vagrant_machine, shell=True, env=environ)
subprocess.check_call('vagrant provision ' + vagrant_machine, shell=True, env=environ)
else:
if dest == "custom":
if inventory is None:
raise click.ClickException("-i/--inventory should be specified together with \"--dest custom\"")
if not os.path.exists(inventory):
raise click.ClickException("Custom inventory file {} doesn't exist".format(inventory))
else:
if inventory is not None:
raise click.ClickException("-i/--inventory should be specified only when \"--dest custom\" is specified")
inventory = from_project_root("ansible", "inventories", dest)
click.echo(click.style("Running deployment on {}. This may take a while...".format(inventory), fg='magenta'))
cmd = [ansible, "-i", inventory]
if dest in ("localhost",):
cmd.extend(["-c", "local"])
if dest == "localhost":
cmd.append("--sudo")
if sudo:
cmd.append('--sudo')
if ask_sudo_pass:
cmd.append('--ask-sudo-pass')
cmd.append(from_project_root("ansible", "site.yml"))
subprocess.check_call(cmd)
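
# Example invocations (illustrative only; the inventory names must match files
# under ansible/inventories/ and the Vagrant machine must exist in the
# Vagrantfile):
#
#     python manage.py deploy --dest staging
#     python manage.py deploy --dest vagrant --vagrant-machine web
#     python manage.py deploy --dest custom -i /path/to/inventory --sudo --ask-sudo-pass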
@cli.command()
def unittest():
_run_unittest()
@requires_env("app", "develop")
def _run_unittest():
subprocess.check_call(
[from_env_bin("py.test"), "tests/test_ut"], cwd=from_project_root())
@cli.command()
@click.argument('pytest_args', nargs=-1)
def pytest(pytest_args):
_run_pytest(pytest_args)
@requires_env("app", "develop")
def _run_pytest(pytest_args=()):
subprocess.check_call(
[from_env_bin("py.test")]+list(pytest_args), cwd=from_project_root())
@cli.command()
def fulltest():
_run_fulltest()
@requires_env("app", "develop")
def _run_fulltest(extra_args=()):
subprocess.check_call([from_env_bin("py.test"), "tests"]
+ list(extra_args), cwd=from_project_root())
@cli.command('travis-test')
def travis_test():
subprocess.check_call('createdb {0}'.format(APP_NAME), shell=True)
_run_unittest()
subprocess.check_call('dropdb {0}'.format(APP_NAME), shell=True)
def _wait_for_travis_availability():
click.echo(click.style("Waiting for service to become available on travis", fg='magenta'))
time.sleep(10)
for _ in range(10):
click.echo("Checking service...")
resp = requests.get("http://localhost/")
click.echo("Request returned {0}".format(resp.status_code))
if resp.status_code == 200:
break
time.sleep(5)
else:
raise RuntimeError("Web service did not become responsive")
click.echo(click.style("Service is up", fg='green'))
def _db_container_name():
return '{0}-db'.format(APP_NAME)
@cli.command()
@requires_env("app", "develop")
def shell():
from flask_app.app import create_app
from flask_app import models
app = create_app()
with app.app_context():
interact({
'db': db,
'app': app,
'models': models,
'db': models.db,
})
if __name__ == "__main__":
cli()
| bsd-3-clause | -4,284,564,785,641,032,700 | 31.536364 | 176 | 0.637748 | false |
olivierlemasle/murano | murano/tests/unit/engine/test_mock_context_manager.py | 1 | 5012 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from yaql import contexts
from yaql import specs
from murano.dsl import constants
from murano.dsl import executor
from murano.dsl import murano_class
from murano.engine import execution_session
from murano.engine import mock_context_manager
from murano.engine.system import test_fixture
from murano.tests.unit import base
from murano.tests.unit.dsl.foundation import object_model as om
from murano.tests.unit.dsl.foundation import runner
from murano.tests.unit.dsl.foundation import test_case
FIXTURE_CLASS = 'io.murano.system.Agent'
FIXTURE_FUNC = 'call'
def _get_fd(set_to_extract):
return list(set_to_extract)[0]
class TestMockContextManager(mock_context_manager.MockContextManager):
def __init__(self, functions):
super(TestMockContextManager, self).__init__()
self.__functions = functions
def create_root_context(self, runtime_version):
root_context = super(TestMockContextManager, self).create_root_context(
runtime_version)
context = root_context.create_child_context()
for name, func in six.iteritems(self.__functions):
context.register_function(func, name)
return context
class MockRunner(runner.Runner):
def __init__(self, model, package_loader, functions):
if isinstance(model, six.string_types):
model = om.Object(model)
model = om.build_model(model)
if 'Objects' not in model:
model = {'Objects': model}
self.executor = executor.MuranoDslExecutor(
package_loader, TestMockContextManager(functions),
execution_session.ExecutionSession())
self._root = self.executor.load(model).object
class TestMockManager(base.MuranoTestCase):
def test_create_class_context(self):
mock_manager = mock_context_manager.MockContextManager()
mock_murano_class = mock.MagicMock(spec=murano_class.MuranoClass)
mock_murano_class.name = FIXTURE_CLASS
original_function = mock.MagicMock(spec=specs.FunctionDefinition)
original_function.is_method = True
original_function.name = FIXTURE_FUNC
original_context = contexts.Context()
p = mock.patch("inspect.getargspec", new=mock.MagicMock())
p.start()
original_context.register_function(original_function)
mock_murano_class.context = original_context
p.stop()
mock_function = mock.MagicMock(spec=specs.FunctionDefinition)
mock_function.is_method = True
mock_function.name = FIXTURE_FUNC
mock_manager.class_mock_ctx[FIXTURE_CLASS] = [mock_function]
result_context = mock_manager.create_class_context(mock_murano_class)
all_functions = result_context.collect_functions(FIXTURE_FUNC)
# Mock function should go first, but result context should contain both
self.assertIs(mock_function, _get_fd(all_functions[0]))
self.assertIs(original_function, _get_fd(all_functions[1]))
def test_create_root_context(self):
mock_manager = mock_context_manager.MockContextManager()
ctx_to_check = mock_manager.create_root_context(
constants.RUNTIME_VERSION_1_1)
inject_count = ctx_to_check.collect_functions('inject')
with_original_count = ctx_to_check.collect_functions('withOriginal')
self.assertEqual(2, len(inject_count[0]))
self.assertEqual(1, len(with_original_count[0]))
class TestMockYaqlFunctions(test_case.DslTestCase):
def setUp(self):
super(TestMockYaqlFunctions, self).setUp()
self.package_loader.load_package('io.murano', None).register_class(
test_fixture.TestFixture)
self.runner = MockRunner(om.Object('TestMocks'),
self.package_loader, self._functions)
def test_inject_method_with_str(self):
self.runner.testInjectMethodWithString()
def test_inject_object_with_str(self):
self.runner.testInjectObjectWithString()
def test_inject_method_with_yaql_expr(self):
self.runner.testInjectMethodWithYaqlExpr()
def test_inject_method_with_yaql_expr2(self):
self.runner.testInjectMethodWithYaqlExpr2()
def test_inject_object_with_yaql_expr(self):
self.runner.testInjectObjectWithYaqlExpr()
def test_with_original(self):
self.runner.testWithoriginal()
def test_original_method(self):
self.runner.testOriginalMethod()
| apache-2.0 | 1,920,191,678,098,974,000 | 36.684211 | 79 | 0.69992 | false |
ipeterov/convenient-rpc | task_server/lib/tasks.py | 1 | 3496 | import uuid
import time
from functools import partial
from collections import Counter
from threading import Thread
from queue import Queue, Empty
class TaskManager:
@staticmethod
def hash_task(task):
return hash(''.join(str(task.get(key, '')) for key in ('package', 'version', 'function')))
def __init__(self):
self.wait_interval = 0.01
self.tasks = {}
self.answers = {}
self.unsent_tasks = []
self.streams = {}
self.task_performance = {}
def get_tasks(self):
return self.tasks
def start_stream(self, ids, unordered=False):
def fill_queue(queue, iterable):
for item in iterable:
queue.put(item)
stream_id = str(uuid.uuid4())
answer_queue = Queue()
answer_gen = self.get_answers(ids, unordered=unordered)
self.streams[stream_id] = {
'generator': answer_gen,
'queue': answer_queue,
'worker': Thread(target=partial(fill_queue, answer_queue, answer_gen)),
'left': len(ids)
}
self.streams[stream_id]['worker'].start()
return stream_id
def get_from_stream(self, stream_id):
if stream_id not in self.streams:
raise WrongIDException()
answers = []
while True:
try:
answers.append(self.streams[stream_id]['queue'].get_nowait())
except Empty:
break
self.streams[stream_id]['left'] -= len(answers)
last = self.streams[stream_id]['left'] == 0
if last:
self.streams.pop(stream_id)
return answers, last
def add_task(self, task):
id_ = str(uuid.uuid4())
self.tasks[id_] = task
self.unsent_tasks.append(id_)
return id_
def get_answers(self, ids, unordered=False):
if unordered:
while ids:
for id_ in ids.copy():
if id_ in self.answers:
ids.remove(id_)
yield self.answers.pop(id_)
time.sleep(self.wait_interval)
else:
for id_ in ids:
while id_ not in self.answers:
time.sleep(self.wait_interval)
yield self.answers.pop(id_)
def get_task(self):
while True:
try:
id_ = self.unsent_tasks.pop(0)
break
except IndexError:
time.sleep(self.wait_interval)
task = self.tasks[id_]
return id_, task
def add_answer(self, id_, answer, time=None):
if id_ in self.tasks:
task = self.tasks[id_]
del self.tasks[id_]
else:
raise WrongIDException()
hash_key = self.hash_task(task)
self.answers[id_] = answer
        if time is not None:
self.task_performance.setdefault(hash_key, []).append(time)
def estimate_runtime(self, hash_key):
if hash_key in self.task_performance:
times = self.task_performance[hash_key]
return sum(times) / len(times)
else:
return 0
def estimate_time_left(self):
tasks = Counter(self.hash_task(self.tasks[id_]) for id_ in self.unsent_tasks).items()
return sum(self.estimate_runtime(hash_key) * count for hash_key, count in tasks)
class NotReadyException(Exception):
pass
class WrongIDException(Exception):
pass
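

# Minimal usage sketch (illustrative only): a single process plays both the
# client that submits a task and the worker that executes it. The payload keys
# other than 'package'/'version'/'function' are assumptions for this example.
if __name__ == '__main__':
    manager = TaskManager()
    task_id = manager.add_task({'package': 'demo', 'version': '1',
                                'function': 'add', 'args': (1, 2)})
    # A worker would normally call get_task() from its own thread or process.
    id_, task = manager.get_task()
    manager.add_answer(id_, sum(task['args']), time=0.001)
    print(list(manager.get_answers([task_id])))  # -> [3]
    print(manager.estimate_time_left())          # -> 0 (nothing left queued)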
| mit | 3,637,333,381,968,971,300 | 25.484848 | 98 | 0.543764 | false |
dlshriver/pinq | tests/test_queryable_first.py | 1 | 1194 | import unittest
import pinq
class queryable_first_tests(unittest.TestCase):
def setUp(self):
self.queryable0 = pinq.as_queryable([])
self.queryable1 = pinq.as_queryable(range(1))
self.queryable2 = pinq.as_queryable(range(1, 11))
def test_first_only_element(self):
self.assertEqual(self.queryable1.first(), 0)
def test_first_many_elements(self):
self.assertEqual(self.queryable2.first(), 1)
def test_first_only_element_with_condition(self):
self.assertEqual(self.queryable1.first(lambda x: x < 5), 0)
def test_first_many_elements_with_condition_same_first(self):
self.assertEqual(self.queryable2.first(lambda x: x < 5), 1)
def test_first_many_elements_with_condition_change_first(self):
self.assertEqual(self.queryable2.first(lambda x: x > 5), 6)
def test_first_predicate_type_error(self):
self.assertRaises(TypeError, self.queryable1.first, 100)
def test_first_empty_value_error(self):
self.assertRaises(ValueError, self.queryable0.first)
def test_first_no_satisfying_value_error(self):
self.assertRaises(ValueError, self.queryable2.first, lambda x: x > 100)
| mit | -665,743,693,051,481,600 | 34.117647 | 79 | 0.69263 | false |
acesonl/remotecare | remotecare/core/backends.py | 1 | 1343 | # -*- coding: utf-8 -*-
"""
The standard email backend is replaced by a custom
ModelBackend that supports getting the user based
on the stored hmac email value.
:subtitle:`Class definitions:`
"""
from django.contrib.auth.backends import ModelBackend
from apps.account.models import User
from django.contrib.auth.hashers import check_password
class EmailBackend(ModelBackend):
'''
Custom authentication backend which uses the hmac
email address rather than the username to authenticate.
'''
def authenticate(self, email=None, password=None, username=None, **kwargs):
"""
Processes an authentication attempt
args:
- email: not used
- password: the password to check
- username: the plain-text email address to search for
Returns:
            the user if found and the password is correct, otherwise None.
"""
try:
# match the user's HMAC email address to the
# entered 'username'
# The hmac_email field will automatically HMAC the username.lower()
# value
user = User.objects.get(hmac_email=username)
if check_password(password, user.password):
return user
else:
return None
except User.DoesNotExist:
return None
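
# Minimal configuration sketch (illustrative only; the dotted path assumes this
# app is importable as ``core``):
#
#     AUTHENTICATION_BACKENDS = (
#         'core.backends.EmailBackend',
#     )
#
# With that setting, django.contrib.auth.authenticate(username=email,
# password=password) reaches EmailBackend.authenticate above, which looks the
# user up via the HMAC of the lower-cased e-mail address.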
| gpl-3.0 | 3,425,033,322,685,303,300 | 30.97619 | 79 | 0.630678 | false |
rukai/GameToy | timer.py | 1 | 1829 | class Timer:
def __init__(self, interrupts):
self.interrupts = interrupts
self.div = 0 # FF04
self.sub_div = 0
self.tima = 0 # FF05
self.sub_tima = 0
self.tma = 0 # FF06
# FF07 bits 2-0
self.timer_run = False
self.clock_select = 0
self.clock = 0
    def update(self, cycles):
        # DIV ticks once every 256 clock cycles; keep the remainder so it does
        # not tick again on every subsequent call.
        self.sub_div += cycles
        if self.sub_div >= 256:
            self.sub_div -= 256
            self.div = (self.div + 1) % 0x100
        if self.timer_run:
            # TIMA only advances while the timer is enabled.
            self.sub_tima += cycles
            if self.sub_tima >= self.clock:
                self.sub_tima -= self.clock
                if self.tima == 0xFF:
                    self.tima = self.tma
                    self.interrupts.callTimer()
                else:
                    self.tima += 1
        else:
            self.sub_tima = 0  # Assuming timer progress is lost when disabled
# Divider Register
def readDIV(self):
return self.div
def writeDIV(self, value):
self.div = 0
# Timer Counter
def readTIMA(self):
return self.tima
def writeTIMA(self, value):
self.tima = value
# Timer Modulo
def readTMA(self):
return self.tma
def writeTMA(self, value):
self.tma = value
# Timer Controller
def readTAC(self):
        value = int(self.timer_run) << 2
        value |= self.clock_select
return value
def writeTAC(self, value):
self.timer_run = bool(value & 0b00000100)
self.clock_select = value & 0b00000011
if self.clock_select == 0:
self.clock = 1024
elif self.clock_select == 1:
self.clock = 16
elif self.clock_select == 2:
self.clock = 64
elif self.clock_select == 3:
self.clock = 256
else:
assert(False)
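

# Simple self-test sketch (not part of the emulator): drives the timer with a
# stub interrupt controller so the TIMA-overflow -> interrupt path can be
# observed. The stub class and the TMA/TAC values below are illustrative only.
if __name__ == '__main__':
    class StubInterrupts:
        def __init__(self):
            self.timer_interrupts = 0

        def callTimer(self):
            self.timer_interrupts += 1

    interrupts = StubInterrupts()
    timer = Timer(interrupts)
    timer.writeTMA(0xF0)   # value reloaded into TIMA after an overflow
    timer.writeTAC(0b101)  # bit 2: timer enabled, bits 1-0: clock select 1
    for _ in range(10000):
        timer.update(4)    # pretend every instruction takes 4 cycles
    print("DIV=%d TIMA=%d timer interrupts=%d"
          % (timer.readDIV(), timer.readTIMA(), interrupts.timer_interrupts))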
| gpl-3.0 | -91,548,163,793,290,540 | 24.402778 | 77 | 0.503007 | false |
pcmoritz/ray-1 | test/actor_test.py | 1 | 55166 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
import os
import sys
import time
import unittest
import ray
import ray.test.test_utils
class ActorAPI(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testKeywordArgs(self):
ray.init(num_workers=0, driver_mode=ray.SILENT_MODE)
@ray.remote
class Actor(object):
def __init__(self, arg0, arg1=1, arg2="a"):
self.arg0 = arg0
self.arg1 = arg1
self.arg2 = arg2
def get_values(self, arg0, arg1=2, arg2="b"):
return self.arg0 + arg0, self.arg1 + arg1, self.arg2 + arg2
actor = Actor.remote(0)
self.assertEqual(ray.get(actor.get_values.remote(1)), (1, 3, "ab"))
actor = Actor.remote(1, 2)
self.assertEqual(ray.get(actor.get_values.remote(2, 3)), (3, 5, "ab"))
actor = Actor.remote(1, 2, "c")
self.assertEqual(ray.get(actor.get_values.remote(2, 3, "d")),
(3, 5, "cd"))
actor = Actor.remote(1, arg2="c")
self.assertEqual(ray.get(actor.get_values.remote(0, arg2="d")),
(1, 3, "cd"))
self.assertEqual(ray.get(actor.get_values.remote(0, arg2="d", arg1=0)),
(1, 1, "cd"))
actor = Actor.remote(1, arg2="c", arg1=2)
self.assertEqual(ray.get(actor.get_values.remote(0, arg2="d")),
(1, 4, "cd"))
self.assertEqual(ray.get(actor.get_values.remote(0, arg2="d", arg1=0)),
(1, 2, "cd"))
# Make sure we get an exception if the constructor is called
# incorrectly.
with self.assertRaises(Exception):
actor = Actor.remote()
with self.assertRaises(Exception):
actor = Actor.remote(0, 1, 2, arg3=3)
# Make sure we get an exception if the method is called incorrectly.
actor = Actor.remote(1)
with self.assertRaises(Exception):
ray.get(actor.get_values.remote())
def testVariableNumberOfArgs(self):
ray.init(num_workers=0)
@ray.remote
class Actor(object):
def __init__(self, arg0, arg1=1, *args):
self.arg0 = arg0
self.arg1 = arg1
self.args = args
def get_values(self, arg0, arg1=2, *args):
return self.arg0 + arg0, self.arg1 + arg1, self.args, args
actor = Actor.remote(0)
self.assertEqual(ray.get(actor.get_values.remote(1)), (1, 3, (), ()))
actor = Actor.remote(1, 2)
self.assertEqual(ray.get(actor.get_values.remote(2, 3)),
(3, 5, (), ()))
actor = Actor.remote(1, 2, "c")
self.assertEqual(ray.get(actor.get_values.remote(2, 3, "d")),
(3, 5, ("c",), ("d",)))
actor = Actor.remote(1, 2, "a", "b", "c", "d")
self.assertEqual(ray.get(actor.get_values.remote(2, 3, 1, 2, 3, 4)),
(3, 5, ("a", "b", "c", "d"), (1, 2, 3, 4)))
@ray.remote
class Actor(object):
def __init__(self, *args):
self.args = args
def get_values(self, *args):
return self.args, args
a = Actor.remote()
self.assertEqual(ray.get(a.get_values.remote()), ((), ()))
a = Actor.remote(1)
self.assertEqual(ray.get(a.get_values.remote(2)), ((1,), (2,)))
a = Actor.remote(1, 2)
self.assertEqual(ray.get(a.get_values.remote(3, 4)), ((1, 2), (3, 4)))
def testNoArgs(self):
ray.init(num_workers=0)
@ray.remote
class Actor(object):
def __init__(self):
pass
def get_values(self):
pass
actor = Actor.remote()
self.assertEqual(ray.get(actor.get_values.remote()), None)
def testNoConstructor(self):
# If no __init__ method is provided, that should not be a problem.
ray.init(num_workers=0)
@ray.remote
class Actor(object):
def get_values(self):
pass
actor = Actor.remote()
self.assertEqual(ray.get(actor.get_values.remote()), None)
def testCustomClasses(self):
ray.init(num_workers=0)
class Foo(object):
def __init__(self, x):
self.x = x
@ray.remote
class Actor(object):
def __init__(self, f2):
self.f1 = Foo(1)
self.f2 = f2
def get_values1(self):
return self.f1, self.f2
def get_values2(self, f3):
return self.f1, self.f2, f3
actor = Actor.remote(Foo(2))
results1 = ray.get(actor.get_values1.remote())
self.assertEqual(results1[0].x, 1)
self.assertEqual(results1[1].x, 2)
results2 = ray.get(actor.get_values2.remote(Foo(3)))
self.assertEqual(results2[0].x, 1)
self.assertEqual(results2[1].x, 2)
self.assertEqual(results2[2].x, 3)
def testCachingActors(self):
# Test defining actors before ray.init() has been called.
@ray.remote
class Foo(object):
def __init__(self):
pass
def get_val(self):
return 3
# Check that we can't actually create actors before ray.init() has been
# called.
with self.assertRaises(Exception):
f = Foo.remote()
ray.init(num_workers=0)
f = Foo.remote()
self.assertEqual(ray.get(f.get_val.remote()), 3)
def testDecoratorArgs(self):
ray.init(num_workers=0, driver_mode=ray.SILENT_MODE)
# This is an invalid way of using the actor decorator.
with self.assertRaises(Exception):
@ray.remote()
class Actor(object):
def __init__(self):
pass
# This is an invalid way of using the actor decorator.
with self.assertRaises(Exception):
@ray.remote(invalid_kwarg=0) # noqa: F811
class Actor(object):
def __init__(self):
pass
# This is an invalid way of using the actor decorator.
with self.assertRaises(Exception):
@ray.remote(num_cpus=0, invalid_kwarg=0) # noqa: F811
class Actor(object):
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_cpus=1) # noqa: F811
class Actor(object):
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_gpus=1) # noqa: F811
class Actor(object):
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_cpus=1, num_gpus=1) # noqa: F811
class Actor(object):
def __init__(self):
pass
def testRandomIDGeneration(self):
ray.init(num_workers=0)
@ray.remote
class Foo(object):
def __init__(self):
pass
# Make sure that seeding numpy does not interfere with the generation
# of actor IDs.
np.random.seed(1234)
random.seed(1234)
f1 = Foo.remote()
np.random.seed(1234)
random.seed(1234)
f2 = Foo.remote()
self.assertNotEqual(f1._ray_actor_id.id(), f2._ray_actor_id.id())
def testActorClassName(self):
ray.init(num_workers=0)
@ray.remote
class Foo(object):
def __init__(self):
pass
Foo.remote()
r = ray.worker.global_worker.redis_client
actor_keys = r.keys("ActorClass*")
self.assertEqual(len(actor_keys), 1)
actor_class_info = r.hgetall(actor_keys[0])
self.assertEqual(actor_class_info[b"class_name"], b"Foo")
self.assertEqual(actor_class_info[b"module"], b"__main__")
class ActorMethods(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testDefineActor(self):
ray.init()
@ray.remote
class Test(object):
def __init__(self, x):
self.x = x
def f(self, y):
return self.x + y
t = Test.remote(2)
self.assertEqual(ray.get(t.f.remote(1)), 3)
# Make sure that calling an actor method directly raises an exception.
with self.assertRaises(Exception):
t.f(1)
def testActorDeletion(self):
ray.init(num_workers=0)
# Make sure that when an actor handles goes out of scope, the actor
# destructor is called.
@ray.remote
class Actor(object):
def getpid(self):
return os.getpid()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
a = None
ray.test.test_utils.wait_for_pid_to_exit(pid)
actors = [Actor.remote() for _ in range(10)]
pids = ray.get([a.getpid.remote() for a in actors])
a = None
actors = None
[ray.test.test_utils.wait_for_pid_to_exit(pid) for pid in pids]
@ray.remote
class Actor(object):
def method(self):
return 1
# Make sure that if we create an actor and call a method on it
# immediately, the actor doesn't get killed before the method is
# called.
self.assertEqual(ray.get(Actor.remote().method.remote()), 1)
def testActorDeletionWithGPUs(self):
ray.init(num_workers=0, num_gpus=1)
# When an actor that uses a GPU exits, make sure that the GPU resources
# are released.
@ray.remote(num_gpus=1)
class Actor(object):
def getpid(self):
return os.getpid()
for _ in range(5):
# If we can successfully create an actor, that means that enough
# GPU resources are available.
a = Actor.remote()
pid = ray.get(a.getpid.remote())
# Make sure that we can't create another actor.
with self.assertRaises(Exception):
Actor.remote()
# Let the actor go out of scope, and wait for it to exit.
a = None
ray.test.test_utils.wait_for_pid_to_exit(pid)
def testActorState(self):
ray.init()
@ray.remote
class Counter(object):
def __init__(self):
self.value = 0
def increase(self):
self.value += 1
def value(self):
return self.value
c1 = Counter.remote()
c1.increase.remote()
self.assertEqual(ray.get(c1.value.remote()), 1)
c2 = Counter.remote()
c2.increase.remote()
c2.increase.remote()
self.assertEqual(ray.get(c2.value.remote()), 2)
def testMultipleActors(self):
# Create a bunch of actors and call a bunch of methods on all of them.
ray.init(num_workers=0)
@ray.remote
class Counter(object):
def __init__(self, value):
self.value = value
def increase(self):
self.value += 1
return self.value
def reset(self):
self.value = 0
num_actors = 20
num_increases = 50
# Create multiple actors.
actors = [Counter.remote(i) for i in range(num_actors)]
results = []
# Call each actor's method a bunch of times.
for i in range(num_actors):
results += [actors[i].increase.remote()
for _ in range(num_increases)]
result_values = ray.get(results)
for i in range(num_actors):
self.assertEqual(
result_values[(num_increases * i):(num_increases * (i + 1))],
list(range(i + 1, num_increases + i + 1)))
# Reset the actor values.
[actor.reset.remote() for actor in actors]
# Interweave the method calls on the different actors.
results = []
for j in range(num_increases):
results += [actor.increase.remote() for actor in actors]
result_values = ray.get(results)
for j in range(num_increases):
self.assertEqual(
result_values[(num_actors * j):(num_actors * (j + 1))],
num_actors * [j + 1])
class ActorNesting(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testRemoteFunctionWithinActor(self):
        # Make sure we can use remote functions within actors.
ray.init(num_cpus=100)
# Create some values to close over.
val1 = 1
val2 = 2
@ray.remote
def f(x):
return val1 + x
@ray.remote
def g(x):
return ray.get(f.remote(x))
@ray.remote
class Actor(object):
def __init__(self, x):
self.x = x
self.y = val2
self.object_ids = [f.remote(i) for i in range(5)]
self.values2 = ray.get([f.remote(i) for i in range(5)])
def get_values(self):
return self.x, self.y, self.object_ids, self.values2
def f(self):
return [f.remote(i) for i in range(5)]
def g(self):
return ray.get([g.remote(i) for i in range(5)])
def h(self, object_ids):
return ray.get(object_ids)
actor = Actor.remote(1)
values = ray.get(actor.get_values.remote())
self.assertEqual(values[0], 1)
self.assertEqual(values[1], val2)
self.assertEqual(ray.get(values[2]), list(range(1, 6)))
self.assertEqual(values[3], list(range(1, 6)))
self.assertEqual(ray.get(ray.get(actor.f.remote())), list(range(1, 6)))
self.assertEqual(ray.get(actor.g.remote()), list(range(1, 6)))
self.assertEqual(
ray.get(actor.h.remote([f.remote(i) for i in range(5)])),
list(range(1, 6)))
def testDefineActorWithinActor(self):
        # Make sure we can define actors within other actors.
ray.init(num_cpus=10)
@ray.remote
class Actor1(object):
def __init__(self, x):
self.x = x
def new_actor(self, z):
@ray.remote
class Actor2(object):
def __init__(self, x):
self.x = x
def get_value(self):
return self.x
self.actor2 = Actor2.remote(z)
def get_values(self, z):
self.new_actor(z)
return self.x, ray.get(self.actor2.get_value.remote())
actor1 = Actor1.remote(3)
self.assertEqual(ray.get(actor1.get_values.remote(5)), (3, 5))
def testUseActorWithinActor(self):
# Make sure we can use actors within actors.
ray.init(num_cpus=10)
@ray.remote
class Actor1(object):
def __init__(self, x):
self.x = x
def get_val(self):
return self.x
@ray.remote
class Actor2(object):
def __init__(self, x, y):
self.x = x
self.actor1 = Actor1.remote(y)
def get_values(self, z):
return self.x, ray.get(self.actor1.get_val.remote())
actor2 = Actor2.remote(3, 4)
self.assertEqual(ray.get(actor2.get_values.remote(5)), (3, 4))
def testDefineActorWithinRemoteFunction(self):
        # Make sure we can define and use actors within remote functions.
ray.init(num_cpus=10)
@ray.remote
def f(x, n):
@ray.remote
class Actor1(object):
def __init__(self, x):
self.x = x
def get_value(self):
return self.x
actor = Actor1.remote(x)
return ray.get([actor.get_value.remote() for _ in range(n)])
self.assertEqual(ray.get(f.remote(3, 1)), [3])
self.assertEqual(ray.get([f.remote(i, 20) for i in range(10)]),
[20 * [i] for i in range(10)])
def testUseActorWithinRemoteFunction(self):
        # Make sure we can create and use actors within remote functions.
ray.init(num_cpus=10)
@ray.remote
class Actor1(object):
def __init__(self, x):
self.x = x
def get_values(self):
return self.x
@ray.remote
def f(x):
actor = Actor1.remote(x)
return ray.get(actor.get_values.remote())
self.assertEqual(ray.get(f.remote(3)), 3)
def testActorImportCounter(self):
# This is mostly a test of the export counters to make sure that when
# an actor is imported, all of the necessary remote functions have been
# imported.
ray.init(num_cpus=10)
# Export a bunch of remote functions.
num_remote_functions = 50
for i in range(num_remote_functions):
@ray.remote
def f():
return i
@ray.remote
def g():
@ray.remote
class Actor(object):
def __init__(self):
# This should use the last version of f.
self.x = ray.get(f.remote())
def get_val(self):
return self.x
actor = Actor.remote()
return ray.get(actor.get_val.remote())
self.assertEqual(ray.get(g.remote()), num_remote_functions - 1)
class ActorInheritance(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testInheritActorFromClass(self):
# Make sure we can define an actor by inheriting from a regular class.
# Note that actors cannot inherit from other actors.
ray.init()
class Foo(object):
def __init__(self, x):
self.x = x
def f(self):
return self.x
def g(self, y):
return self.x + y
@ray.remote
class Actor(Foo):
def __init__(self, x):
Foo.__init__(self, x)
def get_value(self):
return self.f()
actor = Actor.remote(1)
self.assertEqual(ray.get(actor.get_value.remote()), 1)
self.assertEqual(ray.get(actor.g.remote(5)), 6)
class ActorSchedulingProperties(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testRemoteFunctionsNotScheduledOnActors(self):
# Make sure that regular remote functions are not scheduled on actors.
ray.init(num_workers=0)
@ray.remote
class Actor(object):
def __init__(self):
pass
def get_id(self):
return ray.worker.global_worker.worker_id
a = Actor.remote()
actor_id = ray.get(a.get_id.remote())
@ray.remote
def f():
return ray.worker.global_worker.worker_id
resulting_ids = ray.get([f.remote() for _ in range(100)])
self.assertNotIn(actor_id, resulting_ids)
class ActorsOnMultipleNodes(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testActorsOnNodesWithNoCPUs(self):
ray.init(num_cpus=0)
@ray.remote
class Foo(object):
def __init__(self):
pass
with self.assertRaises(Exception):
Foo.remote()
def testActorLoadBalancing(self):
num_local_schedulers = 3
ray.worker._init(start_ray_local=True, num_workers=0,
num_local_schedulers=num_local_schedulers)
@ray.remote
class Actor1(object):
def __init__(self):
pass
def get_location(self):
return ray.worker.global_worker.plasma_client.store_socket_name
# Create a bunch of actors.
num_actors = 30
num_attempts = 20
minimum_count = 5
# Make sure that actors are spread between the local schedulers.
attempts = 0
while attempts < num_attempts:
actors = [Actor1.remote() for _ in range(num_actors)]
locations = ray.get([actor.get_location.remote()
for actor in actors])
names = set(locations)
counts = [locations.count(name) for name in names]
print("Counts are {}.".format(counts))
if (len(names) == num_local_schedulers and
all([count >= minimum_count for count in counts])):
break
attempts += 1
self.assertLess(attempts, num_attempts)
# Make sure we can get the results of a bunch of tasks.
results = []
for _ in range(1000):
index = np.random.randint(num_actors)
results.append(actors[index].get_location.remote())
ray.get(results)
class ActorsWithGPUs(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testActorGPUs(self):
num_local_schedulers = 3
num_gpus_per_scheduler = 4
ray.worker._init(
start_ray_local=True, num_workers=0,
num_local_schedulers=num_local_schedulers,
num_gpus=(num_local_schedulers * [num_gpus_per_scheduler]))
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
def get_location_and_ids(self):
assert ray.get_gpu_ids() == self.gpu_ids
return (
ray.worker.global_worker.plasma_client.store_socket_name,
tuple(self.gpu_ids))
# Create one actor per GPU.
actors = [Actor1.remote() for _
in range(num_local_schedulers * num_gpus_per_scheduler)]
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get([actor.get_location_and_ids.remote()
for actor in actors])
node_names = set([location for location, gpu_id in locations_and_ids])
self.assertEqual(len(node_names), num_local_schedulers)
location_actor_combinations = []
for node_name in node_names:
for gpu_id in range(num_gpus_per_scheduler):
location_actor_combinations.append((node_name, (gpu_id,)))
self.assertEqual(set(locations_and_ids),
set(location_actor_combinations))
# Creating a new actor should fail because all of the GPUs are being
# used.
with self.assertRaises(Exception):
Actor1.remote()
def testActorMultipleGPUs(self):
num_local_schedulers = 3
num_gpus_per_scheduler = 5
ray.worker._init(
start_ray_local=True, num_workers=0,
num_local_schedulers=num_local_schedulers,
num_gpus=(num_local_schedulers * [num_gpus_per_scheduler]))
@ray.remote(num_gpus=2)
class Actor1(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
def get_location_and_ids(self):
return (
ray.worker.global_worker.plasma_client.store_socket_name,
tuple(self.gpu_ids))
# Create some actors.
actors1 = [Actor1.remote() for _ in range(num_local_schedulers * 2)]
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get([actor.get_location_and_ids.remote()
for actor in actors1])
node_names = set([location for location, gpu_id in locations_and_ids])
self.assertEqual(len(node_names), num_local_schedulers)
# Keep track of which GPU IDs are being used for each location.
gpus_in_use = {node_name: [] for node_name in node_names}
for location, gpu_ids in locations_and_ids:
gpus_in_use[location].extend(gpu_ids)
for node_name in node_names:
self.assertEqual(len(set(gpus_in_use[node_name])), 4)
# Creating a new actor should fail because all of the GPUs are being
# used.
with self.assertRaises(Exception):
Actor1.remote()
# We should be able to create more actors that use only a single GPU.
@ray.remote(num_gpus=1)
class Actor2(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
def get_location_and_ids(self):
return (
ray.worker.global_worker.plasma_client.store_socket_name,
tuple(self.gpu_ids))
# Create some actors.
actors2 = [Actor2.remote() for _ in range(num_local_schedulers)]
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get([actor.get_location_and_ids.remote()
for actor in actors2])
self.assertEqual(node_names,
set([location for location, gpu_id
in locations_and_ids]))
for location, gpu_ids in locations_and_ids:
gpus_in_use[location].extend(gpu_ids)
for node_name in node_names:
self.assertEqual(len(gpus_in_use[node_name]), 5)
self.assertEqual(set(gpus_in_use[node_name]), set(range(5)))
# Creating a new actor should fail because all of the GPUs are being
# used.
with self.assertRaises(Exception):
Actor2.remote()
def testActorDifferentNumbersOfGPUs(self):
# Test that we can create actors on two nodes that have different
# numbers of GPUs.
ray.worker._init(start_ray_local=True, num_workers=0,
num_local_schedulers=3, num_gpus=[0, 5, 10])
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
def get_location_and_ids(self):
return (
ray.worker.global_worker.plasma_client.store_socket_name,
tuple(self.gpu_ids))
# Create some actors.
actors = [Actor1.remote() for _ in range(0 + 5 + 10)]
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get([actor.get_location_and_ids.remote()
for actor in actors])
node_names = set([location for location, gpu_id in locations_and_ids])
self.assertEqual(len(node_names), 2)
for node_name in node_names:
node_gpu_ids = [gpu_id for location, gpu_id in locations_and_ids
if location == node_name]
self.assertIn(len(node_gpu_ids), [5, 10])
self.assertEqual(set(node_gpu_ids),
set([(i,) for i in range(len(node_gpu_ids))]))
# Creating a new actor should fail because all of the GPUs are being
# used.
with self.assertRaises(Exception):
Actor1.remote()
def testActorMultipleGPUsFromMultipleTasks(self):
num_local_schedulers = 10
num_gpus_per_scheduler = 10
ray.worker._init(
start_ray_local=True, num_workers=0,
num_local_schedulers=num_local_schedulers, redirect_output=True,
num_gpus=(num_local_schedulers * [num_gpus_per_scheduler]))
@ray.remote
def create_actors(n):
@ray.remote(num_gpus=1)
class Actor(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
def get_location_and_ids(self):
return ((ray.worker.global_worker.plasma_client
.store_socket_name),
tuple(self.gpu_ids))
# Create n actors.
for _ in range(n):
Actor.remote()
ray.get([create_actors.remote(num_gpus_per_scheduler)
for _ in range(num_local_schedulers)])
@ray.remote(num_gpus=1)
class Actor(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
def get_location_and_ids(self):
return (
ray.worker.global_worker.plasma_client.store_socket_name,
tuple(self.gpu_ids))
# All the GPUs should be used up now.
with self.assertRaises(Exception):
Actor.remote()
@unittest.skipIf(sys.version_info < (3, 0), "This test requires Python 3.")
def testActorsAndTasksWithGPUs(self):
num_local_schedulers = 3
num_gpus_per_scheduler = 6
ray.worker._init(
start_ray_local=True, num_workers=0,
num_local_schedulers=num_local_schedulers,
num_cpus=num_gpus_per_scheduler,
num_gpus=(num_local_schedulers * [num_gpus_per_scheduler]))
def check_intervals_non_overlapping(list_of_intervals):
for i in range(len(list_of_intervals)):
for j in range(i):
first_interval = list_of_intervals[i]
second_interval = list_of_intervals[j]
# Check that list_of_intervals[i] and list_of_intervals[j]
# don't overlap.
self.assertLess(first_interval[0], first_interval[1])
self.assertLess(second_interval[0], second_interval[1])
intervals_nonoverlapping = (
first_interval[1] <= second_interval[0] or
second_interval[1] <= first_interval[0])
assert intervals_nonoverlapping, (
"Intervals {} and {} are overlapping."
.format(first_interval, second_interval))
@ray.remote(num_gpus=1)
def f1():
t1 = time.monotonic()
time.sleep(0.1)
t2 = time.monotonic()
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in range(num_gpus_per_scheduler)
return (ray.worker.global_worker.plasma_client.store_socket_name,
tuple(gpu_ids), [t1, t2])
@ray.remote(num_gpus=2)
def f2():
t1 = time.monotonic()
time.sleep(0.1)
t2 = time.monotonic()
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in range(num_gpus_per_scheduler)
assert gpu_ids[1] in range(num_gpus_per_scheduler)
return (ray.worker.global_worker.plasma_client.store_socket_name,
tuple(gpu_ids), [t1, t2])
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
assert len(self.gpu_ids) == 1
assert self.gpu_ids[0] in range(num_gpus_per_scheduler)
def get_location_and_ids(self):
assert ray.get_gpu_ids() == self.gpu_ids
return (
ray.worker.global_worker.plasma_client.store_socket_name,
tuple(self.gpu_ids))
def locations_to_intervals_for_many_tasks():
# Launch a bunch of GPU tasks.
locations_ids_and_intervals = ray.get(
[f1.remote() for _
in range(5 * num_local_schedulers * num_gpus_per_scheduler)] +
[f2.remote() for _
in range(5 * num_local_schedulers * num_gpus_per_scheduler)] +
[f1.remote() for _
in range(5 * num_local_schedulers * num_gpus_per_scheduler)])
locations_to_intervals = collections.defaultdict(lambda: [])
for location, gpu_ids, interval in locations_ids_and_intervals:
for gpu_id in gpu_ids:
locations_to_intervals[(location, gpu_id)].append(interval)
return locations_to_intervals
# Run a bunch of GPU tasks.
locations_to_intervals = locations_to_intervals_for_many_tasks()
# Make sure that all GPUs were used.
self.assertEqual(len(locations_to_intervals),
num_local_schedulers * num_gpus_per_scheduler)
# For each GPU, verify that the set of tasks that used this specific
# GPU did not overlap in time.
for locations in locations_to_intervals:
check_intervals_non_overlapping(locations_to_intervals[locations])
# Create an actor that uses a GPU.
a = Actor1.remote()
actor_location = ray.get(a.get_location_and_ids.remote())
actor_location = (actor_location[0], actor_location[1][0])
# This check makes sure that actor_location is formatted the same way
# that the keys of locations_to_intervals are formatted.
self.assertIn(actor_location, locations_to_intervals)
# Run a bunch of GPU tasks.
locations_to_intervals = locations_to_intervals_for_many_tasks()
# Make sure that all but one of the GPUs were used.
self.assertEqual(len(locations_to_intervals),
num_local_schedulers * num_gpus_per_scheduler - 1)
# For each GPU, verify that the set of tasks that used this specific
# GPU did not overlap in time.
for locations in locations_to_intervals:
check_intervals_non_overlapping(locations_to_intervals[locations])
# Make sure that the actor's GPU was not used.
self.assertNotIn(actor_location, locations_to_intervals)
# Create several more actors that use GPUs.
actors = [Actor1.remote() for _ in range(3)]
actor_locations = ray.get([actor.get_location_and_ids.remote()
for actor in actors])
# Run a bunch of GPU tasks.
locations_to_intervals = locations_to_intervals_for_many_tasks()
        # Make sure that all but four of the GPUs were used.
self.assertEqual(len(locations_to_intervals),
num_local_schedulers * num_gpus_per_scheduler - 1 - 3)
# For each GPU, verify that the set of tasks that used this specific
# GPU did not overlap in time.
for locations in locations_to_intervals:
check_intervals_non_overlapping(locations_to_intervals[locations])
        # Make sure that the actors' GPUs were not used.
self.assertNotIn(actor_location, locations_to_intervals)
for location in actor_locations:
self.assertNotIn(location, locations_to_intervals)
# Create more actors to fill up all the GPUs.
more_actors = [Actor1.remote() for _ in
range(num_local_schedulers *
num_gpus_per_scheduler - 1 - 3)]
# Wait for the actors to finish being created.
ray.get([actor.get_location_and_ids.remote() for actor in more_actors])
# Now if we run some GPU tasks, they should not be scheduled.
results = [f1.remote() for _ in range(30)]
ready_ids, remaining_ids = ray.wait(results, timeout=1000)
self.assertEqual(len(ready_ids), 0)
def testActorsAndTasksWithGPUsVersionTwo(self):
# Create tasks and actors that both use GPUs and make sure that they
# are given different GPUs
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_gpus=1)
def f():
time.sleep(4)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
@ray.remote(num_gpus=1)
class Actor(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
assert len(self.gpu_ids) == 1
def get_gpu_id(self):
assert ray.get_gpu_ids() == self.gpu_ids
return self.gpu_ids[0]
results = []
actors = []
for _ in range(5):
results.append(f.remote())
a = Actor.remote()
results.append(a.get_gpu_id.remote())
# Prevent the actor handle from going out of scope so that its GPU
# resources don't get released.
actors.append(a)
gpu_ids = ray.get(results)
self.assertEqual(set(gpu_ids), set(range(10)))
@unittest.skipIf(sys.version_info < (3, 0), "This test requires Python 3.")
def testActorsAndTaskResourceBookkeeping(self):
ray.init(num_cpus=1)
@ray.remote
class Foo(object):
def __init__(self):
start = time.monotonic()
time.sleep(0.1)
end = time.monotonic()
self.interval = (start, end)
def get_interval(self):
return self.interval
def sleep(self):
start = time.monotonic()
time.sleep(0.01)
end = time.monotonic()
return start, end
# First make sure that we do not have more actor methods running at a
# time than we have CPUs.
actors = [Foo.remote() for _ in range(4)]
interval_ids = []
interval_ids += [actor.get_interval.remote() for actor in actors]
for _ in range(4):
interval_ids += [actor.sleep.remote() for actor in actors]
# Make sure that the intervals don't overlap.
intervals = ray.get(interval_ids)
intervals.sort(key=lambda x: x[0])
for interval1, interval2 in zip(intervals[:-1], intervals[1:]):
self.assertLess(interval1[0], interval1[1])
self.assertLess(interval1[1], interval2[0])
self.assertLess(interval2[0], interval2[1])
def testBlockingActorTask(self):
ray.init(num_cpus=1, num_gpus=1)
@ray.remote(num_gpus=1)
def f():
return 1
@ray.remote
class Foo(object):
def __init__(self):
pass
def blocking_method(self):
ray.get(f.remote())
# Make sure we can execute a blocking actor method even if there is
# only one CPU.
actor = Foo.remote()
ray.get(actor.blocking_method.remote())
@ray.remote(num_gpus=1)
class GPUFoo(object):
def __init__(self):
pass
def blocking_method(self):
ray.get(f.remote())
# Make sure that we GPU resources are not released when actors block.
actor = GPUFoo.remote()
x_id = actor.blocking_method.remote()
ready_ids, remaining_ids = ray.wait([x_id], timeout=500)
self.assertEqual(ready_ids, [])
self.assertEqual(remaining_ids, [x_id])
class ActorReconstruction(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testLocalSchedulerDying(self):
ray.worker._init(start_ray_local=True, num_local_schedulers=2,
num_workers=0, redirect_output=True)
@ray.remote
class Counter(object):
def __init__(self):
self.x = 0
def local_plasma(self):
return ray.worker.global_worker.plasma_client.store_socket_name
def inc(self):
self.x += 1
return self.x
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# Create an actor that is not on the local scheduler.
actor = Counter.remote()
while ray.get(actor.local_plasma.remote()) == local_plasma:
actor = Counter.remote()
ids = [actor.inc.remote() for _ in range(100)]
# Wait for the last task to finish running.
ray.get(ids[-1])
# Kill the second plasma store to get rid of the cached objects and
# trigger the corresponding local scheduler to exit.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][1]
process.kill()
process.wait()
# Get all of the results
results = ray.get(ids)
self.assertEqual(results, list(range(1, 1 + len(results))))
def testManyLocalSchedulersDying(self):
# This test can be made more stressful by increasing the numbers below.
# The total number of actors created will be
# num_actors_at_a_time * num_local_schedulers.
num_local_schedulers = 5
num_actors_at_a_time = 3
num_function_calls_at_a_time = 10
ray.worker._init(start_ray_local=True,
num_local_schedulers=num_local_schedulers,
num_workers=0, redirect_output=True)
@ray.remote
class SlowCounter(object):
def __init__(self):
self.x = 0
def inc(self, duration):
time.sleep(duration)
self.x += 1
return self.x
# Create some initial actors.
actors = [SlowCounter.remote() for _ in range(num_actors_at_a_time)]
# Wait for the actors to start up.
time.sleep(1)
# This is a mapping from actor handles to object IDs returned by
# methods on that actor.
result_ids = collections.defaultdict(lambda: [])
# In a loop we are going to create some actors, run some methods, kill
# a local scheduler, and run some more methods.
for i in range(num_local_schedulers - 1):
# Create some actors.
actors.extend([SlowCounter.remote()
for _ in range(num_actors_at_a_time)])
# Run some methods.
for j in range(len(actors)):
actor = actors[j]
for _ in range(num_function_calls_at_a_time):
result_ids[actor].append(
actor.inc.remote(j ** 2 * 0.000001))
# Kill a plasma store to get rid of the cached objects and trigger
# exit of the corresponding local scheduler. Don't kill the first
# local scheduler since that is the one that the driver is
# connected to.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][i + 1]
process.kill()
process.wait()
# Run some more methods.
for j in range(len(actors)):
actor = actors[j]
for _ in range(num_function_calls_at_a_time):
result_ids[actor].append(
actor.inc.remote(j ** 2 * 0.000001))
# Get the results and check that they have the correct values.
for _, result_id_list in result_ids.items():
self.assertEqual(ray.get(result_id_list),
list(range(1, len(result_id_list) + 1)))
def setup_test_checkpointing(self, save_exception=False,
resume_exception=False):
ray.worker._init(start_ray_local=True, num_local_schedulers=2,
num_workers=0, redirect_output=True)
@ray.remote(checkpoint_interval=5)
class Counter(object):
_resume_exception = resume_exception
def __init__(self, save_exception):
self.x = 0
# The number of times that inc has been called. We won't bother
# restoring this in the checkpoint
self.num_inc_calls = 0
self.save_exception = save_exception
def local_plasma(self):
return ray.worker.global_worker.plasma_client.store_socket_name
def inc(self, *xs):
self.num_inc_calls += 1
self.x += 1
return self.x
def get_num_inc_calls(self):
return self.num_inc_calls
def test_restore(self):
# This method will only work if __ray_restore__ has been run.
return self.y
def __ray_save__(self):
if self.save_exception:
raise Exception("Exception raised in checkpoint save")
return self.x, -1
def __ray_restore__(self, checkpoint):
if self._resume_exception:
raise Exception("Exception raised in checkpoint resume")
self.x, val = checkpoint
self.num_inc_calls = 0
# Test that __ray_save__ has been run.
assert val == -1
self.y = self.x
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# Create an actor that is not on the local scheduler.
actor = Counter.remote(save_exception)
while ray.get(actor.local_plasma.remote()) == local_plasma:
actor = Counter.remote(save_exception)
args = [ray.put(0) for _ in range(100)]
ids = [actor.inc.remote(*args[i:]) for i in range(100)]
return actor, ids
def testCheckpointing(self):
actor, ids = self.setup_test_checkpointing()
# Wait for the last task to finish running.
ray.get(ids[-1])
# Kill the corresponding plasma store to get rid of the cached objects.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][1]
process.kill()
process.wait()
# Get all of the results. TODO(rkn): This currently doesn't work.
# results = ray.get(ids)
# self.assertEqual(results, list(range(1, 1 + len(results))))
self.assertEqual(ray.get(actor.test_restore.remote()), 99)
# The inc method should only have executed once on the new actor (for
# the one method call since the most recent checkpoint).
self.assertEqual(ray.get(actor.get_num_inc_calls.remote()), 1)
def testLostCheckpoint(self):
actor, ids = self.setup_test_checkpointing()
# Wait for the first fraction of tasks to finish running.
ray.get(ids[len(ids) // 10])
actor_key = b"Actor:" + actor._ray_actor_id.id()
for index in ray.actor.get_checkpoint_indices(
ray.worker.global_worker, actor._ray_actor_id.id()):
ray.worker.global_worker.redis_client.hdel(
actor_key, "checkpoint_{}".format(index))
# Kill the corresponding plasma store to get rid of the cached objects.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][1]
process.kill()
process.wait()
self.assertEqual(ray.get(actor.inc.remote()), 101)
# Each inc method has been reexecuted once on the new actor.
self.assertEqual(ray.get(actor.get_num_inc_calls.remote()), 101)
# Get all of the results that were previously lost. Because the
# checkpoints were lost, all methods should be reconstructed.
results = ray.get(ids)
self.assertEqual(results, list(range(1, 1 + len(results))))
def testCheckpointException(self):
actor, ids = self.setup_test_checkpointing(save_exception=True)
# Wait for the last task to finish running.
ray.get(ids[-1])
# Kill the corresponding plasma store to get rid of the cached objects.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][1]
process.kill()
process.wait()
self.assertEqual(ray.get(actor.inc.remote()), 101)
# Each inc method has been reexecuted once on the new actor, since all
# checkpoint saves failed.
self.assertEqual(ray.get(actor.get_num_inc_calls.remote()), 101)
# Get all of the results that were previously lost. Because the
# checkpoints were lost, all methods should be reconstructed.
results = ray.get(ids)
self.assertEqual(results, list(range(1, 1 + len(results))))
errors = ray.error_info()
# We submitted 101 tasks with a checkpoint interval of 5.
num_checkpoints = 101 // 5
# Each checkpoint task throws an exception when saving during initial
# execution, and then again during re-execution.
self.assertEqual(len([error for error in errors if error[b"type"] ==
b"task"]), num_checkpoints * 2)
def testCheckpointResumeException(self):
actor, ids = self.setup_test_checkpointing(resume_exception=True)
# Wait for the last task to finish running.
ray.get(ids[-1])
# Kill the corresponding plasma store to get rid of the cached objects.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][1]
process.kill()
process.wait()
self.assertEqual(ray.get(actor.inc.remote()), 101)
# Each inc method has been reexecuted once on the new actor, since all
# checkpoint resumes failed.
self.assertEqual(ray.get(actor.get_num_inc_calls.remote()), 101)
# Get all of the results that were previously lost. Because the
# checkpoints were lost, all methods should be reconstructed.
results = ray.get(ids)
self.assertEqual(results, list(range(1, 1 + len(results))))
errors = ray.error_info()
# The most recently executed checkpoint task should throw an exception
# when trying to resume. All other checkpoint tasks should reconstruct
# the previous task but throw no errors.
self.assertTrue(len([error for error in errors if error[b"type"] ==
b"task"]) > 0)
class DistributedActorHandles(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def make_counter_actor(self, checkpoint_interval=-1):
ray.init()
@ray.remote(checkpoint_interval=checkpoint_interval)
class Counter(object):
def __init__(self):
self.value = 0
def increase(self):
self.value += 1
return self.value
return Counter.remote()
def testFork(self):
counter = self.make_counter_actor()
num_calls = 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
@ray.remote
def fork(counter):
return ray.get(counter.increase.remote())
# Fork once.
num_calls += 1
self.assertEqual(ray.get(fork.remote(counter)), num_calls)
num_calls += 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
# Fork num_iters times.
num_iters = 100
num_calls += num_iters
ray.get([fork.remote(counter) for _ in range(num_iters)])
num_calls += 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
def testForkConsistency(self):
counter = self.make_counter_actor()
@ray.remote
def fork_many_incs(counter, num_incs):
x = None
for _ in range(num_incs):
x = counter.increase.remote()
# Only call ray.get() on the last task submitted.
return ray.get(x)
num_incs = 100
# Fork once.
num_calls = num_incs
self.assertEqual(ray.get(fork_many_incs.remote(counter, num_incs)),
num_calls)
num_calls += 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
# Fork num_iters times.
num_iters = 10
num_calls += num_iters * num_incs
ray.get([fork_many_incs.remote(counter, num_incs) for _ in
range(num_iters)])
# Check that we ensured per-handle serialization.
num_calls += 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
@unittest.skip("Garbage collection for distributed actor handles not "
"implemented.")
def testGarbageCollection(self):
counter = self.make_counter_actor()
@ray.remote
def fork(counter):
for _ in range(10):
x = counter.increase.remote()
time.sleep(0.1)
return ray.get(x)
x = fork.remote(counter)
ray.get(counter.increase.remote())
del counter
print(ray.get(x))
def testCheckpoint(self):
counter = self.make_counter_actor(checkpoint_interval=1)
num_calls = 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
@ray.remote
def fork(counter):
return ray.get(counter.increase.remote())
# Passing an actor handle with checkpointing enabled shouldn't be
# allowed yet.
with self.assertRaises(Exception):
fork.remote(counter)
num_calls += 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
@unittest.skip("Fork/join consistency not yet implemented.")
def testLocalSchedulerDying(self):
ray.worker._init(start_ray_local=True, num_local_schedulers=2,
num_workers=0, redirect_output=False)
@ray.remote
class Counter(object):
def __init__(self):
self.x = 0
def local_plasma(self):
return ray.worker.global_worker.plasma_client.store_socket_name
def inc(self):
self.x += 1
return self.x
@ray.remote
def foo(counter):
for _ in range(100):
x = counter.inc.remote()
return ray.get(x)
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# Create an actor that is not on the local scheduler.
actor = Counter.remote()
while ray.get(actor.local_plasma.remote()) == local_plasma:
actor = Counter.remote()
# Concurrently, submit many tasks to the actor through the original
# handle and the forked handle.
x = foo.remote(actor)
ids = [actor.inc.remote() for _ in range(100)]
# Wait for the last task to finish running.
ray.get(ids[-1])
y = ray.get(x)
# Kill the second plasma store to get rid of the cached objects and
# trigger the corresponding local scheduler to exit.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][1]
process.kill()
process.wait()
# Submit a new task. Its results should reflect the tasks submitted
# through both the original handle and the forked handle.
self.assertEqual(ray.get(actor.inc.remote()), y + 1)
if __name__ == "__main__":
unittest.main(verbosity=2)
| apache-2.0 | 7,384,625,375,820,351,000 | 34.182398 | 79 | 0.554418 | false |
icodemachine/Stem | stem/descriptor/tordnsel.py | 1 | 3950 | # Copyright 2013-2014, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for `TorDNSEL <https://www.torproject.org/projects/tordnsel.html.en>`_
exit list files.
::
TorDNSEL - Exit list provided by TorDNSEL
"""
import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools
from stem.descriptor import (
Descriptor,
_read_until_keywords,
_get_descriptor_components,
)
def _parse_file(tordnsel_file, validate = False, **kwargs):
"""
Iterates over a tordnsel file.
:returns: iterator for :class:`~stem.descriptor.tordnsel.TorDNSEL`
instances in the file
:raises:
* **ValueError** if the contents is malformed and validate is **True**
* **IOError** if the file can't be read
"""
# skip content prior to the first ExitNode
_read_until_keywords('ExitNode', tordnsel_file, skip = True)
while True:
contents = _read_until_keywords('ExitAddress', tordnsel_file)
contents += _read_until_keywords('ExitNode', tordnsel_file)
if contents:
yield TorDNSEL(bytes.join(b'', contents), validate, **kwargs)
else:
break # done parsing file
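def _example_print_exit_addresses(path):
  """
  Illustrative usage sketch, not part of the original module: prints the relays
  and exit addresses found in a locally saved exit list file at the given
  (hypothetical) path.
  """
  with open(path, 'rb') as exit_list_file:
    for desc in _parse_file(exit_list_file, validate = True):
      for address, date in desc.exit_addresses:
        print('%s exited from %s at %s' % (desc.fingerprint, address, date))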
class TorDNSEL(Descriptor):
"""
TorDNSEL descriptor (`exitlist specification
<https://www.torproject.org/tordnsel/exitlist-spec.txt>`_)
:var str fingerprint: **\*** authority's fingerprint
:var datetime published: **\*** time in UTC when this descriptor was made
:var datetime last_status: **\*** time in UTC when the relay was seen in a v2 network status
:var list exit_addresses: **\*** list of (str address, datetime date) tuples consisting of the found IPv4 exit address and the time
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
def __init__(self, raw_contents, validate):
super(TorDNSEL, self).__init__(raw_contents)
raw_contents = stem.util.str_tools._to_unicode(raw_contents)
entries = _get_descriptor_components(raw_contents, validate)
self.fingerprint = None
self.published = None
self.last_status = None
self.exit_addresses = []
self._parse(entries, validate)
def _parse(self, entries, validate):
for keyword, values in list(entries.items()):
value, block_type, block_content = values[0]
if validate and block_content:
raise ValueError('Unexpected block content: %s' % block_content)
if keyword == 'ExitNode':
if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % value)
self.fingerprint = value
elif keyword == 'Published':
try:
self.published = stem.util.str_tools._parse_timestamp(value)
except ValueError:
if validate:
raise ValueError("Published time wasn't parsable: %s" % value)
elif keyword == 'LastStatus':
try:
self.last_status = stem.util.str_tools._parse_timestamp(value)
except ValueError:
if validate:
raise ValueError("LastStatus time wasn't parsable: %s" % value)
elif keyword == 'ExitAddress':
for value, block_type, block_content in values:
address, date = value.split(' ', 1)
if validate:
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("ExitAddress isn't a valid IPv4 address: %s" % address)
elif block_content:
raise ValueError('Unexpected block content: %s' % block_content)
try:
date = stem.util.str_tools._parse_timestamp(date)
self.exit_addresses.append((address, date))
except ValueError:
if validate:
raise ValueError("ExitAddress found time wasn't parsable: %s" % value)
elif validate:
raise ValueError('Unrecognized keyword: %s' % keyword)
| lgpl-3.0 | -5,719,108,573,632,208,000 | 32.760684 | 133 | 0.661772 | false |
our-city-app/oca-backend | src/rogerthat/pages/service_page.py | 1 | 7102 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import json
import logging
import webapp2
from google.appengine.ext import webapp
from mcfw.properties import azzert
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.friend_helper import FriendHelper
from rogerthat.bizz.service.i18n import excel_export, excel_import
from rogerthat.dal.friend import get_friends_map
from rogerthat.dal.profile import get_service_profile
from rogerthat.dal.service import get_friend_serviceidentity_connection
from rogerthat.models import ProfileHashIndex
from rogerthat.rpc import users
from rogerthat.rpc.service import BusinessException
from rogerthat.templates import render
from rogerthat.to.friends import FriendTO, FRIEND_TYPE_SERVICE
from rogerthat.translations import DEFAULT_LANGUAGE
from rogerthat.utils import safe_file_name, filename_friendly_time
from rogerthat.utils.channel import broadcast_via_iframe_result
from rogerthat.utils.crypto import md5_hex
from rogerthat.utils.service import add_slash_default
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class ServicePageHandler(webapp.RequestHandler):
def get(self):
service_email = self.request.GET.get('service')
azzert(service_email)
user = users.get_current_user()
service_identity_user = add_slash_default(users.User(service_email))
azzert(get_friend_serviceidentity_connection(user, service_identity_user),
"%s tried to get service page of service %s, but is not connected" % (user.email(), service_identity_user.email()))
params = {'service_email': service_email, 'container_id': 'servicePageContainer_%s' % md5_hex(service_email)}
self.response.out.write(render('service_page', [DEFAULT_LANGUAGE], params, 'web'))
class ServiceMenuItemBrandingHandler(webapp.RequestHandler):
def get(self):
service_email = self.request.GET.get('service')
azzert(service_email)
user = users.get_current_user()
service_identity_user = add_slash_default(users.User(service_email))
azzert(get_friend_serviceidentity_connection(user, service_identity_user),
"%s tried to get a menu item page of service %s, but is not connected" % (user.email(), service_identity_user.email()))
branding = self.request.GET.get('branding')
azzert(branding)
params = {'container_id': 'smi_branding_container_%s' %
branding, 'branding': branding, 'service_email': service_email}
self.response.out.write(render('smi_branding', [DEFAULT_LANGUAGE], params, 'web'))
class ServiceAboutPageHandler(webapp.RequestHandler):
def get(self):
service_email = self.request.GET.get('service')
azzert(service_email)
user = users.get_current_user()
service_identity_user = add_slash_default(users.User(service_email))
azzert(get_friend_serviceidentity_connection(user, service_identity_user),
"%s tried to get About page of service %s, but is not connected" % (user.email(), service_identity_user.email()))
helper = FriendHelper.from_data_store(service_identity_user, FRIEND_TYPE_SERVICE)
service = FriendTO.fromDBFriendMap(helper, get_friends_map(user), service_identity_user,
includeServiceDetails=True, targetUser=user)
azzert(service.type == FriendTO.TYPE_SERVICE)
params = {'service': service,
'service_name': service.name or service.email,
'container_id': 'serviceAboutPageContainer_%s' % md5_hex(service_email)}
self.response.out.write(render('service_about', [DEFAULT_LANGUAGE], params, 'web'))
class EditableTranslationSetExcelDownloadHandler(webapp2.RequestHandler):
def get(self):
browser_timezone_str = self.request.get('tz_offset', '0')
try:
browser_timezone = int(browser_timezone_str)
except ValueError:
logging.warning("Invalid browser timezone offset: [%s]" % browser_timezone_str)
browser_timezone = 0
if abs(browser_timezone) > 24 * 3600:
logging.warning("Invalid browser timezone offset: [%s]" % browser_timezone_str)
browser_timezone = 0
service_user = users.get_current_user()
book, latest_export_timestamp = excel_export(service_user, browser_timezone)
# Return
output = StringIO()
book.save(output)
output.seek(0)
filename = "Rogerthat_%s_%s.xls" % (filename_friendly_time(latest_export_timestamp), service_user.email())
self.response.headers['Content-Type'] = 'application/vnd.ms-excel'
self.response.headers['Content-Disposition'] = 'attachment; filename=%s' % safe_file_name(filename)
self.response.out.write(output.getvalue())
class PostEditableTranslationSetExcelHandler(webapp2.RequestHandler):
def post(self):
import xlrd
try:
service_user = users.get_current_user()
file_ = self.request.POST.get('file').file
book = xlrd.open_workbook(file_contents=file_.read())
excel_import(service_user, book)
except BusinessException as be:
self.response.out.write(broadcast_via_iframe_result(
u'rogerthat.service.translations.post_result', error=be.message))
return
except:
self.response.out.write(broadcast_via_iframe_result(
u'rogerthat.service.translations.post_result', error=u"Unknown error has occurred."))
logging.exception("Failure receiving translations!")
return
self.response.out.write(broadcast_via_iframe_result(u'rogerthat.service.translations.post_result'))
class GetServiceAppHandler(webapp2.RequestHandler):
def get_default_app_id(self, user_hash):
index = ProfileHashIndex.get(ProfileHashIndex.create_key(user_hash))
if not index:
logging.debug('No profile found with user_hash %s', user_hash)
return None
profile = get_service_profile(index.user)
if not profile:
            logging.debug('Profile not found: %s', index.user)
            return None
community = get_community(profile.community_id)
return community.default_app
def get(self):
user_hash = self.request.GET['user']
self.response.out.write(json.dumps({'app_id': self.get_default_app_id(user_hash)}))
| apache-2.0 | 8,330,832,025,943,103,000 | 41.023669 | 134 | 0.689383 | false |
edx/ecommerce | ecommerce/extensions/offer/migrations/0046_offerassignmentemailsentrecord.py | 1 | 1471 | # Generated by Django 2.2.16 on 2020-11-02 07:04
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('offer', '0045_codeassignmentnudgeemails'),
]
operations = [
migrations.CreateModel(
name='OfferAssignmentEmailSentRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('enterprise_customer', models.UUIDField(help_text='UUID for an EnterpriseCustomer from the Enterprise Service.')),
('email_type', models.CharField(choices=[('assign', 'Assign'), ('remind', 'Remind'), ('revoke', 'Revoke')], max_length=32)),
('template_id', models.PositiveIntegerField(null=True)),
('template_content_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'get_latest_by': 'modified',
'abstract': False,
},
),
]
| agpl-3.0 | -4,716,142,410,958,759,000 | 44.96875 | 148 | 0.622706 | false |
kzcashteam/kzcash | share/qt/extract_strings_qt.py | 1 | 1857 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/kzcashstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
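def _example_parse_po():
    # Illustrative sketch, not part of the original script: shows the shape of
    # parse_po() output for a tiny xgettext fragment.
    sample = 'msgid "Overwrite existing wallet?"\nmsgstr ""\n'
    return parse_po(sample)
    # -> [(['"Overwrite existing wallet?"'], ['""'])]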
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *kzcash_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("kzcash-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| mit | 8,326,102,238,316,943,000 | 22.807692 | 79 | 0.581583 | false |
ebeshero/Pittsburgh_Frankenstein | collateXPrep/python/Part9-allWitnessIM_collation_to_xml.py | 1 | 11044 | from collatex import *
from xml.dom import pulldom
import re
import glob
from datetime import datetime, date
# import pytz
# from tzlocal import get_localzone
# today = date.today()
# utc_dt = datetime(today, tzinfo=pytz.utc)
# dateTime = utc_dt.astimezone(get_localzone())
# strDateTime = str(dateTime)
now = datetime.utcnow()
nowStr = str(now)
print('test: ', dir(Collation))
regexWhitespace = re.compile(r'\s+')
regexNonWhitespace = re.compile(r'\S+')
regexEmptyTag = re.compile(r'/>$')
regexBlankLine = re.compile(r'\n{2,}')
regexLeadingBlankLine = re.compile(r'^\n')
regexPageBreak = re.compile(r'<pb.+?/>')
RE_MARKUP = re.compile(r'<.+?>')
RE_PARA = re.compile(r'<p\s[^<]+?/>')
RE_INCLUDE = re.compile(r'<include[^<]*/>')
RE_MILESTONE = re.compile(r'<milestone[^<]*/>')
RE_HEAD = re.compile(r'<head[^<]*/>')
RE_AB = re.compile(r'<ab[^<]*/>')
# 2018-10-1 ebb: ampersands are apparently not treated in python regex as entities any more than angle brackets.
# RE_AMP_NSB = re.compile(r'\S&\s')
# RE_AMP_NSE = re.compile(r'\s&\S')
# RE_AMP_SQUISH = re.compile(r'\S&\S')
# RE_AMP = re.compile(r'\s&\s')
RE_AMP = re.compile(r'&')
# RE_MULTICAPS = re.compile(r'(?<=\W|\s|\>)[A-Z][A-Z]+[A-Z]*\s')
# RE_INNERCAPS = re.compile(r'(?<=hi\d"/>)[A-Z]+[A-Z]+[A-Z]+[A-Z]*')
# TITLE_MultiCaps = match(RE_MULTICAPS).lower()
RE_DELSTART = re.compile(r'<del[^<]*>')
RE_ADDSTART = re.compile(r'<add[^<]*>')
RE_MDEL = re.compile(r'<mdel[^<]*>.+?</mdel>')
RE_SHI = re.compile(r'<shi[^<]*>.+?</shi>')
RE_METAMARK = re.compile(r'<metamark[^<]*>.+?</metamark>')
RE_HI = re.compile(r'<hi\s[^<]*/>')
RE_PB = re.compile(r'<pb[^<]*/>')
RE_LB = re.compile(r'<lb[^<]*/>')
RE_LG = re.compile(r'<lg[^<]*/>')
RE_L = re.compile(r'<l\s[^<]*/>')
RE_CIT = re.compile(r'<cit\s[^<]*/>')
RE_QUOTE = re.compile(r'<quote\s[^<]*/>')
RE_OPENQT = re.compile(r'“')
RE_CLOSEQT = re.compile(r'”')
RE_GAP = re.compile(r'<gap\s[^<]*/>')
# <milestone unit="tei:p"/>
RE_sgaP = re.compile(r'<milestone\sunit="tei:p"[^<]*/>')
# ebb: RE_MDEL = those pesky deletions of two letters or less that we want to normalize out of the collation, but preserve in the output.
# Element types: xml, div, head, p, hi, pb, note, lg, l; comment()
# Tags to ignore, with content to keep: xml, comment, anchor
# Structural elements: div, p, lg, l
# Inline elements (empty) retained in normalization: pb, milestone, xi:include
# Inline and block elements (with content) retained in normalization: note, hi, head, ab
# GIs fall into one three classes
# 2017-05-21 ebb: Due to trouble with pulldom parsing XML comments, I have converted these to comment elements,
# 2017-05-21 ebb: to be ignored during collation.
# 2017-05-30 ebb: Determined that comment elements cannot really be ignored when they have text nodes (the text is
# 2017-05-30 ebb: collated but the tags are not). Decision to make the comments into self-closing elements with text
# 2017-05-30 ebb: contents as attribute values, and content such as tags simplified to be legal attribute values.
# 2017-05-22 ebb: I've set anchor elements with @xml:ids to be the indicators of collation "chunks" to process together
ignore = ['sourceDoc', 'xml', 'comment', 'w', 'mod', 'anchor', 'include', 'delSpan', 'addSpan', 'add', 'handShift', 'damage', 'restore', 'zone', 'surface', 'graphic', 'unclear', 'retrace']
blockEmpty = ['pb', 'p', 'div', 'milestone', 'lg', 'l', 'note', 'cit', 'quote', 'bibl', 'ab', 'head']
inlineEmpty = ['lb', 'gap', 'del', 'hi']
# 2018-05-12 (mysteriously removed but reinstated 2018-09-27) ebb: I'm setting a white space on either side of the inlineEmpty elements in line 103
# 2018-07-20: ebb: CHECK: are there white spaces on either side of empty elements in the output?
inlineContent = ['metamark', 'mdel', 'shi']
#2018-07-17 ebb: I moved the following list up into inlineEmpty, since they are all now empty elements: blockElement = ['lg', 'l', 'note', 'cit', 'quote', 'bibl']
# ebb: Tried removing 'comment', from blockElement list above, because we don't want these to be collated.
# 10-23-2017 ebb rv:
def normalizeSpace(inText):
"""Replaces all whitespace spans with single space characters"""
if regexNonWhitespace.search(inText):
return regexWhitespace.sub('\n', inText)
else:
return ''
def extract(input_xml):
"""Process entire input XML document, firing on events"""
# Start pulling; it continues automatically
doc = pulldom.parse(input_xml)
output = ''
for event, node in doc:
# elements to ignore: xml
if event == pulldom.START_ELEMENT and node.localName in ignore:
continue
# copy comments intact
elif event == pulldom.COMMENT:
doc.expandNode(node)
output += node.toxml()
# ebb: Next (below): empty block elements: pb, milestone, lb, lg, l, p, ab, head, hi,
# We COULD set white spaces around these like this ' ' + node.toxml() + ' '
# but what seems to happen is that the white spaces get added to tokens; they aren't used to
# isolate the markup into separate tokens, which is really what we'd want.
# So, I'm removing the white spaces here.
# NOTE: Removing the white space seems to improve/expand app alignment
elif event == pulldom.START_ELEMENT and node.localName in blockEmpty:
output += node.toxml()
# ebb: empty inline elements that do not take surrounding white spaces:
elif event == pulldom.START_ELEMENT and node.localName in inlineEmpty:
output += node.toxml()
# non-empty inline elements: mdel, shi, metamark
elif event == pulldom.START_ELEMENT and node.localName in inlineContent:
output += regexEmptyTag.sub('>', node.toxml())
elif event == pulldom.END_ELEMENT and node.localName in inlineContent:
output += '</' + node.localName + '>'
# elif event == pulldom.START_ELEMENT and node.localName in blockElement:
# output += '\n<' + node.localName + '>\n'
#elif event == pulldom.END_ELEMENT and node.localName in blockElement:
# output += '\n</' + node.localName + '>'
elif event == pulldom.CHARACTERS:
output += normalizeSpace(node.data)
else:
continue
return output
def normalize(inputText):
    # 2018-09-23 ebb THIS WORKS, SOMETIMES, BUT NOT EVERYWHERE: RE_MULTICAPS.sub(format(re.findall(RE_MULTICAPS, inputText, flags=0)).title(), \
# RE_INNERCAPS.sub(format(re.findall(RE_INNERCAPS, inputText, flags=0)).lower(), \
return RE_MILESTONE.sub('', \
RE_INCLUDE.sub('', \
RE_AB.sub('', \
RE_HEAD.sub('', \
RE_AMP.sub('and', \
RE_MDEL.sub('', \
RE_SHI.sub('', \
RE_HI.sub('', \
RE_LB.sub('', \
RE_PB.sub('', \
RE_PARA.sub('<p/>', \
RE_sgaP.sub('<p/>', \
RE_LG.sub('<lg/>', \
RE_L.sub('<l/>', \
RE_CIT.sub('', \
RE_QUOTE.sub('', \
RE_OPENQT.sub('"', \
RE_CLOSEQT.sub('"', \
RE_GAP.sub('', \
RE_DELSTART.sub('<del>', \
RE_ADDSTART.sub('<add>', \
RE_METAMARK.sub('', inputText)))))))))))))))))))))).lower()
# to lowercase the normalized tokens, add .lower() to the end.
# return regexPageBreak('',inputText)
# ebb: The normalize function makes it possible to return normalized tokens that screen out some markup, but not all.
def processToken(inputText):
return {"t": inputText + ' ', "n": normalize(inputText)}
def processWitness(inputWitness, id):
return {'id': id, 'tokens': [processToken(token) for token in inputWitness]}
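def _example_process_token():
    # Illustrative sketch, not part of the original script: the literal reading
    # is kept under "t" (with a trailing space) while the lowercased,
    # markup-reduced reading used for collation is kept under "n".
    return processToken('The <hi rend="underline"/>Creature')
    # -> {'t': 'The <hi rend="underline"/>Creature ', 'n': 'the creature'}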
for name in glob.glob('../collChunks-Part9/1818_fullFlat_*'):
try:
matchString = name.split("fullFlat_", 1)[1]
# ebb: above gets C30.xml for example
matchStr = matchString.split(".", 1)[0]
# ebb: above strips off the file extension
with open(name, 'rb') as f1818file, \
open('../collChunks-Part9/1823_fullFlat_' + matchString, 'rb') as f1823file, \
open('../collChunks-Part9/Thomas_fullFlat_' + matchString, 'rb') as fThomasfile, \
open('../collChunks-Part9/1831_fullFlat_' + matchString, 'rb') as f1831file, \
open('../collChunks-Part9/msColl_' + matchString, 'rb') as fMSfile, \
open('../Full_Part9_xmlOutput/collation_' + matchStr + '.xml', 'w') as outputFile:
# open('collationChunks/msColl_c56_' + matchString, 'rb') as fMSc56file, \
# open('collationChunks/msColl_c58_' + matchString, 'rb') as fMSc58file, \
# open('collationChunks/msColl_c57Frag_' + matchString, 'rb') as fMSc57Fragfile, \
# open('collationChunks/msColl_c58Frag_' + matchString, 'rb') as fMSc58Fragfile, \
# fMSc56_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(fMSc56file))).split('\n')
# fMSc58_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(fMSc58file))).split('\n')
# fMSc57Frag_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(fMSc57Fragfile))).split('\n')
# fMSc58Frag_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(fMSc58Fragfile))).split('\n')
f1818_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(f1818file))).split('\n')
fThomas_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(fThomasfile))).split('\n')
f1823_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(f1823file))).split('\n')
f1831_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(f1831file))).split('\n')
fMS_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(fMSfile))).split('\n')
f1818_tokenlist = processWitness(f1818_tokens, 'f1818')
fThomas_tokenlist = processWitness(fThomas_tokens, 'fThomas')
f1823_tokenlist = processWitness(f1823_tokens, 'f1823')
f1831_tokenlist = processWitness(f1831_tokens, 'f1831')
fMS_tokenlist = processWitness(fMS_tokens, 'fMS')
# fMSc56_tokenlist = processWitness(fMSc56_tokens, 'fMSc56')
# fMSc58_tokenlist = processWitness(fMSc58_tokens, 'fMSc58')
# fMSc57Frag_tokenlist = processWitness(fMSc57Frag_tokens, 'fMSc57Frag')
# fMSc58Frag_tokenlist = processWitness(fMSc58Frag_tokens, 'fMSc58Frag')
collation_input = {"witnesses": [f1818_tokenlist, f1823_tokenlist, fThomas_tokenlist, f1831_tokenlist, fMS_tokenlist]}
# table = collate(collation_input, output='tei', segmentation=True)
# table = collate(collation_input, segmentation=True, layout='vertical')
table = collate(collation_input, output='xml', segmentation=True)
print(table + '<!-- ' + nowStr + ' -->', file=outputFile)
# print(table, file=outputFile)
except IOError:
pass
| agpl-3.0 | -5,091,329,732,725,098,000 | 50.830986 | 188 | 0.631431 | false |
mganeva/mantid | scripts/Muon/GUI/Common/utilities/xml_utils.py | 1 | 4492 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import os
import xml.etree.ElementTree as ET
import Muon.GUI.Common.utilities.run_string_utils as run_string_utils
from Muon.GUI.Common.muon_group import MuonGroup
from Muon.GUI.Common.muon_pair import MuonPair
def _create_XML_subElement_for_groups(root_node, groups):
group_nodes = []
for group in groups:
child = ET.SubElement(root_node, 'group', name=group.name)
id_string = run_string_utils.run_list_to_string(group.detectors)
ids = ET.SubElement(child, 'ids', val=id_string)
child.extend(ids)
group_nodes += [child]
return group_nodes
def _create_XML_subElement_for_pairs(root_node, pairs):
pair_nodes = []
for pair in pairs:
child = ET.SubElement(root_node, 'pair', name=pair.name)
fwd_group = ET.SubElement(child, 'forward-group', val=pair.forward_group)
bwd_group = ET.SubElement(child, 'backward-group', val=pair.backward_group)
alpha = ET.SubElement(child, 'alpha', val=str(pair.alpha))
child.extend(fwd_group)
child.extend(bwd_group)
child.extend(alpha)
pair_nodes += [child]
return pair_nodes
def save_grouping_to_XML(groups, pairs, filename, save=True, description=''):
"""
Save a set of muon group and pair parameters to XML format file. Fewer checks are performed
than with the XML loading.
:param groups: A list of MuonGroup objects to save.
:param pairs: A list of MuonPair objects to save.
:param filename: The name of the XML file to save to.
:param save: Whether to actually save the file.
:return: the XML tree (used in testing).
"""
# some basic checks
if filename == "":
raise AttributeError("File must be specified for saving to XML")
if os.path.splitext(filename)[-1].lower() != ".xml":
raise AttributeError("File extension must be XML")
if sum([0 if isinstance(group, MuonGroup) else 1 for group in groups]) > 0:
raise AttributeError("groups must be MuonGroup type")
if sum([0 if isinstance(pair, MuonPair) else 1 for pair in pairs]) > 0:
raise AttributeError("pairs must be MuonPair type")
root = ET.Element("detector-grouping")
if description:
root.set('description', description)
# handle groups
_create_XML_subElement_for_groups(root, groups)
# handle pairs
_create_XML_subElement_for_pairs(root, pairs)
tree = ET.ElementTree(root)
if save:
tree.write(filename)
return tree
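def _example_save_grouping():
    # Illustrative usage sketch, not part of the original module: the group/pair
    # names and detector ids below are invented for demonstration only.
    groups = [MuonGroup(group_name="fwd", detector_ids=[1, 2, 3]),
              MuonGroup(group_name="bwd", detector_ids=[4, 5, 6])]
    pairs = [MuonPair(pair_name="long", forward_group_name="fwd",
                      backward_group_name="bwd", alpha=1.0)]
    # save=False builds and returns the XML tree without writing it to disk
    return save_grouping_to_XML(groups, pairs, "example_grouping.xml",
                                save=False, description="example grouping")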
def load_grouping_from_XML(filename):
"""
    Load group/pair data from an XML file (which can be produced using the save_grouping_to_XML() function).
:param filename: Full filepath to an xml file.
:return: (groups, pairs), lists of MuonGroup, MuonPair objects respectively.
"""
tree = ET.parse(filename)
root = tree.getroot()
description = root.get('description')
group_names, group_ids = _get_groups_from_XML(root)
pair_names, pair_groups, pair_alphas = _get_pairs_from_XML(root)
groups, pairs = [], []
for i, group_name in enumerate(group_names):
groups += [MuonGroup(group_name=group_name,
detector_ids=group_ids[i])]
for i, pair_name in enumerate(pair_names):
pairs += [MuonPair(pair_name=pair_name,
forward_group_name=pair_groups[i][0],
backward_group_name=pair_groups[i][1],
alpha=pair_alphas[i])]
return groups, pairs, description
def _get_groups_from_XML(root):
names, ids = [], []
for child in root:
if child.tag == "group":
names += [child.attrib['name']]
ids += [run_string_utils.run_string_to_list(child.find('ids').attrib['val'])]
return names, ids
def _get_pairs_from_XML(root):
names, groups, alphas = [], [], []
for child in root:
if child.tag == "pair":
names += [child.attrib['name']]
groups += [[child.find('forward-group').attrib['val'], child.find('backward-group').attrib['val']]]
alphas += [child.find('alpha').attrib['val']]
return names, groups, alphas
| gpl-3.0 | 2,430,794,358,631,994,000 | 35.819672 | 111 | 0.642698 | false |
ZobairAlijan/osf.io | api_tests/base/test_serializers.py | 1 | 5406 | # -*- coding: utf-8 -*-
import httplib as http
from nose.tools import * # flake8: noqa
from tests.base import ApiTestCase, DbTestCase
from tests import factories
from tests.utils import make_drf_request
from api.base.settings.defaults import API_BASE
from api.base.serializers import JSONAPISerializer
from api.nodes.serializers import NodeSerializer, RelationshipField
class TestApiBaseSerializers(ApiTestCase):
def setUp(self):
super(TestApiBaseSerializers, self).setUp()
self.node = factories.ProjectFactory(is_public=True)
for i in range(5):
factories.ProjectFactory(is_public=True, parent=self.node)
self.url = '/{}nodes/{}/'.format(API_BASE, self.node._id)
def test_counts_not_included_in_link_fields_by_default(self):
res = self.app.get(self.url)
relationships = res.json['data']['relationships']
for relation in relationships.values():
link = relation['links'].values()[0]
assert_not_in('count', link['meta'])
def test_counts_included_in_link_fields_with_related_counts_query_param(self):
res = self.app.get(self.url, params={'related_counts': True})
relationships = res.json['data']['relationships']
for key, relation in relationships.iteritems():
field = NodeSerializer._declared_fields[key]
if (field.related_meta or {}).get('count'):
link = relation['links'].values()[0]
assert_in('count', link['meta'])
def test_related_counts_excluded_query_param_false(self):
res = self.app.get(self.url, params={'related_counts': False})
relationships = res.json['data']['relationships']
for relation in relationships.values():
link = relation['links'].values()[0]
assert_not_in('count', link['meta'])
def test_invalid_param_raises_bad_request(self):
res = self.app.get(self.url, params={'related_counts': 'fish'}, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
class TestRelationshipField(DbTestCase):
# We need a Serializer to test the Relationship field (needs context)
class BasicNodeSerializer(JSONAPISerializer):
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'}
)
parent_with_meta = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_count', 'extra': 'get_extra'},
)
self_and_related_field = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'},
self_view='nodes:node-contributors',
self_view_kwargs={'node_id': '<pk>'},
)
two_url_kwargs = RelationshipField(
# fake url, for testing purposes
related_view='nodes:node-pointer-detail',
related_view_kwargs={'node_id': '<pk>', 'node_link_id': '<pk>'},
)
not_attribute_on_target = RelationshipField(
# fake url, for testing purposes
related_view='nodes:node-children',
related_view_kwargs={'node_id': '12345'}
)
class Meta:
type_ = 'nodes'
def get_count(self, obj):
return 1
def get_extra(self, obj):
return 'foo'
# TODO: Expand tests
# Regression test for https://openscience.atlassian.net/browse/OSF-4832
def test_serializing_meta(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
meta = data['relationships']['parent_with_meta']['links']['related']['meta']
assert_not_in('count', meta)
assert_in('extra', meta)
assert_equal(meta['extra'], 'foo')
def test_self_and_related_fields(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
relationship_field = data['relationships']['self_and_related_field']['links']
assert_in('/v2/nodes/{}/contributors/'.format(node._id), relationship_field['self']['href'])
assert_in('/v2/nodes/{}/'.format(node._id), relationship_field['related']['href'])
def test_field_with_two_kwargs(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
field = data['relationships']['two_url_kwargs']['links']
assert_in('/v2/nodes/{}/node_links/{}/'.format(node._id, node._id), field['related']['href'])
def test_field_with_non_attribute(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
field = data['relationships']['not_attribute_on_target']['links']
assert_in('/v2/nodes/{}/children/'.format('12345'), field['related']['href'])
| apache-2.0 | -2,315,594,354,815,570,400 | 37.340426 | 101 | 0.620607 | false |
fernandog/Medusa | ext/tornado/gen.py | 1 | 47881 | """``tornado.gen`` implements generator-based coroutines.
.. note::
The "decorator and generator" approach in this module is a
precursor to native coroutines (using ``async def`` and ``await``)
which were introduced in Python 3.5. Applications that do not
require compatibility with older versions of Python should use
native coroutines instead. Some parts of this module are still
useful with native coroutines, notably `multi`, `sleep`,
`WaitIterator`, and `with_timeout`. Some of these functions have
counterparts in the `asyncio` module which may be used as well,
although the two may not necessarily be 100% compatible.
Coroutines provide an easier way to work in an asynchronous
environment than chaining callbacks. Code using coroutines is
technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler:
.. testcode::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
could be written with ``gen`` as:
.. testcode::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its ``Future.result``.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished:
.. testcode::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
.. testoutput::
:hide:
If the `~functools.singledispatch` library is available (standard in
Python 3.4, available via the `singledispatch
<https://pypi.python.org/pypi/singledispatch>`_ package on older
versions), additional types of objects may be yielded. Tornado includes
support for ``asyncio.Future`` and Twisted's ``Deferred`` class when
``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported.
See the `convert_yielded` function to extend this mechanism.
.. versionchanged:: 3.2
Dict support added.
.. versionchanged:: 4.1
Support added for yielding ``asyncio`` Futures and Twisted Deferreds
via ``singledispatch``.
"""
from __future__ import absolute_import, division, print_function
import collections
import functools
import itertools
import os
import sys
import types
from tornado.concurrent import (Future, is_future, chain_future, future_set_exc_info,
future_add_done_callback, future_set_result_unless_cancelled)
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
from tornado.util import PY3, raise_exc_info, TimeoutError
try:
try:
# py34+
from functools import singledispatch # type: ignore
except ImportError:
from singledispatch import singledispatch # backport
except ImportError:
# In most cases, singledispatch is required (to avoid
# difficult-to-diagnose problems in which the functionality
    # available differs depending on which invisible packages are
# installed). However, in Google App Engine third-party
# dependencies are more trouble so we allow this module to be
# imported without it.
if 'APPENGINE_RUNTIME' not in os.environ:
raise
singledispatch = None
try:
try:
# py35+
from collections.abc import Generator as GeneratorType # type: ignore
except ImportError:
from backports_abc import Generator as GeneratorType # type: ignore
try:
# py35+
from inspect import isawaitable # type: ignore
except ImportError:
from backports_abc import isawaitable
except ImportError:
if 'APPENGINE_RUNTIME' not in os.environ:
raise
from types import GeneratorType
def isawaitable(x): # type: ignore
return False
if PY3:
import builtins
else:
import __builtin__ as builtins
class KeyReuseError(Exception):
pass
class UnknownKeyError(Exception):
pass
class LeakedCallbackError(Exception):
pass
class BadYieldError(Exception):
pass
class ReturnValueIgnoredError(Exception):
pass
def _value_from_stopiteration(e):
try:
# StopIteration has a value attribute beginning in py33.
# So does our Return class.
return e.value
except AttributeError:
pass
try:
# Cython backports coroutine functionality by putting the value in
# e.args[0].
return e.args[0]
except (AttributeError, IndexError):
return None
def _create_future():
future = Future()
# Fixup asyncio debug info by removing extraneous stack entries
source_traceback = getattr(future, "_source_traceback", ())
while source_traceback:
# Each traceback entry is equivalent to a
# (filename, self.lineno, self.name, self.line) tuple
filename = source_traceback[-1][0]
if filename == __file__:
del source_traceback[-1]
else:
break
return future
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
# The engine interface doesn't give us any way to return
# errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future_add_done_callback(future, stack_context.wrap(final_callback))
return wrapper
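# Illustrative sketch, not part of the original module: an engine-style function
# takes a ``callback`` argument and invokes it with its result instead of
# returning a value. ``fetch_lines`` and ``some_async_read`` are hypothetical.
#
#     @engine
#     def fetch_lines(path, callback):
#         data = yield some_async_read(path)
#         callback(data.splitlines())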
def coroutine(func, replace_callback=True):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
in either this decorator or `engine`.
Coroutines may "return" by raising the special exception
`Return(value) <Return>`. In Python 3.3+, it is also possible for
the function to simply use the ``return value`` statement (prior to
Python 3.3 generators were not allowed to also return values).
In all versions of Python a coroutine that simply wishes to exit
early may use the ``return`` statement without a value.
Functions with this decorator return a `.Future`. Additionally,
they may be called with a ``callback`` keyword argument, which
will be invoked with the future's result when it resolves. If the
coroutine fails, the callback will not be run and an exception
will be raised into the surrounding `.StackContext`. The
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
.. warning::
When exceptions occur inside a coroutine, the exception
information will be stored in the `.Future` object. You must
examine the result of the `.Future` object, or the exception
may go unnoticed by your code. This means yielding the function
if called from another coroutine, using something like
`.IOLoop.run_sync` for top-level calls, or passing the `.Future`
to `.IOLoop.add_future`.
"""
return _make_coroutine_wrapper(func, replace_callback=True)
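def _example_run_coroutine():
    # Illustrative sketch, not part of the original module: a coroutine may
    # "return" by raising Return (or with a plain return statement on Python
    # 3.3+) and can be driven from synchronous code with IOLoop.run_sync().
    @coroutine
    def double(n):
        raise Return(n * 2)
    return IOLoop.current().run_sync(lambda: double(21))  # returns 42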
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
The two decorators differ in their treatment of the ``callback``
argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``.
"""
# On Python 3.5, set the coroutine flag on our generator, to allow it
# to be used with 'await'.
wrapped = func
if hasattr(types, 'coroutine'):
func = types.coroutine(func)
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
future = _create_future()
if replace_callback and 'callback' in kwargs:
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
try:
result = func(*args, **kwargs)
except (Return, StopIteration) as e:
result = _value_from_stopiteration(e)
except Exception:
future_set_exc_info(future, sys.exc_info())
try:
return future
finally:
# Avoid circular references
future = None
else:
if isinstance(result, GeneratorType):
# Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to
# use "optional" coroutines in critical path code without
# performance penalty for the synchronous case.
try:
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = _create_future()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future_set_result_unless_cancelled(future, _value_from_stopiteration(e))
except Exception:
future_set_exc_info(future, sys.exc_info())
else:
# Provide strong references to Runner objects as long
# as their result future objects also have strong
# references (typically from the parent coroutine's
# Runner). This keeps the coroutine's Runner alive.
# We do this by exploiting the public API
# add_done_callback() instead of putting a private
# attribute on the Future.
# (Github issues #1769, #2229).
runner = Runner(result, future, yielded)
future.add_done_callback(lambda _: runner)
yielded = None
try:
return future
finally:
# Subtle memory optimization: if next() raised an exception,
# the future's exc_info contains a traceback which
# includes this stack frame. This creates a cycle,
# which will be collected at the next full GC but has
# been shown to greatly increase memory usage of
# benchmarks (relative to the refcount-based scheme
# used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it.
future = None
future_set_result_unless_cancelled(future, result)
return future
wrapper.__wrapped__ = wrapped
wrapper.__tornado_coroutine__ = True
return wrapper
def is_coroutine_function(func):
"""Return whether *func* is a coroutine function, i.e. a function
wrapped with `~.gen.coroutine`.
.. versionadded:: 4.5
"""
return getattr(func, '__tornado_coroutine__', False)
class Return(Exception):
"""Special exception to return a value from a `coroutine`.
If this exception is raised, its value argument is used as the
result of the coroutine::
@gen.coroutine
def fetch_json(url):
response = yield AsyncHTTPClient().fetch(url)
raise gen.Return(json_decode(response.body))
In Python 3.3, this exception is no longer necessary: the ``return``
statement can be used directly to return a value (previously
``yield`` and ``return`` with a value could not be combined in the
same function).
By analogy with the return statement, the value argument is optional,
but it is never necessary to ``raise gen.Return()``. The ``return``
statement can be used with no arguments instead.
"""
def __init__(self, value=None):
super(Return, self).__init__()
self.value = value
# Cython recognizes subclasses of StopIteration with a .args tuple.
self.args = (value,)
class WaitIterator(object):
"""Provides an iterator to yield the results of futures as they finish.
Yielding a set of futures like this:
``results = yield [future1, future2]``
pauses the coroutine until both ``future1`` and ``future2``
return, and then restarts the coroutine with the results of both
futures. If either future is an exception, the expression will
raise that exception and all the results will be lost.
If you need to get the result of each future as soon as possible,
or if you need the result of some futures even if others produce
errors, you can use ``WaitIterator``::
wait_iterator = gen.WaitIterator(future1, future2)
while not wait_iterator.done():
try:
result = yield wait_iterator.next()
except Exception as e:
print("Error {} from {}".format(e, wait_iterator.current_future))
else:
print("Result {} received from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
Because results are returned as soon as they are available the
output from the iterator *will not be in the same order as the
input arguments*. If you need to know which future produced the
current result, you can use the attributes
``WaitIterator.current_future``, or ``WaitIterator.current_index``
to get the index of the future from the input list. (if keyword
arguments were used in the construction of the `WaitIterator`,
``current_index`` will use the corresponding keyword).
On Python 3.5, `WaitIterator` implements the async iterator
protocol, so it can be used with the ``async for`` statement (note
that in this version the entire iteration is aborted if any value
raises an exception, while the previous example can continue past
individual errors)::
async for result in gen.WaitIterator(future1, future2):
print("Result {} received from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
.. versionadded:: 4.1
.. versionchanged:: 4.3
Added ``async for`` support in Python 3.5.
"""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError(
"You must provide args or kwargs, not both")
if kwargs:
self._unfinished = dict((f, k) for (k, f) in kwargs.items())
futures = list(kwargs.values())
else:
self._unfinished = dict((f, i) for (i, f) in enumerate(args))
futures = args
self._finished = collections.deque()
self.current_index = self.current_future = None
self._running_future = None
for future in futures:
future_add_done_callback(future, self._done_callback)
def done(self):
"""Returns True if this iterator has no more results."""
if self._finished or self._unfinished:
return False
# Clear the 'current' values when iteration is done.
self.current_index = self.current_future = None
return True
def next(self):
"""Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = Future()
if self._finished:
self._return_result(self._finished.popleft())
return self._running_future
def _done_callback(self, done):
if self._running_future and not self._running_future.done():
self._return_result(done)
else:
self._finished.append(done)
def _return_result(self, done):
"""Called set the returned future's state that of the future
we yielded, and set the current future for the iterator.
"""
chain_future(done, self._running_future)
self.current_future = done
self.current_index = self._unfinished.pop(done)
def __aiter__(self):
return self
def __anext__(self):
if self.done():
# Lookup by name to silence pyflakes on older versions.
raise getattr(builtins, 'StopAsyncIteration')()
return self.next()
class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def start(self, runner):
"""Called by the runner after the generator has yielded.
No other methods will be called on this object before ``start``.
"""
raise NotImplementedError()
def is_ready(self):
"""Called by the runner to determine whether to resume the generator.
Returns a boolean; may be called more than once.
"""
raise NotImplementedError()
def get_result(self):
"""Returns the value to use as the result of the yield expression.
This method will only be called once, and only after `is_ready`
has returned true.
"""
raise NotImplementedError()
class Callback(YieldPoint):
"""Returns a callable object that will allow a matching `Wait` to proceed.
The key may be any value suitable for use as a dictionary key, and is
used to match ``Callbacks`` to their corresponding ``Waits``. The key
must be unique among outstanding callbacks within a single run of the
generator function, but may be reused across different runs of the same
function (so constants generally work fine).
The callback may be called with zero or one arguments; if an argument
is given it will be returned by `Wait`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
runner.register_callback(self.key)
def is_ready(self):
return True
def get_result(self):
return self.runner.result_callback(self.key)
class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
"""Returns the results of multiple previous `Callbacks <Callback>`.
The argument is a sequence of `Callback` keys, and the result is
a list of results in the same order.
`WaitAll` is equivalent to yielding a list of `Wait` objects.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, keys):
self.keys = keys
def start(self, runner):
self.runner = runner
def is_ready(self):
return all(self.runner.is_ready(key) for key in self.keys)
def get_result(self):
return [self.runner.pop_result(key) for key in self.keys]
def Task(func, *args, **kwargs):
"""Adapts a callback-based asynchronous function for use in coroutines.
Takes a function (and optional additional arguments) and runs it with
those arguments plus a ``callback`` keyword argument. The argument passed
to the callback is returned as the result of the yield expression.
.. versionchanged:: 4.0
``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when
yielded.
"""
future = _create_future()
def handle_exception(typ, value, tb):
if future.done():
return False
future_set_exc_info(future, (typ, value, tb))
return True
def set_result(result):
if future.done():
return
future_set_result_unless_cancelled(future, result)
with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs)
return future
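# Illustrative sketch, not part of the original module: adapting an assumed
# callback-style function (one accepting a ``callback`` keyword argument) so
# its result can be obtained with ``yield``.
@coroutine
def _task_example(fetch_with_callback, url):
    response = yield Task(fetch_with_callback, url)
    raise Return(response)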
class YieldFuture(YieldPoint):
def __init__(self, future):
"""Adapts a `.Future` to the `YieldPoint` interface.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
self.future = future
self.io_loop = IOLoop.current()
def start(self, runner):
if not self.future.done():
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.io_loop.add_future(self.future, runner.result_callback(self.key))
else:
self.runner = None
self.result_fn = self.future.result
def is_ready(self):
if self.runner is not None:
return self.runner.is_ready(self.key)
else:
return True
def get_result(self):
if self.runner is not None:
return self.runner.pop_result(self.key).result()
else:
return self.result_fn()
def _contains_yieldpoint(children):
"""Returns True if ``children`` contains any YieldPoints.
``children`` may be a dict or a list, as used by `MultiYieldPoint`
and `multi_future`.
"""
if isinstance(children, dict):
return any(isinstance(i, YieldPoint) for i in children.values())
if isinstance(children, list):
return any(isinstance(i, YieldPoint) for i in children)
return False
def multi(children, quiet_exceptions=()):
"""Runs multiple asynchronous operations in parallel.
``children`` may either be a list or a dict whose values are
yieldable objects. ``multi()`` returns a new yieldable
object that resolves to a parallel structure containing their
results. If ``children`` is a list, the result is a list of
results in the same order; if it is a dict, the result is a dict
with the same keys.
That is, ``results = yield multi(list_of_futures)`` is equivalent
to::
results = []
for future in list_of_futures:
results.append(yield future)
If any children raise exceptions, ``multi()`` will raise the first
one. All others will be logged, unless they are of types
contained in the ``quiet_exceptions`` argument.
If any of the inputs are `YieldPoints <YieldPoint>`, the returned
yieldable object is a `YieldPoint`. Otherwise, returns a `.Future`.
This means that the result of `multi` can be used in a native
coroutine if and only if all of its children can be.
In a ``yield``-based coroutine, it is not normally necessary to
call this function directly, since the coroutine runner will
do it automatically when a list or dict is yielded. However,
it is necessary in ``await``-based coroutines, or to pass
the ``quiet_exceptions`` argument.
This function is available under the names ``multi()`` and ``Multi()``
for historical reasons.
Cancelling a `.Future` returned by ``multi()`` does not cancel its
children. `asyncio.gather` is similar to ``multi()``, but it does
cancel its children.
.. versionchanged:: 4.2
If multiple yieldables fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. versionchanged:: 4.3
Replaced the class ``Multi`` and the function ``multi_future``
with a unified function ``multi``. Added support for yieldables
other than `YieldPoint` and `.Future`.
"""
if _contains_yieldpoint(children):
return MultiYieldPoint(children, quiet_exceptions=quiet_exceptions)
else:
return multi_future(children, quiet_exceptions=quiet_exceptions)
Multi = multi
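# Illustrative sketch, not part of the original module: when ``multi`` is given
# a dict, the result is a dict with the same keys. Both futures are assumed to
# be supplied by the caller.
@coroutine
def _multi_dict_example(users_future, posts_future):
    results = yield multi({'users': users_future, 'posts': posts_future})
    raise Return((results['users'], results['posts']))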
class MultiYieldPoint(YieldPoint):
"""Runs multiple asynchronous operations in parallel.
This class is similar to `multi`, but it always creates a stack
context even when no children require it. It is not compatible with
native coroutines.
.. versionchanged:: 4.2
If multiple ``YieldPoints`` fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. versionchanged:: 4.3
Renamed from ``Multi`` to ``MultiYieldPoint``. The name ``Multi``
remains as an alias for the equivalent `multi` function.
.. deprecated:: 4.3
Use `multi` instead.
"""
def __init__(self, children, quiet_exceptions=()):
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
children = children.values()
self.children = []
for i in children:
if not isinstance(i, YieldPoint):
i = convert_yielded(i)
if is_future(i):
i = YieldFuture(i)
self.children.append(i)
assert all(isinstance(i, YieldPoint) for i in self.children)
self.unfinished_children = set(self.children)
self.quiet_exceptions = quiet_exceptions
def start(self, runner):
for i in self.children:
i.start(runner)
def is_ready(self):
finished = list(itertools.takewhile(
lambda i: i.is_ready(), self.unfinished_children))
self.unfinished_children.difference_update(finished)
return not self.unfinished_children
def get_result(self):
result_list = []
exc_info = None
for f in self.children:
try:
result_list.append(f.get_result())
except Exception as e:
if exc_info is None:
exc_info = sys.exc_info()
else:
if not isinstance(e, self.quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
if exc_info is not None:
raise_exc_info(exc_info)
if self.keys is not None:
return dict(zip(self.keys, result_list))
else:
return list(result_list)
def multi_future(children, quiet_exceptions=()):
"""Wait for multiple asynchronous futures in parallel.
This function is similar to `multi`, but does not support
`YieldPoints <YieldPoint>`.
.. versionadded:: 4.0
.. versionchanged:: 4.2
If multiple ``Futures`` fail, any exceptions after the first (which is
raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. deprecated:: 4.3
Use `multi` instead.
"""
if isinstance(children, dict):
keys = list(children.keys())
children = children.values()
else:
keys = None
children = list(map(convert_yielded, children))
assert all(is_future(i) or isinstance(i, _NullFuture) for i in children)
unfinished_children = set(children)
future = _create_future()
if not children:
future_set_result_unless_cancelled(future,
{} if keys is not None else [])
def callback(f):
unfinished_children.remove(f)
if not unfinished_children:
result_list = []
for f in children:
try:
result_list.append(f.result())
except Exception as e:
if future.done():
if not isinstance(e, quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
else:
future_set_exc_info(future, sys.exc_info())
if not future.done():
if keys is not None:
future_set_result_unless_cancelled(future,
dict(zip(keys, result_list)))
else:
future_set_result_unless_cancelled(future, result_list)
listening = set()
for f in children:
if f not in listening:
listening.add(f)
future_add_done_callback(f, callback)
return future
def maybe_future(x):
"""Converts ``x`` into a `.Future`.
If ``x`` is already a `.Future`, it is simply returned; otherwise
it is wrapped in a new `.Future`. This is suitable for use as
``result = yield gen.maybe_future(f())`` when you don't know whether
``f()`` returns a `.Future` or not.
.. deprecated:: 4.3
This function only handles ``Futures``, not other yieldable objects.
Instead of `maybe_future`, check for the non-future result types
you expect (often just ``None``), and ``yield`` anything unknown.
"""
if is_future(x):
return x
else:
fut = _create_future()
fut.set_result(x)
return fut
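# Illustrative sketch, not part of the original module, of the replacement
# pattern suggested by the deprecation note above: check for the expected
# non-future result (None here) and only yield anything else. ``f`` is an
# assumed callable that may return either a plain value or a yieldable.
@coroutine
def _maybe_future_replacement_example(f):
    result = f()
    if result is not None:
        result = yield result
    raise Return(result)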
def with_timeout(timeout, future, quiet_exceptions=()):
"""Wraps a `.Future` (or other yieldable object) in a timeout.
Raises `tornado.util.TimeoutError` if the input future does not
complete before ``timeout``, which may be specified in any form
allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
    an absolute time relative to `.IOLoop.time`).
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
Does not support `YieldPoint` subclasses.
The wrapped `.Future` is not canceled when the timeout expires,
permitting it to be reused. `asyncio.wait_for` is similar to this
function but it does cancel the wrapped `.Future` on timeout.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
.. versionchanged:: 4.4
Added support for yieldable objects other than `.Future`.
"""
# TODO: allow YieldPoints in addition to other yieldables?
# Tricky to do with stack_context semantics.
#
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
future = convert_yielded(future)
result = _create_future()
chain_future(future, result)
io_loop = IOLoop.current()
def error_callback(future):
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error("Exception in Future %r after timeout",
future, exc_info=True)
def timeout_callback():
if not result.done():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future_add_done_callback(future, error_callback)
timeout_handle = io_loop.add_timeout(
timeout, timeout_callback)
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here).
future_add_done_callback(
future, lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future, lambda future: io_loop.remove_timeout(timeout_handle))
return result
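# Illustrative sketch, not part of the original module: waiting on an assumed
# future for at most five seconds, expressed as an absolute deadline relative
# to ``IOLoop.time``. On expiry, ``TimeoutError`` propagates to the caller.
@coroutine
def _with_timeout_example(fetch_future):
    result = yield with_timeout(IOLoop.current().time() + 5, fetch_future)
    raise Return(result)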
def sleep(duration):
"""Return a `.Future` that resolves after the given number of seconds.
When used with ``yield`` in a coroutine, this is a non-blocking
analogue to `time.sleep` (which should not be used in coroutines
because it is blocking)::
yield gen.sleep(0.5)
Note that calling this function on its own does nothing; you must
wait on the `.Future` it returns (usually by yielding it).
.. versionadded:: 4.1
"""
f = _create_future()
IOLoop.current().call_later(duration,
lambda: future_set_result_unless_cancelled(f, None))
return f
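# Illustrative sketch, not part of the original module: ``sleep`` has no effect
# unless its Future is yielded; calling it and discarding the result does not
# pause anything.
@coroutine
def _sleep_example():
    yield sleep(0.5)  # non-blocking half-second pause inside the coroutine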
class _NullFuture(object):
"""_NullFuture resembles a Future that finished with a result of None.
It's not actually a `Future` to avoid depending on a particular event loop.
Handled as a special case in the coroutine runner.
"""
def result(self):
return None
def done(self):
return True
# _null_future is used as a dummy value in the coroutine runner. It differs
# from moment in that moment always adds a delay of one IOLoop iteration
# while _null_future is processed as soon as possible.
_null_future = _NullFuture()
moment = _NullFuture()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 4.0
.. deprecated:: 4.5
``yield None`` (or ``yield`` with no argument) is now equivalent to
``yield gen.moment``.
"""
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
    `.Future`).
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
gen = result_future = first_yielded = None
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
future_set_result_unless_cancelled(self.future,
self.yield_point.get_result())
except:
future_set_exc_info(self.future, sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
exc_info = None
try:
value = future.result()
except Exception:
self.had_exception = True
exc_info = sys.exc_info()
future = None
if exc_info is not None:
try:
yielded = self.gen.throw(*exc_info)
finally:
# Break up a reference to itself
# for faster GC on CPython.
exc_info = None
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
# If we ran cleanly without waiting on all callbacks
# raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
future_set_result_unless_cancelled(self.result_future,
_value_from_stopiteration(e))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
future_set_exc_info(self.result_future, sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
yielded = None
finally:
self.running = False
def handle_yield(self, yielded):
# Lists containing YieldPoints require stack contexts;
# other lists are handled in convert_yielded.
if _contains_yieldpoint(yielded):
yielded = multi(yielded)
if isinstance(yielded, YieldPoint):
# YieldPoints are too closely coupled to the Runner to go
# through the generic convert_yielded mechanism.
self.future = Future()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
future_set_result_unless_cancelled(self.future, yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = Future()
future_set_exc_info(self.future, sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
else:
try:
self.future = convert_yielded(yielded)
except BadYieldError:
self.future = Future()
future_set_exc_info(self.future, sys.exc_info())
if self.future is moment:
self.io_loop.add_callback(self.run)
return False
elif not self.future.done():
def inner(f):
# Break a reference cycle to speed GC.
f = None # noqa
self.run()
self.io_loop.add_future(
self.future, inner)
return False
return True
def result_callback(self, key):
return stack_context.wrap(_argument_adapter(
functools.partial(self.set_result, key)))
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = Future()
future_set_exc_info(self.future, (typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
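# Illustrative sketch, not part of the original module: a single positional
# argument is passed through unchanged, while multiple arguments or keyword
# arguments are packed into an ``Arguments`` namedtuple.
def _argument_adapter_example():
    seen = []
    adapted = _argument_adapter(seen.append)
    adapted(1)           # appends 1
    adapted(1, 2, a=3)   # appends Arguments(args=(1, 2), kwargs={'a': 3})
    adapted()            # appends None
    return seen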
# Convert Awaitables into Futures.
try:
import asyncio
except ImportError:
# Py2-compatible version for use with Cython.
# Copied from PEP 380.
@coroutine
def _wrap_awaitable(x):
if hasattr(x, '__await__'):
_i = x.__await__()
else:
_i = iter(x)
try:
_y = next(_i)
except StopIteration as _e:
_r = _value_from_stopiteration(_e)
else:
while 1:
try:
_s = yield _y
except GeneratorExit as _e:
try:
_m = _i.close
except AttributeError:
pass
else:
_m()
raise _e
except BaseException as _e:
_x = sys.exc_info()
try:
_m = _i.throw
except AttributeError:
raise _e
else:
try:
_y = _m(*_x)
except StopIteration as _e:
_r = _value_from_stopiteration(_e)
break
else:
try:
if _s is None:
_y = next(_i)
else:
_y = _i.send(_s)
except StopIteration as _e:
_r = _value_from_stopiteration(_e)
break
raise Return(_r)
else:
try:
_wrap_awaitable = asyncio.ensure_future
except AttributeError:
# asyncio.ensure_future was introduced in Python 3.4.4, but
# Debian jessie still ships with 3.4.2 so try the old name.
_wrap_awaitable = getattr(asyncio, 'async')
def convert_yielded(yielded):
"""Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and Futures.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
"""
# Lists and dicts containing YieldPoints were handled earlier.
if yielded is None or yielded is moment:
return moment
elif yielded is _null_future:
return _null_future
elif isinstance(yielded, (list, dict)):
return multi(yielded)
elif is_future(yielded):
return yielded
elif isawaitable(yielded):
return _wrap_awaitable(yielded)
else:
raise BadYieldError("yielded unknown object %r" % (yielded,))
if singledispatch is not None:
convert_yielded = singledispatch(convert_yielded)
| gpl-3.0 | 5,848,268,275,463,006,000 | 34.91973 | 93 | 0.60454 | false |
saadmk11/your-query | src/questions/urls.py | 1 | 1139 | from django.conf.urls import url
from .views import (question_list,
question_detail,
question_ask,
question_update,
category_list,
category,
answer_update,
question_delete,
answer_delete,
notification)
urlpatterns = [
url(r'^$', question_list, name='question_list'),
url(r'^ask/$', question_ask, name='question_ask'),
url(r'^notifications/$', notification, name='notification'),
url(r'^categories/$', category_list, name='category_list'),
url(r'^categories/(?P<slug>[\w-]+)/$', category, name='category'),
url(r'^(?P<slug>[\w-]+)/delete/$', question_delete, name='question_delete'),
url(r'^(?P<slug>[\w-]+)/edit/$', question_update, name='question_update'),
url(r'^(?P<slug>[\w-]+)/answer/(?P<pk>\d+)/edit/$', answer_update, name='answer_update'),
url(r'^(?P<slug>[\w-]+)/answer/(?P<pk>\d+)/delete/$', answer_delete, name='answer_delete'),
url(r'^(?P<slug>[\w-]+)/$', question_detail, name='question_detail'),
]
| mit | -2,008,724,676,322,331,100 | 44.56 | 95 | 0.533802 | false |
parroyo/python_menu | python_menu/menu.py | 1 | 5783 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from dialog import Dialog, _common_args_syntax
import sys
import os
import inspect
MENU_KEY_TYPE = 'type'
MENU_KEY_COMMON = 'common'
MENU_KEY_ACTION = 'action'
MENU_KEY_GO = 'go '
MENU_KEY_BACK = 'back'
MENU_KEY_EXIT = 'exit'
MENU_KEY_CLEAR = 'clear'
MENU_KEY_CHOICES = 'choices'
class UndefinedScreen(Exception):
""" Screen is not present in the model
"""
def __init__(self, screen_name):
super(UndefinedScreen, self).__init__(
"Screen '{0}' is not present in the model".format(screen_name))
class InvalidMethod(Exception):
""" Invalid Method name
"""
def __init__(self, method_name):
super(InvalidMethod, self).__init__(
"Invalid Method name '{0}'".format(method_name))
class Menu(object):
""" Class Menu
"""
def __init__(self, menu_data, debug=False):
self._screens = []
self._screen_values = {}
self._dialog = Dialog()
self._dialog_methods = dict(inspect.getmembers(self._dialog))
self._custom_methods = dict(inspect.getmembers(self))
self._common_args = list(_common_args_syntax.keys())
self._debug_enable = debug
if sys.version_info.major == 2:
self.debug = self.debug_python2
self._menu = menu_data
self._load_common()
def show(self, screen_name):
""" Show the screen
Args:
screen_name(string): name of the screen to show
Raises:
UndefinedScreen
InvalidMethod
"""
self._screens.append(screen_name)
while (self._screens != []):
self._show_current_screen()
def debug_python2(self, msg):
if self._debug_enable:
raw_input(msg)
def debug(self, msg):
if self._debug_enable:
input(msg)
def clear(self):
""" Clear the screen
"""
os.system('cls' if os.name == 'nt' else 'clear')
def get_value(self, screen_name):
""" Get the value stored by the screen
Args:
screen_name(string): name of the screen to get the value
"""
value = None
if screen_name in self._screen_values:
value = self._screen_values[screen_name]
return value
def _load_common(self):
self._common = {}
for item in self._menu[MENU_KEY_COMMON]:
self._common[item] = self._menu[MENU_KEY_COMMON][item]
def _show_current_screen(self):
current_screen = self._screens[-1]
(dialog_exit, dialog_value) = self._show_dialog(current_screen)
self._screen_values[current_screen] = dialog_value
self._do_actions(current_screen, dialog_exit, dialog_value)
def _show_dialog(self, item):
try:
dialog_type = self._menu[item][MENU_KEY_TYPE]
except KeyError as e:
raise UndefinedScreen(str(e))
if dialog_type in self._dialog_methods:
screen = self._dialog_methods[dialog_type]
            (allowed_args, varargs, keywords, defaults) = inspect.getargspec(screen)
args = self._common.copy()
screen_args = dict([(i, self._menu[item][i]) for i in self._menu[item] if i in allowed_args or i in self._common_args])
args.update(screen_args)
self.debug("args: %s" % args)
dialog_exit = self._dialog_methods[dialog_type](**args)
dialog_value = [None]
if type(dialog_exit) is tuple:
dialog_exit, dialog_value = dialog_exit[0], dialog_exit[1:]
return (dialog_exit, dialog_value)
def _do_actions(self, item, dialog_exit, dialog_value):
""" Do actions
"""
action = self._menu[item].get(MENU_KEY_ACTION, {}).get(dialog_exit)
if action is None:
return
if type(action) is dict:
action = action.get(dialog_value[0])
if type(action) is str:
self._do_action(action)
if type(action) is list:
for action_item in action:
self._do_action(action_item)
def _do_action(self, action):
""" Do action
"""
if MENU_KEY_EXIT in action:
self._screens = []
elif MENU_KEY_GO in action:
new_screen = action.split(' ')[1]
if new_screen == MENU_KEY_BACK:
self._screens.pop()
else:
self._screens.append(new_screen)
else:
# Custom method
self._call_custom_method(action)
def _call_custom_method(self, action):
""" Call custom method
"""
method_name = action
parameters = {}
if type(action) is list:
if len(action) > 0:
method_name = action[0]
if len(action) > 1:
parameters = action[1]
if method_name in self._custom_methods:
self._custom_methods[method_name](**parameters)
else:
raise InvalidMethod(action)
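# Illustrative sketch, not part of the original module: a minimal menu model in
# the structure Menu expects. Screen names, texts and choices are assumptions
# for this example only, and the 'ok'/'cancel' action keys assume the string
# exit codes used by recent pythondialog releases.
EXAMPLE_MENU_DATA = {
    'common': {'height': 15, 'width': 60},
    'main': {
        'type': 'menu',
        'text': 'Main menu',
        'choices': [('1', 'Say hello'), ('2', 'Quit')],
        'action': {'ok': {'1': 'go hello', '2': 'exit'}, 'cancel': 'exit'},
    },
    'hello': {
        'type': 'msgbox',
        'text': 'Hello!',
        'action': {'ok': 'go back'},
    },
}
# Typical use: Menu(EXAMPLE_MENU_DATA).show('main')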
| lgpl-3.0 | -3,854,620,723,446,406,000 | 30.950276 | 131 | 0.583434 | false |
jserver/pagila | pagila/settings.py | 1 | 2178 | """
Django settings for pagila project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['PAGILA_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'films',
'store',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'pagila.urls'
WSGI_APPLICATION = 'pagila.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'pagila',
'USER': os.environ['PAGILA_DB_USER'],
'PASSWORD': os.environ['PAGILA_DB_PASS'],
'HOST': os.environ['PAGILA_DB'],
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Media
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| bsd-3-clause | -8,704,175,519,857,630,000 | 21.926316 | 71 | 0.705234 | false |
openstack/smaug | karbor/tests/unit/operationengine/engine/triggers/timetrigger/test_time_trigger_multi_node.py | 1 | 9846 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
from datetime import datetime
from datetime import timedelta
import eventlet
import functools
import heapq
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from karbor import context as karbor_context
from karbor import exception
from karbor.services.operationengine.engine.triggers.timetrigger import \
time_trigger_multi_node as tt
from karbor.services.operationengine.engine.triggers.timetrigger import utils
from karbor.tests import base
TriggerExecution = namedtuple('TriggerExecution',
['execution_time', 'id', 'trigger_id'])
class FakeTimeFormat(object):
def __init__(self, start_time, pattern):
super(FakeTimeFormat, self).__init__()
@classmethod
def check_time_format(cls, pattern):
pass
def compute_next_time(self, current_time):
return current_time + timedelta(seconds=0.5)
def get_min_interval(self):
return cfg.CONF.min_interval
class FakeExecutor(object):
def __init__(self):
super(FakeExecutor, self).__init__()
self._ops = {}
def execute_operation(self, operation_id, triggered_time,
expect_start_time, window):
if operation_id not in self._ops:
self._ops[operation_id] = 0
self._ops[operation_id] += 1
eventlet.sleep(0.5)
class FakeTimeTrigger(object):
@classmethod
def get_time_format(cls, *args, **kwargs):
return FakeTimeFormat
class FakeDb(object):
def __init__(self):
self._db = []
def trigger_execution_get_next(self, context):
if len(self._db) == 0:
return None
return self._db[0]
def trigger_execution_create(self, context, trigger_id, time):
element = TriggerExecution(time, uuidutils.generate_uuid(), trigger_id)
heapq.heappush(self._db, element)
def trigger_execution_update(self, context, id, current_time, new_time):
for idx, element in enumerate(self._db):
if element.id == id:
if element.execution_time != current_time:
return False
self._db[idx] = TriggerExecution(new_time, element.id,
element.trigger_id)
break
heapq.heapify(self._db)
return True
def trigger_execution_delete(self, context, id, trigger_id):
removed_ids = []
for idx, element in enumerate(self._db):
if (id and element.id == id) or (trigger_id and
element.trigger_id == trigger_id):
removed_ids.append(idx)
for idx in reversed(removed_ids):
self._db.pop(idx)
heapq.heapify(self._db)
return len(removed_ids)
def time_trigger_test(func):
@functools.wraps(func)
@mock.patch.object(tt, 'db', FakeDb())
@mock.patch.object(karbor_context, 'get_admin_context', lambda: None)
@mock.patch.object(utils, 'get_time_format_class',
FakeTimeTrigger.get_time_format)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
class TimeTriggerTestCase(base.TestCase):
_tid = 0
_default_executor = FakeExecutor()
def setUp(self):
super(TimeTriggerTestCase, self).setUp()
self._set_configuration()
def test_check_configuration(self):
self._set_configuration(10, 20, 30)
self.assertRaisesRegex(exception.InvalidInput,
"Configurations of time trigger are invalid",
tt.TimeTrigger.check_configuration)
self._set_configuration()
@time_trigger_test
def test_check_trigger_property_start_time(self):
trigger_property = {
"pattern": "",
"start_time": ""
}
self.assertRaisesRegex(exception.InvalidInput,
"The trigger\'s start time is unknown",
tt.TimeTrigger.check_trigger_definition,
trigger_property)
trigger_property['start_time'] = 'abc'
self.assertRaisesRegex(exception.InvalidInput,
"The format of trigger .* is not correct",
tt.TimeTrigger.check_trigger_definition,
trigger_property)
trigger_property['start_time'] = 123
self.assertRaisesRegex(exception.InvalidInput,
"The trigger .* is not an instance of string",
tt.TimeTrigger.check_trigger_definition,
trigger_property)
@mock.patch.object(FakeTimeFormat, 'get_min_interval')
@time_trigger_test
def test_check_trigger_property_interval(self, get_min_interval):
get_min_interval.return_value = 0
trigger_property = {
"start_time": '2016-8-18 01:03:04'
}
self.assertRaisesRegex(exception.InvalidInput,
"The interval of two adjacent time points .*",
tt.TimeTrigger.check_trigger_definition,
trigger_property)
@time_trigger_test
def test_check_trigger_property_window(self):
trigger_property = {
"window": "abc",
"start_time": '2016-8-18 01:03:04'
}
self.assertRaisesRegex(exception.InvalidInput,
"The trigger window.* is not integer",
tt.TimeTrigger.check_trigger_definition,
trigger_property)
trigger_property['window'] = 1000
self.assertRaisesRegex(exception.InvalidInput,
"The trigger windows .* must be between .*",
tt.TimeTrigger.check_trigger_definition,
trigger_property)
@time_trigger_test
def test_check_trigger_property_end_time(self):
trigger_property = {
"window": 15,
"start_time": '2016-8-18 01:03:04',
"end_time": "abc"
}
self.assertRaisesRegex(exception.InvalidInput,
"The format of trigger .* is not correct",
tt.TimeTrigger.check_trigger_definition,
trigger_property)
@time_trigger_test
def test_register_operation(self):
trigger = self._generate_trigger()
operation_id = "1"
trigger.register_operation(operation_id)
eventlet.sleep(2)
self.assertGreaterEqual(self._default_executor._ops[operation_id], 1)
self.assertRaisesRegex(exception.ScheduledOperationExist,
"The operation_id.* is exist",
trigger.register_operation,
operation_id)
@time_trigger_test
def test_unregister_operation(self):
trigger = self._generate_trigger()
operation_id = "2"
trigger.register_operation(operation_id)
self.assertIn(operation_id, trigger._operation_ids)
trigger.unregister_operation(operation_id)
self.assertNotIn(trigger._id, trigger._operation_ids)
@time_trigger_test
def test_update_trigger_property(self):
trigger = self._generate_trigger()
trigger_property = {
"pattern": "",
"window": 15,
"start_time": datetime.utcnow(),
"end_time": datetime.utcnow()
}
self.assertRaisesRegex(exception.InvalidInput,
".*Can not find the first run tim",
trigger.update_trigger_property,
trigger_property)
@time_trigger_test
def test_update_trigger_property_success(self):
trigger = self._generate_trigger()
trigger.register_operation('7')
eventlet.sleep(0.2)
trigger_property = {
"pattern": "",
"window": 15,
"start_time": datetime.utcnow(),
"end_time": ''
}
with mock.patch.object(FakeTimeFormat, 'compute_next_time') as c:
c.return_value = datetime.utcnow() + timedelta(seconds=20)
trigger.update_trigger_property(trigger_property)
def _generate_trigger(self, end_time=None):
if not end_time:
end_time = datetime.utcnow() + timedelta(seconds=1)
trigger_property = {
"pattern": "",
"window": 15,
"start_time": datetime.utcnow(),
"end_time": end_time
}
return tt.TimeTrigger(
uuidutils.generate_uuid(),
trigger_property,
self._default_executor,
)
def _set_configuration(self, min_window=15,
max_window=30, min_interval=60, poll_interval=1):
self.override_config('min_interval', min_interval)
self.override_config('min_window_time', min_window)
self.override_config('max_window_time', max_window)
self.override_config('trigger_poll_interval', poll_interval)
| apache-2.0 | -7,400,826,263,336,520,000 | 34.039146 | 79 | 0.57729 | false |
jmvrbanac/Specter | specter/reporting/xunit.py | 1 | 5284 | from xml.etree.ElementTree import Element, tostring as element_to_str
import six
from specter import _
from specter.spec import DescribeEvent
from specter.reporting import AbstractParallelReporter, AbstractSerialReporter
class XUnitReporter(AbstractSerialReporter, AbstractParallelReporter):
""" A simple xUnit format report generator for the Specter framework. """
# TODO: Make this more efficient!
def __init__(self):
self.suites = []
self.filename = ''
def add_arguments(self, argparser):
argparser.add_argument(
'--xunit-results', dest='xunit_results', metavar='',
help=_('Saves out xUnit compatible results to a specifed file'))
def process_arguments(self, args):
if args.xunit_results:
self.filename = args.xunit_results
def get_name(self):
return 'xUnit report generator'
def subscribe_to_spec(self, spec):
spec.add_listener(DescribeEvent.COMPLETE, self.describe_complete)
def describe_complete(self, evt):
describe = evt.payload
suite = XUnitTestSuite()
suite.assign_describe(describe)
self.suites.append(suite)
def convert_to_xml(self):
test_suites = Element('testsuites')
for suite in self.suites:
test_suites.append(suite.convert_to_xml())
return test_suites
def finished(self):
if not self.filename:
return
body = element_to_str(self.convert_to_xml(), encoding='utf8')
handle = open(self.filename, 'w')
handle.write(body.decode('utf8'))
handle.close()
class XUnitTestSuite(object):
def __init__(self):
self.describe = None
self.tests = []
def assign_describe(self, describe):
self.describe = describe
for key, case in six.iteritems(self.describe.cases):
test_case = XUnitTestCase()
test_case.assign_case_wrapper(case)
self.tests.append(test_case)
@property
def name(self):
return self.describe.name
@property
def time(self):
return str(self.describe.total_time)
@property
def errors(self):
return str(len([test for test in self.tests if test.error]))
@property
def failures(self):
return str(len([test for test in self.tests if not test.success]))
@property
def skipped(self):
return str(len([test for test in self.tests if test.skipped]))
def convert_to_xml(self):
element = Element('testsuite', {'name': self.name,
'tests': str(len(self.tests)),
'errors': self.errors,
'failures': self.failures,
'skipped': self.skipped,
'time': self.time})
for test in self.tests:
element.append(test.convert_to_xml())
return element
def __str__(self):
return element_to_str(self.convert_to_xml(), encoding='utf8')
class XUnitTestCase(object):
def __init__(self):
self.case_wrapper = None
@property
def name(self):
return str(self.case_wrapper.name)
@property
def error(self):
return self.case_wrapper.error
@property
def success(self):
return self.case_wrapper.success
@property
def skipped(self):
return self.case_wrapper.skipped
@property
def failures(self):
return [expect for expect in self.case_wrapper.expects
if not expect.success]
@property
def module(self):
return self.case_wrapper.parent.real_class_path
@property
def time(self):
return str(self.case_wrapper.elapsed_time)
def assign_case_wrapper(self, wrapper):
self.case_wrapper = wrapper
def convert_to_xml(self):
failure_msg = """
<![CDATA[
Target: {target}: {target_param}
Expected: {expected}: {expected_param}
]]>"""
element = Element('testcase', {'classname': self.module,
'name': self.name,
'time': self.time})
# Add failures
for expect in self.failures:
failure = Element('failure', {
'message': 'Failed: {0}'.format(expect)})
failure.text = failure_msg.format(
target=expect.target,
target_param=expect.target_src_param,
expected=expect.expected,
expected_param=expect.expected_src_param)
element.append(failure)
# Add Skipped
if self.skipped:
skipped_element = Element('skipped')
skipped_element.text = self.case_wrapper.skip_reason
element.append(skipped_element)
# Add Errors
if self.error:
msg = '<![CDATA['
for err in self.error:
msg += '{0}\n'.format(err)
msg += ']]>'
error_element = Element('error')
error_element.text = msg
element.append(error_element)
return element
def __str__(self):
return element_to_str(self.convert_to_xml(), encoding='utf8')
| mit | -3,182,528,697,849,075,700 | 27.874317 | 78 | 0.576268 | false |
adamnew123456/jobmon | jobmon/ticker.py | 1 | 3108 | """
A ticker is responsible for calling into the supervisor periodically and
getting it to handle restarts.
"""
import logging
import os
import select
import threading
import time
from jobmon import util
LOGGER = logging.getLogger('jobmon.ticker')
class Ticker(threading.Thread, util.TerminableThreadMixin):
"""
A ticker is responsible for keeping track of a bunch of timeouts (each of
which is associated with a key), and then calling a function with
that key when the timeout expires.
"""
def __init__(self, callback):
threading.Thread.__init__(self)
util.TerminableThreadMixin.__init__(self)
# This is used to force ticks when new events are registered
reader, writer = os.pipe()
self.tick_reader = os.fdopen(reader, 'rb')
self.tick_writer = os.fdopen(writer, 'wb')
self.timeout_lock = threading.Lock()
self.timeouts = {}
self.callback = callback
def __contains__(self, key):
return key in self.timeouts
def register(self, key, abstime):
"""
Registers a new timeout, to be run at the given absolute time.
"""
LOGGER.info('Registering %s at %d', key, abstime)
with self.timeout_lock:
self.timeouts[key] = abstime
self.tick_writer.write(b' ')
self.tick_writer.flush()
def unregister(self, key):
"""
Removes a timeout from the ticker, if it already exists.
"""
LOGGER.info('Removing %s', key)
with self.timeout_lock:
if key in self.timeouts:
del self.timeouts[key]
def run_timeouts(self):
"""
Runs all the expired timeouts.
"""
expired = []
now = time.time()
with self.timeout_lock:
for key, timeout in self.timeouts.items():
if timeout <= now:
expired.append(key)
for key in expired:
LOGGER.info('Running callback on %s', key)
self.callback(key)
self.unregister(key)
@util.log_crashes(LOGGER, 'Error in ticker')
def run(self):
"""
Runs the timeout loop, calling the timeout function when appropriate.
"""
while True:
try:
min_wait_time = min(self.timeouts.values()) - time.time()
if min_wait_time < 0:
min_wait_time = 0
except ValueError:
min_wait_time = None
readers, _, _ = select.select(
[self.tick_reader, self.exit_reader], [], [],
min_wait_time)
self.run_timeouts()
if self.exit_reader in readers:
break
if self.tick_reader in readers:
# Flush the pipe, since we don't want it to get backed up
LOGGER.info('Woken up by registration')
self.tick_reader.read(1)
LOGGER.info('Closing...')
self.cleanup()
self.tick_reader.close()
self.tick_writer.close()
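# Illustrative sketch, not part of the original module: start a ticker whose
# callback just logs the expired key, then register a timeout five seconds in
# the future. The key name and callback body are assumptions for this example.
def _example_ticker_usage():
    ticker = Ticker(lambda key: LOGGER.info('timeout expired for %s', key))
    ticker.start()
    ticker.register('job-1', time.time() + 5)
    return ticker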
| bsd-2-clause | 7,884,936,955,516,481,000 | 27.777778 | 78 | 0.559524 | false |
cebel/pyctd | src/pyctd/manager/database.py | 1 | 17991 | # -*- coding: utf-8 -*-
"""PyCTD loads all CTD content in the database. Content is available via functions."""
import configparser
import gzip
import io
import logging
import os
import re
import sys
import time
from configparser import RawConfigParser
import numpy as np
import pandas as pd
from requests.compat import urlparse
from sqlalchemy import create_engine, inspect
from sqlalchemy.engine import reflection
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.sql import sqltypes
from . import defaults
from . import models
from . import table_conf
from .table import get_table_configurations
from ..constants import PYCTD_DATA_DIR, PYCTD_DIR, bcolors
if sys.version_info[0] == 3:
from urllib.request import urlretrieve
else:
from urllib import urlretrieve
log = logging.getLogger(__name__)
alchemy_pandas_dytpe_mapper = {
sqltypes.Text: np.unicode,
sqltypes.String: np.unicode,
sqltypes.Integer: np.float,
sqltypes.REAL: np.double
}
def get_connection_string(connection=None):
"""return SQLAlchemy connection string if it is set
:param connection: get the SQLAlchemy connection string #TODO
:rtype: str
"""
if not connection:
config = configparser.ConfigParser()
cfp = defaults.config_file_path
if os.path.exists(cfp):
log.info('fetch database configuration from %s', cfp)
config.read(cfp)
connection = config['database']['sqlalchemy_connection_string']
log.info('load connection string from %s: %s', cfp, connection)
else:
with open(cfp, 'w') as config_file:
connection = defaults.sqlalchemy_connection_string_default
config['database'] = {'sqlalchemy_connection_string': connection}
config.write(config_file)
log.info('create configuration file %s', cfp)
return connection
class BaseDbManager(object):
"""Creates a connection to database and a persistient session using SQLAlchemy"""
def __init__(self, connection=None, echo=False):
"""
:param str connection: SQLAlchemy
:param bool echo: True or False for SQL output of SQLAlchemy engine
"""
log.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(PYCTD_DIR, defaults.TABLE_PREFIX + 'database.log'))
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
try:
self.connection = get_connection_string(connection)
self.engine = create_engine(self.connection, echo=echo)
self.inspector = reflection.Inspector.from_engine(self.engine)
self.sessionmaker = sessionmaker(bind=self.engine, autoflush=False, expire_on_commit=False)
self.session = scoped_session(self.sessionmaker)()
except:
self.set_connection_string_by_user_input()
self.__init__()
def set_connection_string_by_user_input(self):
"""Prompts the user to input a connection string"""
user_connection = input(
bcolors.WARNING + "\nFor any reason connection to " + bcolors.ENDC +
bcolors.FAIL + "{}".format(self.connection) + bcolors.ENDC +
bcolors.WARNING + " is not possible.\n\n" + bcolors.ENDC +
"For more information about SQLAlchemy connection strings go to:\n" +
"http://docs.sqlalchemy.org/en/latest/core/engines.html\n\n"
"Please insert a valid connection string:\n" +
bcolors.UNDERLINE + "Examples:\n\n" + bcolors.ENDC +
"MySQL (recommended):\n" +
bcolors.OKGREEN + "\tmysql+pymysql://user:passwd@localhost/database?charset=utf8\n" + bcolors.ENDC +
"PostgreSQL:\n" +
bcolors.OKGREEN + "\tpostgresql://scott:tiger@localhost/mydatabase\n" + bcolors.ENDC +
"MsSQL (pyodbc have to be installed):\n" +
bcolors.OKGREEN + "\tmssql+pyodbc://user:passwd@database\n" + bcolors.ENDC +
"SQLite (always works):\n" +
" - Linux:\n" +
bcolors.OKGREEN + "\tsqlite:////absolute/path/to/database.db\n" + bcolors.ENDC +
" - Windows:\n" +
bcolors.OKGREEN + "\tsqlite:///C:\\path\\to\\database.db\n" + bcolors.ENDC +
"Oracle:\n" +
bcolors.OKGREEN + "\toracle://user:[email protected]:1521/database\n\n" + bcolors.ENDC +
"[RETURN] for standard connection {}:\n".format(defaults.sqlalchemy_connection_string_default)
)
        if not (user_connection and user_connection.strip()):
user_connection = defaults.sqlalchemy_connection_string_default
set_connection(user_connection.strip())
def create_all(self, checkfirst=True):
"""Creates all tables from models in the database
:param bool checkfirst: Check if tables already exists
"""
log.info('creating tables in %s', self.engine.url)
models.Base.metadata.create_all(self.engine, checkfirst=checkfirst)
def drop_all(self):
"""Drops all tables in the database"""
log.info('dropping tables in %s', self.engine.url)
self.session.commit()
models.Base.metadata.drop_all(self.engine)
self.session.commit()
class DbManager(BaseDbManager):
"""Implements functions to upload CTD files into a database. Preferred SQL Alchemy database is MySQL with
:mod:`pymysql`.
"""
__mapper = {}
pyctd_data_dir = PYCTD_DATA_DIR
def __init__(self, connection=None):
"""
:param str connection: custom database connection SQL Alchemy string
"""
super(DbManager, self).__init__(connection=connection)
self.tables = get_table_configurations()
def db_import(self, urls=None, force_download=False):
"""Updates the CTD database
1. downloads all files from CTD
2. drops all tables in database
3. creates all tables in database
4. import all data from CTD files
:param iter[str] urls: An iterable of URL strings
:param bool force_download: force method to download
"""
if not urls:
urls = [
defaults.url_base + table_conf.tables[model]['file_name']
for model in table_conf.tables
]
log.info('Update CTD database from %s', urls)
self.drop_all()
self.download_urls(urls=urls, force_download=force_download)
self.create_all()
self.import_tables()
self.session.close()
@property
def mapper(self):
"""returns a dictionary with keys of pyctd.manager.table_con.domains_to_map and pandas.DataFrame as values.
DataFrames column names:
- domain_id (represents the domain identifier of e.g. chemical)
- domain__id (represents the primary key in domain table)
:return: dict of pandas DataFrames (keys:domain_name, values:DataFrame)
:rtype: dict of pandas.DataFrame
"""
if not self.__mapper:
for model in table_conf.models_to_map:
domain = model.table_suffix
tab_conf = table_conf.tables[model]
file_path = os.path.join(self.pyctd_data_dir, tab_conf['file_name'])
col_name_in_file, col_name_in_db = tab_conf['domain_id_column']
column_index = self.get_index_of_column(col_name_in_file, file_path)
df = pd.read_table(
file_path,
names=[col_name_in_db],
header=None,
usecols=[column_index],
comment='#',
index_col=False,
dtype=self.get_dtypes(model)
)
if domain == 'chemical':
df[col_name_in_db] = df[col_name_in_db].str.replace('MESH:', '').str.strip()
df[domain + '__id'] = df.index + 1
self.__mapper[domain] = df
return self.__mapper
def import_tables(self, only_tables=None, exclude_tables=None):
"""Imports all data in database tables
:param set[str] only_tables: names of tables to be imported
:param set[str] exclude_tables: names of tables to be excluded
"""
for table in self.tables:
if only_tables is not None and table.name not in only_tables:
continue
if exclude_tables is not None and table.name in exclude_tables:
continue
self.import_table(table)
@classmethod
def get_index_of_column(cls, column, file_path):
"""Get index of a specific column name in a CTD file
:param column:
:param file_path:
:return: Optional[int]
"""
columns = cls.get_column_names_from_file(file_path)
if column in columns:
return columns.index(column)
@classmethod
def get_index_and_columns_order(cls, columns_in_file_expected, columns_dict, file_path):
"""
:param columns_in_file_expected:
:param columns_dict:
:param file_path:
:rtype: tuple[list,list]
"""
use_columns_with_index = []
column_names_in_db = []
column_names_from_file = cls.get_column_names_from_file(file_path)
if not set(columns_in_file_expected).issubset(column_names_from_file):
log.exception(
'%s columns are not a subset of columns %s in file %s',
columns_in_file_expected,
column_names_from_file,
file_path
)
else:
for index, column in enumerate(column_names_from_file):
if column in columns_dict:
use_columns_with_index.append(index)
column_names_in_db.append(columns_dict[column])
return use_columns_with_index, column_names_in_db
def import_table(self, table):
"""import table by Table object
:param `manager.table_conf.Table` table: Table object
"""
file_path = os.path.join(self.pyctd_data_dir, table.file_name)
log.info('importing %s data into table %s', file_path, table.name)
table_import_timer = time.time()
use_columns_with_index, column_names_in_db = self.get_index_and_columns_order(
table.columns_in_file_expected,
table.columns_dict,
file_path
)
self.import_table_in_db(file_path, use_columns_with_index, column_names_in_db, table)
for column_in_file, column_in_one2many_table in table.one_to_many:
o2m_column_index = self.get_index_of_column(column_in_file, file_path)
self.import_one_to_many(file_path, o2m_column_index, table, column_in_one2many_table)
log.info('done importing %s in %.2f seconds', table.name, time.time() - table_import_timer)
def import_one_to_many(self, file_path, column_index, parent_table, column_in_one2many_table):
"""
:param file_path:
:param column_index:
:param parent_table:
:param column_in_one2many_table:
"""
chunks = pd.read_table(
file_path,
usecols=[column_index],
header=None,
comment='#',
index_col=False,
chunksize=1000000,
dtype=self.get_dtypes(parent_table.model)
)
for chunk in chunks:
child_values = []
parent_id_values = []
chunk.dropna(inplace=True)
chunk.index += 1
for parent_id, values in chunk.iterrows():
entry = values[column_index]
if not isinstance(entry, str):
entry = str(entry)
for value in entry.split("|"):
parent_id_values.append(parent_id)
child_values.append(value.strip())
parent_id_column_name = parent_table.name + '__id'
o2m_table_name = defaults.TABLE_PREFIX + parent_table.name + '__' + column_in_one2many_table
pd.DataFrame({
parent_id_column_name: parent_id_values,
column_in_one2many_table: child_values
}).to_sql(name=o2m_table_name, if_exists='append', con=self.engine, index=False)
# TODO document get_dtypes
@staticmethod
def get_dtypes(sqlalchemy_model):
"""
:param sqlalchemy_model:
:rtype: dict
"""
mapper = inspect(sqlalchemy_model)
return {
x.key: alchemy_pandas_dytpe_mapper[type(x.type)]
for x in mapper.columns
if x.key != 'id'
}
def import_table_in_db(self, file_path, use_columns_with_index, column_names_in_db, table):
"""Imports data from CTD file into database
:param str file_path: path to file
:param list[int] use_columns_with_index: list of column indices in file
:param list[str] column_names_in_db: list of column names (have to fit to models except domain_id column name)
:param table: `manager.table.Table` object
"""
chunks = pd.read_table(
file_path,
usecols=use_columns_with_index,
names=column_names_in_db,
header=None, comment='#',
index_col=False,
chunksize=1000000,
dtype=self.get_dtypes(table.model)
)
for chunk in chunks:
# this is an evil hack because CTD is not using the MESH prefix in this table
if table.name == 'exposure_event':
chunk.disease_id = 'MESH:' + chunk.disease_id
chunk['id'] = chunk.index + 1
if table.model not in table_conf.models_to_map:
for model in table_conf.models_to_map:
domain = model.table_suffix
domain_id = domain + "_id"
if domain_id in column_names_in_db:
chunk = pd.merge(chunk, self.mapper[domain], on=domain_id, how='left')
del chunk[domain_id]
chunk.set_index('id', inplace=True)
table_with_prefix = defaults.TABLE_PREFIX + table.name
chunk.to_sql(name=table_with_prefix, if_exists='append', con=self.engine)
del chunks
@staticmethod
def get_column_names_from_file(file_path):
"""returns column names from CTD download file
:param str file_path: path to CTD download file
"""
if file_path.endswith('.gz'):
file_handler = io.TextIOWrapper(io.BufferedReader(gzip.open(file_path)))
else:
file_handler = open(file_path, 'r')
fields_line = False
with file_handler as file:
for line in file:
line = line.strip()
if not fields_line and re.search('#\s*Fields\s*:$', line):
fields_line = True
elif fields_line and not (line == '' or line == '#'):
return [column.strip() for column in line[1:].split("\t")]
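# Hypothetical illustration (file content invented): given a CTD header such as
# "# Fields:" followed by "# ChemicalName<TAB>ChemicalID<TAB>CasRN", this method
# returns ['ChemicalName', 'ChemicalID', 'CasRN'] -- the first non-empty comment
# line after the Fields marker, split on tabs and stripped.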
@classmethod
def download_urls(cls, urls, force_download=False):
"""Downloads all CTD URLs that don't exist
:param iter[str] urls: iterable of URL of CTD
:param bool force_download: force method to download
"""
for url in urls:
file_path = cls.get_path_to_file_from_url(url)
if os.path.exists(file_path) and not force_download:
log.info('already downloaded %s to %s', url, file_path)
else:
log.info('downloading %s to %s', url, file_path)
download_timer = time.time()
urlretrieve(url, file_path)
log.info('downloaded in %.2f seconds', time.time() - download_timer)
@classmethod
def get_path_to_file_from_url(cls, url):
"""standard file path
:param str url: CTD download URL
:rtype: str
"""
file_name = urlparse(url).path.split('/')[-1]
return os.path.join(cls.pyctd_data_dir, file_name)
def update(connection=None, urls=None, force_download=False):
"""Updates CTD database
:param iter[str] urls: list of urls to download
:param str connection: custom database connection string
:param bool force_download: force method to download
"""
db = DbManager(connection)
db.db_import(urls=urls, force_download=force_download)
db.session.close()
def set_mysql_connection(host='localhost', user='pyctd_user', password='pyctd_passwd', db='pyctd', charset='utf8'):
"""Sets the connection using MySQL Parameters"""
set_connection('mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}'.format(
host=host,
user=user,
passwd=password,
db=db,
charset=charset)
)
def set_test_connection():
"""Sets the connection with the default SQLite test database"""
set_connection(defaults.DEFAULT_SQLITE_TEST_DATABASE_NAME)
def set_connection(connection=defaults.sqlalchemy_connection_string_default):
"""Set the connection string for SQLAlchemy
:param str connection: SQLAlchemy connection string
"""
cfp = defaults.config_file_path
config = RawConfigParser()
if not os.path.exists(cfp):
with open(cfp, 'w') as config_file:
config['database'] = {'sqlalchemy_connection_string': connection}
config.write(config_file)
log.info('create configuration file %s', cfp)
else:
config.read(cfp)
config.set('database', 'sqlalchemy_connection_string', connection)
with open(cfp, 'w') as configfile:
config.write(configfile)
| apache-2.0 | -5,251,697,769,737,774,000 | 35.942505 | 118 | 0.593908 | false |
lindemann09/pyForceDAQ | forceDAQ/data_handling/convert.py | 1 | 8601 | #!/usr/bin/env python
"""
Functions to convert force data
This module can also be executed.
"""
__author__ = 'Oliver Lindemann'
import os
import sys
import gzip
import numpy as np
from .read_force_data import read_raw_data, data_frame_to_text
PAUSE_CRITERION = 500
MSEC_PER_SAMPLES = 1
REF_SAMPLE_PROBE = 1000
MIN_DELAY_ENDSTREAM = 2
CONVERTED_SUFFIX = ".conv.csv.gz"
CONVERTED_SUBFOLDER = "converted"
def _periods_from_daq_events(daq_events):
periods = {}
started = None
sensor_id = None
evt = np.array(daq_events["value"])
times = np.array(daq_events["time"]).astype(int)
idx = np.argsort(times)
for t, v in zip(times[idx], evt[idx]):
try:
sensor_id = int(v.split(":")[1])
except:
sensor_id = None
if sensor_id not in periods:
periods[sensor_id] = []
if v.startswith("started"):
if started is None:
started = t
else:
periods[sensor_id].append((started, None))
started = None
elif v.startswith("pause"):
periods[sensor_id].append((started, t))
started = None
# a period that was started but never paused stays open-ended
if started is not None:
periods[sensor_id].append((started, None))
return periods
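# Shape of the return value, with invented numbers for illustration:
# {1: [(12000, 15000), (20000, None)]} means sensor 1 recorded from t=12000 to
# t=15000 and a second period that was started but never paused (open end).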
def _pauses_idx_from_timeline(time, pause_criterion):
pauses_idx = np.where(np.diff(time) > pause_criterion)[0]
last_pause = -1
rtn = []
for idx in np.append(pauses_idx, len(time)-1):
rtn.append((last_pause+1, idx))
last_pause = idx
return rtn
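# Worked example with invented input: time = [0, 1, 2, 1000, 1001] and
# pause_criterion = 500 gives a single gap between indices 2 and 3, so the
# function returns [(0, 2), (3, 4)] -- one (first, last) index pair per block.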
def _most_frequent_value(values):
(v, cnt) = np.unique(values, return_counts=True)
idx = np.argmax(cnt)
return v[idx]
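# e.g. _most_frequent_value([1, 2, 2, 3]) -> 2; on ties the first (smallest)
# value returned by np.unique wins, since np.argmax keeps the first maximum.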
def print_histogram(values):
(v, cnt) = np.unique(values, return_counts=True)
for a,b in zip(v,cnt):
print("{} -- {}".format(a,b))
def _end_stream_sample(timestamps, min_delay=MIN_DELAY_ENDSTREAM):
"""finds end of the data stream, that is, sample before next long waiting
sample or returns None if no end can be detected"""
next_t_diffs = np.diff(timestamps)
try:
return np.where(next_t_diffs >= min_delay)[0][0] #+1-1
except:
return None
def _linear_timeline_matched_by_single_reference_sample(irregular_timeline,
id_ref_sample, msec_per_sample):
"""match timeline that differences between the two is minimal
new times can not be after irregular times
"""
t_ref = irregular_timeline[id_ref_sample]
t_first = t_ref - (id_ref_sample*msec_per_sample)
t_last = t_first + ((len(irregular_timeline) - 1) * msec_per_sample)
return np.arange(t_first, t_last + msec_per_sample, step=msec_per_sample)
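# Worked example with invented values: if irregular_timeline[5] == 1005,
# id_ref_sample=5 and msec_per_sample=1, then t_first = 1000 and the function
# returns the regular timeline 1000, 1001, ... with one entry per input sample.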
def _timeline_matched_by_delay_chunked_samples(times, msec_per_sample):
rtn = np.empty(len(times))*np.NaN
p = 0
while p<len(times):
next_ref_sample = _end_stream_sample(times[p:])
if next_ref_sample is not None:
ref_time = times[p+next_ref_sample]
rtn[p:(p+next_ref_sample+1)] = np.arange(
start = ref_time - (next_ref_sample*msec_per_sample),
stop = ref_time + msec_per_sample,
step = msec_per_sample)
p = p + next_ref_sample + 1
else:
# no further reference samples
rtn[p:] = times[p:]
break
return rtn
class Method(object):
types = {1: "single reference sample (forced linearity)",
2: "multiple delayed chunked samples (no linearity assumed)"}
def __init__(self, id):
if id not in Method.types:
raise RuntimeError("Unkown resampling method")
self.id = id
@property
def description(self):
return Method.types[self.id]
@staticmethod
def get_method_from_description(description):
for id, desc in Method.types.items():
if desc == description:
return Method(id)
return None
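# Usage sketch: Method(1).description -> "single reference sample (forced linearity)",
# Method.get_method_from_description(Method(2).description).id -> 2; an unknown id
# raises RuntimeError and an unknown description returns None.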
def _adjusted_timestamps(timestamps, pauses_idx, evt_periods, method):
"""
method=Method(1): _linear_timeline_matched_by_single_reference_sample
method=Method(2): _timeline_matched_by_delay_chunked_samples
"""
# adapting timestamps
rtn = np.empty(len(timestamps))*np.NaN
period_counter = 0
for idx, evt_per in zip(pauses_idx, evt_periods):
# loop over periods
# logging
period_counter += 1
n_samples = idx[1] - idx[0] + 1
if evt_per[1]: # end time
sample_diff = n_samples - (1+(evt_per[1]-evt_per[0])//MSEC_PER_SAMPLES)
if sample_diff!=0:
print("Period {}: Sample difference of {}".format(
period_counter, sample_diff))
else:
print("Period {}: No pause sampling time.".format(period_counter))
#convert times
times = timestamps[idx[0]:idx[1] + 1]
if method.id==1:
# match refe samples
next_ref = _end_stream_sample(times[REF_SAMPLE_PROBE:(REF_SAMPLE_PROBE + 1000)])
if next_ref is None:
next_ref = 0
newtimes = _linear_timeline_matched_by_single_reference_sample(
times, id_ref_sample=REF_SAMPLE_PROBE + next_ref,
msec_per_sample=MSEC_PER_SAMPLES)
elif method.id==2:
# using delays
newtimes = _timeline_matched_by_delay_chunked_samples(times,
msec_per_sample=MSEC_PER_SAMPLES)
else:
newtimes = times
rtn[idx[0]:idx[1] + 1] = newtimes
return rtn.astype(int)
def converted_filename(flname):
"""returns path and filename of the converted data file"""
if flname.endswith(".gz"):
tmp = flname[:-7]
else:
tmp = flname[:-4]
path, new_filename = os.path.split(tmp)
converted_path = os.path.join(path, CONVERTED_SUBFOLDER)
return converted_path, new_filename + CONVERTED_SUFFIX
def convert_raw_data(filepath, method, save_time_adjustments=False,
keep_delay_variable=False):
"""preprocessing raw pyForceData:
"""
# todo only one sensor
assert(isinstance(method, Method))
filepath = os.path.join(os.path.split(sys.argv[0])[0], filepath)
print("Converting {}".format(filepath))
print("Method: {}".format(method.description))
data, udp_event, daq_events, comments = read_raw_data(filepath)
print("{} samples".format(len(data["time"])))
sensor_id = 1
if not keep_delay_variable:
data.pop("delay", None)
timestamps = np.array(data["time"]).astype(int)
#pauses
pauses_idx = _pauses_idx_from_timeline(timestamps, pause_criterion=PAUSE_CRITERION)
evt_periods = _periods_from_daq_events(daq_events)
if len(pauses_idx) != len(evt_periods[sensor_id]):
raise RuntimeError("Pauses in DAQ events do not match recording pauses")
else:
data["time"] = _adjusted_timestamps(timestamps=timestamps,
pauses_idx=pauses_idx,
evt_periods=evt_periods[
sensor_id],
method=method)
if save_time_adjustments:
data["time_adjustment"] = timestamps-data["time"]
#print("Time difference historgram")
#print_histogram(data["time_adjustment"])
#save
folder, new_filename = converted_filename(filepath)
try:
os.makedirs(folder)
except:
pass
new_filename = os.path.join(folder, new_filename)
with gzip.open(new_filename, "wt") as fl:
fl.write(comments.strip() + "\n")
fl.write(data_frame_to_text(data))
def get_all_data_files(folder):
rtn = []
for flname in os.listdir(folder):
if (flname.endswith(".csv") or flname.endswith(".csv.gz")) and not \
flname.endswith(CONVERTED_SUFFIX):
flname = os.path.join(folder, flname)
rtn.append(flname)
return rtn
def get_all_unconverted_data_files(folder):
rtn = []
files = get_all_data_files(folder)
try: # make subfolder
c_path, _ = converted_filename(files[0])
converted_files = os.listdir(c_path)
except:
converted_files = []
for flname in files:
_, c_flname = converted_filename(flname)
if c_flname not in converted_files:
rtn.append(flname)
return rtn
| mit | -2,560,907,721,749,583,000 | 30.276364 | 99 | 0.58063 | false |
Donkyhotay/MoonPy | twisted/protocols/sip.py | 1 | 41973 | # -*- test-case-name: twisted.test.test_sip -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Session Initialization Protocol.
Documented in RFC 2543.
[Superseded by RFC 3261]
This module contains a deprecated implementation of HTTP Digest authentication.
See L{twisted.cred.credentials} and L{twisted.cred._digest} for its new home.
"""
# system imports
import socket, time, sys, random, warnings
from zope.interface import implements, Interface
# twisted imports
from twisted.python import log, util
from twisted.python.deprecate import deprecated
from twisted.python.versions import Version
from twisted.python.hashlib import md5
from twisted.internet import protocol, defer, reactor
from twisted import cred
import twisted.cred.error
from twisted.cred.credentials import UsernameHashedPassword, UsernamePassword
# sibling imports
from twisted.protocols import basic
PORT = 5060
# SIP headers have short forms
shortHeaders = {"call-id": "i",
"contact": "m",
"content-encoding": "e",
"content-length": "l",
"content-type": "c",
"from": "f",
"subject": "s",
"to": "t",
"via": "v",
}
longHeaders = {}
for k, v in shortHeaders.items():
longHeaders[v] = k
del k, v
statusCodes = {
100: "Trying",
180: "Ringing",
181: "Call Is Being Forwarded",
182: "Queued",
183: "Session Progress",
200: "OK",
300: "Multiple Choices",
301: "Moved Permanently",
302: "Moved Temporarily",
303: "See Other",
305: "Use Proxy",
380: "Alternative Service",
400: "Bad Request",
401: "Unauthorized",
402: "Payment Required",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
406: "Not Acceptable",
407: "Proxy Authentication Required",
408: "Request Timeout",
409: "Conflict", # Not in RFC3261
410: "Gone",
411: "Length Required", # Not in RFC3261
413: "Request Entity Too Large",
414: "Request-URI Too Large",
415: "Unsupported Media Type",
416: "Unsupported URI Scheme",
420: "Bad Extension",
421: "Extension Required",
423: "Interval Too Brief",
480: "Temporarily Unavailable",
481: "Call/Transaction Does Not Exist",
482: "Loop Detected",
483: "Too Many Hops",
484: "Address Incomplete",
485: "Ambiguous",
486: "Busy Here",
487: "Request Terminated",
488: "Not Acceptable Here",
491: "Request Pending",
493: "Undecipherable",
500: "Internal Server Error",
501: "Not Implemented",
502: "Bad Gateway", # no donut
503: "Service Unavailable",
504: "Server Time-out",
505: "SIP Version not supported",
513: "Message Too Large",
600: "Busy Everywhere",
603: "Decline",
604: "Does not exist anywhere",
606: "Not Acceptable",
}
specialCases = {
'cseq': 'CSeq',
'call-id': 'Call-ID',
'www-authenticate': 'WWW-Authenticate',
}
def dashCapitalize(s):
''' Capitalize a string, making sure to treat - as a word separator '''
return '-'.join([ x.capitalize() for x in s.split('-')])
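# e.g. dashCapitalize('content-length') -> 'Content-Length'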
def unq(s):
if s[0] == s[-1] == '"':
return s[1:-1]
return s
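# e.g. unq('"nonce"') -> 'nonce', while an unquoted string is returned unchanged.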
def DigestCalcHA1(
pszAlg,
pszUserName,
pszRealm,
pszPassword,
pszNonce,
pszCNonce,
):
m = md5()
m.update(pszUserName)
m.update(":")
m.update(pszRealm)
m.update(":")
m.update(pszPassword)
HA1 = m.digest()
if pszAlg == "md5-sess":
m = md5()
m.update(HA1)
m.update(":")
m.update(pszNonce)
m.update(":")
m.update(pszCNonce)
HA1 = m.digest()
return HA1.encode('hex')
DigestCalcHA1 = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcHA1)
def DigestCalcResponse(
HA1,
pszNonce,
pszNonceCount,
pszCNonce,
pszQop,
pszMethod,
pszDigestUri,
pszHEntity,
):
m = md5()
m.update(pszMethod)
m.update(":")
m.update(pszDigestUri)
if pszQop == "auth-int":
m.update(":")
m.update(pszHEntity)
HA2 = m.digest().encode('hex')
m = md5()
m.update(HA1)
m.update(":")
m.update(pszNonce)
m.update(":")
if pszNonceCount and pszCNonce: # pszQop:
m.update(pszNonceCount)
m.update(":")
m.update(pszCNonce)
m.update(":")
m.update(pszQop)
m.update(":")
m.update(HA2)
hash = m.digest().encode('hex')
return hash
DigestCalcResponse = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcResponse)
_absent = object()
class Via(object):
"""
A L{Via} is a SIP Via header, representing a segment of the path taken by
the request.
See RFC 3261, sections 8.1.1.7, 18.2.2, and 20.42.
@ivar transport: Network protocol used for this leg. (Probably either "TCP"
or "UDP".)
@type transport: C{str}
@ivar branch: Unique identifier for this request.
@type branch: C{str}
@ivar host: Hostname or IP for this leg.
@type host: C{str}
@ivar port: Port used for this leg.
@type port C{int}, or None.
@ivar rportRequested: Whether to request RFC 3581 client processing or not.
@type rportRequested: C{bool}
@ivar rportValue: Servers wishing to honor requests for RFC 3581 processing
should set this parameter to the source port the request was received
from.
@type rportValue: C{int}, or None.
@ivar ttl: Time-to-live for requests on multicast paths.
@type ttl: C{int}, or None.
@ivar maddr: The destination multicast address, if any.
@type maddr: C{str}, or None.
@ivar hidden: Obsolete in SIP 2.0.
@type hidden: C{bool}
@ivar otherParams: Any other parameters in the header.
@type otherParams: C{dict}
"""
def __init__(self, host, port=PORT, transport="UDP", ttl=None,
hidden=False, received=None, rport=_absent, branch=None,
maddr=None, **kw):
"""
Set parameters of this Via header. All arguments correspond to
attributes of the same name.
To maintain compatibility with old SIP
code, the 'rport' argument is used to determine the values of
C{rportRequested} and C{rportValue}. If None, C{rportRequested} is set
to True. (The deprecated method for doing this is to pass True.) If an
integer, C{rportValue} is set to the given value.
Any arguments not explicitly named here are collected into the
C{otherParams} dict.
"""
self.transport = transport
self.host = host
self.port = port
self.ttl = ttl
self.hidden = hidden
self.received = received
if rport is True:
warnings.warn(
"rport=True is deprecated since Twisted 9.0.",
DeprecationWarning,
stacklevel=2)
self.rportValue = None
self.rportRequested = True
elif rport is None:
self.rportValue = None
self.rportRequested = True
elif rport is _absent:
self.rportValue = None
self.rportRequested = False
else:
self.rportValue = rport
self.rportRequested = False
self.branch = branch
self.maddr = maddr
self.otherParams = kw
def _getrport(self):
"""
Returns the rport value expected by the old SIP code.
"""
if self.rportRequested == True:
return True
elif self.rportValue is not None:
return self.rportValue
else:
return None
def _setrport(self, newRPort):
"""
L{Base._fixupNAT} sets C{rport} directly, so this method sets
C{rportValue} based on that.
@param newRPort: The new rport value.
@type newRPort: C{int}
"""
self.rportValue = newRPort
self.rportRequested = False
rport = property(_getrport, _setrport)
def toString(self):
"""
Serialize this header for use in a request or response.
"""
s = "SIP/2.0/%s %s:%s" % (self.transport, self.host, self.port)
if self.hidden:
s += ";hidden"
for n in "ttl", "branch", "maddr", "received":
value = getattr(self, n)
if value is not None:
s += ";%s=%s" % (n, value)
if self.rportRequested:
s += ";rport"
elif self.rportValue is not None:
s += ";rport=%s" % (self.rport,)
etc = self.otherParams.items()
etc.sort()
for k, v in etc:
if v is None:
s += ";" + k
else:
s += ";%s=%s" % (k, v)
return s
def parseViaHeader(value):
"""
Parse a Via header.
@return: The parsed version of this header.
@rtype: L{Via}
"""
parts = value.split(";")
sent, params = parts[0], parts[1:]
protocolinfo, by = sent.split(" ", 1)
by = by.strip()
result = {}
pname, pversion, transport = protocolinfo.split("/")
if pname != "SIP" or pversion != "2.0":
raise ValueError, "wrong protocol or version: %r" % value
result["transport"] = transport
if ":" in by:
host, port = by.split(":")
result["port"] = int(port)
result["host"] = host
else:
result["host"] = by
for p in params:
# it's the comment-stripping dance!
p = p.strip().split(" ", 1)
if len(p) == 1:
p, comment = p[0], ""
else:
p, comment = p
if p == "hidden":
result["hidden"] = True
continue
parts = p.split("=", 1)
if len(parts) == 1:
name, value = parts[0], None
else:
name, value = parts
if name in ("rport", "ttl"):
value = int(value)
result[name] = value
return Via(**result)
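# Illustrative parse (header value invented for this sketch):
# parseViaHeader('SIP/2.0/UDP pc33.example.com:5060;branch=z9hG4bK776;ttl=16')
# yields a Via with transport='UDP', host='pc33.example.com', port=5060,
# branch='z9hG4bK776' and ttl=16.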
class URL:
"""A SIP URL."""
def __init__(self, host, username=None, password=None, port=None,
transport=None, usertype=None, method=None,
ttl=None, maddr=None, tag=None, other=None, headers=None):
self.username = username
self.host = host
self.password = password
self.port = port
self.transport = transport
self.usertype = usertype
self.method = method
self.tag = tag
self.ttl = ttl
self.maddr = maddr
if other == None:
self.other = []
else:
self.other = other
if headers == None:
self.headers = {}
else:
self.headers = headers
def toString(self):
l = []; w = l.append
w("sip:")
if self.username != None:
w(self.username)
if self.password != None:
w(":%s" % self.password)
w("@")
w(self.host)
if self.port != None:
w(":%d" % self.port)
if self.usertype != None:
w(";user=%s" % self.usertype)
for n in ("transport", "ttl", "maddr", "method", "tag"):
v = getattr(self, n)
if v != None:
w(";%s=%s" % (n, v))
for v in self.other:
w(";%s" % v)
if self.headers:
w("?")
w("&".join([("%s=%s" % (specialCases.get(h) or dashCapitalize(h), v)) for (h, v) in self.headers.items()]))
return "".join(l)
def __str__(self):
return self.toString()
def __repr__(self):
return '<URL %s:%s@%s:%r/%s>' % (self.username, self.password, self.host, self.port, self.transport)
def parseURL(url, host=None, port=None):
"""Return string into URL object.
URIs are of of form 'sip:[email protected]'.
"""
d = {}
if not url.startswith("sip:"):
raise ValueError("unsupported scheme: " + url[:4])
parts = url[4:].split(";")
userdomain, params = parts[0], parts[1:]
udparts = userdomain.split("@", 1)
if len(udparts) == 2:
userpass, hostport = udparts
upparts = userpass.split(":", 1)
if len(upparts) == 1:
d["username"] = upparts[0]
else:
d["username"] = upparts[0]
d["password"] = upparts[1]
else:
hostport = udparts[0]
hpparts = hostport.split(":", 1)
if len(hpparts) == 1:
d["host"] = hpparts[0]
else:
d["host"] = hpparts[0]
d["port"] = int(hpparts[1])
if host != None:
d["host"] = host
if port != None:
d["port"] = port
for p in params:
if p == params[-1] and "?" in p:
d["headers"] = h = {}
p, headers = p.split("?", 1)
for header in headers.split("&"):
k, v = header.split("=")
h[k] = v
nv = p.split("=", 1)
if len(nv) == 1:
d.setdefault("other", []).append(p)
continue
name, value = nv
if name == "user":
d["usertype"] = value
elif name in ("transport", "ttl", "maddr", "method", "tag"):
if name == "ttl":
value = int(value)
d[name] = value
else:
d.setdefault("other", []).append(p)
return URL(**d)
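# Minimal sketch (URL invented): parseURL('sip:bob:secret@example.com:5070;transport=udp')
# returns a URL with username='bob', password='secret', host='example.com',
# port=5070 and transport='udp'.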
def cleanRequestURL(url):
"""Clean a URL from a Request line."""
url.transport = None
url.maddr = None
url.ttl = None
url.headers = {}
def parseAddress(address, host=None, port=None, clean=0):
"""Return (name, uri, params) for From/To/Contact header.
@param clean: remove unnecessary info, usually for From and To headers.
"""
address = address.strip()
# simple 'sip:foo' case
if address.startswith("sip:"):
return "", parseURL(address, host=host, port=port), {}
params = {}
name, url = address.split("<", 1)
name = name.strip()
if name.startswith('"'):
name = name[1:]
if name.endswith('"'):
name = name[:-1]
url, paramstring = url.split(">", 1)
url = parseURL(url, host=host, port=port)
paramstring = paramstring.strip()
if paramstring:
for l in paramstring.split(";"):
if not l:
continue
k, v = l.split("=")
params[k] = v
if clean:
# rfc 2543 6.21
url.ttl = None
url.headers = {}
url.transport = None
url.maddr = None
return name, url, params
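# Illustrative call (address invented): parseAddress('"Bob" <sip:bob@example.com>;tag=abc')
# returns ('Bob', <URL for bob@example.com>, {'tag': 'abc'}); with clean=1 the
# transport, maddr, ttl and header fields of the URL are additionally blanked.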
class SIPError(Exception):
def __init__(self, code, phrase=None):
if phrase is None:
phrase = statusCodes[code]
Exception.__init__(self, "SIP error (%d): %s" % (code, phrase))
self.code = code
self.phrase = phrase
class RegistrationError(SIPError):
"""Registration was not possible."""
class Message:
"""A SIP message."""
length = None
def __init__(self):
self.headers = util.OrderedDict() # map name to list of values
self.body = ""
self.finished = 0
def addHeader(self, name, value):
name = name.lower()
name = longHeaders.get(name, name)
if name == "content-length":
self.length = int(value)
self.headers.setdefault(name,[]).append(value)
def bodyDataReceived(self, data):
self.body += data
def creationFinished(self):
if (self.length != None) and (self.length != len(self.body)):
raise ValueError, "wrong body length"
self.finished = 1
def toString(self):
s = "%s\r\n" % self._getHeaderLine()
for n, vs in self.headers.items():
for v in vs:
s += "%s: %s\r\n" % (specialCases.get(n) or dashCapitalize(n), v)
s += "\r\n"
s += self.body
return s
def _getHeaderLine(self):
raise NotImplementedError
class Request(Message):
"""A Request for a URI"""
def __init__(self, method, uri, version="SIP/2.0"):
Message.__init__(self)
self.method = method
if isinstance(uri, URL):
self.uri = uri
else:
self.uri = parseURL(uri)
cleanRequestURL(self.uri)
def __repr__(self):
return "<SIP Request %d:%s %s>" % (id(self), self.method, self.uri.toString())
def _getHeaderLine(self):
return "%s %s SIP/2.0" % (self.method, self.uri.toString())
class Response(Message):
"""A Response to a URI Request"""
def __init__(self, code, phrase=None, version="SIP/2.0"):
Message.__init__(self)
self.code = code
if phrase == None:
phrase = statusCodes[code]
self.phrase = phrase
def __repr__(self):
return "<SIP Response %d:%s>" % (id(self), self.code)
def _getHeaderLine(self):
return "SIP/2.0 %s %s" % (self.code, self.phrase)
class MessagesParser(basic.LineReceiver):
"""A SIP messages parser.
Expects dataReceived, dataDone repeatedly,
in that order. Shouldn't be connected to actual transport.
"""
version = "SIP/2.0"
acceptResponses = 1
acceptRequests = 1
state = "firstline" # or "headers", "body" or "invalid"
debug = 0
def __init__(self, messageReceivedCallback):
self.messageReceived = messageReceivedCallback
self.reset()
def reset(self, remainingData=""):
self.state = "firstline"
self.length = None # body length
self.bodyReceived = 0 # how much of the body we received
self.message = None
self.setLineMode(remainingData)
def invalidMessage(self):
self.state = "invalid"
self.setRawMode()
def dataDone(self):
# clear out any buffered data that may be hanging around
self.clearLineBuffer()
if self.state == "firstline":
return
if self.state != "body":
self.reset()
return
if self.length == None:
# no content-length header, so end of data signals message done
self.messageDone()
elif self.length < self.bodyReceived:
# aborted in the middle
self.reset()
else:
# we have enough data and message wasn't finished? something is wrong
raise RuntimeError, "this should never happen"
def dataReceived(self, data):
try:
basic.LineReceiver.dataReceived(self, data)
except:
log.err()
self.invalidMessage()
def handleFirstLine(self, line):
"""Expected to create self.message."""
raise NotImplementedError
def lineLengthExceeded(self, line):
self.invalidMessage()
def lineReceived(self, line):
if self.state == "firstline":
while line.startswith("\n") or line.startswith("\r"):
line = line[1:]
if not line:
return
try:
a, b, c = line.split(" ", 2)
except ValueError:
self.invalidMessage()
return
if a == "SIP/2.0" and self.acceptResponses:
# response
try:
code = int(b)
except ValueError:
self.invalidMessage()
return
self.message = Response(code, c)
elif c == "SIP/2.0" and self.acceptRequests:
self.message = Request(a, b)
else:
self.invalidMessage()
return
self.state = "headers"
return
else:
assert self.state == "headers"
if line:
# XXX support multi-line headers
try:
name, value = line.split(":", 1)
except ValueError:
self.invalidMessage()
return
self.message.addHeader(name, value.lstrip())
if name.lower() == "content-length":
try:
self.length = int(value.lstrip())
except ValueError:
self.invalidMessage()
return
else:
# CRLF, we now have message body until self.length bytes,
# or if no length was given, until there is no more data
# from the connection sending us data.
self.state = "body"
if self.length == 0:
self.messageDone()
return
self.setRawMode()
def messageDone(self, remainingData=""):
assert self.state == "body"
self.message.creationFinished()
self.messageReceived(self.message)
self.reset(remainingData)
def rawDataReceived(self, data):
assert self.state in ("body", "invalid")
if self.state == "invalid":
return
if self.length == None:
self.message.bodyDataReceived(data)
else:
dataLen = len(data)
expectedLen = self.length - self.bodyReceived
if dataLen > expectedLen:
self.message.bodyDataReceived(data[:expectedLen])
self.messageDone(data[expectedLen:])
return
else:
self.bodyReceived += dataLen
self.message.bodyDataReceived(data)
if self.bodyReceived == self.length:
self.messageDone()
class Base(protocol.DatagramProtocol):
"""Base class for SIP clients and servers."""
PORT = PORT
debug = False
def __init__(self):
self.messages = []
self.parser = MessagesParser(self.addMessage)
def addMessage(self, msg):
self.messages.append(msg)
def datagramReceived(self, data, addr):
self.parser.dataReceived(data)
self.parser.dataDone()
for m in self.messages:
self._fixupNAT(m, addr)
if self.debug:
log.msg("Received %r from %r" % (m.toString(), addr))
if isinstance(m, Request):
self.handle_request(m, addr)
else:
self.handle_response(m, addr)
self.messages[:] = []
def _fixupNAT(self, message, (srcHost, srcPort)):
# RFC 2543 6.40.2,
senderVia = parseViaHeader(message.headers["via"][0])
if senderVia.host != srcHost:
senderVia.received = srcHost
if senderVia.port != srcPort:
senderVia.rport = srcPort
message.headers["via"][0] = senderVia.toString()
elif senderVia.rport == True:
senderVia.received = srcHost
senderVia.rport = srcPort
message.headers["via"][0] = senderVia.toString()
def deliverResponse(self, responseMessage):
"""Deliver response.
Destination is based on topmost Via header."""
destVia = parseViaHeader(responseMessage.headers["via"][0])
# XXX we don't do multicast yet
host = destVia.received or destVia.host
port = destVia.rport or destVia.port or self.PORT
destAddr = URL(host=host, port=port)
self.sendMessage(destAddr, responseMessage)
def responseFromRequest(self, code, request):
"""Create a response to a request message."""
response = Response(code)
for name in ("via", "to", "from", "call-id", "cseq"):
response.headers[name] = request.headers.get(name, [])[:]
return response
def sendMessage(self, destURL, message):
"""Send a message.
@param destURL: C{URL}. This should be a *physical* URL, not a logical one.
@param message: The message to send.
"""
if destURL.transport not in ("udp", None):
raise RuntimeError, "only UDP currently supported"
if self.debug:
log.msg("Sending %r to %r" % (message.toString(), destURL))
self.transport.write(message.toString(), (destURL.host, destURL.port or self.PORT))
def handle_request(self, message, addr):
"""Override to define behavior for requests received
@type message: C{Message}
@type addr: C{tuple}
"""
raise NotImplementedError
def handle_response(self, message, addr):
"""Override to define behavior for responses received.
@type message: C{Message}
@type addr: C{tuple}
"""
raise NotImplementedError
class IContact(Interface):
"""A user of a registrar or proxy"""
class Registration:
def __init__(self, secondsToExpiry, contactURL):
self.secondsToExpiry = secondsToExpiry
self.contactURL = contactURL
class IRegistry(Interface):
"""Allows registration of logical->physical URL mapping."""
def registerAddress(domainURL, logicalURL, physicalURL):
"""Register the physical address of a logical URL.
@return: Deferred of C{Registration} or failure with RegistrationError.
"""
def unregisterAddress(domainURL, logicalURL, physicalURL):
"""Unregister the physical address of a logical URL.
@return: Deferred of C{Registration} or failure with RegistrationError.
"""
def getRegistrationInfo(logicalURL):
"""Get registration info for logical URL.
@return: Deferred of C{Registration} object or failure of LookupError.
"""
class ILocator(Interface):
"""Allow looking up physical address for logical URL."""
def getAddress(logicalURL):
"""Return physical URL of server for logical URL of user.
@param logicalURL: a logical C{URL}.
@return: Deferred which becomes URL or fails with LookupError.
"""
class Proxy(Base):
"""SIP proxy."""
PORT = PORT
locator = None # object implementing ILocator
def __init__(self, host=None, port=PORT):
"""Create new instance.
@param host: our hostname/IP as set in Via headers.
@param port: our port as set in Via headers.
"""
self.host = host or socket.getfqdn()
self.port = port
Base.__init__(self)
def getVia(self):
"""Return value of Via header for this proxy."""
return Via(host=self.host, port=self.port)
def handle_request(self, message, addr):
# send immediate 100/trying message before processing
#self.deliverResponse(self.responseFromRequest(100, message))
f = getattr(self, "handle_%s_request" % message.method, None)
if f is None:
f = self.handle_request_default
try:
d = f(message, addr)
except SIPError, e:
self.deliverResponse(self.responseFromRequest(e.code, message))
except:
log.err()
self.deliverResponse(self.responseFromRequest(500, message))
else:
if d is not None:
d.addErrback(lambda e:
self.deliverResponse(self.responseFromRequest(e.code, message))
)
def handle_request_default(self, message, (srcHost, srcPort)):
"""Default request handler.
Default behaviour for OPTIONS and unknown methods for proxies
is to forward message on to the client.
Since at the moment we are a stateless proxy, that's basically
everything.
"""
def _mungContactHeader(uri, message):
message.headers['contact'][0] = uri.toString()
return self.sendMessage(uri, message)
viaHeader = self.getVia()
if viaHeader.toString() in message.headers["via"]:
# must be a loop, so drop message
log.msg("Dropping looped message.")
return
message.headers["via"].insert(0, viaHeader.toString())
name, uri, tags = parseAddress(message.headers["to"][0], clean=1)
# this is broken and needs refactoring to use cred
d = self.locator.getAddress(uri)
d.addCallback(self.sendMessage, message)
d.addErrback(self._cantForwardRequest, message)
def _cantForwardRequest(self, error, message):
error.trap(LookupError)
del message.headers["via"][0] # this'll be us
self.deliverResponse(self.responseFromRequest(404, message))
def deliverResponse(self, responseMessage):
"""Deliver response.
Destination is based on topmost Via header."""
destVia = parseViaHeader(responseMessage.headers["via"][0])
# XXX we don't do multicast yet
host = destVia.received or destVia.host
port = destVia.rport or destVia.port or self.PORT
destAddr = URL(host=host, port=port)
self.sendMessage(destAddr, responseMessage)
def responseFromRequest(self, code, request):
"""Create a response to a request message."""
response = Response(code)
for name in ("via", "to", "from", "call-id", "cseq"):
response.headers[name] = request.headers.get(name, [])[:]
return response
def handle_response(self, message, addr):
"""Default response handler."""
v = parseViaHeader(message.headers["via"][0])
if (v.host, v.port) != (self.host, self.port):
# we got a message not intended for us?
# XXX note this check breaks if we have multiple external IPs
# yay for suck protocols
log.msg("Dropping incorrectly addressed message")
return
del message.headers["via"][0]
if not message.headers["via"]:
# this message is addressed to us
self.gotResponse(message, addr)
return
self.deliverResponse(message)
def gotResponse(self, message, addr):
"""Called with responses that are addressed at this server."""
pass
class IAuthorizer(Interface):
def getChallenge(peer):
"""Generate a challenge the client may respond to.
@type peer: C{tuple}
@param peer: The client's address
@rtype: C{str}
@return: The challenge string
"""
def decode(response):
"""Create a credentials object from the given response.
@type response: C{str}
"""
class BasicAuthorizer:
"""Authorizer for insecure Basic (base64-encoded plaintext) authentication.
This form of authentication is broken and insecure. Do not use it.
"""
implements(IAuthorizer)
def __init__(self):
"""
This method exists solely to issue a deprecation warning.
"""
warnings.warn(
"twisted.protocols.sip.BasicAuthorizer was deprecated "
"in Twisted 9.0.0",
category=DeprecationWarning,
stacklevel=2)
def getChallenge(self, peer):
return None
def decode(self, response):
# At least one SIP client improperly pads its Base64 encoded messages
for i in range(3):
try:
creds = (response + ('=' * i)).decode('base64')
except:
pass
else:
break
else:
# Totally bogus
raise SIPError(400)
p = creds.split(':', 1)
if len(p) == 2:
return UsernamePassword(*p)
raise SIPError(400)
class DigestedCredentials(UsernameHashedPassword):
"""Yet Another Simple Digest-MD5 authentication scheme"""
def __init__(self, username, fields, challenges):
warnings.warn(
"twisted.protocols.sip.DigestedCredentials was deprecated "
"in Twisted 9.0.0",
category=DeprecationWarning,
stacklevel=2)
self.username = username
self.fields = fields
self.challenges = challenges
def checkPassword(self, password):
method = 'REGISTER'
response = self.fields.get('response')
uri = self.fields.get('uri')
nonce = self.fields.get('nonce')
cnonce = self.fields.get('cnonce')
nc = self.fields.get('nc')
algo = self.fields.get('algorithm', 'MD5')
qop = self.fields.get('qop-options', 'auth')
opaque = self.fields.get('opaque')
if opaque not in self.challenges:
return False
del self.challenges[opaque]
user, domain = self.username.split('@', 1)
if uri is None:
uri = 'sip:' + domain
expected = DigestCalcResponse(
DigestCalcHA1(algo, user, domain, password, nonce, cnonce),
nonce, nc, cnonce, qop, method, uri, None,
)
return expected == response
class DigestAuthorizer:
CHALLENGE_LIFETIME = 15
implements(IAuthorizer)
def __init__(self):
warnings.warn(
"twisted.protocols.sip.DigestAuthorizer was deprecated "
"in Twisted 9.0.0",
category=DeprecationWarning,
stacklevel=2)
self.outstanding = {}
def generateNonce(self):
c = tuple([random.randrange(sys.maxint) for _ in range(3)])
c = '%d%d%d' % c
return c
def generateOpaque(self):
return str(random.randrange(sys.maxint))
def getChallenge(self, peer):
c = self.generateNonce()
o = self.generateOpaque()
self.outstanding[o] = c
return ','.join((
'nonce="%s"' % c,
'opaque="%s"' % o,
'qop-options="auth"',
'algorithm="MD5"',
))
def decode(self, response):
response = ' '.join(response.splitlines())
parts = response.split(',')
auth = dict([(k.strip(), unq(v.strip())) for (k, v) in [p.split('=', 1) for p in parts]])
try:
username = auth['username']
except KeyError:
raise SIPError(401)
try:
return DigestedCredentials(username, auth, self.outstanding)
except:
raise SIPError(400)
class RegisterProxy(Proxy):
"""A proxy that allows registration for a specific domain.
Unregistered users won't be handled.
"""
portal = None
registry = None # should implement IRegistry
authorizers = {
'digest': DigestAuthorizer(),
}
def __init__(self, *args, **kw):
Proxy.__init__(self, *args, **kw)
self.liveChallenges = {}
def handle_ACK_request(self, message, (host, port)):
# XXX
# ACKs are a client's way of indicating they got the last message
# Responding to them is not a good idea.
# However, we should keep track of terminal messages and re-transmit
# if no ACK is received.
pass
def handle_REGISTER_request(self, message, (host, port)):
"""Handle a registration request.
Currently registration is not proxied.
"""
if self.portal is None:
# There is no portal. Let anyone in.
self.register(message, host, port)
else:
# There is a portal. Check for credentials.
if not message.headers.has_key("authorization"):
return self.unauthorized(message, host, port)
else:
return self.login(message, host, port)
def unauthorized(self, message, host, port):
m = self.responseFromRequest(401, message)
for (scheme, auth) in self.authorizers.iteritems():
chal = auth.getChallenge((host, port))
if chal is None:
value = '%s realm="%s"' % (scheme.title(), self.host)
else:
value = '%s %s,realm="%s"' % (scheme.title(), chal, self.host)
m.headers.setdefault('www-authenticate', []).append(value)
self.deliverResponse(m)
def login(self, message, host, port):
parts = message.headers['authorization'][0].split(None, 1)
a = self.authorizers.get(parts[0].lower())
if a:
try:
c = a.decode(parts[1])
except SIPError:
raise
except:
log.err()
self.deliverResponse(self.responseFromRequest(500, message))
else:
c.username += '@' + self.host
self.portal.login(c, None, IContact
).addCallback(self._cbLogin, message, host, port
).addErrback(self._ebLogin, message, host, port
).addErrback(log.err
)
else:
self.deliverResponse(self.responseFromRequest(501, message))
def _cbLogin(self, (i, a, l), message, host, port):
# It's stateless, matey. What a joke.
self.register(message, host, port)
def _ebLogin(self, failure, message, host, port):
failure.trap(cred.error.UnauthorizedLogin)
self.unauthorized(message, host, port)
def register(self, message, host, port):
"""Allow all users to register"""
name, toURL, params = parseAddress(message.headers["to"][0], clean=1)
contact = None
if message.headers.has_key("contact"):
contact = message.headers["contact"][0]
if message.headers.get("expires", [None])[0] == "0":
self.unregister(message, toURL, contact)
else:
# XXX Check expires on appropriate URL, and pass it to registry
# instead of having registry hardcode it.
if contact is not None:
name, contactURL, params = parseAddress(contact, host=host, port=port)
d = self.registry.registerAddress(message.uri, toURL, contactURL)
else:
d = self.registry.getRegistrationInfo(toURL)
d.addCallbacks(self._cbRegister, self._ebRegister,
callbackArgs=(message,),
errbackArgs=(message,)
)
def _cbRegister(self, registration, message):
response = self.responseFromRequest(200, message)
if registration.contactURL != None:
response.addHeader("contact", registration.contactURL.toString())
response.addHeader("expires", "%d" % registration.secondsToExpiry)
response.addHeader("content-length", "0")
self.deliverResponse(response)
def _ebRegister(self, error, message):
error.trap(RegistrationError, LookupError)
# XXX return error message, and alter tests to deal with
# this, currently tests assume no message sent on failure
def unregister(self, message, toURL, contact):
try:
expires = int(message.headers["expires"][0])
except ValueError:
self.deliverResponse(self.responseFromRequest(400, message))
else:
if expires == 0:
if contact == "*":
contactURL = "*"
else:
name, contactURL, params = parseAddress(contact)
d = self.registry.unregisterAddress(message.uri, toURL, contactURL)
d.addCallback(self._cbUnregister, message
).addErrback(self._ebUnregister, message
)
def _cbUnregister(self, registration, message):
msg = self.responseFromRequest(200, message)
msg.headers.setdefault('contact', []).append(registration.contactURL.toString())
msg.addHeader("expires", "0")
self.deliverResponse(msg)
def _ebUnregister(self, registration, message):
pass
class InMemoryRegistry:
"""A simplistic registry for a specific domain."""
implements(IRegistry, ILocator)
def __init__(self, domain):
self.domain = domain # the domain we handle registration for
self.users = {} # map username to (IDelayedCall for expiry, address URI)
def getAddress(self, userURI):
if userURI.host != self.domain:
return defer.fail(LookupError("unknown domain"))
if self.users.has_key(userURI.username):
dc, url = self.users[userURI.username]
return defer.succeed(url)
else:
return defer.fail(LookupError("no such user"))
def getRegistrationInfo(self, userURI):
if userURI.host != self.domain:
return defer.fail(LookupError("unknown domain"))
if self.users.has_key(userURI.username):
dc, url = self.users[userURI.username]
return defer.succeed(Registration(int(dc.getTime() - time.time()), url))
else:
return defer.fail(LookupError("no such user"))
def _expireRegistration(self, username):
try:
dc, url = self.users[username]
except KeyError:
return defer.fail(LookupError("no such user"))
else:
dc.cancel()
del self.users[username]
return defer.succeed(Registration(0, url))
def registerAddress(self, domainURL, logicalURL, physicalURL):
if domainURL.host != self.domain:
log.msg("Registration for domain we don't handle.")
return defer.fail(RegistrationError(404))
if logicalURL.host != self.domain:
log.msg("Registration for domain we don't handle.")
return defer.fail(RegistrationError(404))
if self.users.has_key(logicalURL.username):
dc, old = self.users[logicalURL.username]
dc.reset(3600)
else:
dc = reactor.callLater(3600, self._expireRegistration, logicalURL.username)
log.msg("Registered %s at %s" % (logicalURL.toString(), physicalURL.toString()))
self.users[logicalURL.username] = (dc, physicalURL)
return defer.succeed(Registration(int(dc.getTime() - time.time()), physicalURL))
def unregisterAddress(self, domainURL, logicalURL, physicalURL):
return self._expireRegistration(logicalURL.username)
| gpl-3.0 | -9,162,486,065,959,605,000 | 30.464018 | 119 | 0.565435 | false |
pakpoomton/CellmodellerShadow | Models/Biofilm_g40.py | 1 | 2119 | import random
from CellModeller.Regulation.ModuleRegulator import ModuleRegulator
from CellModeller.Biophysics.BacterialModels.CLBacterium import CLBacterium
from CellModeller.GUI import Renderers
import numpy
import math
max_cells = 400000
#cell_colors = {0:[0.0, 1.0, 0.0],
# 1:[0.0, 0.0, 1.0],
# 2:[1.0, 0.0, 0.0],
# 3:[0.0, 1.0, 1.0]}
cell_colors = numpy.random.uniform(0,1,(9,3))
def setup(sim):
# Set biophysics, signalling, and regulation models
biophys = CLBacterium(sim, max_substeps=8, max_cells=max_cells, max_contacts=32, max_sqs=192**2, jitter_z=False, reg_param=0.04, gamma=40)
#biophys.addPlane((0,0,-0.5), (0,0,1), 1.0)
#biophys.addPlane((0,0,0.5), (0,0,-1), math.sqrt(7.5e-4))
regul = ModuleRegulator(sim, __file__) # use this file for reg too
# Only biophys and regulation
sim.init(biophys, regul, None, None)
sim.addCell(cellType=0, pos=(0,0,0))
#sim.addCell(cellType=0, pos=(0,-10.0,0))
#sim.addCell(cellType=1, pos=(0,10.0,0))
#sim.addCell(cellType=0, pos=(16,16,0))
#sim.addCell(cellType=1, pos=(0,16,0))
#sim.addCell(cellType=2, pos=(-16,16,0))
#sim.addCell(cellType=3, pos=(16,0,0))
#sim.addCell(cellType=4, pos=(0,0,0))
#sim.addCell(cellType=5, pos=(-16,0,0))
#sim.addCell(cellType=6, pos=(16,-16,0))
#sim.addCell(cellType=7, pos=(0,-16,0))
#sim.addCell(cellType=8, pos=(-16,-16,0))
# Add some objects to draw the models
therenderer = Renderers.GLBacteriumRenderer(sim)
sim.addRenderer(therenderer)
sim.savePickle = True
sim.pickleSteps = 20
def init(cell):
cell.targetVol = 3.5 + random.uniform(0.0,0.5)
cell.growthRate = 1.0
def numSignals():
return 0
def numSpecies():
return 0
def update(cells):
for (id, cell) in cells.iteritems():
cell.color = cell_colors[cell.cellType]
if cell.volume > cell.targetVol:
cell.asymm = [1,1]
cell.divideFlag = True
def divide(parent, d1, d2):
d1.targetVol = 3.5 + random.uniform(0.0,0.5)
d2.targetVol = 3.5 + random.uniform(0.0,0.5)
| bsd-3-clause | -3,343,838,500,989,668,000 | 29.271429 | 142 | 0.632374 | false |
markokr/sysca | sysca/keys.py | 1 | 6591 | """Key handling
"""
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec, rsa, dsa
from cryptography.hazmat.primitives.hashes import SHA256, SHA384, SHA512
from cryptography.hazmat.primitives.serialization import (Encoding, PublicFormat)
from .exceptions import UnsupportedParameter
from .compat import (
ed25519, ed448, EDDSA_PRIVKEY_CLASSES,
EC_CURVES, PUBKEY_CLASSES, PRIVKEY_CLASSES,
)
__all__ = (
"get_curve_for_name", "get_ec_curves", "get_hash_algo", "get_key_name",
"is_safe_bits", "is_safe_curve",
"new_dsa_key", "new_ec_key", "new_key", "new_rsa_key",
"new_serial_number", "same_pubkey", "set_unsafe",
"valid_privkey", "valid_pubkey", "get_invalid_key_usage",
)
#
# Key parameters
#
UNSAFE = False
# safe choices
SAFE_BITS_RSA = (2048, 3072, 4096)
SAFE_BITS_DSA = (2048, 3072)
SAFE_CURVES = ("secp256r1", "secp384r1", "secp521r1", "ed25519", "ed448",
"brainpoolp256r1", "brainpoolp384r1", "brainpoolp512r1")
def get_curve_for_name(name):
"""Lookup curve by name.
"""
name2 = name.lower()
if name2 not in EC_CURVES:
raise UnsupportedParameter("Unknown curve: %s" % name)
if not is_safe_curve(name2):
raise UnsupportedParameter("Unsafe curve: %s" % name)
return EC_CURVES[name2]
def same_pubkey(o1, o2):
"""Compare public keys.
"""
k1, k2 = o1, o2
if not isinstance(k1, PUBKEY_CLASSES):
k1 = o1.public_key()
if k1 is None:
raise ValueError("object %r gave None .public_key()" % o1)
if not isinstance(k2, PUBKEY_CLASSES):
k2 = k2.public_key()
if k2 is None:
raise ValueError("object %r gave None .public_key()" % o2)
fmt = PublicFormat.SubjectPublicKeyInfo
p1 = k1.public_bytes(Encoding.PEM, fmt)
p2 = k2.public_bytes(Encoding.PEM, fmt)
return p1 == p2
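# Usage sketch (the objects named here are hypothetical): same_pubkey(cert, ca_key)
# accepts certificates, CSRs or bare keys -- anything that is a public key or
# exposes .public_key() -- and compares their SubjectPublicKeyInfo PEM encodings.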
def get_hash_algo(privkey, ctx):
"""Return signature hash algo based on privkey.
"""
if isinstance(privkey, EDDSA_PRIVKEY_CLASSES):
return None
if isinstance(privkey, ec.EllipticCurvePrivateKey):
if privkey.key_size > 500:
return SHA512()
if privkey.key_size > 300:
return SHA384()
return SHA256()
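# e.g. an Ed25519/Ed448 key returns None (the signature algorithm hashes internally),
# an EC key over ~500 bits (secp521r1) gets SHA512, over ~300 bits (secp384r1) SHA384,
# and everything else -- including RSA and DSA keys -- falls through to SHA256.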
def get_invalid_key_usage(pubkey):
"""KeyUsage types not supported by key"""
bad = ("key_encipherment", "data_encipherment", "encipher_only", "decipher_only", "key_agreement")
if UNSAFE or isinstance(pubkey, rsa.RSAPublicKey) or pubkey is None:
return ()
return bad
def is_safe_bits(bits, bitlist):
"""Allow bits"""
return UNSAFE or bits in bitlist
def is_safe_curve(name):
"""Allow curve"""
return UNSAFE or name.lower() in SAFE_CURVES
def get_ec_curves():
"""Return supported curve names.
"""
lst = list(EC_CURVES.keys())
if ed25519 is not None:
lst.append("ed25519")
if ed448 is not None:
lst.append("ed448")
return [n for n in sorted(lst) if is_safe_curve(n)]
def new_ec_key(name="secp256r1"):
"""New Elliptic Curve key
"""
name = name.lower()
if name == "ed25519":
if ed25519 is not None:
return ed25519.Ed25519PrivateKey.generate()
raise UnsupportedParameter("ed25519 not supported")
if name == "ed448":
if ed448 is not None:
return ed448.Ed448PrivateKey.generate()
raise UnsupportedParameter("ed448 not supported")
curve = get_curve_for_name(name)
return ec.generate_private_key(curve=curve, backend=default_backend())
def new_rsa_key(bits=2048):
"""New RSA key.
"""
if not is_safe_bits(bits, SAFE_BITS_RSA):
raise UnsupportedParameter("Bad value for RSA bits: %d" % bits)
return rsa.generate_private_key(key_size=bits, public_exponent=65537, backend=default_backend())
def new_dsa_key(bits=2048):
"""New DSA key.
"""
if not is_safe_bits(bits, SAFE_BITS_DSA):
raise UnsupportedParameter("Bad value for DSA bits: %d" % bits)
return dsa.generate_private_key(key_size=bits, backend=default_backend())
def new_key(keydesc="ec"):
"""Create new key.
"""
short = {"ec": "ec:secp256r1", "rsa": "rsa:2048", "dsa": "dsa:2048"}
keydesc = short.get(keydesc, keydesc)
# create key
tmp = keydesc.lower().split(":")
if len(tmp) != 2:
raise UnsupportedParameter("Bad key spec: %s" % keydesc)
t, v = tmp
if t == "ec":
return new_ec_key(v)
elif t == "rsa":
return new_rsa_key(int(v))
elif t == "dsa":
return new_dsa_key(int(v))
raise UnsupportedParameter("Bad key type: %s" % keydesc)
def valid_pubkey(pubkey):
"""Return True if usable public key.
"""
if isinstance(pubkey, rsa.RSAPublicKey):
return is_safe_bits(pubkey.key_size, SAFE_BITS_RSA)
if isinstance(pubkey, dsa.DSAPublicKey):
return is_safe_bits(pubkey.key_size, SAFE_BITS_DSA)
if isinstance(pubkey, ec.EllipticCurvePublicKey):
return is_safe_curve(pubkey.curve.name)
return isinstance(pubkey, PUBKEY_CLASSES)
def valid_privkey(privkey):
"""Return True if usable private key.
"""
if isinstance(privkey, rsa.RSAPrivateKey):
return is_safe_bits(privkey.key_size, SAFE_BITS_RSA)
if isinstance(privkey, dsa.DSAPrivateKey):
return is_safe_bits(privkey.key_size, SAFE_BITS_DSA)
if isinstance(privkey, ec.EllipticCurvePrivateKey):
return is_safe_curve(privkey.curve.name)
return isinstance(privkey, PRIVKEY_CLASSES)
def get_key_name(key):
"""Return key type.
"""
if isinstance(key, (rsa.RSAPublicKey, rsa.RSAPrivateKey)):
return "rsa:%d" % key.key_size
if isinstance(key, (dsa.DSAPublicKey, dsa.DSAPrivateKey)):
return "dsa:%d" % key.key_size
if isinstance(key, (ec.EllipticCurvePublicKey, ec.EllipticCurvePrivateKey)):
return "ec:%s" % key.curve.name
if ed25519 is not None and isinstance(key, (ed25519.Ed25519PublicKey, ed25519.Ed25519PrivateKey)):
return "ec:ed25519"
if ed448 is not None and isinstance(key, (ed448.Ed448PublicKey, ed448.Ed448PrivateKey)):
return "ec:ed448"
return "<unknown key type>"
def set_unsafe(flag):
global UNSAFE
UNSAFE = flag
def new_serial_number():
"""Return serial number with max allowed entropy.
"""
# serial should have at least 20 bits of entropy and fit into 20 bytes
seed = int.from_bytes(os.urandom(20), "big", signed=False)
# avoid sign problems by setting highest bit
return (seed >> 1) | (1 << 158)
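# Sketch of the arithmetic: os.urandom(20) yields up to 160 random bits; shifting
# right by one keeps the value within 159 bits and OR-ing in 1 << 158 pins the top
# bit of that range, so the serial always encodes into 20 bytes without appearing
# negative when interpreted as a signed integer.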
| isc | -4,393,379,388,745,196,000 | 29.655814 | 102 | 0.648308 | false |
pendingchaos/WIP12 | scripts/update_extensions.py | 1 | 1594 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
extensions = os.listdir("../include/extensions")
try:
extensions.remove("extensions.h")
except ValueError:
pass
output = open("../src/extensions/extensions.cpp", "w")
output.write("""//Generated by update_extensions.py. Do not edit. Edit update_extensions.py instead
#include "extensions/extensions.h"
#include "memory.h"
""")
for extension in extensions:
output.write("extern void *init_%s();\nextern void deinit_%s(void *ext);\n" % (extension, extension))
output.write("\nstruct Extensions\n{\n")
for extension in extensions:
output.write(" void *%s;\n" % (extension))
output.write("};\n")
output.write("""
void *initExtensions()
{
Extensions *ext = NEW(Extensions);
""")
for extension in extensions:
output.write(" ((Extensions *)ext)->%s = init_%s();\n" % (extension, extension))
output.write(" return ext;\n}\n\nvoid deinitExtensions(void *ext)\n{\n")
for extension in extensions:
output.write(" deinit_%s(((Extensions *)ext)->%s);\n" % (extension, extension))
output.write(" DELETE((Extensions *)ext);\n}\n\n")
for extension in extensions:
output.write("void *get_%s(void *exts)\n{\n return ((Extensions *)exts)->%s;\n}\n\n" % (extension, extension))
output = open("../include/extensions/extensions.h", "w")
output.write("""#ifndef EXTENSIONS_H
#define EXTENSIONS_H
void *initExtensions();
void deinitExtensions(void *ext);
""")
for extension in extensions:
output.write("void *get_%s(void *exts);\n" % (extension))
output.write("#endif // EXTENSIONS_H\n")
| gpl-3.0 | 6,310,849,901,766,840,000 | 24.709677 | 117 | 0.666876 | false |
kdebrab/pandas | pandas/tests/frame/test_timeseries.py | 1 | 29973 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, time
import pytest
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas import (DataFrame, Series, Index,
Timestamp, DatetimeIndex, MultiIndex,
to_datetime, date_range, period_range)
import pandas as pd
import pandas.tseries.offsets as offsets
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_index_equal,
assert_raises_regex)
import pandas.util.testing as tm
from pandas.compat import product
from pandas.tests.frame.common import TestData
class TestDataFrameTimeSeriesMethods(TestData):
def test_diff(self):
the_diff = self.tsframe.diff(1)
assert_series_equal(the_diff['A'],
self.tsframe['A'] - self.tsframe['A'].shift(1))
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = DataFrame({'s': s}).diff()
assert rs.s[1] == 1
# mixed numeric
tf = self.tsframe.astype('float32')
the_diff = tf.diff(1)
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
# issue 10907
df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})
df.insert(0, 'x', 1)
result = df.diff(axis=1)
expected = pd.DataFrame({'x': np.nan, 'y': pd.Series(
1), 'z': pd.Series(1)}).astype('float64')
assert_frame_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_diff_datetime_axis0(self, tz):
# GH 18578
df = DataFrame({0: date_range('2010', freq='D', periods=2, tz=tz),
1: date_range('2010', freq='D', periods=2, tz=tz)})
result = df.diff(axis=0)
expected = DataFrame({0: pd.TimedeltaIndex(['NaT', '1 days']),
1: pd.TimedeltaIndex(['NaT', '1 days'])})
assert_frame_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_diff_datetime_axis1(self, tz):
# GH 18578
df = DataFrame({0: date_range('2010', freq='D', periods=2, tz=tz),
1: date_range('2010', freq='D', periods=2, tz=tz)})
if tz is None:
result = df.diff(axis=1)
expected = DataFrame({0: pd.TimedeltaIndex(['NaT', 'NaT']),
1: pd.TimedeltaIndex(['0 days',
'0 days'])})
assert_frame_equal(result, expected)
else:
with pytest.raises(NotImplementedError):
result = df.diff(axis=1)
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0, 2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[pd.Timedelta('00:01:00'), 1]],
columns=['time', 'value'])
assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
assert result[0].dtype == np.float64
def test_diff_neg_n(self):
rs = self.tsframe.diff(-1)
xp = self.tsframe - self.tsframe.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
rs = self.tsframe.diff(1.)
xp = self.tsframe.diff(1)
assert_frame_equal(rs, xp)
def test_diff_axis(self):
# GH 9727
df = DataFrame([[1., 2.], [3., 4.]])
assert_frame_equal(df.diff(axis=1), DataFrame(
[[np.nan, 1.], [np.nan, 1.]]))
assert_frame_equal(df.diff(axis=0), DataFrame(
[[np.nan, np.nan], [2., 2.]]))
def test_pct_change(self):
rs = self.tsframe.pct_change(fill_method=None)
assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
rs = self.tsframe.pct_change(2)
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
filled = self.tsframe.fillna(method='bfill', limit=1)
assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = self.tsframe.pct_change(freq='5D')
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs,
(filled / filled.shift(freq='5D') - 1)
.reindex_like(filled))
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
df = DataFrame({'a': s, 'b': s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, 0., 2.5 / 1.5 - 1, .2])
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
@pytest.mark.parametrize("freq, periods, fill_method, limit",
[('5B', 5, None, None),
('3B', 3, None, None),
('3B', 3, 'bfill', None),
('7B', 7, 'pad', 1),
('7B', 7, 'bfill', 3),
('14B', 14, None, None)])
def test_pct_change_periods_freq(self, freq, periods, fill_method, limit):
# GH 7292
rs_freq = self.tsframe.pct_change(freq=freq,
fill_method=fill_method,
limit=limit)
rs_periods = self.tsframe.pct_change(periods,
fill_method=fill_method,
limit=limit)
assert_frame_equal(rs_freq, rs_periods)
empty_ts = DataFrame(index=self.tsframe.index,
columns=self.tsframe.columns)
rs_freq = empty_ts.pct_change(freq=freq,
fill_method=fill_method,
limit=limit)
rs_periods = empty_ts.pct_change(periods,
fill_method=fill_method,
limit=limit)
assert_frame_equal(rs_freq, rs_periods)
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
assert np.issubdtype(df['B'].dtype, np.dtype('M8[ns]'))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
assert np.issubdtype(df['A'].dtype, np.dtype('M8[ns]'))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O')).values
assert df[unit].dtype == ns_dtype
assert (df[unit].values == ex_vals).all()
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O')).values
assert (tmp['dates'].values == ex_vals).all()
def test_shift(self):
# naive shift
shiftedFrame = self.tsframe.shift(5)
tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
shiftedFrame = self.tsframe.shift(-5)
tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(-5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
# shift by 0
unshifted = self.tsframe.shift(0)
assert_frame_equal(unshifted, self.tsframe)
# shift by DateOffset
shiftedFrame = self.tsframe.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(self.tsframe)
shiftedFrame2 = self.tsframe.shift(5, freq='B')
assert_frame_equal(shiftedFrame, shiftedFrame2)
d = self.tsframe.index[0]
shifted_d = d + offsets.BDay(5)
assert_series_equal(self.tsframe.xs(d),
shiftedFrame.xs(shifted_d), check_names=False)
# shift int frame
int_shifted = self.intframe.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.iloc[:, 0].dropna().values,
ps.iloc[:-1, 0].values)
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, offsets.BDay())
assert_frame_equal(shifted2, shifted3)
assert_frame_equal(ps, shifted2.shift(-1, 'B'))
tm.assert_raises_regex(ValueError,
'does not match PeriodIndex freq',
ps.shift, freq='D')
# shift other axis
# GH 6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis=1)
assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis='columns')
assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({'high': [True, False],
'low': [False, False]})
rs = df.shift(1)
xp = DataFrame(np.array([[np.nan, np.nan],
[True, False]], dtype=object),
columns=['high', 'low'])
assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH 9416
s1 = pd.Series(['a', 'b', 'c'], dtype='category')
s2 = pd.Series(['A', 'B', 'C'], dtype='category')
df = DataFrame({'one': s1, 'two': s2})
rs = df.shift(1)
xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)})
assert_frame_equal(rs, xp)
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
rs = df.shift(-1)
assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH 9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = pd.DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
assert_series_equal(nulls, Series(range(1, 6), dtype='int64'))
# check all answers are the same
assert_frame_equal(shifted[0], shifted[1])
assert_frame_equal(shifted[0], shifted[2])
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
assert_frame_equal(shifted, shifted3)
tm.assert_raises_regex(
ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
shifted = self.tsframe.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(self.tsframe, unshifted)
shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(self.tsframe.values,
Index(np.asarray(self.tsframe.index)),
columns=self.tsframe.columns)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(shifted, self.tsframe.tshift(1))
assert_frame_equal(unshifted, inferred_ts)
no_freq = self.tsframe.iloc[[0, 5, 7], :]
pytest.raises(ValueError, no_freq.tshift)
def test_truncate(self):
ts = self.tsframe[::3]
start, end = self.tsframe.index[3], self.tsframe.index[6]
start_missing = self.tsframe.index[2]
end_missing = self.tsframe.index[7]
# neither specified
truncated = ts.truncate()
assert_frame_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_frame_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_frame_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_frame_equal(truncated, expected)
pytest.raises(ValueError, ts.truncate,
before=ts.index[-1] - 1,
after=ts.index[0] + 1)
def test_truncate_copy(self):
index = self.tsframe.index
truncated = self.tsframe.truncate(index[5], index[10])
truncated.values[:] = 5.
assert not (self.tsframe.values[5:11] == 5).any()
def test_truncate_nonsortedindex(self):
# GH 17935
df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e']},
index=[5, 3, 2, 9, 0])
with tm.assert_raises_regex(ValueError,
'truncate requires a sorted index'):
df.truncate(before=3, after=9)
rng = pd.date_range('2011-01-01', '2012-01-01', freq='W')
ts = pd.DataFrame({'A': np.random.randn(len(rng)),
'B': np.random.randn(len(rng))},
index=rng)
with tm.assert_raises_regex(ValueError,
'truncate requires a sorted index'):
ts.sort_values('A', ascending=False).truncate(before='2011-11',
after='2011-12')
df = pd.DataFrame({3: np.random.randn(5),
20: np.random.randn(5),
2: np.random.randn(5),
0: np.random.randn(5)},
columns=[3, 20, 2, 0])
with tm.assert_raises_regex(ValueError,
'truncate requires a sorted index'):
df.truncate(before=2, after=20, axis=1)
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(offsets.BMonthEnd())
rule_monthly = self.tsframe.asfreq('BM')
tm.assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
filled = rule_monthly.asfreq('B', method='pad') # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq('B', method='pad') # noqa
# test does not blow up on length-0 DataFrame
zero_length = self.tsframe.reindex([])
result = zero_length.asfreq('BM')
assert result is not zero_length
def test_asfreq_datetimeindex(self):
df = DataFrame({'A': [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
datetime(2011, 11, 3)])
df = df.asfreq('B')
assert isinstance(df.index, DatetimeIndex)
ts = df['A'].asfreq('B')
assert isinstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range('1/1/2016', periods=10, freq='2S')
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({'one': ts})
# insert pre-existing missing value
df.loc['2016-01-01 00:00:08', 'one'] = None
actual_df = df.asfreq(freq='1S', fill_value=9.0)
expected_df = df.asfreq(freq='1S').fillna(9.0)
expected_df.loc['2016-01-01 00:00:08', 'one'] = None
assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq='1S').fillna(9.0)
actual_series = ts.asfreq(freq='1S', fill_value=9.0)
assert_series_equal(expected_series, actual_series)
@pytest.mark.parametrize("data,idx,expected_first,expected_last", [
({'A': [1, 2, 3]}, [1, 1, 2], 1, 2),
({'A': [1, 2, 3]}, [1, 2, 2], 1, 2),
({'A': [1, 2, 3, 4]}, ['d', 'd', 'd', 'd'], 'd', 'd'),
({'A': [1, np.nan, 3]}, [1, 1, 2], 1, 2),
({'A': [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
({'A': [1, np.nan, 3]}, [1, 2, 2], 1, 2)])
def test_first_last_valid(self, data, idx,
expected_first, expected_last):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = nan
mat[-5:] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
index = frame.first_valid_index()
assert index == frame.index[5]
index = frame.last_valid_index()
assert index == frame.index[-6]
# GH12800
empty = DataFrame()
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
# GH17400: no valid entries
frame[:] = nan
assert frame.last_valid_index() is None
assert frame.first_valid_index() is None
        # GH20499: it preserves freq with holes
frame.index = date_range("20110101", periods=N, freq="B")
frame.iloc[1] = 1
frame.iloc[-2] = 1
assert frame.first_valid_index() == frame.index[1]
assert frame.last_valid_index() == frame.index[-2]
assert frame.first_valid_index().freq == frame.index.freq
assert frame.last_valid_index().freq == frame.index.freq
# GH 21441
df = DataFrame(data, index=idx)
assert expected_first == df.first_valid_index()
assert expected_last == df.last_valid_index()
def test_first_subset(self):
ts = tm.makeTimeDataFrame(freq='12h')
result = ts.first('10d')
assert len(result) == 20
ts = tm.makeTimeDataFrame(freq='D')
result = ts.first('10d')
assert len(result) == 10
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_frame_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_frame_equal(result, expected)
result = ts[:0].first('3M')
assert_frame_equal(result, ts[:0])
def test_first_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.first('1D')
def test_last_subset(self):
ts = tm.makeTimeDataFrame(freq='12h')
result = ts.last('10d')
assert len(result) == 20
ts = tm.makeTimeDataFrame(nper=30, freq='D')
result = ts.last('10d')
assert len(result) == 10
result = ts.last('21D')
expected = ts['2000-01-10':]
assert_frame_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_frame_equal(result, expected)
result = ts[:0].last('3M')
assert_frame_equal(result, ts[:0])
def test_last_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.last('1D')
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.loc[time(9, 30)]
expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
assert len(rs) == 0
def test_at_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.at_time('00:00')
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.between_time(start_time='00:00', end_time='12:00')
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT,
pd.Timestamp('2012-05-01')]})
res = df.min()
exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"])
tm.assert_series_equal(res, exp)
# GH12941, only NaTs are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT]})
res = df.min()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
def test_datetime_assignment_with_NaT_and_diff_time_units(self):
# GH 7492
data_ns = np.array([1, 'nat'], dtype='datetime64[ns]')
result = pd.Series(data_ns).to_frame()
result['new'] = data_ns
expected = pd.DataFrame({0: [1, None],
'new': [1, None]}, dtype='datetime64[ns]')
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, 'nat'], dtype='datetime64[s]')
result['new'] = data_s
expected = pd.DataFrame({0: [1, None],
'new': [1e9, None]}, dtype='datetime64[ns]')
tm.assert_frame_equal(result, expected)
def test_frame_to_period(self):
K = 5
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
tm.assert_index_equal(pts.index, exp.index.asfreq('M'))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
tm.assert_index_equal(pts.columns, exp.columns.asfreq('M'))
pytest.raises(ValueError, df.to_period, axis=2)
@pytest.mark.parametrize("fn", ['tz_localize', 'tz_convert'])
def test_tz_convert_and_localize(self, fn):
l0 = date_range('20140701', periods=5, freq='D')
l1 = date_range('20140701', periods=5, freq='D')
int_idx = Index(range(5))
if fn == 'tz_convert':
l0 = l0.tz_localize('UTC')
l1 = l1.tz_localize('UTC')
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)('US/Pacific')
l1_expected = getattr(idx, fn)('US/Pacific')
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)('US/Pacific')
assert_index_equal(df1.index, l0_expected)
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)('US/Pacific', level=0)
assert not df3.index.levels[0].equals(l0)
assert_index_equal(df3.index.levels[0], l0_expected)
assert_index_equal(df3.index.levels[1], l1)
assert not df3.index.levels[1].equals(l1_expected)
df3 = getattr(df2, fn)('US/Pacific', level=1)
assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
df4 = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
# TODO: untested
df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa
assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
# Bad Inputs
# Not DatetimeIndex / PeriodIndex
with assert_raises_regex(TypeError, 'DatetimeIndex'):
df = DataFrame(index=int_idx)
df = getattr(df, fn)('US/Pacific')
# Not DatetimeIndex / PeriodIndex
with assert_raises_regex(TypeError, 'DatetimeIndex'):
df = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
df = getattr(df, fn)('US/Pacific', level=0)
# Invalid level
with assert_raises_regex(ValueError, 'not valid'):
df = DataFrame(index=l0)
df = getattr(df, fn)('US/Pacific', level=1)
| bsd-3-clause | -1,191,242,468,060,212,500 | 34.767303 | 78 | 0.523471 | false |
Adrimel/pdb-tools | pdb_fetch.py | 1 | 3126 | #!/usr/bin/env python
"""
Fetches a PDB file (optionally the biological unit) from the RCSB database.
usage: python pdb_fetch.py [-biounit] <pdb id>
example: python pdb_fetch.py 1CTF
Author: {0} ({1})
This program is part of the PDB tools distributed with HADDOCK
or with the HADDOCK tutorial. The utilities in this package
can be used to quickly manipulate PDB files, with the benefit
of 'piping' several different commands. This is a rewrite of old
FORTRAN77 code that was taking too much effort to compile. RIP.
"""
from __future__ import print_function
import gzip
import os
import re
import sys
import cStringIO
import urllib2
__author__ = "Joao Rodrigues"
__email__ = "[email protected]"
USAGE = __doc__.format(__author__, __email__)
def check_input(args):
"""Checks whether to read from stdin/file and validates user input/options."""
if len(args) == 1:
if not re.match('[0-9a-zA-Z]{4}$', args[0]):
sys.stderr.write('Invalid PDB code: ' + args[0] + '\n')
sys.stderr.write(USAGE)
sys.exit(1)
pdb_id = args[0]
biounit = False
elif len(args) == 2:
# Chain & File
if not re.match('\-biounit$', args[0]):
sys.stderr.write('Invalid option: ' + args[0] + '\n')
sys.stderr.write(USAGE)
sys.exit(1)
if not re.match('[0-9a-zA-Z]{4}$', args[1]):
sys.stderr.write('Invalid PDB code: ' + args[1] + '\n')
sys.stderr.write(USAGE)
sys.exit(1)
biounit = True
pdb_id = args[1]
else:
sys.stderr.write(USAGE)
sys.exit(1)
return (pdb_id, biounit)
def _fetch_structure(pdbid, biounit=False):
"""Enclosing logic in a function"""
base_url = 'http://www.rcsb.org/pdb/files/'
pdb_type = '.pdb1' if biounit else '.pdb'
pdb_url = base_url + pdbid.lower() + pdb_type + '.gz'
try:
request = urllib2.Request(pdb_url)
opener = urllib2.build_opener()
url_data = opener.open(request).read()
except urllib2.HTTPError as e:
print('[!] Error fetching structure: ({0}) {1}'.format(e.code, e.msg), file=sys.stderr)
return
else:
try:
buf = cStringIO.StringIO(url_data)
gz_handle = gzip.GzipFile(fileobj=buf, mode='rb')
for line in gz_handle:
yield line
except IOError as e:
print('[!] Error fetching structure: {0}'.format(e.msg), file=sys.stderr)
return
finally:
gz_handle.close()
if __name__ == '__main__':
# Check Input
pdb_id, biounit = check_input(sys.argv[1:])
# Do the job
pdb_structure = _fetch_structure(pdb_id, biounit)
if not pdb_structure:
sys.exit(1)
try:
sys.stdout.write(''.join(pdb_structure))
sys.stdout.flush()
except IOError:
# This is here to catch Broken Pipes
# for example to use 'head' or 'tail' without
# the error message showing up
pass
# last line of the script
# We can close it even if it is sys.stdin
sys.exit(0)
| mit | -5,713,528,853,672,365,000 | 28.214953 | 95 | 0.590531 | false |
silly-wacky-3-town-toon/SOURCE-COD | toontown/toon/DistributedNPCSpecialQuestGiverAI.py | 1 | 8582 | from direct.task.Task import Task
from panda3d.core import *
from panda3d.direct import *
from DistributedNPCToonBaseAI import *
from toontown.quest import Quests
class DistributedNPCSpecialQuestGiverAI(DistributedNPCToonBaseAI):
def __init__(self, air, npcId, questCallback = None, hq = 0):
DistributedNPCToonBaseAI.__init__(self, air, npcId, questCallback)
self.hq = hq
self.tutorial = 0
self.pendingAvId = None
return
def getTutorial(self):
return self.tutorial
def setTutorial(self, val):
self.tutorial = val
def getHq(self):
return self.hq
def avatarEnter(self):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('avatar enter ' + str(avId))
self.air.questManager.requestInteract(avId, self)
DistributedNPCToonBaseAI.avatarEnter(self)
def chooseQuest(self, questId, quest = None):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('chooseQuest: avatar %s choseQuest %s' % (avId, questId))
if not self.pendingAvId:
self.notify.warning('chooseQuest: not expecting an answer from any avatar: %s' % avId)
return
if self.pendingAvId != avId:
self.notify.warning('chooseQuest: not expecting an answer from this avatar: %s' % avId)
return
if questId == 0:
self.pendingAvId = None
self.pendingQuests = None
self.air.questManager.avatarCancelled(avId)
self.cancelChoseQuest(avId)
return
for quest in self.pendingQuests:
if questId == quest[0]:
self.pendingAvId = None
self.pendingQuests = None
self.air.questManager.avatarChoseQuest(avId, self, *quest)
return
self.notify.warning('chooseQuest: avatar: %s chose a quest not offered: %s' % (avId, questId))
self.pendingAvId = None
self.pendingQuests = None
return
def chooseTrack(self, trackId):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('chooseTrack: avatar %s choseTrack %s' % (avId, trackId))
if not self.pendingAvId:
self.notify.warning('chooseTrack: not expecting an answer from any avatar: %s' % avId)
return
if self.pendingAvId != avId:
self.notify.warning('chooseTrack: not expecting an answer from this avatar: %s' % avId)
return
if trackId == -1:
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.air.questManager.avatarCancelled(avId)
self.cancelChoseTrack(avId)
return
for track in self.pendingTracks:
if trackId == track:
self.air.questManager.avatarChoseTrack(avId, self, self.pendingTrackQuest, trackId)
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
return
self.notify.warning('chooseTrack: avatar: %s chose a track not offered: %s' % (avId, trackId))
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
return
def sendTimeoutMovie(self, task):
self.pendingAvId = None
self.pendingQuests = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIMEOUT,
self.npcId,
self.busy,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
self.sendClearMovie(None)
self.busy = 0
return Task.done
def sendClearMovie(self, task):
self.pendingAvId = None
self.pendingQuests = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.busy = 0
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_CLEAR,
self.npcId,
0,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
return Task.done
def rejectAvatar(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_REJECT,
self.npcId,
avId,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(5.5, self.sendClearMovie, self.uniqueName('clearMovie'))
def rejectAvatarTierNotDone(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIER_NOT_DONE,
self.npcId,
avId,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(5.5, self.sendClearMovie, self.uniqueName('clearMovie'))
def completeQuest(self, avId, questId, rewardId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_COMPLETE,
self.npcId,
avId,
[questId, rewardId, 0],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def incompleteQuest(self, avId, questId, completeStatus, toNpcId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_INCOMPLETE,
self.npcId,
avId,
[questId, completeStatus, toNpcId],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def assignQuest(self, avId, questId, rewardId, toNpcId):
self.busy = avId
if self.questCallback:
self.questCallback()
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_ASSIGN,
self.npcId,
avId,
[questId, rewardId, toNpcId],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def presentQuestChoice(self, avId, quests):
self.busy = avId
self.pendingAvId = avId
self.pendingQuests = quests
flatQuests = []
for quest in quests:
flatQuests.extend(quest)
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE,
self.npcId,
avId,
flatQuests,
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def presentTrackChoice(self, avId, questId, tracks):
self.busy = avId
self.pendingAvId = avId
self.pendingTracks = tracks
self.pendingTrackQuest = questId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE,
self.npcId,
avId,
tracks,
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def cancelChoseQuest(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE_CANCEL,
self.npcId,
avId,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def cancelChoseTrack(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE_CANCEL,
self.npcId,
avId,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def setMovieDone(self):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('setMovieDone busy: %s avId: %s' % (self.busy, avId))
if self.busy == avId:
taskMgr.remove(self.uniqueName('clearMovie'))
self.sendClearMovie(None)
elif self.busy:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCToonAI.setMovieDone busy with %s' % self.busy)
self.notify.warning('somebody called setMovieDone that I was not busy with! avId: %s' % avId)
return
| apache-2.0 | -1,137,695,837,540,636,300 | 36.973451 | 119 | 0.62433 | false |
munin/munin | utils/add_padding_script.py | 1 | 4569 | #!/usr/bin/python3
# This file is part of Munin.
# Munin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Munin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Munin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This work is Copyright (C)2006 by Andreas Jacobsen
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
import sys
import psycopg2
import psycopg2.extras
class migrator:
def __init__(self, cursor):
self.cursor = cursor
def add_padding(self):
for i in range(1, 92):
prop = self.find_single_prop_by_id(i)
if not prop or prop["active"] or prop["padding"]:
continue
(voters, yes, no) = self.get_voters_for_prop(prop["id"])
(
winners,
losers,
winning_total,
losing_total,
) = self.get_winners_and_losers(voters, yes, no)
query = "UPDATE %s_proposal SET " % (prop["prop_type"],)
query += " vote_result=%s,compensation=%s"
query += " WHERE id=%s"
args = (["no", "yes"][yes > no], losing_total, prop["id"])
print(query % args)
self.cursor.execute(query, args)
if self.cursor.rowcount < 1:
print("argh!")
def find_single_prop_by_id(self, prop_id):
query = "SELECT id, prop_type, proposer, person, created, padding, comment_text, active, closed FROM ("
query += "SELECT t1.id AS id, 'invite' AS prop_type, t2.pnick AS proposer, t1.person AS person, t1.padding AS padding, t1.created AS created,"
query += (
" t1.comment_text AS comment_text, t1.active AS active, t1.closed AS closed"
)
query += " FROM invite_proposal AS t1 INNER JOIN user_list AS t2 ON t1.proposer_id=t2.id UNION ("
query += " SELECT t3.id AS id, 'kick' AS prop_type, t4.pnick AS proposer, t5.pnick AS person, t3.padding AS padding, t3.created AS created,"
query += (
" t3.comment_text AS comment_text, t3.active AS active, t3.closed AS closed"
)
query += " FROM kick_proposal AS t3"
query += " INNER JOIN user_list AS t4 ON t3.proposer_id=t4.id"
query += (
" INNER JOIN user_list AS t5 ON t3.person_id=t5.id)) AS t6 WHERE t6.id=%s"
)
self.cursor.execute(query, (prop_id,))
return self.cursor.fetchone()
def get_winners_and_losers(self, voters, yes, no):
if yes > no:
losers = voters["no"]
winners = voters["yes"]
winning_total = yes
losing_total = no
else:
winners = voters["no"]
losers = voters["yes"]
winning_total = no
losing_total = yes
return (winners, losers, winning_total, losing_total)
def get_voters_for_prop(self, prop_id):
query = "SELECT t1.vote AS vote,t1.carebears AS carebears"
query += ", t1.prop_id AS prop_idd,t1.voter_id AS voter_id,t2.pnick AS pnick"
query += " FROM prop_vote AS t1"
query += " INNER JOIN user_list AS t2 ON t1.voter_id=t2.id"
query += " WHERE prop_id=%s"
self.cursor.execute(query, (prop_id,))
voters = {}
voters["yes"] = []
voters["no"] = []
voters["abstain"] = []
yes = 0
no = 0
for r in self.cursor.fetchall():
if r["vote"] == "yes":
yes += r["carebears"]
voters["yes"].append(r)
elif r["vote"] == "no":
no += r["carebears"]
voters["no"].append(r)
elif r["vote"] == "abstain":
voters["abstain"].append(r)
return (voters, yes, no)
user = "munin"
db = "patools30"
conn = psycopg2.connect("user=%s dbname=%s" % (user, db))
# psycopg2 exposes autocommit as a connection attribute, not a method, and has
# no serialize(); the script never calls commit(), so run in autocommit mode
conn.autocommit = True
curs = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
m = migrator(curs)
m.add_padding()
| gpl-2.0 | -1,024,861,984,553,755,300 | 36.760331 | 150 | 0.585905 | false |
fedora-infra/gilmsg | setup.py | 1 | 1729 | # This file is part of gilmsg.
# Copyright (C) 2015 Red Hat, Inc.
#
# gilmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# gilmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with gilmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <[email protected]>
#
try:
from setuptools import setup
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
f = open('README.rst')
long_description = f.read().strip()
long_description = long_description.split('split here', 1)[-1]
f.close()
install_requires = [
'fedmsg',
'fedmsg[crypto]',
'fedmsg[consumers]',
'fedmsg[commands]',
]
setup(
name='gilmsg',
version='0.1.2',
description="A reliability layer on top of fedmsg",
long_description=long_description,
author='Ralph Bean',
author_email='[email protected]',
url='https://github.com/fedora-infra/gilmsg/',
license='LGPLv2+',
install_requires=install_requires,
py_modules=['gilmsg'],
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
"gilmsg-logger=gilmsg:logger_cli",
],
},
)
| lgpl-2.1 | -1,347,992,148,444,289,300 | 28.305085 | 78 | 0.693464 | false |
pydsigner/taskit | taskit/common.py | 1 | 4251 | import time
import json
import pickle
import sys
from .log import null_logger, ERROR
__all__ = ['DEFAULT_PORT', 'STOP', 'KILL', 'STATUS', 'bytes', 'basestring',
'show_err', 'FirstBytesCorruptionError', 'FirstBytesProtocol',
'JSONCodec', 'PickleCodec']
DEFAULT_PORT = 54543
if bytes is str:
# Python 2
def bytes(s, enc):
return s
basestring = basestring
else:
# Python 3
bytes = bytes
basestring = str
STOP = '<stop>'
KILL = '<kill>'
STATUS = '<status>'
def show_err():
sys.excepthook(*sys.exc_info())
class FirstBytesCorruptionError(Exception):
"""
Exception raised when the first byte of a FB LMTP message is not a 0 or 1.
"""
class FirstBytesProtocol(object):
"""
A mixin class that has methods for sending and receiving information using
the First Bytes long message transfer protocol.
"""
first = 4
    # size-field format string: '%0<first>x' (zero-padded hex, `first` digits wide)
size_insert = '%04x'
def __init__(self, logger=null_logger, data_size=2048):
"""
data_size -- The maximum length of the data slices created. Will not be
exceeded, but in many cases will not ever be reached. This
value can be any positive "short", but the real-world
network concerns mentioned in the official documentation
for `socket.recv()` apply here -- be kind to the program
that your program is communicating with!
"""
self.set_size(data_size)
self.log = logger
def _size_bytes(self, size):
return bytes(self.size_insert % size, 'utf-8')
def _wire_recv(self, sock, size):
left = size
data = ''
while left:
chunk = sock.recv(left).decode()
if not chunk:
raise FirstBytesCorruptionError(
'Socket connection or remote codebase is broken!')
data += chunk
left -= len(chunk)
return data
def set_size(self, data_size):
"""
Set the data slice size.
"""
if len(str(data_size)) > self.first:
raise ValueError(
'Send size is too large for message size-field width!')
self.data_size = data_size
def recv(self, sock):
data = ''
# Cache the header size for speed
hsize = self.first + 1
while 1:
header = self._wire_recv(sock, hsize)
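            # header layout: one continuation flag ('1' = more chunks follow,
            # '0' = final chunk) followed by `first` hex digits of chunk size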
bit = header[0]
if bit not in ('0', '1'):
self.log(ERROR, 'First char %r not one of "0" or "1"!' % bit)
raise FirstBytesCorruptionError(
'Protocol corruption detected -- '
'first char in message was not a 0 or a 1!'
)
# So, how big a piece do we need to grab?
size = int(header[1:], 16)
# Get it.
data += self._wire_recv(sock, size)
# If nothing else will be sent, then we are finished.
if bit == '0':
return data
def send(self, sock, data):
# Cache max data size for speed
ds = self.data_size
# Also cache the "max data size"-sized-data prefix
norm = b'1' + self._size_bytes(ds)
data = bytes(data, 'utf-8')
while data:
dlen = len(data)
            # use <= so a payload whose length is an exact multiple of the
            # chunk size still gets a final '0' header and the receiver stops
            if dlen <= ds:
pre = b'0' + self._size_bytes(dlen)
else:
pre = norm
sock.sendall(pre + data[:ds])
data = data[ds:]
class JSONCodec(object):
"""
Standard codec using JSON. Good balance of scope and support.
"""
@staticmethod
def encode(obj):
return json.dumps(obj)
@staticmethod
def decode(enc):
return json.loads(enc)
class PickleCodec(object):
"""
Basic codec using pickle (default version) for encoding. Do not use if
cross-language support is desired.
"""
@staticmethod
def encode(obj):
return pickle.dumps(obj)
@staticmethod
def decode(enc):
return pickle.loads(enc)
| lgpl-3.0 | -2,156,986,932,203,192,000 | 25.905063 | 80 | 0.530699 | false |
botswana-harvard/edc-sync | edc_sync/tests/test_sync.py | 1 | 10697 | from django.apps import apps as django_apps
from django.core.exceptions import MultipleObjectsReturned
from django.test import TestCase, tag
from django.test.utils import override_settings
from ..constants import INSERT, UPDATE
from ..models import OutgoingTransaction
from ..site_sync_models import site_sync_models
from ..sync_model import SyncHistoricalManagerError, SyncUuidPrimaryKeyMissing
from ..sync_model import SyncModel
from ..sync_model import SyncNaturalKeyMissing, SyncGetByNaturalKeyMissing
from .models import TestModel, BadTestModel, AnotherBadTestModel, YetAnotherBadTestModel
from .models import TestModelWithFkProtected
from .models import TestSyncModelNoHistoryManager, TestSyncModelNoUuid
Crypt = django_apps.get_model('django_crypto_fields.crypt')
edc_device_app_config = django_apps.get_app_config('edc_device')
site_sync_models
class TestSync(TestCase):
databases = '__all__'
def setUp(self):
site_sync_models.registry = {}
site_sync_models.loaded = False
sync_models = ['edc_sync.testmodel',
'edc_sync.badtestmodel',
'edc_sync.anotherbadtestmodel',
'edc_sync.yetanotherbadtestmodel',
'edc_sync.testmodelwithfkprotected',
'edc_sync.testmodelwithm2m',
'edc_sync.testsyncmodelnohistorymanager',
'edc_sync.testsyncmodelnouuid']
site_sync_models.register(models=sync_models)
def get_credentials(self):
return self.create_apikey(username=self.username,
api_key=self.api_client_key)
def test_str(self):
obj = TestModel()
obj = SyncModel(obj)
self.assertTrue(str(obj))
self.assertTrue(repr(obj))
def test_raises_on_missing_natural_key(self):
with override_settings(DEVICE_ID='10'):
with self.assertRaises(SyncNaturalKeyMissing):
BadTestModel.objects.using('client').create()
def test_raises_on_missing_get_by_natural_key(self):
with override_settings(DEVICE_ID='10'):
with self.assertRaises(SyncGetByNaturalKeyMissing):
AnotherBadTestModel.objects.using('client').create()
def test_raises_on_wrong_type_of_historical_manager(self):
with override_settings(DEVICE_ID='10'):
with self.assertRaises(SyncHistoricalManagerError):
YetAnotherBadTestModel.objects.using('client').create()
def test_raises_on_no_historical_manager(self):
with override_settings(DEVICE_ID='10'):
try:
TestSyncModelNoHistoryManager.objects.using('client').create()
except SyncHistoricalManagerError:
self.fail('SyncHistoricalManagerError unexpectedly raised.')
def test_raises_on_missing_uuid_primary_key(self):
with override_settings(DEVICE_ID='10'):
with self.assertRaises(SyncUuidPrimaryKeyMissing):
TestSyncModelNoUuid.objects.using('client').create()
def test_creates_outgoing_on_add(self):
with override_settings(DEVICE_ID='10'):
test_model = TestModel.objects.using('client').create(f1='erik')
with self.assertRaises(OutgoingTransaction.DoesNotExist):
try:
OutgoingTransaction.objects.using('client').get(
tx_pk=test_model.pk,
tx_name='edc_sync.testmodel',
action=INSERT)
except OutgoingTransaction.DoesNotExist:
pass
else:
raise OutgoingTransaction.DoesNotExist()
history_obj = test_model.history.using(
'client').get(id=test_model.id)
with self.assertRaises(OutgoingTransaction.DoesNotExist):
try:
OutgoingTransaction.objects.using('client').get(
tx_pk=history_obj.history_id,
tx_name='edc_sync.historicaltestmodel',
action=INSERT)
except OutgoingTransaction.DoesNotExist:
pass
else:
raise OutgoingTransaction.DoesNotExist()
@tag('1')
def test_creates_outgoing_on_add_with_fk_in_order(self):
with override_settings(DEVICE_ID='10'):
outgoing = {}
test_model = TestModel.objects.using('client').create(f1='erik')
test_model_with_fk = TestModelWithFkProtected.objects.using(
'client').create(f1='f1', test_model=test_model)
outgoing.update(
test_model=OutgoingTransaction.objects.using('client').get(
tx_pk=test_model.pk,
tx_name='edc_sync.testmodel',
action=INSERT))
history_obj = test_model.history.using(
'client').get(id=test_model.id)
outgoing.update(
test_model_historical=OutgoingTransaction.objects.using('client').get(
tx_pk=history_obj.history_id,
tx_name='edc_sync.historicaltestmodel',
action=INSERT))
with self.assertRaises(OutgoingTransaction.DoesNotExist):
try:
outgoing.update(
test_model_with_fk=OutgoingTransaction.objects.using('client').get(
tx_pk=test_model_with_fk.pk,
tx_name='edc_sync.testmodelwithfkprotected',
action=INSERT))
except OutgoingTransaction.DoesNotExist:
pass
else:
raise OutgoingTransaction.DoesNotExist()
history_obj = test_model_with_fk.history.using(
'client').get(id=test_model_with_fk.id)
with self.assertRaises(OutgoingTransaction.DoesNotExist):
try:
outgoing.update(
test_model_with_fk_historical=(
OutgoingTransaction.objects.using('client').get(
tx_pk=history_obj.history_id,
tx_name='edc_sync.historicaltestmodelwithfkprotected',
action=INSERT)))
except OutgoingTransaction.DoesNotExist:
pass
else:
raise OutgoingTransaction.DoesNotExist()
@override_settings(ALLOW_MODEL_SERIALIZATION=False)
def test_does_not_create_outgoing(self):
with override_settings(DEVICE_ID='10', ALLOW_MODEL_SERIALIZATION=False):
test_model = TestModel.objects.using('client').create(f1='erik')
with self.assertRaises(OutgoingTransaction.DoesNotExist):
OutgoingTransaction.objects.using(
'client').get(tx_pk=test_model.pk)
def test_creates_outgoing_on_change(self):
with override_settings(DEVICE_ID='10'):
test_model = TestModel.objects.using('client').create(f1='erik')
test_model.save(using='client')
with self.assertRaises(OutgoingTransaction.DoesNotExist):
try:
OutgoingTransaction.objects.using('client').get(
tx_pk=test_model.pk,
tx_name='edc_sync.testmodel',
action=INSERT)
OutgoingTransaction.objects.using('client').get(
tx_pk=test_model.pk,
tx_name='edc_sync.testmodel',
action=UPDATE)
except OutgoingTransaction.DoesNotExist:
pass
else:
raise OutgoingTransaction.DoesNotExist()
self.assertEqual(
2, OutgoingTransaction.objects.using('client').filter(
tx_name='edc_sync.historicaltestmodel',
action=INSERT).count())
def test_timestamp_is_default_order(self):
with override_settings(DEVICE_ID='10'):
test_model = TestModel.objects.using('client').create(f1='erik')
test_model.save(using='client')
last = 0
for obj in OutgoingTransaction.objects.using('client').all():
self.assertGreaterEqual(int(obj.timestamp), last)
last = int(obj.timestamp)
def test_created_obj_serializes_to_correct_db(self):
"""Asserts that the obj and the audit obj serialize to the
correct DB in a multi-database environment.
"""
TestModel.objects.using('client').create(f1='erik')
result = [
obj.tx_name for obj in OutgoingTransaction.objects.using('client').all()]
result.sort()
self.assertListEqual(
result,
['edc_sync.historicaltestmodel', 'edc_sync.testmodel'])
self.assertListEqual(
[obj.tx_name for obj in OutgoingTransaction.objects.using('server').all()], [])
self.assertRaises(
OutgoingTransaction.DoesNotExist,
OutgoingTransaction.objects.using('server').get,
tx_name='edc_sync.testmodel')
self.assertRaises(
MultipleObjectsReturned,
OutgoingTransaction.objects.using('client').get,
tx_name__contains='testmodel')
def test_updated_obj_serializes_to_correct_db(self):
"""Asserts that the obj and the audit obj serialize to the
correct DB in a multi-database environment.
"""
test_model = TestModel.objects.using('client').create(f1='erik')
result = [obj.tx_name for obj in OutgoingTransaction.objects.using(
'client').filter(action=INSERT)]
result.sort()
self.assertListEqual(
result, ['edc_sync.historicaltestmodel',
'edc_sync.testmodel'])
self.assertListEqual(
[obj.tx_name for obj in OutgoingTransaction.objects.using(
'client').filter(action=UPDATE)],
[])
test_model.save(using='client')
self.assertListEqual(
[obj.tx_name for obj in OutgoingTransaction.objects.using(
'client').filter(action=UPDATE)],
['edc_sync.testmodel'])
result = [obj.tx_name for obj in OutgoingTransaction.objects.using(
'client').filter(action=INSERT)]
result.sort()
self.assertListEqual(
result,
['edc_sync.historicaltestmodel',
'edc_sync.historicaltestmodel',
'edc_sync.testmodel'])
| gpl-2.0 | 1,377,763,837,804,787,700 | 44.135021 | 91 | 0.586146 | false |
conan-io/conan | conans/test/functional/conanfile/runner_test.py | 1 | 9622 | import os
import platform
import textwrap
import unittest
import pytest
import six
from conans.client.runner import ConanRunner
from conans.client.tools import environment_append
from conans.test.utils.mocks import TestBufferConanOutput
from conans.test.utils.tools import TestClient
class RunnerTest(unittest.TestCase):
def _install_and_build(self, conanfile_text, runner=None):
client = TestClient(runner=runner)
files = {"conanfile.py": conanfile_text}
test_folder = os.path.join(client.current_folder, "test_folder")
self.assertFalse(os.path.exists(test_folder))
client.save(files)
client.run("install .")
client.run("build .")
return client
def test_ignore_error(self):
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
def source(self):
ret = self.run("not_a_command", ignore_errors=True)
self.output.info("RETCODE %s" % (ret!=0))
"""
client = TestClient()
client.save({"conanfile.py": conanfile})
client.run("source .")
self.assertIn("RETCODE True", client.out)
def test_basic(self):
conanfile = '''
from conans import ConanFile
from conans.client.runner import ConanRunner
import platform
class ConanFileToolsTest(ConanFile):
def build(self):
self._runner = ConanRunner()
self.run("mkdir test_folder")
'''
client = self._install_and_build(conanfile)
test_folder = os.path.join(client.current_folder, "test_folder")
self.assertTrue(os.path.exists(test_folder))
def test_write_to_stringio(self):
runner = ConanRunner(print_commands_to_output=True,
generate_run_log_file=True,
log_run_to_output=True)
out = six.StringIO()
runner("python --version", output=out)
self.assertIn("""---Running------
> python --version
-----------------""", out.getvalue())
@pytest.mark.tool_cmake
def test_log(self):
conanfile = '''
from conans import ConanFile
class ConanFileToolsTest(ConanFile):
def build(self):
self.run("cmake --version")
'''
# A runner logging everything
output = TestBufferConanOutput()
runner = ConanRunner(print_commands_to_output=True,
generate_run_log_file=True,
log_run_to_output=True,
output=output)
self._install_and_build(conanfile, runner=runner)
self.assertIn("--Running---", output)
self.assertIn("> cmake --version", output)
self.assertIn("cmake version", output)
self.assertIn("Logging command output to file ", output)
# A runner logging everything
output = TestBufferConanOutput()
runner = ConanRunner(print_commands_to_output=True,
generate_run_log_file=False,
log_run_to_output=True,
output=output)
self._install_and_build(conanfile, runner=runner)
self.assertIn("--Running---", output)
self.assertIn("> cmake --version", output)
self.assertIn("cmake version", output)
self.assertNotIn("Logging command output to file ", output)
output = TestBufferConanOutput()
runner = ConanRunner(print_commands_to_output=False,
generate_run_log_file=True,
log_run_to_output=True,
output=output)
self._install_and_build(conanfile, runner=runner)
self.assertNotIn("--Running---", output)
self.assertNotIn("> cmake --version", output)
self.assertIn("cmake version", output)
self.assertIn("Logging command output to file ", output)
output = TestBufferConanOutput()
runner = ConanRunner(print_commands_to_output=False,
generate_run_log_file=False,
log_run_to_output=True,
output=output)
self._install_and_build(conanfile, runner=runner)
self.assertNotIn("--Running---", output)
self.assertNotIn("> cmake --version", output)
self.assertIn("cmake version", output)
self.assertNotIn("Logging command output to file ", output)
output = TestBufferConanOutput()
runner = ConanRunner(print_commands_to_output=False,
generate_run_log_file=False,
log_run_to_output=False,
output=output)
self._install_and_build(conanfile, runner=runner)
self.assertNotIn("--Running---", output)
self.assertNotIn("> cmake --version", output)
self.assertNotIn("cmake version", output)
self.assertNotIn("Logging command output to file ", output)
output = TestBufferConanOutput()
runner = ConanRunner(print_commands_to_output=False,
generate_run_log_file=True,
log_run_to_output=False,
output=output)
self._install_and_build(conanfile, runner=runner)
self.assertNotIn("--Running---", output)
self.assertNotIn("> cmake --version", output)
self.assertNotIn("cmake version", output)
self.assertIn("Logging command output to file ", output)
def test_cwd(self):
conanfile = '''
from conans import ConanFile
from conans.client.runner import ConanRunner
import platform
class ConanFileToolsTest(ConanFile):
def build(self):
self._runner = ConanRunner()
self.run("mkdir test_folder", cwd="child_folder")
'''
files = {"conanfile.py": conanfile}
client = TestClient()
os.makedirs(os.path.join(client.current_folder, "child_folder"))
test_folder = os.path.join(client.current_folder, "child_folder", "test_folder")
self.assertFalse(os.path.exists(test_folder))
client.save(files)
client.run("install .")
client.run("build .")
self.assertTrue(os.path.exists(test_folder))
def test_cwd_error(self):
conanfile = '''
from conans import ConanFile
from conans.client.runner import ConanRunner
import platform
class ConanFileToolsTest(ConanFile):
def build(self):
self._runner = ConanRunner()
self.run("mkdir test_folder", cwd="non_existing_folder")
'''
files = {"conanfile.py": conanfile}
client = TestClient()
test_folder = os.path.join(client.current_folder, "child_folder", "test_folder")
self.assertFalse(os.path.exists(test_folder))
client.save(files)
client.run("install .")
client.run("build .", assert_error=True)
self.assertIn("Error while executing 'mkdir test_folder'", client.out)
self.assertFalse(os.path.exists(test_folder))
def test_runner_capture_output(self):
conanfile = textwrap.dedent("""
from conans import ConanFile
class Pkg(ConanFile):
def source(self):
self.run("echo 'hello Conan!'")
""")
client = TestClient()
client.save({"conanfile.py": conanfile})
client.run("source .")
self.assertIn("hello Conan!", client.out)
def test_custom_stream_error(self):
# https://github.com/conan-io/conan/issues/7888
conanfile = textwrap.dedent("""
from conans import ConanFile
class Pkg(ConanFile):
def source(self):
class Buf:
def __init__(self):
self.buf = []
def write(self, data):
self.buf.append(data)
my_buf = Buf()
self.run('echo "Hello"', output=my_buf)
self.output.info("Buffer got msgs {}".format(len(my_buf.buf)))
""")
client = TestClient()
client.save({"conanfile.py": conanfile})
client.run("source .")
self.assertIn("Buffer got msgs 1", client.out)
def test_credentials_removed(self):
conanfile = textwrap.dedent("""
import os
import platform
from conans import ConanFile
class Recipe(ConanFile):
def export(self):
self.output.info(">> key: {}<<".format(os.getenv('CONAN_LOGIN_ENCRYPTION_KEY')))
self.output.info(">> var: {}<<".format(os.getenv('OTHER_VAR')))
if platform.system() == 'Windows':
self.run("echo key: %CONAN_LOGIN_ENCRYPTION_KEY%--")
self.run("echo var: %OTHER_VAR%--")
else:
self.run("echo key: $CONAN_LOGIN_ENCRYPTION_KEY--")
self.run("echo var: $OTHER_VAR--")
""")
with environment_append({'CONAN_LOGIN_ENCRYPTION_KEY': 'secret!', 'OTHER_VAR': 'other_var'}):
client = TestClient()
client.save({"conanfile.py": conanfile})
client.run("export . name/version@")
self.assertIn("name/version: >> key: secret!<<", client.out)
self.assertIn("name/version: >> var: other_var<<", client.out)
if platform.system() == 'Windows':
self.assertIn("key: %CONAN_LOGIN_ENCRYPTION_KEY%--", client.out)
else:
self.assertIn("key: --", client.out)
self.assertIn("var: other_var--", client.out)
| mit | 4,628,935,908,947,907,000 | 37.488 | 101 | 0.574101 | false |
xu2243051/easyui-menu | easyui/utils.py | 1 | 1633 | #coding:utf-8
'''
Generic helpers for the custom functionality pages of this app.
'''
from django.conf.urls import patterns, url
def model_serialize(queryset, extra_fields=[], remove_fields = [], fields = []):
"""
@param queryset queryset
@return a list of dict [{}, {}]
    Custom JSON serialization helper, closely tied to get_fields in ExtraMixin.
"""
return_list = []
for object in queryset:
value_dict = dict(object.get_fields(field_verbose=False, value_verbose=True,
fields=fields, remove_fields=remove_fields, extra_fields=extra_fields))
return_list.append(value_dict)
return return_list
def register_views(app_name, view_filename, urlpatterns=None):
"""
    app_name        name of the app
    view_filename   file in which the views are defined
    urlpatterns     urlpatterns already present in urls
    return urlpatterns
    Only class-based views whose names end with 'View' are imported.
"""
app_module = __import__(app_name)
view_module = getattr(app_module, view_filename)
views = dir(view_module)
for view_name in views:
if view_name.endswith('View'):
view = getattr(view_module, view_name)
if isinstance(view, object):
if urlpatterns:
urlpatterns += patterns('',
url(r'^(?i)%s/$' % view_name, view.as_view(), name=view_name),
)
else:
urlpatterns = patterns('',
url(r'^(?i)%s/$' % view_name, view.as_view(), name=view_name),
)
else:
pass
return urlpatterns
| apache-2.0 | 5,187,833,718,034,250,000 | 30.854167 | 91 | 0.551341 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/services/mail/signedmessage.py | 1 | 6389 | # Copyright 2009-2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Classes for simpler handling of PGP signed email messages."""
__metaclass__ = type
__all__ = [
'SignedMessage',
'signed_message_from_string',
'strip_pgp_signature',
]
import email
import re
from zope.interface import implements
from lp.services.mail.interfaces import ISignedMessage
clearsigned_re = re.compile(
r'-----BEGIN PGP SIGNED MESSAGE-----'
'.*?(?:\r\n|\n)(?:\r\n|\n)(.*)(?:\r\n|\n)'
'(-----BEGIN PGP SIGNATURE-----'
'.*'
'-----END PGP SIGNATURE-----)',
re.DOTALL)
# Regexp for matching the signed content in multipart messages.
multipart_signed_content = (
r'%(boundary)s\n(?P<signed_content>.*?)\n%(boundary)s\n.*?\n%(boundary)s')
# Lines that start with '-' are escaped with '- '.
dash_escaped = re.compile('^- ', re.MULTILINE)
def signed_message_from_string(string):
"""Parse the string and return a SignedMessage.
It makes sure that the SignedMessage instance has access to the
parsed string.
"""
msg = email.message_from_string(string, _class=SignedMessage)
msg.parsed_string = string
return msg
class SignedMessage(email.Message.Message):
"""Provides easy access to signed content and the signature"""
implements(ISignedMessage)
parsed_string = None
def _getSignatureAndSignedContent(self):
"""Returns the PGP signature and the content that's signed.
The signature is returned as a string, and the content is
returned as a string.
If the message isn't signed, both signature and the content is
None.
"""
assert self.parsed_string is not None, (
'Use signed_message_from_string() to create the message.')
signed_content = signature = None
# Check for MIME/PGP signed message first.
# See: RFC3156 - MIME Security with OpenPGP
# RFC3156 says that in order to be a complient signed message, there
# must be two and only two parts and that the second part must have
# content_type 'application/pgp-signature'.
if self.is_multipart():
payload = self.get_payload()
if len(payload) == 2:
content_part, signature_part = payload
sig_content_type = signature_part.get_content_type()
if sig_content_type == 'application/pgp-signature':
# We need to extract the signed content from the
# parsed string, since content_part.as_string()
                    # isn't guaranteed to return the exact string it was
# created from.
boundary = '--' + self.get_boundary()
match = re.search(
multipart_signed_content % {
'boundary': re.escape(boundary)},
self.parsed_string, re.DOTALL)
signed_content = match.group('signed_content')
signature = signature_part.get_payload()
return signature, signed_content
# If we still have no signature, then we have one of several cases:
# 1) We do not have a multipart message
# 2) We have a multipart message with two parts, but the second part
# isn't a signature. E.g.
# multipart/mixed
# text/plain <- clear signed review comment
# text/x-diff <- patch
# 3) We have a multipart message with more than two parts.
# multipart/mixed
# text/plain <- clear signed body text
        #      text/x-diff <- patch or merge directive
# application/pgp-signature <- detached signature
# Now we can handle one and two by walking the content and stopping at
# the first part that isn't multipart, and getting a signature out of
# that. We can partly handle number three by at least checking the
# clear text signed message, but we don't check the detached signature
# for the attachment.
for part in self.walk():
if part.is_multipart():
continue
match = clearsigned_re.search(part.get_payload())
if match is not None:
signed_content_unescaped = match.group(1)
signed_content = dash_escaped.sub(
'', signed_content_unescaped)
signature = match.group(2)
return signature, signed_content
# Stop processing after the first non-multipart part.
break
return signature, signed_content
@property
def signedMessage(self):
"""Returns the PGP signed content as a message.
Returns None if the message wasn't signed.
"""
signature, signed_content = self._getSignatureAndSignedContent()
if signed_content is None:
return None
else:
if (not self.is_multipart() and
clearsigned_re.search(self.get_payload())):
# Add a new line so that a message with no headers will
# be created.
signed_content = '\n' + signed_content
return signed_message_from_string(signed_content)
@property
def signedContent(self):
"""Returns the PGP signed content as a string.
Returns None if the message wasn't signed.
"""
signature, signed_content = self._getSignatureAndSignedContent()
return signed_content
@property
def signature(self):
"""Returns the PGP signature used to sign the message.
Returns None if the message wasn't signed.
"""
signature, signed_content = self._getSignatureAndSignedContent()
return signature
@property
def raw_length(self):
"""Return the length in bytes of the underlying raw form."""
return len(self.parsed_string)
def strip_pgp_signature(text):
"""Strip any PGP signature from the supplied text."""
signed_message = signed_message_from_string(text)
# For unsigned text the signedContent will be None.
if signed_message.signedContent is not None:
return signed_message.signedContent
else:
return text
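# --- Illustrative usage sketch (added for clarity; not part of the original
# module).  'example.eml' is a hypothetical path to a raw RFC 2822 message;
# only the public helpers defined above are used.
def _example_usage(path='example.eml'):
    raw = open(path).read()
    msg = signed_message_from_string(raw)
    if msg.signature is not None:
        # The message carried a PGP signature (MIME/PGP or clearsigned).
        return msg.signedContent
    # Unsigned: fall back to the original text, as strip_pgp_signature does.
    return strip_pgp_signature(raw)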
| agpl-3.0 | -9,070,205,200,195,638,000 | 36.804734 | 78 | 0.604633 | false |
alex/warehouse | tests/unit/test_csp.py | 1 | 8157 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import pretend
from warehouse import csp
class TestCSPTween:
def test_csp_policy(self):
response = pretend.stub(headers={})
handler = pretend.call_recorder(lambda request: response)
settings = {
"csp": {
"default-src": ["*"],
"style-src": ["'self'", "example.net"],
},
}
registry = pretend.stub(settings=settings)
tween = csp.content_security_policy_tween_factory(handler, registry)
request = pretend.stub(
path="/project/foobar/",
find_service=pretend.call_recorder(
lambda *args, **kwargs: settings["csp"]
),
)
assert tween(request) is response
assert response.headers == {
"Content-Security-Policy":
"default-src *; style-src 'self' example.net",
}
def test_csp_policy_default(self):
response = pretend.stub(headers={})
handler = pretend.call_recorder(lambda request: response)
registry = pretend.stub(settings={})
tween = csp.content_security_policy_tween_factory(handler, registry)
request = pretend.stub(
path="/path/to/nowhere/",
find_service=pretend.raiser(ValueError),
)
assert tween(request) is response
assert response.headers == {}
def test_csp_policy_debug_disables(self):
response = pretend.stub(headers={})
handler = pretend.call_recorder(lambda request: response)
settings = {
"csp": {
"default-src": ["*"],
"style-src": ["'self'", "example.net"],
},
}
registry = pretend.stub(settings=settings)
tween = csp.content_security_policy_tween_factory(handler, registry)
request = pretend.stub(
path="/_debug_toolbar/foo/",
find_service=pretend.call_recorder(
lambda *args, **kwargs: settings["csp"]
),
)
assert tween(request) is response
assert response.headers == {}
def test_csp_policy_inject(self):
response = pretend.stub(headers={})
def handler(request):
request.find_service("csp")["default-src"].append("example.com")
return response
settings = {
"csp": {
"default-src": ["*"],
"style-src": ["'self'"],
},
}
registry = pretend.stub(settings=settings)
tween = csp.content_security_policy_tween_factory(handler, registry)
request = pretend.stub(
path="/example",
find_service=pretend.call_recorder(
lambda *args, **kwargs: settings["csp"]
),
)
assert tween(request) is response
assert response.headers == {
"Content-Security-Policy":
"default-src * example.com; style-src 'self'",
}
def test_csp_policy_default_inject(self):
settings = collections.defaultdict(list)
response = pretend.stub(headers={})
registry = pretend.stub(settings=settings)
def handler(request):
request.find_service("csp")["default-src"].append("example.com")
return response
tween = csp.content_security_policy_tween_factory(handler, registry)
request = pretend.stub(
path="/path/to/nowhere/",
find_service=pretend.call_recorder(
lambda *args, **kwargs: settings
),
)
assert tween(request) is response
assert response.headers == {
"Content-Security-Policy": "default-src example.com"
}
def test_devel_csp(self):
settings = {
"csp": {
"script-src": ["{request.scheme}://{request.host}"],
}
}
response = pretend.stub(headers={})
registry = pretend.stub(settings=settings)
handler = pretend.call_recorder(lambda request: response)
tween = csp.content_security_policy_tween_factory(handler, registry)
request = pretend.stub(
scheme="https",
host="example.com",
path="/path/to/nowhere",
find_service=pretend.call_recorder(
lambda *args, **kwargs: settings["csp"],
),
)
assert tween(request) is response
assert response.headers == {
"Content-Security-Policy": "script-src https://example.com",
}
class TestCSPPolicy:
def test_create(self):
policy = csp.CSPPolicy({"foo": ["bar"]})
assert isinstance(policy, collections.defaultdict)
def test_merge(self):
policy = csp.CSPPolicy({"foo": ["bar"]})
policy.merge({
"foo": ["baz"],
"something": ["else"],
})
assert policy == {
"foo": ["bar", "baz"],
"something": ["else"],
}
def test_includeme():
config = pretend.stub(
register_service_factory=pretend.call_recorder(
lambda fact, name: None),
add_settings=pretend.call_recorder(lambda settings: None),
add_tween=pretend.call_recorder(lambda tween: None),
registry=pretend.stub(settings={
"camo.url": "camo.url.value",
"statuspage.url": "https://2p66nmmycsj3.statuspage.io",
}),
)
csp.includeme(config)
assert config.register_service_factory.calls == [
pretend.call(csp.csp_factory, name="csp")
]
assert config.add_tween.calls == [
pretend.call("warehouse.csp.content_security_policy_tween_factory"),
]
assert config.add_settings.calls == [
pretend.call({
"csp": {
"base-uri": ["'self'"],
"block-all-mixed-content": [],
"connect-src": [
"'self'",
"https://2p66nmmycsj3.statuspage.io",
],
"default-src": ["'none'"],
"font-src": ["'self'", "fonts.gstatic.com"],
"form-action": ["'self'"],
"frame-ancestors": ["'none'"],
"frame-src": ["'none'"],
"img-src": [
"'self'",
"camo.url.value",
"https://secure.gravatar.com",
],
"referrer": ["origin-when-cross-origin"],
"reflected-xss": ["block"],
"script-src": ["'self'", "www.google-analytics.com"],
"style-src": ["'self'", "fonts.googleapis.com"],
},
})
]
class TestFactory:
def test_copy(self):
settings = {
"csp": {
"foo": "bar",
},
}
request = pretend.stub(
registry=pretend.stub(
settings=settings
)
)
result = csp.csp_factory(None, request)
assert isinstance(result, csp.CSPPolicy)
assert result == settings["csp"]
# ensure changes to factory result don't propagate back to the
# settings
result["baz"] = "foo"
assert result == {"foo": "bar", "baz": "foo"}
assert settings == {"csp": {"foo": "bar"}}
def test_default(self):
request = pretend.stub(
registry=pretend.stub(
settings={}
)
)
result = csp.csp_factory(None, request)
assert isinstance(result, csp.CSPPolicy)
assert result == {}
| apache-2.0 | 5,533,382,380,218,571,000 | 30.015209 | 76 | 0.533897 | false |
shichao-an/ctci | chapter9/question9.5.py | 1 | 1696 | from __future__ import print_function
"""
Write a method to compute all permutations of a string
"""
def get_permutations1(s):
"""
Append (or prepend) every character to each permutation of the
string which does not contain the current character
"""
if not s:
return ['']
else:
res = []
for i, c in enumerate(s):
rest_s = s[:i] + s[i + 1:]
rest_perms = get_permutations1(rest_s)
for perm in rest_perms:
res.append(perm + c)
return res
def insert_at(s, c, i):
return s[:i] + c + s[i:]
def get_permutations2(s):
"""
Insert the first (or last) character to every spot of each permutation
of the remaining string after this character
"""
if not s:
return ['']
else:
res = []
c = s[0]
rest_s = s[1:]
rest_perms = get_permutations2(rest_s)
for perm in rest_perms:
for i in range(len(perm) + 1):
ns = insert_at(perm, c, i)
res.append(ns)
return res
def get_permutations3_aux(s, cand, res):
"""Backtrack"""
if not s:
res.append(cand)
else:
for i, c in enumerate(s):
get_permutations3_aux(s[:i] + s[i + 1:], cand + c, res)
def get_permutations3(s):
res = []
cand = ''
get_permutations3_aux(s, cand, res)
return res
def _test():
pass
def _print():
s1 = 'abc'
r1 = get_permutations1(s1)
r2 = get_permutations2(s1)
r3 = get_permutations3(s1)
r1.sort()
r2.sort()
r3.sort()
print(r1)
print(r2)
print(r3)
if __name__ == '__main__':
_test()
_print()
| bsd-2-clause | 3,653,016,667,504,779,000 | 19.190476 | 74 | 0.527123 | false |
jzrake/mara-tools | mara_tools/lic/lic.py | 1 | 1617 | import numpy as np
def lic_flow(vectors,len_pix=10):
vectors = np.asarray(vectors)
m,n,two = vectors.shape
if two!=2:
raise ValueError
result = np.zeros((2*len_pix+1,m,n,2),dtype=np.int32) # FIXME: int16?
center = len_pix
result[center,:,:,0] = np.arange(m)[:,np.newaxis]
result[center,:,:,1] = np.arange(n)[np.newaxis,:]
for i in range(m):
for j in range(n):
y = i
x = j
fx = 0.5
fy = 0.5
for k in range(len_pix):
vx, vy = vectors[y,x]
print x, y, vx, vy
if vx>=0:
tx = (1-fx)/vx
else:
tx = -fx/vx
if vy>=0:
ty = (1-fy)/vy
else:
ty = -fy/vy
if tx<ty:
print "x step"
if vx>0:
x+=1
fy+=vy*tx
fx=0.
else:
x-=1
fy+=vy*tx
fx=1.
else:
print "y step"
if vy>0:
y+=1
fx+=vx*ty
fy=0.
else:
y-=1
fx+=vx*ty
fy=1.
if x<0: x=0
if y<0: y=0
if x>=n: x=n-1
if y>=m: y=m-1
result[center+k+1,i,j,:] = y, x
return result
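# --- Illustrative usage sketch (added for clarity; not part of the original
# module).  A tiny uniform diagonal field keeps every vector non-zero; the
# sizes and len_pix value below are arbitrary example choices.
def _example_usage():
    m, n = 4, 4
    field = np.ones((m, n, 2)) / np.sqrt(2.0)   # unit-length diagonal flow
    coords = lic_flow(field, len_pix=2)
    # coords has shape (2*len_pix+1, m, n, 2): traced pixel indices per line
    return coords.shape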
| gpl-2.0 | -3,010,048,274,979,160,600 | 25.95 | 73 | 0.30303 | false |
SupayrPoney/RopeSkippingTimer | main.py | 1 | 7577 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import PyQt5
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import time
import math
import winsound
EXERCISETIME = 60
class MainWidget(QWidget):
freqChangeSignal = pyqtSignal(object)
togglerSignal = pyqtSignal()
"""docstring for MainWidget"""
def __init__(self, parent=None):
super(MainWidget, self).__init__(parent)
self.mainLayout = QVBoxLayout()
self.buttonLayout = QHBoxLayout()
self.barsLayout = QHBoxLayout()
self.inputLayout = QHBoxLayout()
## self.frequencyLayout = QHBoxLayout()
self.input1Layout = QVBoxLayout()
self.input2Layout = QVBoxLayout()
self.input3Layout = QVBoxLayout()
self.startButton = QPushButton("Start")
self.startButton.clicked.connect(self.start)
self.buttonLayout.addWidget(self.startButton)
self.stopButton = QPushButton("Stop")
self.stopButton.clicked.connect(self.stop)
self.buttonLayout.addWidget(self.stopButton)
self.resetButton = QPushButton("Reset")
self.resetButton.clicked.connect(self.reset)
self.buttonLayout.addWidget(self.resetButton)
self.minFreqInput = QLineEdit()
self.minFreqInput.setValidator(QIntValidator(1, 65535, self))
self.minFreqText = QLabel("Minimum Frequency")
self.input1Layout.addWidget(self.minFreqText)
self.input1Layout.addWidget(self.minFreqInput)
self.maxFreqInput = QLineEdit()
self.maxFreqInput.setValidator(QIntValidator(1, 65535, self))
self.maxFreqText = QLabel("Maximum Frequency")
self.input2Layout.addWidget(self.maxFreqText)
self.input2Layout.addWidget(self.maxFreqInput)
self.intervalInput = QLineEdit()
self.intervalInput.setValidator(QIntValidator(1, 65535, self))
self.intervalText = QLabel("Interval")
self.input3Layout.addWidget(self.intervalText)
self.input3Layout.addWidget(self.intervalInput)
self.inputLayout.addLayout(self.input1Layout)
self.inputLayout.addLayout(self.input2Layout)
self.inputLayout.addLayout(self.input3Layout)
## self.frequency = QLabel("0")
## f = QFont("Arial", 40)
## self.frequency.setFont( f)
## self.frequencyLayout.addWidget(self.frequency)
## self.frequencyLayout.setAlignment(Qt.AlignHCenter)
self.progressBar = QProgressBar()
self.progressBar.setTextVisible(False)
self.barsLayout.addWidget(self.progressBar)
self.mainLayout.addLayout(self.inputLayout)
self.mainLayout.addLayout(self.buttonLayout)
## self.mainLayout.addLayout(self.frequencyLayout)
self.mainLayout.addLayout(self.barsLayout)
self.setLayout(self.mainLayout)
self.setWindowTitle('Rope skipping Metronoom')
def start(self):
interval = self.intervalInput.text()
minFreq = self.minFreqInput.text()
maxFreq = self.maxFreqInput.text()
if interval == "":
interval = "5"
if minFreq != "" and maxFreq!="":
self.interval = int(interval)
self.maxFreq = int(maxFreq)
self.minFreq = int(minFreq)
self.timerThread = TimerThread(self.interval, self.minFreq, self.maxFreq)
self.timerThread.tick.connect(self.update)
self.timerThread.stopSignal.connect(self.stop)
self.timerThread.start()
self.beeperThread = Beeper(self.minFreq, self.freqChangeSignal, self.togglerSignal, self.interval)
self.beeperThread.start()
else:
QMessageBox.warning(self, "Input missing", "No frequency.", QMessageBox.Ok)
def update(self, currentFreq, updateFreq, percentage):
## if updateFreq:
## self.frequency.setText(str(round(currentFreq)))
self.progressBar.setValue(100*percentage)
self.freqChangeSignal.emit(currentFreq)
def stop(self):
self.timerThread.stop()
self.togglerSignal.emit()
def reset(self):
self.stop()
## self.frequency.setText("0")
self.progressBar.setValue(0)
class TimerThread(QThread):
tick = pyqtSignal(object, object, object)
stopSignal = pyqtSignal()
def __init__(self, interval, minFreq, maxFreq):
QThread.__init__(self)
self._isStopped = False
self.interval = interval
self.minFreq = minFreq
self.maxFreq = maxFreq
self.deltaFreq = 2 * (self.interval * (self.maxFreq - self.minFreq))/ EXERCISETIME
def run(self):
startTime = time.time()
currentTime = time.time()
currentFreq = self.minFreq
counter = 0
while counter <= EXERCISETIME/2:
counter += 1
if not self._isStopped:
currentFreq += self.deltaFreq/self.interval
updateFreq = counter%self.interval == 0
self.tick.emit(min(currentFreq,self.maxFreq), updateFreq, 2*counter/EXERCISETIME)
time.sleep(1)
while counter <= EXERCISETIME:
counter += 1
if not self._isStopped:
currentFreq -= self.deltaFreq/self.interval
updateFreq = counter%self.interval == 0
self.tick.emit(min(currentFreq,self.maxFreq), updateFreq, 2 - 2*counter/EXERCISETIME)
time.sleep(1)
self.stopSignal.emit()
def stop(self):
self._isStopped = True
class Beeper(QThread):
"""docstring for Beeper"""
def __init__(self, freq, freqChangesignal, togglerSignal, interval):
super(Beeper, self).__init__()
self.freq = freq
self.signal = freqChangesignal
self.signal.connect(self.setFreq)
self.timerToggle = togglerSignal
self.timerToggle.connect(self.toggle)
self.stop = False
        self.timeToSleep = 1/(self.freq/60)
        # Initialise newTimeToSleep so run() can read it even before the
        # first frequency update arrives via setFreq().
        self.newTimeToSleep = self.timeToSleep
        self.timeToSleepInc = self.timeToSleep/100
self.freqChange = False
self.interval = interval
def setFreq(self, newFreq):
self.freq = newFreq
self.newTimeToSleep = 1/(self.freq/60)
def run(self):
while True:
if not self.stop:
acc = self.timeToSleep
timeSlept = 0
self.playSound()
while timeSlept < acc:
minimum = min(self.interval,self.timeToSleep)
time.sleep(minimum)
timeSlept += minimum
acc = min(self.timeToSleep-minimum,self.newTimeToSleep)
self.timeToSleep = self.newTimeToSleep
# acc = self.timeToSleep
# print(self.freq)
# print(self.timeToSleep)
# print()
# for i in range(10):
# if self.freqChange:
# self.freqChange = False
# break
# else:
# time.sleep(self.timeToSleep/10)
def playSound(self):
winsound.PlaySound('Ticking-clock-sound.wav', winsound.SND_FILENAME)
def toggle(self):
self.stop = True
def get_elapsed(start):
return time.time() - start
def main():
app = QApplication(sys.argv)
w = MainWidget()
w.resize(450, 150)
w.move(300, 300)
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| mit | -8,393,405,739,307,940,000 | 30.702929 | 110 | 0.605649 | false |
note286/lan-encryption-file-transfer | recvUI.py | 1 | 4152 | from tkinter.filedialog import *
import lan_ip
import tkinter as tk
from tkinter import ttk
from fileTransfer import *
from receiver import *
import _thread
from custom_function import *
from tkinter import messagebox
class RecvPage(tk.Frame):
def __init__(self, parent, root):
super().__init__(parent)
def pickfile():
self.key_file.set(askopenfilename(initialdir = 'C:/Users/mcc'))
def set_save_path():
self.file_path.set(askdirectory(initialdir = 'C:/Users/mcc'))
def prepare_recv():
"""
            Validate the parameters and handle the transfer.
"""
keys = get_key(self.key_input.get())
ip = self.recevIP.get()
port = self.port.get()
file_path = self.file_path.get()
file_name = self.file_name.get()
            # If the save path is empty, do not prepend a slash
file = file_name if file_path=="" else file_path +'\\'+ file_name
encrypFlag = self.encrypFlag.get()
file = file.replace('/','\\')
            # Validate the IP address and port
if prepare(ip,port):
                # If the IP and port are valid, run the socket transfer and report the result based on the return value
if receiver(ip,int(port),file,encrypFlag,keys):
messagebox.showinfo(message='接收成功')
else :
messagebox.showinfo(message='接收失败')
else:
return
def input_key():
if self.encrypFlag.get() == '1':
key_entry['state'] = 'valid'
else:
key_entry['state'] ="readonly"
root.title("文件传输 接收端")
self.port = StringVar()
self.port.set(9995)
self.recevIP = StringVar()
self.encrypFlag = StringVar()
self.encrypFlag.set(0)
self.file_path = StringVar()
self.key_input = StringVar()
self.file_name = StringVar()
self.recevIP.set(lan_ip.get_lan_ip())
self.recvFlag = StringVar()
mainframe = self
        # Input fields
        # # Text label
ttk.Label(mainframe, text="接收方IP").grid( row=1,column=1, sticky=W)
        # # Frame name, style, display text
        recevIP_entry = ttk.Entry(self, width=10, textvariable=self.recevIP)
recevIP_entry.grid( row=1, column=2,sticky=(W, E))
ttk.Label(mainframe, text="端口").grid( row=1,column=3, sticky=W)
port_entry = ttk.Entry(mainframe, width=8, textvariable=self.port)
port_entry.grid( row=1, column=4,sticky=(W, E))
        # Buttons
ttk.Button(mainframe, text="选择保存路径", command=set_save_path).grid( row=2,column=1, sticky=W)
filename_entry = ttk.Entry(mainframe, width=25, textvariable=self.file_path)
filename_entry.grid(row=2, column=2,columnspan=3, sticky=(W, E))
ttk.Label(mainframe, text="保存为 文件名").grid(row=3,column=1, sticky=W)
filename_entry = ttk.Entry(mainframe, textvariable=self.file_name)
filename_entry.grid(row=3, column=2,columnspan=3, sticky=(W, E))
        # Radio buttons
R1 = ttk.Radiobutton(mainframe, text="不加密", variable=self.encrypFlag, value=0,command=input_key)
R2 = ttk.Radiobutton(mainframe, text="加密", variable=self.encrypFlag, value=1,command=input_key)
R1.grid(row = 4,column=1,sticky=(W, E))
R2.grid(row = 4,column=2,sticky=(W, E))
ttk.Label(mainframe, text="输入密钥").grid( row=5,column=1, sticky=W)
key_entry = ttk.Entry(mainframe, width=15, textvariable=self.key_input,state='readonly')
key_entry.grid( row=5, column=2,columnspan=3,sticky=(W, E))
ttk.Button(mainframe, text="开始接收",command=prepare_recv,).grid( row=6,column=2,columnspan=2, sticky=W)
button1 = ttk.Button(self, text="转到发送", command=lambda: root.show_frame_send()).grid( row=6,column=1, sticky=W)
for child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5) | mit | -3,757,951,539,771,199,500 | 39.553191 | 119 | 0.574027 | false |
scottw13/BET-1 | test/test_sensitivity/test_chooseQoIs.py | 1 | 12612 | # Copyright (C) 2014-2015 The BET Development Team
"""
This module contains tests for :module:`bet.sensitivity.chooseQoIs`.
Most of these tests should make sure certain values are within a tolerance
rather than exact due to machine precision.
"""
import unittest
import bet.sensitivity.gradients as grad
import bet.sensitivity.chooseQoIs as cQoIs
import numpy as np
import numpy.testing as nptest
from itertools import combinations
import sys
class ChooseQoIsMethods:
"""
Test :module:`bet.sensitivity.chooseQoIs`.
"""
def test_calculate_avg_condnum(self):
"""
Test :meth:`bet.sensitivity.chooseQoIs.calculate_avg_condnum`.
"""
self.qoi_set = range(0, self.Lambda_dim)
(self.condnum, self.singvals) = cQoIs.calculate_avg_condnum(self.G,
self.qoi_set)
# Check that condnum and singvals are the right size
self.assertEqual(isinstance(self.condnum, float), True)
self.assertEqual(self.singvals.shape, (self.num_centers,
self.Lambda_dim))
def test_calculate_avg_volume(self):
"""
Test :meth:`bet.sensitivity.chooseQoIs.calculate_avg_volume`.
"""
self.qoi_set = range(0, self.Lambda_dim)
(self.volume, self.singvals) = cQoIs.calculate_avg_volume(self.G,
self.qoi_set)
# Check that condnum and singvals are the right size
self.assertEqual(isinstance(self.volume, float), True)
self.assertEqual(self.singvals.shape, (self.num_centers,
self.Lambda_dim))
def test_chooseOptQoIs(self):
"""
Test :meth:`bet.sensitivity.chooseQoIs.chooseOptQoIs`.
"""
self.qoiIndices = range(0, self.num_qois)
self.condnum_indices_mat = cQoIs.chooseOptQoIs(self.G, self.qoiIndices,
self.num_qois_return, self.num_optsets_return)
self.condnum_indices_mat_vol = cQoIs.chooseOptQoIs(self.G,
self.qoiIndices, self.num_qois_return, self.num_optsets_return,
volume=True)
# Test the method returns the correct size array
self.assertEqual(self.condnum_indices_mat.shape,
(self.num_optsets_return, self.num_qois_return + 1))
self.assertEqual(self.condnum_indices_mat_vol.shape,
(self.num_optsets_return, self.num_qois_return + 1))
# Check that the 'global condition number' is greater than or equal to 1
nptest.assert_array_less(1.0, self.condnum_indices_mat[:, 0])
# For volume, check that it is greater than or equal to 0
nptest.assert_array_less(0.0, self.condnum_indices_mat_vol[:, 0])
# Test the method returns the known best set of QoIs (chosen to be
# last Lambda_dim indices)
nptest.assert_array_less(self.num_qois-self.Lambda_dim-1,
self.condnum_indices_mat[0, 1:])
nptest.assert_array_less(self.num_qois-self.Lambda_dim-1,
self.condnum_indices_mat_vol[0, 1:])
# Test that none of the best chosen QoIs are the same
self.assertEqual(len(np.unique(self.condnum_indices_mat[0, 1:])),
len(self.condnum_indices_mat[0, 1:]))
self.assertEqual(len(np.unique(self.condnum_indices_mat[0, 1:])),
len(self.condnum_indices_mat_vol[0, 1:]))
##########
# Test the method for a set of QoIs rather than all possible. Choose
# this set so that the optimal choice is not removed.
self.qoiIndices = np.concatenate([range(1, 3, 2),
range(4, self.num_qois)])
self.condnum_indices_mat = cQoIs.chooseOptQoIs(self.G, self.qoiIndices,
self.num_qois_return, self.num_optsets_return)
self.condnum_indices_mat_vol = cQoIs.chooseOptQoIs(self.G,
self.qoiIndices, self.num_qois_return, self.num_optsets_return,
volume=True)
# Test the method returns the correct number of qois
self.assertEqual(self.condnum_indices_mat.shape,
(self.num_optsets_return, self.num_qois_return + 1))
self.assertEqual(self.condnum_indices_mat_vol.shape,
(self.num_optsets_return, self.num_qois_return + 1))
        # Check that the 'global condition number' is greater than or equal
# to 1
nptest.assert_array_less(1.0, self.condnum_indices_mat[:, 0])
nptest.assert_array_less(0.0, self.condnum_indices_mat_vol[:, 0])
# Test the method returns the known best set of QoIs (chosen to be
# last Lambda_dim indices)
nptest.assert_array_less(self.num_qois-self.Lambda_dim-1,
self.condnum_indices_mat[0, 1:])
nptest.assert_array_less(self.num_qois-self.Lambda_dim-1,
self.condnum_indices_mat_vol[0, 1:])
# Test that none of the best chosen QoIs are the same
self.assertEqual(len(np.unique(self.condnum_indices_mat[0, 1:])),
len(self.condnum_indices_mat[0, 1:]))
self.assertEqual(len(np.unique(self.condnum_indices_mat[0, 1:])),
len(self.condnum_indices_mat_vol[0, 1:]))
def test_chooseOptQoIs_verbose(self):
"""
Test :meth:`bet.sensitivity.chooseQoIs.chooseOptQoIs_verbose`.
"""
self.qoiIndices = range(0, self.num_qois)
[self.condnum_indices_mat, self.optsingvals] = \
cQoIs.chooseOptQoIs_verbose(self.G, self.qoiIndices,
self.num_qois_return, self.num_optsets_return)
# Test that optsingvals is the right shape
self.assertEqual(self.optsingvals.shape, ((self.num_centers,
self.num_qois_return, self.num_optsets_return)))
def test_find_unique_vecs(self):
"""
Test :meth:`bet.sensitivity.chooseQoIs.find_unique_vecs`.
"""
self.qoiIndices = range(0, self.num_qois)
unique_indices = cQoIs.find_unique_vecs(self.G, self.inner_prod_tol,
self.qoiIndices)
# Test that pairwise inner products are <= inner_prod_tol
pairs = np.array(list(combinations(list(unique_indices), 2)))
for pair in range(pairs.shape[0]):
curr_set = pairs[pair]
curr_inner_prod = np.sum(self.G[:, curr_set[0], :] * self.G[:,
curr_set[1], :]) / self.G.shape[0]
nptest.assert_array_less(curr_inner_prod, self.inner_prod_tol)
def test_chooseOptQoIs_large(self):
"""
Test :meth:`bet.sensitivity.chooseQoIs.chooseOptQoIs_large`.
"""
self.qoiIndices = range(0, self.num_qois)
best_sets = cQoIs.chooseOptQoIs_large(self.G, qoiIndices=self.qoiIndices,
inner_prod_tol=self.inner_prod_tol, cond_tol=self.cond_tol)
if self.cond_tol == np.inf:
self.cond_tol = sys.float_info[0]
# Test that the best_sets have condition number less than the tolerance
for Ldim in range(self.Lambda_dim - 1):
inds = best_sets[Ldim][:, 0] != np.inf
nptest.assert_array_less(best_sets[Ldim][inds, 0], self.cond_tol)
def test_chooseOptQoIs_large_verbose(self):
"""
Test :meth:`bet.sensitivity.chooseQoIs.chooseOptQoIs_large_verbose`.
"""
self.qoiIndices = range(0, self.num_qois)
[best_sets, optsingvals_list] = cQoIs.chooseOptQoIs_large_verbose(self.G,
qoiIndices=self.qoiIndices, num_optsets_return=self.num_optsets_return,
inner_prod_tol=self.inner_prod_tol, cond_tol=self.cond_tol)
# Test that Lambda_dim - 1 optsingval tensors are returned
self.assertEqual(len(optsingvals_list), self.Lambda_dim - 1)
# Test that each tensor is the right shape
for i in range(self.Lambda_dim - 1):
self.assertEqual(optsingvals_list[i].shape, (self.num_centers,
i + 2, self.num_optsets_return))
class test_2to20_choose2(ChooseQoIsMethods, unittest.TestCase):
def setUp(self):
self.Lambda_dim = 2
self.num_qois_return = 2
self.num_optsets_return = 5
self.radius = 0.01
np.random.seed(0)
self.num_centers = 10
self.centers = np.random.random((self.num_centers, self.Lambda_dim))
self.samples = grad.sample_l1_ball(self.centers,
self.Lambda_dim + 1, self.radius)
self.num_qois = 20
coeffs = np.random.random((self.Lambda_dim,
self.num_qois-self.Lambda_dim))
self.coeffs = np.append(coeffs, np.eye(self.Lambda_dim), axis=1)
self.data = self.samples.dot(self.coeffs)
self.G = grad.calculate_gradients_rbf(self.samples, self.data,
self.centers)
self.inner_prod_tol = 1.0
self.cond_tol = 100.0
class test_4to20_choose4(ChooseQoIsMethods, unittest.TestCase):
def setUp(self):
self.Lambda_dim = 4
self.num_qois_return = 4
self.num_optsets_return = 5
self.radius = 0.01
np.random.seed(0)
self.num_centers = 100
self.centers = np.random.random((self.num_centers, self.Lambda_dim))
self.samples = grad.sample_l1_ball(self.centers,
self.Lambda_dim + 1, self.radius)
self.num_qois = 20
coeffs = np.random.random((self.Lambda_dim,
self.num_qois-self.Lambda_dim))
self.coeffs = np.append(coeffs, np.eye(self.Lambda_dim), axis=1)
self.data = self.samples.dot(self.coeffs)
self.G = grad.calculate_gradients_rbf(self.samples, self.data,
self.centers)
self.inner_prod_tol = 0.9
self.cond_tol = 20.0
class test_9to15_choose9(ChooseQoIsMethods, unittest.TestCase):
def setUp(self):
self.Lambda_dim = 9
self.num_qois_return = 9
self.num_optsets_return = 50
self.radius = 0.01
np.random.seed(0)
self.num_centers = 15
self.centers = np.random.random((self.num_centers, self.Lambda_dim))
self.samples = grad.sample_l1_ball(self.centers, self.Lambda_dim + \
1, self.radius)
self.num_qois = 15
coeffs = np.random.random((self.Lambda_dim,
self.num_qois - self.Lambda_dim))
self.coeffs = np.append(coeffs, np.eye(self.Lambda_dim), axis=1)
self.data = self.samples.dot(self.coeffs)
self.G = grad.calculate_gradients_rbf(self.samples, self.data,
self.centers)
self.inner_prod_tol = 0.8
self.cond_tol = 100.0
class test_9to15_choose4(ChooseQoIsMethods, unittest.TestCase):
def setUp(self):
self.Lambda_dim = 9
self.num_qois_return = 4
self.num_optsets_return = 1
self.radius = 0.01
np.random.seed(0)
self.num_centers = 11
self.centers = np.random.random((self.num_centers, self.Lambda_dim))
self.samples = grad.sample_l1_ball(self.centers,
self.Lambda_dim + 1, self.radius)
self.num_qois = 15
coeffs = np.random.random((self.Lambda_dim, self.num_qois - \
self.Lambda_dim))
self.coeffs = np.append(coeffs, np.eye(self.Lambda_dim), axis=1)
self.data = self.samples.dot(self.coeffs)
self.G = grad.calculate_gradients_rbf(self.samples, self.data,
self.centers)
self.inner_prod_tol = 0.9
self.cond_tol = 50.0
class test_2to28_choose2_zeros(ChooseQoIsMethods, unittest.TestCase):
def setUp(self):
self.Lambda_dim = 2
self.num_qois_return = 2
self.num_optsets_return = 5
self.radius = 0.01
np.random.seed(0)
self.num_centers = 10
self.centers = np.random.random((self.num_centers, self.Lambda_dim))
self.samples = grad.sample_l1_ball(self.centers,
self.Lambda_dim + 1, self.radius)
self.num_qois = 28
coeffs = np.zeros((self.Lambda_dim, 2*self.Lambda_dim))
coeffs = np.append(coeffs, np.random.random((self.Lambda_dim,
self.num_qois - 3 * self.Lambda_dim)), axis=1)
self.coeffs = np.append(coeffs, np.eye(self.Lambda_dim), axis=1)
self.data = self.samples.dot(self.coeffs)
self.G = grad.calculate_gradients_rbf(self.samples, self.data,
self.centers)
self.inner_prod_tol = 0.9
self.cond_tol = np.inf
| gpl-3.0 | 5,350,519,979,868,272,000 | 40.215686 | 83 | 0.601729 | false |
qsnake/gpaw | gpaw/test/parallel/ut_invops.py | 1 | 19638 | #!/usr/bin/env python
import sys
import numpy as np
try:
# Matplotlib is not a dependency
import matplotlib as mpl
mpl.use('Agg') # force the antigrain backend
except (ImportError, RuntimeError):
mpl = None
from ase.units import Bohr
from gpaw.mpi import world, distribute_cpus
from gpaw.utilities.tools import md5_array
from gpaw.utilities.gauss import gaussian_wave
from gpaw.band_descriptor import BandDescriptor
from gpaw.grid_descriptor import GridDescriptor
from gpaw.kpt_descriptor import KPointDescriptor
from gpaw.kohnsham_layouts import BandLayouts
from gpaw.parameters import InputParameters
from gpaw.xc import XC
from gpaw.setup import SetupData, Setups
from gpaw.wavefunctions.base import WaveFunctions
from gpaw.wavefunctions.fd import FDWaveFunctions
from gpaw.fd_operators import Laplace # required but not really used
from gpaw.pair_overlap import GridPairOverlap, ProjectorPairOverlap
# -------------------------------------------------------------------
from gpaw.test.ut_common import ase_svnversion, shapeopt, TestCase, \
TextTestRunner, CustomTextTestRunner, defaultTestLoader, \
initialTestLoader, create_random_atoms, create_parsize_minbands
# -------------------------------------------------------------------
p = InputParameters(spinpol=False)
xc = XC(p.xc)
p.setups = dict([(symbol, SetupData(symbol, xc.name)) for symbol in 'HO'])
class UTDomainParallelSetup(TestCase):
"""
Setup a simple domain parallel calculation."""
# Number of bands
nbands = 1
# Spin-paired, single kpoint
nspins = 1
nibzkpts = 1
# Mean spacing and number of grid points per axis (G x G x G)
h = 0.25 / Bohr
G = 48
# Type of boundary conditions employed
boundaries = None
def setUp(self):
for virtvar in ['boundaries']:
assert getattr(self,virtvar) is not None, 'Virtual "%s"!' % virtvar
parsize, parsize_bands = create_parsize_minbands(self.nbands, world.size)
assert self.nbands % np.prod(parsize_bands) == 0
domain_comm, kpt_comm, band_comm = distribute_cpus(parsize,
parsize_bands, self.nspins, self.nibzkpts)
# Set up band descriptor:
self.bd = BandDescriptor(self.nbands, band_comm)
# Set up grid descriptor:
res, ngpts = shapeopt(300, self.G**3, 3, 0.2)
cell_c = self.h * np.array(ngpts)
pbc_c = {'zero' : False, \
'periodic': True, \
'mixed' : (True, False, True)}[self.boundaries]
self.gd = GridDescriptor(ngpts, cell_c, pbc_c, domain_comm, parsize)
# What to do about kpoints?
self.kpt_comm = kpt_comm
def tearDown(self):
del self.bd, self.gd, self.kpt_comm
# =================================
def verify_comm_sizes(self):
if world.size == 1:
return
comm_sizes = tuple([comm.size for comm in [world, self.bd.comm, \
self.gd.comm, self.kpt_comm]])
self._parinfo = '%d world, %d band, %d domain, %d kpt' % comm_sizes
self.assertEqual(self.nbands % self.bd.comm.size, 0)
self.assertEqual((self.nspins*self.nibzkpts) % self.kpt_comm.size, 0)
class UTDomainParallelSetup_Zero(UTDomainParallelSetup):
__doc__ = UTDomainParallelSetup.__doc__
boundaries = 'zero'
class UTDomainParallelSetup_Periodic(UTDomainParallelSetup):
__doc__ = UTDomainParallelSetup.__doc__
boundaries = 'periodic'
class UTDomainParallelSetup_Mixed(UTDomainParallelSetup):
__doc__ = UTDomainParallelSetup.__doc__
boundaries = 'mixed'
# -------------------------------------------------------------------
# Helper functions/classes here
class FDWFS(FDWaveFunctions):
def __init__(self, gd, bd, kd, setups, dtype): # override constructor
assert kd.comm.size == 1
WaveFunctions.__init__(self, gd, 1, setups, bd, dtype, world,
kd, None)
self.kin = Laplace(gd, -0.5, dtype=dtype, allocate=False)
self.diagksl = None
self.orthoksl = BandLayouts(gd, bd, dtype)
self.initksl = None
self.overlap = None
self.rank_a = None
def allocate_arrays_for_projections(self, my_atom_indices): # no alloc
pass
def collect_projections(self, P_ani):
if self.gd.comm.size == 1 and self.bd.comm.size == 1:
return np.concatenate([P_ni.T for P_ni in P_ani.values()])
assert len(self.kpt_u) == 1
self.kpt_u[0].P_ani = P_ani
all_P_ni = WaveFunctions.collect_projections(self, 0, 0)
if self.world.rank == 0:
P_In = all_P_ni.T.copy()
else:
nproj = sum([setup.ni for setup in self.setups])
P_In = np.empty((nproj, self.nbands), self.pt.dtype)
self.world.broadcast(P_In, 0)
return P_In
# -------------------------------------------------------------------
class UTGaussianWavefunctionSetup(UTDomainParallelSetup):
__doc__ = UTDomainParallelSetup.__doc__ + """
The pseudo wavefunctions are moving gaussians centered around each atom."""
allocated = False
dtype = None
# Default arguments for scaled Gaussian wave
_sigma0 = 2.0 #0.75
_k0_c = 2*np.pi*np.array([1/5., 1/3., 0.])
def setUp(self):
UTDomainParallelSetup.setUp(self)
for virtvar in ['dtype']:
assert getattr(self,virtvar) is not None, 'Virtual "%s"!' % virtvar
# Create randomized atoms
self.atoms = create_random_atoms(self.gd, 5) # also tested: 10xNH3/BDA
# XXX DEBUG START
if False:
from ase import view
view(self.atoms*(1+2*self.gd.pbc_c))
# XXX DEBUG END
# Do we agree on the atomic positions?
pos_ac = self.atoms.get_positions()
pos_rac = np.empty((world.size,)+pos_ac.shape, pos_ac.dtype)
world.all_gather(pos_ac, pos_rac)
if (pos_rac-pos_rac[world.rank,...][np.newaxis,...]).any():
raise RuntimeError('Discrepancy in atomic positions detected.')
# Create setups for atoms
self.Z_a = self.atoms.get_atomic_numbers()
self.setups = Setups(self.Z_a, p.setups, p.basis,
p.lmax, xc)
# K-point descriptor
bzk_kc = np.array([[0, 0, 0]], dtype=float)
self.kd = KPointDescriptor(bzk_kc, 1)
self.kd.set_symmetry(self.atoms, self.setups, True)
self.kd.set_communicator(self.kpt_comm)
# Create gamma-point dummy wavefunctions
self.wfs = FDWFS(self.gd, self.bd, self.kd, self.setups,
self.dtype)
spos_ac = self.atoms.get_scaled_positions() % 1.0
self.wfs.set_positions(spos_ac)
self.pt = self.wfs.pt # XXX shortcut
        ## Also create pseudo partial waves
#from gpaw.lfc import LFC
#self.phit = LFC(self.gd, [setup.phit_j for setup in self.setups], \
# self.kpt_comm, dtype=self.dtype)
#self.phit.set_positions(spos_ac)
self.r_cG = None
self.buf_G = None
self.psit_nG = None
self.allocate()
def tearDown(self):
UTDomainParallelSetup.tearDown(self)
del self.r_cG, self.buf_G, self.psit_nG
del self.pt, self.setups, self.atoms
self.allocated = False
def allocate(self):
self.r_cG = self.gd.get_grid_point_coordinates()
cell_cv = self.atoms.get_cell() / Bohr
assert np.abs(cell_cv-self.gd.cell_cv).max() < 1e-9
center_c = 0.5*cell_cv.diagonal()
self.buf_G = self.gd.empty(dtype=self.dtype)
self.psit_nG = self.gd.empty(self.bd.mynbands, dtype=self.dtype)
for myn,psit_G in enumerate(self.psit_nG):
n = self.bd.global_index(myn)
psit_G[:] = self.get_scaled_gaussian_wave(center_c, scale=10+2j*n)
k_c = 2*np.pi*np.array([1/2., -1/7., 0.])
for pos_c in self.atoms.get_positions() / Bohr:
sigma = self._sigma0/(1+np.sum(pos_c**2))**0.5
psit_G += self.get_scaled_gaussian_wave(pos_c, sigma, k_c, n+5j)
self.allocated = True
def get_scaled_gaussian_wave(self, pos_c, sigma=None, k_c=None, scale=None):
if sigma is None:
sigma = self._sigma0
if k_c is None:
k_c = self._k0_c
if scale is None:
A = None
else:
# 4*pi*int(exp(-r^2/(2*w^2))^2*r^2, r=0...infinity)= w^3*pi^(3/2)
# = scale/A^2 -> A = scale*(sqrt(Pi)*w)^(-3/2) hence int -> scale^2
A = scale/(sigma*(np.pi)**0.5)**1.5
return gaussian_wave(self.r_cG, pos_c, sigma, k_c, A, self.dtype, self.buf_G)
def check_and_plot(self, P_ani, P0_ani, digits, keywords=''):
# Collapse into viewable matrices
P_In = self.wfs.collect_projections(P_ani)
P0_In = self.wfs.collect_projections(P0_ani)
# Construct fingerprint of input matrices for comparison
fingerprint = np.array([md5_array(P_In, numeric=True),
md5_array(P0_In, numeric=True)])
# Compare fingerprints across all processors
fingerprints = np.empty((world.size, 2), np.int64)
world.all_gather(fingerprint, fingerprints)
if fingerprints.ptp(0).any():
raise RuntimeError('Distributed matrices are not identical!')
# If assertion fails, catch temporarily while plotting, then re-raise
try:
self.assertAlmostEqual(np.abs(P_In-P0_In).max(), 0, digits)
except AssertionError:
if world.rank == 0 and mpl is not None:
from matplotlib.figure import Figure
fig = Figure()
ax = fig.add_axes([0.0, 0.1, 1.0, 0.83])
ax.set_title(self.__class__.__name__)
im = ax.imshow(np.abs(P_In-P0_In), interpolation='nearest')
fig.colorbar(im)
fig.text(0.5, 0.05, 'Keywords: ' + keywords, \
horizontalalignment='center', verticalalignment='top')
from matplotlib.backends.backend_agg import FigureCanvasAgg
img = 'ut_invops_%s_%s.png' % (self.__class__.__name__, \
'_'.join(keywords.split(',')))
FigureCanvasAgg(fig).print_figure(img.lower(), dpi=90)
raise
# =================================
def test_projection_linearity(self):
kpt = self.wfs.kpt_u[0]
Q_ani = self.pt.dict(self.bd.mynbands)
self.pt.integrate(self.psit_nG, Q_ani, q=kpt.q)
for Q_ni in Q_ani.values():
self.assertTrue(Q_ni.dtype == self.dtype)
P0_ani = dict([(a,Q_ni.copy()) for a,Q_ni in Q_ani.items()])
self.pt.add(self.psit_nG, Q_ani, q=kpt.q)
self.pt.integrate(self.psit_nG, P0_ani, q=kpt.q)
#rank_a = self.gd.get_ranks_from_positions(spos_ac)
#my_atom_indices = np.argwhere(self.gd.comm.rank == rank_a).ravel()
# ~ a ~ a'
#TODO XXX should fix PairOverlap-ish stuff for < p | phi > overlaps
# i i'
#spos_ac = self.pt.spos_ac # NewLFC doesn't have this
spos_ac = self.atoms.get_scaled_positions() % 1.0
gpo = GridPairOverlap(self.gd, self.setups)
B_aa = gpo.calculate_overlaps(spos_ac, self.pt)
# Compare fingerprints across all processors
fingerprint = np.array([md5_array(B_aa, numeric=True)])
fingerprints = np.empty(world.size, np.int64)
world.all_gather(fingerprint, fingerprints)
if fingerprints.ptp(0).any():
raise RuntimeError('Distributed matrices are not identical!')
P_ani = dict([(a,Q_ni.copy()) for a,Q_ni in Q_ani.items()])
for a1 in range(len(self.atoms)):
if a1 in P_ani.keys():
P_ni = P_ani[a1]
else:
# Atom a1 is not in domain so allocate a temporary buffer
P_ni = np.zeros((self.bd.mynbands,self.setups[a1].ni,),
dtype=self.dtype)
for a2, Q_ni in Q_ani.items():
# B_aa are the projector overlaps across atomic pairs
B_ii = gpo.extract_atomic_pair_matrix(B_aa, a1, a2)
P_ni += np.dot(Q_ni, B_ii.T) #sum over a2 and last i in B_ii
self.gd.comm.sum(P_ni)
self.check_and_plot(P_ani, P0_ani, 8, 'projection,linearity')
def test_extrapolate_overlap(self):
kpt = self.wfs.kpt_u[0]
ppo = ProjectorPairOverlap(self.wfs, self.atoms)
# Compare fingerprints across all processors
fingerprint = np.array([md5_array(ppo.B_aa, numeric=True)])
fingerprints = np.empty(world.size, np.int64)
world.all_gather(fingerprint, fingerprints)
if fingerprints.ptp(0).any():
raise RuntimeError('Distributed matrices are not identical!')
work_nG = np.empty_like(self.psit_nG)
P_ani = ppo.apply(self.psit_nG, work_nG, self.wfs, kpt, \
calculate_P_ani=True, extrapolate_P_ani=True)
P0_ani = self.pt.dict(self.bd.mynbands)
self.pt.integrate(work_nG, P0_ani, kpt.q)
del work_nG
self.check_and_plot(P_ani, P0_ani, 11, 'extrapolate,overlap')
def test_extrapolate_inverse(self):
kpt = self.wfs.kpt_u[0]
ppo = ProjectorPairOverlap(self.wfs, self.atoms)
# Compare fingerprints across all processors
fingerprint = np.array([md5_array(ppo.B_aa, numeric=True)])
fingerprints = np.empty(world.size, np.int64)
world.all_gather(fingerprint, fingerprints)
if fingerprints.ptp(0).any():
raise RuntimeError('Distributed matrices are not identical!')
work_nG = np.empty_like(self.psit_nG)
P_ani = ppo.apply_inverse(self.psit_nG, work_nG, self.wfs, kpt, \
calculate_P_ani=True, extrapolate_P_ani=True)
P0_ani = self.pt.dict(self.bd.mynbands)
self.pt.integrate(work_nG, P0_ani, kpt.q)
del work_nG
self.check_and_plot(P_ani, P0_ani, 11, 'extrapolate,inverse')
def test_overlap_inverse_after(self):
kpt = self.wfs.kpt_u[0]
kpt.P_ani = self.pt.dict(self.bd.mynbands)
ppo = ProjectorPairOverlap(self.wfs, self.atoms)
# Compare fingerprints across all processors
fingerprint = np.array([md5_array(ppo.B_aa, numeric=True)])
fingerprints = np.empty(world.size, np.int64)
world.all_gather(fingerprint, fingerprints)
if fingerprints.ptp(0).any():
raise RuntimeError('Distributed matrices are not identical!')
work_nG = np.empty_like(self.psit_nG)
self.pt.integrate(self.psit_nG, kpt.P_ani, kpt.q)
P0_ani = dict([(a,P_ni.copy()) for a,P_ni in kpt.P_ani.items()])
ppo.apply(self.psit_nG, work_nG, self.wfs, kpt, calculate_P_ani=False)
res_nG = np.empty_like(self.psit_nG)
ppo.apply_inverse(work_nG, res_nG, self.wfs, kpt, calculate_P_ani=True)
del work_nG
P_ani = self.pt.dict(self.bd.mynbands)
self.pt.integrate(res_nG, P_ani, kpt.q)
abserr = np.empty(1, dtype=float)
for n in range(self.nbands):
band_rank, myn = self.bd.who_has(n)
if band_rank == self.bd.comm.rank:
abserr[:] = np.abs(self.psit_nG[myn] - res_nG[myn]).max()
self.gd.comm.max(abserr)
self.bd.comm.broadcast(abserr, band_rank)
self.assertAlmostEqual(abserr.item(), 0, 10)
self.check_and_plot(P_ani, P0_ani, 10, 'overlap,inverse,after')
def test_overlap_inverse_before(self):
kpt = self.wfs.kpt_u[0]
kpt.P_ani = self.pt.dict(self.bd.mynbands)
ppo = ProjectorPairOverlap(self.wfs, self.atoms)
# Compare fingerprints across all processors
fingerprint = np.array([md5_array(ppo.B_aa, numeric=True)])
fingerprints = np.empty(world.size, np.int64)
world.all_gather(fingerprint, fingerprints)
if fingerprints.ptp(0).any():
raise RuntimeError('Distributed matrices are not identical!')
work_nG = np.empty_like(self.psit_nG)
self.pt.integrate(self.psit_nG, kpt.P_ani, kpt.q)
P0_ani = dict([(a,P_ni.copy()) for a,P_ni in kpt.P_ani.items()])
ppo.apply_inverse(self.psit_nG, work_nG, self.wfs, kpt, calculate_P_ani=False)
res_nG = np.empty_like(self.psit_nG)
ppo.apply(work_nG, res_nG, self.wfs, kpt, calculate_P_ani=True)
del work_nG
P_ani = self.pt.dict(self.bd.mynbands)
self.pt.integrate(res_nG, P_ani, kpt.q)
abserr = np.empty(1, dtype=float)
for n in range(self.nbands):
band_rank, myn = self.bd.who_has(n)
if band_rank == self.bd.comm.rank:
abserr[:] = np.abs(self.psit_nG[myn] - res_nG[myn]).max()
self.gd.comm.max(abserr)
self.bd.comm.broadcast(abserr, band_rank)
self.assertAlmostEqual(abserr.item(), 0, 10)
self.check_and_plot(P_ani, P0_ani, 10, 'overlap,inverse,before')
# -------------------------------------------------------------------
def UTGaussianWavefunctionFactory(boundaries, dtype):
sep = '_'
classname = 'UTGaussianWavefunctionSetup' \
+ sep + {'zero':'Zero', 'periodic':'Periodic', 'mixed':'Mixed'}[boundaries] \
+ sep + {float:'Float', complex:'Complex'}[dtype]
class MetaPrototype(UTGaussianWavefunctionSetup, object):
__doc__ = UTGaussianWavefunctionSetup.__doc__
boundaries = boundaries
dtype = dtype
MetaPrototype.__name__ = classname
return MetaPrototype
# -------------------------------------------------------------------
if __name__ in ['__main__', '__builtin__']:
# We may have been imported by test.py, if so we should redirect to logfile
if __name__ == '__builtin__':
testrunner = CustomTextTestRunner('ut_invops.log', verbosity=2)
else:
from gpaw.utilities import devnull
stream = (world.rank == 0) and sys.stdout or devnull
testrunner = TextTestRunner(stream=stream, verbosity=2)
parinfo = []
for test in [UTDomainParallelSetup_Zero, UTDomainParallelSetup_Periodic, \
UTDomainParallelSetup_Mixed]:
info = ['', test.__name__, test.__doc__.strip('\n'), '']
testsuite = initialTestLoader.loadTestsFromTestCase(test)
map(testrunner.stream.writeln, info)
testresult = testrunner.run(testsuite)
assert testresult.wasSuccessful(), 'Initial verification failed!'
parinfo.extend([' Parallelization options: %s' % tci._parinfo for \
tci in testsuite._tests if hasattr(tci, '_parinfo')])
parinfo = np.unique(np.sort(parinfo)).tolist()
testcases = []
for boundaries in ['zero', 'periodic', 'mixed']:
for dtype in [float, complex]:
testcases.append(UTGaussianWavefunctionFactory(boundaries, \
dtype))
for test in testcases:
info = ['', test.__name__, test.__doc__.strip('\n')] + parinfo + ['']
testsuite = defaultTestLoader.loadTestsFromTestCase(test)
map(testrunner.stream.writeln, info)
testresult = testrunner.run(testsuite)
# Provide feedback on failed tests if imported by test.py
if __name__ == '__builtin__' and not testresult.wasSuccessful():
raise SystemExit('Test failed. Check ut_invops.log for details.')
| gpl-3.0 | 4,405,885,119,348,233,700 | 38.197605 | 86 | 0.58453 | false |
mindsnacks/Zinc | tests/testZincModels.py | 1 | 12102 | import os.path
from zinc.models import *
from zinc.helpers import *
from tests import *
class ZincIndexTestCase(TempDirTestCase):
def test_versions_for_nonexistant_bundle(self):
index = ZincIndex()
self.assertTrue(len(index.versions_for_bundle("meep")) == 0)
def test_add_version_for_bundle(self):
index = ZincIndex()
index.add_version_for_bundle("meep", 1)
self.assertTrue(1 in index.versions_for_bundle("meep"))
def test_add_duplicate_version_for_bundle(self):
index = ZincIndex()
index.add_version_for_bundle("meep", 1)
self.assertRaises(ValueError, index.add_version_for_bundle, "meep", 1)
self.assertTrue(1 in index.versions_for_bundle("meep"))
self.assertTrue(len(index.versions_for_bundle("meep")) == 1)
def test_del_version_for_nonexistant_bundle(self):
index = ZincIndex()
self.assertRaises(Exception, index.delete_bundle_version, "meep", 1)
self.assertTrue(len(index.versions_for_bundle("meep")) == 0)
def test_del_version_for_bundle(self):
index = ZincIndex()
index.add_version_for_bundle("meep", 1)
index.add_version_for_bundle("meep", 2)
index.delete_bundle_version("meep", 1)
self.assertTrue(len(index.versions_for_bundle("meep")) == 1)
self.assertTrue(2 in index.versions_for_bundle("meep"))
def test_del_nonexistant_version_for_bundle(self):
index = ZincIndex()
index.add_version_for_bundle("meep", 1)
index.delete_bundle_version("meep", 2)
self.assertTrue(len(index.versions_for_bundle("meep")) == 1)
self.assertTrue(1 in index.versions_for_bundle("meep"))
def test_del_version_for_bundle_in_active_distro_raises(self):
index = ZincIndex()
index.add_version_for_bundle("meep", 1)
index.update_distribution("live", "meep", 1)
self.assertRaises(Exception, index.delete_bundle_version, "meep", 1)
def test_update_distro_bad_bundle(self):
index = ZincIndex()
self.assertRaises(ValueError, index.update_distribution, "live", "beep", 1)
def test_update_distro_bad_bundle_version(self):
index = ZincIndex()
index.add_version_for_bundle("beep", 1)
self.assertRaises(ValueError, index.update_distribution, "live", "beep", 2)
def test_update_distro_ok(self):
index = ZincIndex()
index.add_version_for_bundle("meep", 1)
index.update_distribution("live", "meep", 1)
def test_delete_distro_ok(self):
index = ZincIndex()
index.add_version_for_bundle("meep", 1)
index.update_distribution("live", "meep", 1)
self.assertTrue(len(index.distributions_for_bundle("meep")) == 1)
index.delete_distribution("live", "meep")
self.assertTrue(len(index.distributions_for_bundle("meep")) == 0)
def test_version_for_distro(self):
index = ZincIndex()
index.add_version_for_bundle("meep", 1)
index.update_distribution("live", "meep", 1)
self.assertEquals(index.version_for_bundle("meep", "live"), 1)
def test_distributions_for_bundle_by_version_unknown_bundle(self):
"""Tests that an exception is raised if called with an unknown bundle name"""
index = ZincIndex()
self.assertRaises(Exception, index.distributions_for_bundle_by_version, "meep")
def test_distributions_for_bundle_by_version_no_distros(self):
"""Tests that the result is empty if no distros exist"""
index = ZincIndex()
index.add_version_for_bundle("meep", 1)
distros = index.distributions_for_bundle_by_version("meep")
self.assertEquals(len(distros), 0)
def test_distributions_for_bundle_by_version_single_distro(self):
"""Tests that the result is correct if there is one distro associated
with the version."""
index = ZincIndex()
index.add_version_for_bundle("meep", 1)
index.update_distribution("master", "meep", 1)
distros = index.distributions_for_bundle_by_version("meep")
self.assertEquals(distros[1], ["master"])
def test_distributions_for_bundle_by_version_multiple_distros(self):
"""Tests that the result is correct if there is one distro associated
with the version."""
index = ZincIndex()
index.add_version_for_bundle("meep", 1)
index.update_distribution("master", "meep", 1)
index.update_distribution("test", "meep", 1)
distros = index.distributions_for_bundle_by_version("meep")
self.assertTrue("master" in distros[1])
self.assertTrue("test" in distros[1])
def test_next_version_for_bundle_from_old_index(self):
p = abs_path_for_fixture("index-pre-next_version.json")
index = ZincIndex.from_path(p)
bundle_name = "meep"
expected_next_version = 2
# ensure it returns the right value
next_version = index.next_version_for_bundle(bundle_name)
self.assertEquals(next_version, expected_next_version)
# ensure the 'next_version' key is written
next_version = index.to_dict()["bundles"][bundle_name]["next_version"]
self.assertEquals(next_version, expected_next_version)
def test_next_version_for_bundle_with_old_bad_key(self):
p = abs_path_for_fixture("index-with-bad-next-version.json")
index = ZincIndex.from_path(p)
bundle_name = "meep"
expected_next_version = 2
# ensure it returns the right value
next_version = index.next_version_for_bundle(bundle_name)
self.assertEquals(next_version, expected_next_version)
# ensure the 'next_version' key is written
next_version = index.to_dict()["bundles"][bundle_name]["next_version"]
self.assertEquals(next_version, expected_next_version)
# ensure the 'next-version' key is deleted
bad_key = index.to_dict()["bundles"][bundle_name].get('next-version')
self.assertTrue(bad_key is None)
def test_increment_next_version_for_bundle(self):
index = ZincIndex(mutable=True)
orig_version = index.next_version_for_bundle("meep")
index.increment_next_version_for_bundle("meep")
new_version = index.next_version_for_bundle("meep")
self.assertEquals(orig_version + 1, new_version)
def test_immutable(self):
index = ZincIndex(mutable=False)
self.assertFalse(index.is_mutable)
self.assertRaises(TypeError, index.add_version_for_bundle, "meep", 1)
self.assertRaises(TypeError, index.delete_bundle_version, "meep", 1)
self.assertRaises(TypeError, index.update_distribution, "master", "meep", 1)
self.assertRaises(TypeError, index.delete_distribution, "master", "meep")
self.assertRaises(TypeError, index.increment_next_version_for_bundle, "meep")
def test_immutable_from_dict(self):
index = ZincIndex(id='com.foo')
index.add_version_for_bundle("meep", 1)
d = index.to_dict()
immutable_index = ZincIndex.from_dict(d, mutable=False)
self.assertFalse(immutable_index.is_mutable)
class ZincFileListTestCase(unittest.TestCase):
def test_immutable(self):
filelist = ZincFileList(mutable=False)
self.assertFalse(filelist.is_mutable)
self.assertRaises(TypeError, filelist.add_file, "/tmp/foo", "123")
self.assertRaises(TypeError, filelist.add_flavor_for_file, "/tmp/foo",
"small")
def test_immutable_from_dict(self):
filelist = ZincFileList()
filelist.add_file('/tmp/foo', '123')
d = filelist.to_dict()
immutable_filelist = ZincFileList.from_dict(d, mutable=False)
self.assertFalse(immutable_filelist.is_mutable)
def test_get_path_for_file_that_does_not_exist(self):
filelist = ZincFileList()
self.assertTrue(filelist.sha_for_file('doesnotexist.toml') is None)
class ZincManifestTestCase(TempDirTestCase):
def test_save_and_load_with_files(self):
manifest1 = ZincManifest('com.mindsnacks.test', 'meep', 1)
manifest1.files = {
'a': {
'sha': 'ea502a7bbd407872e50b9328956277d0228272d4',
'formats': {
'raw': {
'size': 123
}
}
}
}
path = os.path.join(self.dir, "manifest.json")
manifest1.write(path)
manifest2 = ZincManifest.from_path(path)
self.assertEquals(manifest1, manifest2)
def test_save_and_load_with_flavors(self):
manifest1 = ZincManifest('com.mindsnacks.test', 'meep', 1)
manifest1._flavors = ['green']
path = os.path.join(self.dir, "manifest.json")
manifest1.write(path)
manifest2 = ZincManifest.from_path(path)
self.assertEquals(manifest1, manifest2)
def test_add_flavor(self):
manifest = ZincManifest('com.mindsnacks.test', 'meep', 1)
manifest.add_file('foo', 'ea502a7bbd407872e50b9328956277d0228272d4')
manifest.add_flavor_for_file('foo', 'green')
flavors = manifest.flavors_for_file('foo')
self.assertEquals(len(flavors), 1)
self.assertTrue('green' in flavors)
self.assertTrue('green' in manifest.flavors)
def test_flavors_are_added_from_files(self):
# 1) create a FileList object with flavors
filelist = ZincFileList()
filelist.add_file('foo', 'ea502a7bbd407872e50b9328956277d0228272d4')
filelist.add_flavor_for_file('foo', 'green')
# 2) manifest.files = (that FileList)
manifest = ZincManifest('com.mindsnacks.test', 'meep', 1)
manifest.files = filelist
# 3) self.assertTrue(flavor in manifest.flavors
self.assertTrue('green' in manifest.flavors)
def test_immutable(self):
manifest = ZincManifest('com.foo', 'stuff', 1, mutable=False)
# TODO: test files setter
self.assertRaises(TypeError, manifest.add_file, '/tmp/hi', '123')
self.assertRaises(TypeError, manifest.add_format_for_file, '/tmp/hi',
'gz', 123)
def test_immutable_from_dict(self):
manifest = ZincManifest('com.foo', 'stuff', 1)
d = manifest.to_dict()
immutable_manifest = ZincManifest.from_dict(d, mutable=False)
self.assertFalse(immutable_manifest.is_mutable)
class ZincFlavorSpecTestCase(unittest.TestCase):
def test_load_from_dict_1(self):
d = {'small': ['+ 50x50'], 'large': ['+ 100x100']}
spec = ZincFlavorSpec.from_dict(d)
self.assertTrue(spec is not None)
self.assertEquals(len(spec.flavors), 2)
def test_immutable(self):
spec = ZincFlavorSpec(mutable=False)
self.assertFalse(spec.is_mutable)
self.assertRaises(TypeError, spec.add_flavor, 'small', None)
# TODO: relocate?
class BundleDescriptorTestCase(unittest.TestCase):
def test_bundle_id_from_descriptor_without_flavor(self):
descriptor = 'com.foo.bar-1'
bundle_id = 'com.foo.bar'
self.assertEquals(bundle_id, bundle_id_from_bundle_descriptor(descriptor))
def test_bundle_id_from_descriptor_with_flavor(self):
descriptor = 'com.foo.bar-1~green'
bundle_id = 'com.foo.bar'
self.assertEquals(bundle_id, bundle_id_from_bundle_descriptor(descriptor))
def test_bundle_version_from_descriptor_without_flavor(self):
descriptor = 'com.foo.bar-1'
bundle_version = 1
self.assertEquals(bundle_version, bundle_version_from_bundle_descriptor(descriptor))
def test_bundle_version_from_descriptor_with_flavor(self):
descriptor = 'com.foo.bar-1~green'
bundle_version = 1
self.assertEquals(bundle_version, bundle_version_from_bundle_descriptor(descriptor))
def test_bundle_version_from_descriptor_with_flavor_with_dash(self):
descriptor = 'com.foo.bar-1~green-ish'
bundle_version = 1
self.assertEquals(bundle_version, bundle_version_from_bundle_descriptor(descriptor))
| mit | 810,171,094,399,158,000 | 40.023729 | 92 | 0.647248 | false |
NRGI/resource-contracts-etl | ocr-tests/ABBYY/AbbyyOnlineSdk.py | 1 | 3234 | #!/usr/bin/python
# Usage: process.py <input file> <output file> [-language <Language>] [-pdf|-txt|-rtf|-docx|-xml]

import argparse
import base64
import getopt
import MultipartPostHandler
import os
import re
import sys
import time
import urllib2
import urllib
import xml.dom.minidom


class ProcessingSettings:
    Language = "English"
    OutputFormat = "docx"


class Task:
    Status = "Unknown"
    Id = None
    DownloadUrl = None

    def IsActive( self ):
        if self.Status == "InProgress" or self.Status == "Queued":
            return True
        else:
            return False


class AbbyyOnlineSdk:
    ServerUrl = "http://cloud.ocrsdk.com/"
    # To create an application and obtain a password,
    # register at http://cloud.ocrsdk.com/Account/Register
    # More info on getting your application id and password at
    # http://ocrsdk.com/documentation/faq/#faq3
    ApplicationId = "rcdotorg"
    Password = os.environ.get('ABBYY_PASS')
    Proxy = None
    enableDebugging = 0

    def ProcessImage( self, filePath, settings ):
        urlParams = urllib.urlencode({
            "language" : settings.Language,
            "exportFormat" : settings.OutputFormat
        })
        requestUrl = self.ServerUrl + "processImage?" + urlParams
        bodyParams = { "file" : open( filePath, "rb" ) }
        request = urllib2.Request( requestUrl, None, self.buildAuthInfo() )
        response = self.getOpener().open(request, bodyParams).read()
        if response.find( '<Error>' ) != -1 :
            return None
        # Any response other than HTTP 200 means error - in this case exception will be thrown
        # parse response xml and extract task ID
        task = self.DecodeResponse( response )
        return task

    def GetTaskStatus( self, task ):
        urlParams = urllib.urlencode( { "taskId" : task.Id } )
        statusUrl = self.ServerUrl + "getTaskStatus?" + urlParams
        request = urllib2.Request( statusUrl, None, self.buildAuthInfo() )
        response = self.getOpener().open( request ).read()
        task = self.DecodeResponse( response )
        return task

    def DownloadResult( self, task, outputPath ):
        getResultParams = urllib.urlencode( { "taskId" : task.Id } )
        getResultUrl = self.ServerUrl + "getResult?" + getResultParams
        request = urllib2.Request( getResultUrl, None, self.buildAuthInfo() )
        fileResponse = self.getOpener().open( request ).read()
        resultFile = open( outputPath, "wb" )
        resultFile.write( fileResponse )

    def DecodeResponse( self, xmlResponse ):
        """ Decode xml response of the server. Return Task object """
        dom = xml.dom.minidom.parseString( xmlResponse )
        taskNode = dom.getElementsByTagName( "task" )[0]
        task = Task()
        task.Id = taskNode.getAttribute( "id" )
        task.Status = taskNode.getAttribute( "status" )
        if task.Status == "Completed":
            task.DownloadUrl = taskNode.getAttribute( "resultUrl" )
        return task

    def buildAuthInfo( self ):
        return { "Authorization" : "Basic %s" % base64.encodestring( "%s:%s" % (self.ApplicationId, self.Password) ) }

    def getOpener( self ):
        if self.Proxy == None:
            self.opener = urllib2.build_opener( MultipartPostHandler.MultipartPostHandler,
                                                urllib2.HTTPHandler(debuglevel=self.enableDebugging))
        else:
            self.opener = urllib2.build_opener(
                self.Proxy,
                MultipartPostHandler.MultipartPostHandler,
                urllib2.HTTPHandler(debuglevel=self.enableDebugging))
        return self.opener
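
# --- Illustrative usage sketch (not part of the original AbbyyOnlineSdk.py) ---
# The snippet only combines the methods defined above (ProcessImage,
# GetTaskStatus, DownloadResult); the file names and the polling interval are
# assumptions made for the example.
#
#   settings = ProcessingSettings()
#   settings.Language = "English"
#   settings.OutputFormat = "docx"
#   sdk = AbbyyOnlineSdk()
#   task = sdk.ProcessImage("contract_page.png", settings)
#   while task is not None and task.IsActive():
#       time.sleep(5)                      # the API is asynchronous, so poll
#       task = sdk.GetTaskStatus(task)
#   if task is not None and task.Status == "Completed":
#       sdk.DownloadResult(task, "contract_page.docx")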
| gpl-2.0 | -7,730,318,793,496,702,000 | 30.398058 | 112 | 0.717378 | false |
google/tink | python/tink/hybrid/_hybrid_wrapper.py | 1 | 4204 | # Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HybridDecrypt wrapper."""

from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function

from typing import Type

from absl import logging

from tink import core
from tink.hybrid import _hybrid_decrypt
from tink.hybrid import _hybrid_encrypt


class _WrappedHybridDecrypt(_hybrid_decrypt.HybridDecrypt):
  """Implements HybridDecrypt for a set of HybridDecrypt primitives."""

  def __init__(self, pset: core.PrimitiveSet):
    self._primitive_set = pset

  def decrypt(self, ciphertext: bytes, context_info: bytes) -> bytes:
    if len(ciphertext) > core.crypto_format.NON_RAW_PREFIX_SIZE:
      prefix = ciphertext[:core.crypto_format.NON_RAW_PREFIX_SIZE]
      ciphertext_no_prefix = ciphertext[core.crypto_format.NON_RAW_PREFIX_SIZE:]
      for entry in self._primitive_set.primitive_from_identifier(prefix):
        try:
          return entry.primitive.decrypt(ciphertext_no_prefix,
                                         context_info)
        except core.TinkError as e:
          logging.info(
              'ciphertext prefix matches a key, but cannot decrypt: %s', e)
    # Let's try all RAW keys.
    for entry in self._primitive_set.raw_primitives():
      try:
        return entry.primitive.decrypt(ciphertext, context_info)
      except core.TinkError as e:
        pass
    # nothing works.
    raise core.TinkError('Decryption failed.')


class HybridDecryptWrapper(core.PrimitiveWrapper[_hybrid_decrypt.HybridDecrypt,
                                                 _hybrid_decrypt.HybridDecrypt]
                          ):
  """HybridDecryptWrapper is the PrimitiveWrapper for HybridDecrypt.

  The returned primitive works with a keyset (rather than a single key). To
  decrypt, the primitive uses the prefix of the ciphertext to efficiently select
  the right key in the set. If the keys associated with the prefix do not work,
  the primitive tries all keys with OutputPrefixType RAW.
  """

  def wrap(self,
           pset: core.PrimitiveSet) -> _hybrid_decrypt.HybridDecrypt:
    return _WrappedHybridDecrypt(pset)

  def primitive_class(self) -> Type[_hybrid_decrypt.HybridDecrypt]:
    return _hybrid_decrypt.HybridDecrypt

  def input_primitive_class(self) -> Type[_hybrid_decrypt.HybridDecrypt]:
    return _hybrid_decrypt.HybridDecrypt


class _WrappedHybridEncrypt(_hybrid_encrypt.HybridEncrypt):
  """Implements HybridEncrypt for a set of HybridEncrypt primitives."""

  def __init__(self, pset: core.PrimitiveSet):
    self._primitive_set = pset

  def encrypt(self, plaintext: bytes, context_info: bytes) -> bytes:
    primary = self._primitive_set.primary()
    return primary.identifier + primary.primitive.encrypt(
        plaintext, context_info)


class HybridEncryptWrapper(core.PrimitiveWrapper[_hybrid_encrypt.HybridEncrypt,
                                                 _hybrid_encrypt.HybridEncrypt]
                          ):
  """HybridEncryptWrapper is the PrimitiveWrapper for HybridEncrypt.

  The returned primitive works with a keyset (rather than a single key). To
  encrypt a plaintext, it uses the primary key in the keyset, and prepends to
  the ciphertext a certain prefix associated with the primary key.
  """

  def wrap(self,
           pset: core.PrimitiveSet) -> _hybrid_encrypt.HybridEncrypt:
    return _WrappedHybridEncrypt(pset)

  def primitive_class(self) -> Type[_hybrid_encrypt.HybridEncrypt]:
    return _hybrid_encrypt.HybridEncrypt

  def input_primitive_class(self) -> Type[_hybrid_encrypt.HybridEncrypt]:
    return _hybrid_encrypt.HybridEncrypt
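
# --- Illustrative usage sketch (not part of the original Tink source) ---
# The wrappers above are registered by the hybrid package itself; application
# code normally goes through a keyset handle rather than instantiating them.
# The key-template constant below is an assumption made for the example.
#
#   import tink
#   from tink import hybrid
#
#   hybrid.register()
#   private_handle = tink.new_keyset_handle(
#       hybrid.hybrid_key_templates.ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM)
#   public_handle = private_handle.public_keyset_handle()
#
#   enc = public_handle.primitive(hybrid.HybridEncrypt)
#   ciphertext = enc.encrypt(b'secret', b'context')      # prefix of the primary key is prepended
#
#   dec = private_handle.primitive(hybrid.HybridDecrypt)
#   plaintext = dec.decrypt(ciphertext, b'context')      # key selected via the ciphertext prefix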
| apache-2.0 | -5,839,068,032,982,058,000 | 37.568807 | 80 | 0.705519 | false |
fintanr/hcup-stats | parse-hcup-results.py | 1 | 19302 | #!/usr/bin/python
#
# Extract data from the Wikipedia Heineken Cup Pool Stages
# and create a tidy data set for use in R
#
# A lot of this code handles corner cases and deals with variants
# in the wikipedia pages, and it hasn't been refactored
#

import re
import sys
import unicodedata
from urllib2 import urlopen
from bs4 import BeautifulSoup

infile = "input-pages.txt"
urlbase = "http://en.wikipedia.org/w/index.php?title"
urls = {}
ourData = []

headers = "season,poolId,matchDate,matchTime,homeTeam,awayTeam,matchReferee"
headers = "%s,matchAttendance,matchScore,homeTries,homePenaltyTries" % headers
headers = "%s,homeTriesTimes,homeConversions,awayTries,awayPenaltyTries" % headers
headers = "%s,awayTriesTimes,awayConversons,homePenalties,homePeanaltiesTimes,homeDrops" % headers
headers = "%s,homeDropTimes,awayPenalties,awayPenaltiesTimes" % headers
headers = "%s,awayDrops,awayDropsTimes" % headers
ourData.append(headers)


def getTries(inString):
    myString = unicodedata.normalize('NFKD', inString).encode('ascii', 'ignore')
    tries = re.split("'", myString)
    penaltyTryCount = 0
    theseTries = []
    for thistry in tries:
        thisTime = re.match("(.*)\s(\d+)", thistry)
        if ( thisTime ):
            theseTries.append(thisTime.group(2))
        penaltyMatch = re.match(".*penalty try.*", thistry, re.IGNORECASE)
        if ( penaltyMatch ):
            penaltyTryCount += 1
    return (penaltyTryCount, theseTries)


def getConversions(inString):
    myString = unicodedata.normalize('NFKD', inString).encode('ascii', 'ignore')
    # this is a little risky, but seems to get every case
    # there are a number of commented out values, but our regex elimiates
    # these
    cons = re.split("\)", myString)
    totalConversions = 0
    for con in cons:
        thisConCount = re.match(".*\[\[.*\|.*\]\]\s\((\d+)\/\d+", con)
        if ( thisConCount ):
            totalConversions += int(thisConCount.group(1))
    return totalConversions


def getPenOrDrop(inString):
    myString = unicodedata.normalize('NFKD', inString).encode('ascii', 'ignore')
    pens = re.split("'", myString)
    thesePenalties = []
    for pen in pens:
        penMatch = re.match(".*\s(\d+)(,|)", pen)
        if ( penMatch ):
            thesePenalties.append(penMatch.group(1))
    return thesePenalties


def getMatchScore(inString):
    myString = unicodedata.normalize('NFKD', inString).encode('ascii', 'ignore')
    matchScore = re.sub("–", "-", myString)
    return matchScore


def getReferee(inString):
    myString = unicodedata.normalize('NFKD', inString).encode('ascii', 'ignore')
    ref = "NA"
    # 2012/ 13 match
    refMatch = re.match(".*\[\[(.*)\]\](\s+|<).*", myString)
    if ( refMatch ):
        subTest = re.match(".*\|(.*)", refMatch.group(1))
        if ( subTest ):
            ref = subTest.group(1)
        else:
            ref = refMatch.group(1)
    else:
        # 2010 / 11 format, e.g.
        # John Lacey ([[Irish Rugby Football Union|Ireland]])
        refMatch = re.match("(.*)\s\(\[\[.*\]\]\)", myString)
        if ( refMatch ):
            ref = refMatch.group(1)
    return ref


def getTeamName(inString):
    myString = unicodedata.normalize('NFKD', inString).encode('ascii', 'ignore')
    # teamMatch has a couple of possible formats, work through them
    # all until we get the correct match and then extract the team name
    team = "Not Found"
    teamMatch = re.match(".*\[\[(.*)\]\].*", myString)
    if ( teamMatch ):
        filterMatch = re.match(".*\|(.*)", teamMatch.group(1))
        if ( filterMatch ):
            team = filterMatch.group(1)
        else:
            team = teamMatch.group(1)
    else:
        # 2010 / 11 formatting for team names
        teamMatch = re.match("\s+{{.*}}\s+(.*)", myString)
        if ( teamMatch ):
            team = teamMatch.group(1)
        else:
            teamMatch = re.match("(.*)\s{{.*}}", myString)
            if ( teamMatch ):
                team = teamMatch.group(1)
    # tidy up the whitespace around the name
    team = re.sub("^\s+","", re.sub("\s+$", "", team))
    return team


def buildTidyData(season, poolId, inData):
    matchDate = re.sub("\s+$", "", inData.get('date'))
    matchTime = re.sub("^\s+", "", re.sub("\s+$", "", inData.get('time')))
    matchScore = re.sub("–", "-", inData.get('score'))
    #matchScore = unicodedata.normalize('NFKD', matchScore).encode('utf8', 'ignore')
    #matchScore = getMatchScore(inData.get('score'))
    matchAttendance = inData.get('attendance')
    matchAttendance = re.sub(",", "", matchAttendance)
    homeTeam = getTeamName(inData.get('home'))
    awayTeam = getTeamName(inData.get('away'))
    matchReferee = getReferee(inData.get('referee'))
    # default scoring data
    homePenaltyTries = 0
    homeTries = []
    homeTriesTimes = ""
    homeConversions = 0
    awayPenaltyTries = 0
    awayTries = []
    awayTriesTimes = ""
    awayConversions = 0
    homePenalties = []
    homePenaltiesTimes = ""
    awayPenalties = []
    awayPenaltiesTimes = ""
    homeDrops = []
    homeDropsTimes = ""
    awayDrops = []
    awayDropsTimes = ""
    if 'try1' in inData.keys():
        (homePenaltyTries, homeTries) = getTries(inData.get('try1'))
        homeTriesTimes = extractTimes(homeTries)
    if 'con1' in inData.keys():
        homeConversions = getConversions(inData.get('con1'))
    if 'try2' in inData.keys():
        (awayPenaltyTries, awayTries) = getTries(inData.get('try2'))
        awayTriesTimes = extractTimes(awayTries)
    if 'con2' in inData.keys():
        awayConversions = getConversions(inData.get('con2'))
    if 'pen1' in inData.keys():
        homePenalties = getPenOrDrop(inData.get('pen1'))
        homePenaltiesTimes = extractTimes(homePenalties)
    if 'pen2' in inData.keys():
        awayPenalties = getPenOrDrop(inData.get('pen2'))
        awayPenaltiesTimes = extractTimes(awayPenalties)
    if 'drop1' in inData.keys():
        homeDrops = getPenOrDrop(inData.get('drop1'))
        homeDropsTimes = extractTimes(homeDrops)
    if 'drop2' in inData.keys():
        awayDrops = getPenOrDrop(inData.get('drop2'))
        awayDropsTimes = extractTimes(awayDrops)
    part1 = "%s,%s,%s,%s" % (season.decode('utf-8'), poolId, matchDate, matchTime )
    part2 = "%s,%s,%s,%s,%s" % ( homeTeam, awayTeam, matchReferee, matchAttendance, matchScore)
    part3 = "%s,%s,%s,%s" % ( len(homeTries), homePenaltyTries, homeTriesTimes, homeConversions)
    part4 = "%s,%s,%s,%s" % ( len(awayTries), awayPenaltyTries, awayTriesTimes, awayConversions)
    part5 = "%s,%s,%s,%s" % ( len(homePenalties), homePenaltiesTimes, len(homeDrops), homeDropsTimes)
    part6 = "%s,%s,%s,%s" % ( len(awayPenalties), awayPenaltiesTimes, len(awayDrops), awayDropsTimes)
    outString = "%s,%s,%s,%s,%s,%s" % ( part1, part2, part3, part4, part5, part6)
    ourData.append(outString)


def loadUrls(inFile):
    for s in (line.strip() for line in open(inFile)):
        thisUrl = s.split('/', 4)[4]
        season = thisUrl.split('_',4)[0]
        # okay this is a horrible hack, if we use urllib2 unquote
        # we end up with a "long" str so just sub out the values
        # instead
        season = re.sub("%E2%80%93","-", season)
        fullUrl = "%s=%s&action=edit" % (urlbase, thisUrl)
        urls[season] = fullUrl
    return urls


def extractTimes(timeList):
    pipedTimes = ""
    for j in timeList:
        pipedTimes = "%s|%s" % ( pipedTimes, j)
    pipedTimes = re.sub("^\|","", pipedTimes)
    return(pipedTimes)
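
# Example (sketch, not in the original script): extractTimes joins the minute
# marks collected by getTries/getPenOrDrop into one pipe-separated field, which
# is what the *TriesTimes / *PenaltiesTimes / *DropsTimes CSV columns hold:
#
#   extractTimes(['12', '55', '78'])   # -> '12|55|78'
#   extractTimes([])                   # -> ''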
def extractSeasonData(urls):
    for season, url in urls.iteritems():
        print "Extracting Data for Season: %s:" % season
        u = urlopen(url)
        r = u.read()
        soup = BeautifulSoup(r)
        textarea = soup.find_all('textarea')[0].contents
        splitarea = re.split("\n",textarea[0])
        text = iter(splitarea)
        # this is rather horrible, but depending on the season we need to parse
        # the data in different ways... so...
        if ( season in ['2006-07', '2007-08', '2008-09', '2009-10']):
            parseZeroNineSeasonData(season, text)
        else:
            parseSeasonData(season, text)


# we need to add functions for 2006-07 and 2007-08
# 2005 - 06 is missing too much data to be useful
def parseZeroNineSeasonData(season, text):
    gameCounter = 0
    for line in text:
        pool = re.match("==(=|)Pool\s+(\d+)(|=)==", line)
        if ( pool ):
            while ( gameCounter < 12 ):
                poolId = pool.group(2)
                line = next(text)
                foundMatch = re.match("\{\{rugbybox(|\s+\|)", line)
                localData = {}
                while ( foundMatch ):
                    line = next(text)
                    # and another horrible hack, if a line starts with a <!-
                    # skip it and go to the next
                    if ( re.match("<!-", line ) ):
                        line = next(text)
                    # in the 09 - 10 season lines end with referee = <blah> }}
                    foundEnd = re.match("(\||)referee(\s+|)=(.*)(}}|)", line)
                    if ( foundEnd ):
                        foundMatch = None
                        refBasic = foundEnd.group(3)
                        localData['referee'] = refBasic
                        buildTidyData(season, poolId, localData)
                        gameCounter += 1
                    else:
                        # we have some blank referee values, we need to deal
                        # with these
                        # add these values into our structure
                        # we take the re.split as a list and do some processing
                        # here for corner casee
                        myList = re.split("=", line)
                        if ( len(myList) > 2 ):
                            # we have gotten one of these odd elments with
                            # extra strings after the date
                            myTmp = re.split("<", myList[1])
                            thisKey = myList[0]
                            thisVal = myTmp[0]
                        else:
                            thisKey = myList[0]
                            if ( len(myList) < 2 ):
                                thisVal = "NA"
                            else:
                                thisVal = myList[1]
                        thisValCheck = re.match("(.*)\s+\|", thisVal)
                        if ( thisValCheck ):
                            thisVal = thisValCheck.group(1)
                        # homescore and awayscore are all one bundle in some of the
                        # earlier pages, so we need to split them out
                        thisKey = re.match("(\||)(\s+|)(.*)(\s+|)(\||)", thisKey)
                        thisKey = re.sub("\s+", "", thisKey.group(3))
                        if ( ( thisKey == 'homescore' ) or ( thisKey == 'awayscore' ) ):
                            ( keySuffix, tries, conversions, penalties,
                              dropgoals ) = parseZeroNineScores(thisKey, thisVal)
                            tryName = "try%s" % keySuffix
                            conName = "con%s" % keySuffix
                            penName = "pen%s" % keySuffix
                            dropName = "drop%s" % keySuffix
                            if ( tries is not None ):
                                localData[tryName] = tries
                            if ( conversions is not None ):
                                localData[conName] = conversions
                            if ( penalties is not None ):
                                localData[penName] = penalties
                            if ( dropgoals is not None ):
                                localData[dropName] = dropgoals
                        else:
                            if ( thisKey == "date" ):
                                thisDateCheck = re.match("(.*)<br />(.*)", thisVal)
                                if ( thisDateCheck ):
                                    thisVal = thisDateCheck.group(1)
                                    localData['time'] = thisDateCheck.group(2)
                            if ( thisKey == "score"):
                                thisVal = unicodedata.normalize('NFKD', thisVal).encode('utf8')
                                thisVal = re.sub("–", "-", thisVal)
                                thisScoreSplit = re.match("(\s+|)(\d+)(\s+|)(-|\\xe2\\x80\\x93)(\s+|)(\d+)(\s+|)",
                                                          thisVal)
                                thisVal = "%s-%s" % (thisScoreSplit.group(2), thisScoreSplit.group(6))
                            localData[thisKey] = thisVal
            gameCounter = 0
            pool = None


def parseZeroNineScores(key, val):
    # okay so these strings are bit all over the place, we need to
    # firstly see if we tries in the string, if we do, lets try to
    if ( key == 'homescore' ):
        keySuffix = "1"
    else:
        keySuffix = "2"
    tryName = "try%s" % keySuffix
    conName = "con%s" % keySuffix
    penName = "pen%s" % keySuffix
    dropName = "drop%s" % keySuffix
    triesString = None
    penaltiesString = None
    conversionsString = None
    dropGoalsString = None
    # this is absolutely horrible, but it allows us to carve up
    # the away and home scores details
    # clear out the trailing | for the 07-08 season
    val = re.sub("\|$", "", val)
    tries = re.match("(\s+|)'''(Tries|Try):'''(.*)", val)
    if ( tries ):
        # next see if we there were any conversions, if so extract those
        # of course there is another exception here, so lets try a few
        # combinations
        conversions = re.match("(.*)'''Con:'''(.*)", tries.group(3))
        if ( conversions ):
            # split out penalties, and then drop goals
            triesString = conversions.group(1)
            penalties = re.match("(.*)'''Pen:'''(.*)", conversions.group(2))
            if ( penalties ):
                # final check for drop goals
                conversionsString = penalties.group(1)
                dropgoals = re.match("(.*)'''Drop:'''(.*)", penalties.group(2))
                if ( dropgoals ):
                    penaltiesString = dropgoals.group(1)
                    dropGoalsString = dropgoals.group(2)
                else:
                    penaltiesString = penalties.group(2)
        else:
            penalties = re.match("(.*)'''Pen:'''(.*)", tries.group(3))
            if ( penalties ):
                triesString = penalties.group(1)
                dropgoals = re.match("(.*)'''Drop:'''(.*)", penalties.group(2))
                if ( dropgoals ):
                    penaltiesString = dropgoals.group(1)
                    dropGoalsString = dropgoals.group(2)
                else:
                    penaltiesString = penalties.group(2)
            else:
                triesString = tries.group(2)
    else:
        # check for penalties, drop goals and so forth
        penalties = re.match("(\s+|)'''Pen:'''(.*)", val)
        if ( penalties ):
            # check for drop goals
            dropgoals = re.match("(.*)'''Drop:'''(.*)", penalties.group(2))
            if ( dropgoals ):
                penaltiesString = dropgoals.group(1)
                dropGoalsString = dropgoals.group(2)
            else:
                penaltiesString = penalties.group(1)
        else:
            # check for drop goals (and then penalties, just in case
            dropgoals = re.match("(\s+|)'''Drop:'''(.*)", val)
            if ( dropgoals ):
                penalties = re.match("(.*)'''Pen:'''(.*)", val)
                if ( penalties ):
                    dropGoalsString = penalties.group(1)
                    penaltiesString = penalties.group(2)
                else:
                    dropGoalsString = dropgoals.group(1)
    return(keySuffix, triesString, conversionsString, penaltiesString,
           dropGoalsString)


def parseSeasonData(season, text):
    gameCounter = 0
    for line in text:
        pool = re.match("===Pool\s+(\d+)===", line)
        if ( pool ):
            # okay we have a pool, so we a pool, we have 12 games too
            # extract data about
            while ( gameCounter < 12 ):
                poolId = pool.group(1)
                line = next(text)
                foundMatch = re.match("\{\{rugbybox", line)
                localData = {}
                while ( foundMatch ):
                    line = next(text)
                    # okay we now need to extract out each line, until we hit a }}
                    foundEnd = re.match("\}\}", line)
                    if ( foundEnd ):
                        foundMatch = None
                        buildTidyData(season, poolId, localData)
                        gameCounter += 1
                    else:
                        # add these values into our structure
                        # we take the re.split as a list and do some processing
                        # here for corner casee
                        myList = re.split("=", line)
                        if ( len(myList) > 2 ):
                            # we have gotten one of these odd elments with
                            # extra strings after the date
                            myTmp = re.split("<", myList[1])
                            thisKey = myList[0]
                            thisVal = myTmp[0]
                        else:
                            thisKey = myList[0]
                            thisVal = myList[1]
                        thisKey = re.match("^(\||\s+\|)(.*)\s+", thisKey)
                        thisKey = re.sub("\s+", "", thisKey.group(2))
                        # some years don't have a time aspect, its included
                        # in the date... .
                        if ( thisKey == "date" ):
                            thisDateCheck = re.match("(.*)<br />(.*)", thisVal)
                            if ( thisDateCheck ):
                                thisVal = thisDateCheck.group(1)
                                localData['time'] = thisDateCheck.group(2)
                        # scores are in a few different formats, and they get
                        # really messed up in unicode and are unusable in R
                        # we do some procesing here to avoid this
                        #
                        # to be clear this is a horrible hack...
                        #
                        if ( thisKey == "score"):
                            thisVal = unicodedata.normalize('NFKD', thisVal).encode('utf8')
                            thisVal = re.sub("–", "-", thisVal)
                            thisScoreSplit = re.match("(\s+|)(\d+)(\s+|)(-|\\xe2\\x80\\x93)(\s+|)(\d+)(\s+|)",
                                                      thisVal)
                            thisVal = "%s-%s" % (thisScoreSplit.group(2), thisScoreSplit.group(6))
                        localData[thisKey] = thisVal
            gameCounter = 0
            pool = None


urls = loadUrls(infile)
extractSeasonData(urls)

f = open("tidydata.csv", "w")
for line in ourData:
    print >>f, line.encode('utf8')
f.close()
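
# --- Illustrative run sketch (not part of the original script) ---
# The script reads Wikipedia edit-page URLs from input-pages.txt (one season
# per line), scrapes the rugbybox templates for each pool stage and writes one
# CSV row per match to tidydata.csv. A run would look roughly like:
#
#   $ python parse-hcup-results.py
#   Extracting Data for Season: 2012-13:
#   ...
#   $ head -1 tidydata.csv
#   season,poolId,matchDate,matchTime,homeTeam,awayTeam,matchReferee,...
#
# (the season names and row contents depend entirely on the pages listed in
# input-pages.txt; the lines above are only an example of the shape of the output)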
| apache-2.0 | 4,643,953,281,704,777,000 | 37.604 | 111 | 0.510258 | false |
Emmunaf/WarmaneAutovote | Mlogin.py | 1 | 6146 | #!/usr/bin/python
# Thanks: Glusk for the GREAT help
#         karapidiola for the base script

from socket import *
import hashlib

try:
    import _srp as srp
except:
    print("Need py_srp")
    exit(1)


def generate_K(S):
    """Generate K from S with SHA1 Interleaved"""
    s_bytes = srp.long_to_bytes(S)
    # Hash the odd bytes of S (session key)
    hash_object = hashlib.sha1(s_bytes[::2])
    odd_hashed = hash_object.digest()
    # Hash the even bytes of S
    hash_object = hashlib.sha1(s_bytes[1::2])
    even_hashed = hash_object.digest()
    K = ""
    # Create K as alternate string concatenation
    for o, e in zip(odd_hashed, even_hashed):
        K += o + e  # K = odd[0],even[0],odd[1],..
    return K
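
# Example (sketch, not part of the original script): the interleave always
# combines two 20-byte SHA-1 digests, so K is a 40-byte session key whatever
# the length of S (the value of S below is an arbitrary placeholder).
#
#   K = generate_K(0x1234567890ABCDEF)
#   assert len(K) == 40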

class Mpacket:

    def hex_print(self, data):
        b = ""
        for i in range(0, len(data)):
            b += "%02x" % ord(data[i])
        return b

    def alchallenge_packet(self, username):
        packet = "\x00"  # Opcode (Auth Logon Challenge)
        packet += "\x08"  # (Error) from wireshark
        packet += chr(30 + len(username))
        packet += "\x00\x57\x6f\x57\x00"  # Game name: <WoW>
        packet += "\x03\x03\x05"  # Version[1,2,3]: <335>
        packet += "\x34\x30"  # Build: <12340>
        packet += "\x36\x38\x78\x00"  # Platform: <x86>
        packet += "\x6e\x69\x57\x00"  # O.S. : <Win>
        packet += "\x53\x55\x6e\x65"  # Country: <enUS>
        packet += "\x3c\x00\x00\x00"  # Timezone bias: <60>
        packet += "\xc0\xa8\x01\x02"  # IP address: <192.168.1.2> #?? Need real local one, or is it the same?
        packet += chr(len(username))  # SRP I length
        packet += username.upper()  # SRP I value
        return packet

    def alproof_packet(self, M1, A):
        packet = "\x01"  # Opcode (Auth Logon Proof)
        # For CRC don't need real value (annoying, sha1 files)
        CRC = "\xa4\x1f\xd3\xe0\x1f\x72\x40\x46\xa7\xd2\xe7\x44\x9e\x1d\x36\xcf\xaf\x72\xa3\x3a"
        NULL_PAD = "\x00\x00"
        A = srp.long_to_bytes(long(A))
        print "------------------------------------------------------------------------------"
        for i in range(0, 32):
            packet += A[i]
        for i in range(0, 20):
            packet += M1[i]
        packet += CRC
        packet += NULL_PAD
        return packet

    def decode_packet(self, data):
        opcodes = [("AUTH_LOGON_CHALLENGE", "\x00"), ("AUTH_LOGON_PROOF", "\x01")]
        srp_vals = []
        opcode = data[0]
        for p in opcodes:
            if opcode == p[1]:
                error = data[1]
                srp_vals.append(data[3:35])  # B, skip 1 field (Length_g)
                srp_vals.append(data[36:37])  # g, skip 1 field (Length_n)
                srp_vals.append(data[38:38 + 32])  # n
                srp_vals.append(data[38 + 32:38 + (32 * 2)])  # s [salt]
                srp_vals.append(data[38 + (32 * 2):len(data) - 1])  # CRC
                print p[0] + " with error :" + hex(ord(error))
                print "SRP B :" + self.hex_print(srp_vals[0]) + " " + str(len(srp_vals[0]))
                print "SRP g :" + self.hex_print(srp_vals[1]) + " " + str(len(srp_vals[1]))
                print "SRP N :" + self.hex_print(srp_vals[2]) + " " + str(len(srp_vals[2]))
                print "SRP s :" + self.hex_print(srp_vals[3]) + " " + str(len(srp_vals[3]))
                print "CRC :" + self.hex_print(srp_vals[4]) + " " + str(len(srp_vals[4]))
                print srp_vals
                return srp_vals
            if opcode == p[2]:
                print "We got it!"


X = Mpacket()

# Server data
host = "54.213.244.47"
port = 3724

# Login data (alexlorens, lolloasd) is a testing account
user = "alexlorens".upper()
password = "lolloasd".upper()

sck = socket(AF_INET, SOCK_STREAM)
sck.connect((host, port))

n_make = ""
b_make = ""
s_make = ""

sck.send(X.alchallenge_packet(user))  # Send Auth Logon Challenge
SRP_ARRAY = X.decode_packet(sck.recv(1024))  # Read SRP value for sending Logon Proof

############################################################################
g = srp.bytes_to_long(SRP_ARRAY[1])
N = srp.bytes_to_long(SRP_ARRAY[2])
hash_class = srp._hash_map[srp.SHA1]  # Using sha1 hashing for everything except K (Sha1-Interleaved)
k = 3  # SRP-6
I = user
p = password

# Generate A
a = srp.get_random(32)
A = srp.reverse(pow(srp.reverse(g), srp.reverse(a), srp.reverse(N)))  # Big endian
#
## PRINT TEST1
print("Computing A")
print ('a:', a)
print ('g:', SRP_ARRAY[1])
print ('N:', SRP_ARRAY[2])
print ('A:', A)
##END PRINT TEST 1

v = None
M = None
K = None
H_AMK = None
s = srp.bytes_to_long(SRP_ARRAY[3])
B = srp.bytes_to_long(SRP_ARRAY[0])
#print('B: ->', B)
#print('B: [bytes_to_long] ->',srp.bytes_to_long(SRP_ARRAY[0]))
#print('B: [reversed, used for calc] ->',srp.reverse(B))

if (B % N) == 0:
    print "Error"

u = srp.H(hash_class, A, B)
x = srp.gen_x(hash_class, s, I, p)
v = srp.reverse(pow(srp.reverse(g), srp.reverse(x), srp.reverse(N)))  # Big endian
S = srp.reverse(pow((srp.reverse(B) - srp.reverse(k) * srp.reverse(v)),
                    srp.reverse(a) + srp.reverse(u) * srp.reverse(x), srp.reverse(N)))  # Big endian

## PRINT TEST3
print "--------------####-----------------------"
print("Useful values")
print ('N:', SRP_ARRAY[2])
print ('g:', SRP_ARRAY[1])
print ('I:', I)
print ('p:', p)
print ('s:', SRP_ARRAY[3])
print ('B:', SRP_ARRAY[0])
print ('[a]:', srp.long_to_bytes(a))
print "---------------####----------------------"
##END PRINT TEST 3

## PRINT TEST2
print "----------------------------------------"
print("Computing u, x, S")
print ('u:', u)
print ('x:', x)
print ('v:', v)
print ('S:', S)
print "----------------------------------------"
##END PRINT TEST 2

K = generate_K(S)
print ('K:', K)
M = srp.calculate_M(hash_class, N, g, I, s, A, B, K)
print ('M:', M)
############################################################################

sck.send(X.alproof_packet(M, A))
sck.recv(1024)  # REALM_AUTH_NO_MATCH...:(
sck.send("\x10\x00\x00\x00\x00")
print sck.recv(1024)
# x.RecvedData(sck.recv(1024))

'''Note:
    Use little endian for hashing,
    Big endian while doing math:
    (*,+,^,ModPow,...)
'''
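
# Byte-order sketch (illustration, not part of the original script): the note
# above is what the srp.reverse() calls throughout this file implement - hash
# inputs stay as little-endian byte strings, while the modular arithmetic is
# done on the big-endian interpretation and flipped back, e.g.
#
#   A = srp.reverse(pow(srp.reverse(g), srp.reverse(a), srp.reverse(N)))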
| mit | -2,810,643,457,059,818,500 | 31.347368 | 109 | 0.522291 | false |
chrisbro-MSFT/Office-Online-Test-Tools-and-Documentation | docs/native/src/conf.py | 1 | 1133 | # -*- coding: utf-8 -*-
from path import Path

# Load shared config file
execfile(Path('../../_shared/conf.py').abspath())

# -- General configuration -----------------------------------------------------

project = u'Office Native WOPI Integration Documentation'

# Configure sphinx.ext.intersphinx
# noinspection PyUnresolvedReferences
intersphinx_mapping = {
    'wopirest':
        ('https://wopi.readthedocs.io/projects/wopirest/en/latest/',
         (
             # Try to load from the locally built docs first
             (rest_doc_path / local_object_inventory_path).normpath(),
             # Fall back to loading from the built docs on readthedocs
             'https://wopirest.readthedocs.io/' + rtd_object_inventory_path
         )),
    'officeonline':
        ('https://wopi.readthedocs.io/en/latest/',
         (
             # Try to load from the locally built docs first
             (officeonline_doc_path / local_object_inventory_path).normpath(),
             # Fall back to loading from the built docs on readthedocs
             'https://wopi.readthedocs.io/' + rtd_object_inventory_path
         )),
}
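
# Illustrative cross-reference usage (not part of the original conf.py): with
# the intersphinx mapping above, reST pages in this doc set can link into the
# other projects by prefixing the target with the mapping key, for example
# (":ref:`some-label`" is a hypothetical label, not a real one):
#
#   :ref:`wopirest:some-label`
#   :doc:`officeonline:index`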
| mit | 5,119,103,167,326,265,000 | 33.333333 | 80 | 0.596646 | false |