ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 7dfc57ca33a3a8f66323d20091fc530702fa9f8e | from itertools import count
import logging
import networkx
import ailment
from claripy.utils.orderedset import OrderedSet
from ...utils.graph import dfs_back_edges, subgraph_between_nodes, dominates, shallow_reverse
from .. import Analysis, register_analysis
from .utils import replace_last_statement
from .structurer_nodes import MultiNode, ConditionNode
from .graph_region import GraphRegion
from .condition_processor import ConditionProcessor
l = logging.getLogger(name=__name__)
# an ever-incrementing counter
CONDITIONNODE_ADDR = count(0xff000000)
class RegionIdentifier(Analysis):
"""
Identifies regions within a function.
"""
def __init__(self, func, cond_proc=None, graph=None):
self.function = func
self.cond_proc = cond_proc if cond_proc is not None else ConditionProcessor()
self._graph = graph if graph is not None else self.function.graph
self.region = None
self._start_node = None
self._loop_headers = None
self._analyze()
@staticmethod
def slice_graph(graph, node, frontier, include_frontier=False):
"""
Generate a slice of the graph from the head node to the given frontier.
:param networkx.DiGraph graph: The graph to work on.
:param node: The starting node in the graph.
:param frontier: A list of frontier nodes.
:param bool include_frontier: Whether the frontier nodes are included in the slice or not.
:return: A subgraph.
:rtype: networkx.DiGraph
"""
subgraph = subgraph_between_nodes(graph, node, frontier, include_frontier=include_frontier)
if not list(subgraph.nodes):
# HACK: FIXME: for infinite loop nodes, this would return an empty set, so we include the loop body itself
# Make sure this makes sense (EDG thinks it does)
if (node, node) in graph.edges:
subgraph.add_edge(node, node)
return subgraph
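# Illustrative sketch, not part of the original module: what a head-to-frontier
# slice looks like on a toy control-flow graph. The real implementation relies on
# angr's `subgraph_between_nodes`; plain networkx reachability is used here instead.
def _demo_slice_graph():
    import networkx as nx
    g = nx.DiGraph([("A", "B"), ("B", "C"), ("B", "D"), ("C", "E"), ("D", "E"), ("E", "F")])
    head, frontier = "A", {"E"}
    # keep nodes that are reachable from the head and can still reach the frontier
    reachable_from_head = {head} | nx.descendants(g, head)
    reaches_frontier = set()
    for f in frontier:
        reaches_frontier |= nx.ancestors(g, f)
    sliced = g.subgraph((reachable_from_head & reaches_frontier) | {head}).copy()
    return sorted(sliced.nodes())  # ['A', 'B', 'C', 'D'] -- frontier node excluded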
def _analyze(self):
# make a copy of the graph
graph = networkx.DiGraph(self._graph)
# preprocess: make it a super graph
self._make_supergraph(graph)
self._start_node = self._get_start_node(graph)
# preprocess: find loop headers
self._loop_headers = self._find_loop_headers(graph)
self.region = self._make_regions(graph)
@staticmethod
def _get_start_node(graph):
try:
return next(n for n in graph.nodes() if graph.in_degree(n) == 0)
except StopIteration:
return None
def _test_reducibility(self):
# make a copy of the graph
graph = networkx.DiGraph(self._graph)
# preprocess: make it a super graph
self._make_supergraph(graph)
while True:
changed = False
# find a node with a back-edge, remove the edge (deleting the loop), and replace it with a MultiNode
changed |= self._remove_self_loop(graph)
# find a node that has only one predecessor, and merge it with its predecessor (replace them with a
# MultiNode)
changed |= self._merge_single_entry_node(graph)
if not changed:
# a fixed-point is reached
break
def _make_supergraph(self, graph):
while True:
for src, dst, data in graph.edges(data=True):
type_ = data.get('type', None)
if type_ == 'fake_return':
if len(list(graph.successors(src))) == 1 and len(list(graph.predecessors(dst))) == 1:
self._merge_nodes(graph, src, dst, force_multinode=True)
break
elif type_ == 'call':
graph.remove_node(dst)
break
else:
break
def _find_loop_headers(self, graph):
return OrderedSet(sorted((t for _,t in dfs_back_edges(graph, self._start_node)), key=lambda x: x.addr))
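# Illustrative sketch, not part of the original module: loop headers are the targets
# of DFS back edges, which is what `dfs_back_edges` computes for the real analysis.
def _demo_loop_headers():
    import networkx as nx
    g = nx.DiGraph([(1, 2), (2, 3), (3, 2), (3, 4), (4, 1)])
    back_edges, visited, on_stack = [], set(), set()

    def dfs(n):
        visited.add(n)
        on_stack.add(n)
        for succ in g.successors(n):
            if succ in on_stack:
                back_edges.append((n, succ))  # edge that closes a cycle
            elif succ not in visited:
                dfs(succ)
        on_stack.discard(n)

    dfs(1)
    return sorted({t for _, t in back_edges})  # loop headers: [1, 2]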
def _find_initial_loop_nodes(self, graph, head):
# TODO optimize
latching_nodes = { s for s,t in dfs_back_edges(graph, self._start_node) if t == head }
loop_subgraph = self.slice_graph(graph, head, latching_nodes, include_frontier=True)
nodes = set(loop_subgraph.nodes())
return nodes
def _refine_loop(self, graph, head, initial_loop_nodes, initial_exit_nodes):
refined_loop_nodes = initial_loop_nodes.copy()
refined_exit_nodes = initial_exit_nodes.copy()
idom = networkx.immediate_dominators(graph, self._start_node)
new_exit_nodes = refined_exit_nodes
while len(refined_exit_nodes) > 1 and new_exit_nodes:
new_exit_nodes = set()
for n in list(refined_exit_nodes):
if all(pred in refined_loop_nodes for pred in graph.predecessors(n)) and dominates(idom, head, n):
refined_loop_nodes.add(n)
refined_exit_nodes.remove(n)
for u in (set(graph.successors(n)) - refined_loop_nodes):
new_exit_nodes.add(u)
refined_exit_nodes |= new_exit_nodes
refined_loop_nodes = refined_loop_nodes - refined_exit_nodes
return refined_loop_nodes, refined_exit_nodes
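# Illustrative sketch, not part of the original module: the absorption rule applied
# by _refine_loop() on a tiny graph. Exit node "x" has all of its predecessors inside
# the loop and is dominated by the head, so it is pulled into the loop and its
# successor "y" becomes the refined exit.
def _demo_refine_loop():
    import networkx as nx
    g = nx.DiGraph([("h", "b"), ("b", "h"), ("b", "x"), ("x", "y")])
    loop_nodes, exit_nodes = {"h", "b"}, {"x"}
    idoms = nx.immediate_dominators(g, "h")

    def dominated_by_head(n):
        # walk the immediate-dominator chain up towards the entry
        while idoms[n] != n:
            if n == "h":
                return True
            n = idoms[n]
        return n == "h"

    for n in list(exit_nodes):
        if all(p in loop_nodes for p in g.predecessors(n)) and dominated_by_head(n):
            loop_nodes.add(n)
            exit_nodes.remove(n)
            exit_nodes |= set(g.successors(n)) - loop_nodes
    return loop_nodes, exit_nodes  # ({'h', 'b', 'x'}, {'y'})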
def _remove_self_loop(self, graph):
r = False
while True:
for node in graph.nodes():
if node in graph[node]:
# found a self loop
self._remove_node(graph, node)
r = True
break
else:
break
return r
def _merge_single_entry_node(self, graph):
r = False
while True:
for node in networkx.dfs_postorder_nodes(graph):
preds = list(graph.predecessors(node))
if len(preds) == 1:
# merge the two nodes
self._absorb_node(graph, preds[0], node)
r = True
break
else:
break
return r
def _make_regions(self, graph):
structured_loop_headers = set()
new_regions = [ ]
# FIXME: _get_start_node() will fail if the graph is just a loop
# Find all loops
while True:
restart = False
self._start_node = self._get_start_node(graph)
# Start from loops
for node in self._loop_headers:
if node in structured_loop_headers:
continue
region = self._make_cyclic_region(node, graph)
if region is not None:
l.debug("Structured a loop region %r.", region)
new_regions.append(region)
structured_loop_headers.add(node)
restart = True
break
if restart:
continue
break
new_regions.append(GraphRegion(self._get_start_node(graph), graph, None, None, False))
l.debug("Identified %d loop regions.", len(structured_loop_headers))
l.debug("No more loops left. Start structuring acyclic regions.")
# No more loops left. Structure acyclic regions.
while new_regions:
region = new_regions.pop(0)
head = region.head
subgraph = region.graph
failed_region_attempts = set()
while self._make_acyclic_region(head, subgraph, region.graph_with_successors, failed_region_attempts,
region.cyclic):
if head not in subgraph:
# update head
head = next(iter(n for n in subgraph.nodes() if n.addr == head.addr))
head = next(iter(n for n in subgraph.nodes() if n.addr == head.addr))
region.head = head
if len(graph.nodes()) == 1 and isinstance(list(graph.nodes())[0], GraphRegion):
return list(graph.nodes())[0]
# create a large graph region
new_head = self._get_start_node(graph)
region = GraphRegion(new_head, graph, None, None, False)
return region
#
# Cyclic regions
#
def _make_cyclic_region(self, head, graph):
l.debug("Found cyclic region at %#08x", head.addr)
initial_loop_nodes = self._find_initial_loop_nodes(graph, head)
l.debug("Initial loop nodes %s", self._dbg_block_list(initial_loop_nodes))
# Make sure there is no other loop contained in the current loop
if {n for n in initial_loop_nodes if n.addr != head.addr}.intersection(self._loop_headers):
return None
normal_entries = {n for n in graph.predecessors(head) if n not in initial_loop_nodes}
abnormal_entries = set()
for n in initial_loop_nodes:
if n == head:
continue
preds = set(graph.predecessors(n))
abnormal_entries |= (preds - initial_loop_nodes)
l.debug("Normal entries %s", self._dbg_block_list(normal_entries))
l.debug("Abnormal entries %s", self._dbg_block_list(abnormal_entries))
initial_exit_nodes = set()
for n in initial_loop_nodes:
succs = set(graph.successors(n))
initial_exit_nodes |= (succs - initial_loop_nodes)
l.debug("Initial exit nodes %s", self._dbg_block_list(initial_exit_nodes))
refined_loop_nodes, refined_exit_nodes = self._refine_loop(graph, head, initial_loop_nodes,
initial_exit_nodes)
l.debug("Refined loop nodes %s", self._dbg_block_list(refined_loop_nodes))
l.debug("Refined exit nodes %s", self._dbg_block_list(refined_exit_nodes))
if len(refined_exit_nodes) > 1:
# self._get_start_node(graph)
node_post_order = list(networkx.dfs_postorder_nodes(graph, head))
sorted_exit_nodes = sorted(list(refined_exit_nodes), key=node_post_order.index)
normal_exit_node = sorted_exit_nodes[0]
abnormal_exit_nodes = set(sorted_exit_nodes[1:])
else:
normal_exit_node = next(iter(refined_exit_nodes)) if len(refined_exit_nodes) > 0 else None
abnormal_exit_nodes = set()
region = self._abstract_cyclic_region(graph, refined_loop_nodes, head, normal_entries, abnormal_entries,
normal_exit_node, abnormal_exit_nodes)
if len(region.successors) > 1:
# multi-successor region. refinement is required
self._refine_loop_successors(region, graph)
return region
def _refine_loop_successors(self, region, graph):
"""
If there are multiple successors of a loop, convert them into conditional gotos. Eventually there should be
only one loop successor.
:param GraphRegion region: The cyclic region to refine.
:param networkx.DiGraph graph: The current graph that is being structured.
:return: None
"""
if len(region.successors) <= 1:
return
# recover reaching conditions
self.cond_proc.recover_reaching_conditions(region, with_successors=True)
successors = list(region.successors)
condnode_addr = next(CONDITIONNODE_ADDR)
# create a new successor
cond = ConditionNode(
condnode_addr,
None,
self.cond_proc.reaching_conditions[successors[0]],
successors[0],
false_node=None,
)
for succ in successors[1:]:
cond = ConditionNode(condnode_addr,
None,
self.cond_proc.reaching_conditions[succ],
succ,
false_node=cond,
)
g = region.graph_with_successors
# modify region in place
region.successors = {cond}
for succ in successors:
for src, _, data in list(g.in_edges(succ, data=True)):
removed_edges = [ ]
for src2src, _, data_ in list(g.in_edges(src, data=True)):
removed_edges.append((src2src, src, data_))
g.remove_edge(src2src, src)
g.remove_edge(src, succ)
# TODO: rewrite the conditional jumps in src so that it goes to cond-node instead.
# modify the last statement of src so that it jumps to cond
replaced_any_stmt = False
last_stmts = self.cond_proc.get_last_statements(src)
for last_stmt in last_stmts:
if isinstance(last_stmt, ailment.Stmt.ConditionalJump):
if last_stmt.true_target.value == succ.addr:
new_last_stmt = ailment.Stmt.ConditionalJump(
last_stmt.idx,
last_stmt.condition,
ailment.Expr.Const(None, None, condnode_addr, self.project.arch.bits),
last_stmt.false_target,
ins_addr=last_stmt.ins_addr,
)
elif last_stmt.false_target.value == succ.addr:
new_last_stmt = ailment.Stmt.ConditionalJump(
last_stmt.idx,
last_stmt.condition,
last_stmt.true_target,
ailment.Expr.Const(None, None, condnode_addr, self.project.arch.bits),
ins_addr=last_stmt.ins_addr,
)
else:
# neither branch jumps out of the loop
continue
else:
new_last_stmt = ailment.Stmt.Jump(
last_stmt.idx,
ailment.Expr.Const(None, None, condnode_addr, self.project.arch.bits),
ins_addr=last_stmt.ins_addr,
)
replace_last_statement(src, last_stmt, new_last_stmt)
replaced_any_stmt = True
if not replaced_any_stmt:
l.warning("No statement was replaced. Is there anything wrong?")
# raise Exception()
# add src back
for src2src, _, data_ in removed_edges:
g.add_edge(src2src, src, **data_)
g.add_edge(src, cond, **data)
# modify graph
graph.add_edge(region, cond)
for succ in successors:
edge_data = graph.get_edge_data(region, succ)
graph.remove_edge(region, succ)
graph.add_edge(cond, succ, **edge_data)
#
# Acyclic regions
#
def _make_acyclic_region(self, head, graph, secondary_graph, failed_region_attempts, cyclic):
# pre-processing
# we need to create a copy of the original graph if
# - there are in edges to the head node, or
# - there are more than one end nodes
head_inedges = list(graph.in_edges(head))
if head_inedges:
# we need a copy of the graph to remove edges coming into the head
graph_copy = networkx.DiGraph(graph)
# remove any in-edge to the head node
for src, _ in head_inedges:
graph_copy.remove_edge(src, head)
else:
graph_copy = graph
endnodes = [node for node in graph_copy.nodes() if graph_copy.out_degree(node) == 0]
if len(endnodes) == 0:
# sanity check: there should be at least one end node
l.critical("No end node is found in a supposedly acyclic graph. Is it really acyclic?")
return False
if len(endnodes) > 1:
# we need a copy of the graph!
graph_copy = networkx.DiGraph(graph_copy)
# if this graph has multiple end nodes: create a single end node
dummy_endnode = None
if len(endnodes) > 1:
dummy_endnode = "DUMMY_ENDNODE"
for endnode in endnodes:
graph_copy.add_edge(endnode, dummy_endnode)
endnodes = [ dummy_endnode ]
else:
dummy_endnode = None
# compute dominator tree
doms = networkx.immediate_dominators(graph_copy, head)
# compute post-dominator tree
inverted_graph = shallow_reverse(graph_copy)
postdoms = networkx.immediate_dominators(inverted_graph, endnodes[0])
# dominance frontiers
df = networkx.algorithms.dominance_frontiers(graph_copy, head)
# visit the nodes in post-order
for node in networkx.dfs_postorder_nodes(graph_copy, source=head):
if node is dummy_endnode:
# skip the dummy endnode
continue
if cyclic and node is head:
continue
out_degree = graph_copy.out_degree[node]
if out_degree == 0:
# the root element of the region hierarchy should always be a GraphRegion,
# so we transform it into one, if necessary
if graph_copy.in_degree(node) == 0 and not isinstance(node, GraphRegion):
subgraph = networkx.DiGraph()
subgraph.add_node(node)
self._abstract_acyclic_region(graph, GraphRegion(node, subgraph, None, None, False), [],
secondary_graph=secondary_graph)
continue
# test if this node is an entry to a single-entry, single-successor region
levels = 0
postdom_node = postdoms.get(node, None)
while postdom_node is not None:
if (node, postdom_node) not in failed_region_attempts:
if self._check_region(graph_copy, node, postdom_node, doms, df):
frontier = [ postdom_node ]
region = self._compute_region(graph_copy, node, frontier, dummy_endnode=dummy_endnode)
if region is not None:
# l.debug("Walked back %d levels in postdom tree.", levels)
l.debug("Node %r, frontier %r.", node, frontier)
# l.debug("Identified an acyclic region %s.", self._dbg_block_list(region.graph.nodes()))
self._abstract_acyclic_region(graph, region, frontier, dummy_endnode=dummy_endnode,
secondary_graph=secondary_graph)
# assert dummy_endnode not in graph
return True
failed_region_attempts.add((node, postdom_node))
if not dominates(doms, node, postdom_node):
break
if postdom_node is postdoms.get(postdom_node, None):
break
postdom_node = postdoms.get(postdom_node, None)
levels += 1
# l.debug("Walked back %d levels in postdom tree and did not find anything for %r. Next.", levels, node)
return False
@staticmethod
def _check_region(graph, start_node, end_node, doms, df):
"""
:param graph:
:param start_node:
:param end_node:
:param doms:
:param df:
:return:
"""
# if the exit node is the header of a loop that contains the start node, the dominance frontier should only
# contain the exit node.
if not dominates(doms, start_node, end_node):
frontier = df.get(start_node, set())
for node in frontier:
if node is not start_node and node is not end_node:
return False
# no edges should enter the region.
for node in df.get(end_node, set()):
if dominates(doms, start_node, node) and node is not end_node:
return False
# no edges should leave the region.
for node in df.get(start_node, set()):
if node is start_node or node is end_node:
continue
if node not in df.get(end_node, set()):
return False
for pred in graph.predecessors(node):
if dominates(doms, start_node, pred) and not dominates(doms, end_node, pred):
return False
return True
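# Illustrative sketch, not part of the original module: the dominator structures that
# _check_region() relies on, computed with networkx on a small diamond-shaped graph.
def _demo_dominance():
    import networkx as nx
    g = nx.DiGraph([("entry", "a"), ("a", "b"), ("a", "c"), ("b", "d"), ("c", "d")])
    idoms = nx.immediate_dominators(g, "entry")
    df = nx.algorithms.dominance_frontiers(g, "entry")

    def dominates(dom, node):
        # walk the immediate-dominator chain from `node` up to the entry
        while True:
            if node == dom:
                return True
            parent = idoms[node]
            if parent == node:
                return False
            node = parent

    assert dominates("a", "d")  # "a" dominates the join point "d"
    assert df["b"] == {"d"}     # "d" sits on the dominance frontier of "b"
    return idoms, df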
@staticmethod
def _compute_region(graph, node, frontier, include_frontier=False, dummy_endnode=None):
subgraph = networkx.DiGraph()
frontier_edges = [ ]
queue = [ node ]
traversed = set()
while queue:
node_ = queue.pop()
if node_ in frontier:
continue
traversed.add(node_)
subgraph.add_node(node_)
for succ in graph.successors(node_):
edge_data = graph.get_edge_data(node_, succ)
if node_ in frontier and succ in traversed:
if include_frontier:
# if frontier nodes are included, do not keep traversing their successors
# however, if it has an edge to an already traversed node, we should add that edge
subgraph.add_edge(node_, succ, **edge_data)
else:
frontier_edges.append((node_, succ, edge_data))
continue
if succ is dummy_endnode:
continue
if succ in frontier:
if not include_frontier:
# skip all frontier nodes
frontier_edges.append((node_, succ, edge_data))
continue
subgraph.add_edge(node_, succ, **edge_data)
if succ in traversed:
continue
queue.append(succ)
if dummy_endnode is not None:
frontier = { n for n in frontier if n is not dummy_endnode }
if subgraph.number_of_nodes() > 1:
subgraph_with_frontier = networkx.DiGraph(subgraph)
for src, dst, edge_data in frontier_edges:
if dst is not dummy_endnode:
subgraph_with_frontier.add_edge(src, dst, **edge_data)
# assert dummy_endnode not in frontier
# assert dummy_endnode not in subgraph_with_frontier
return GraphRegion(node, subgraph, frontier, subgraph_with_frontier, False)
else:
return None
def _abstract_acyclic_region(self, graph, region, frontier, dummy_endnode=None, secondary_graph=None):
in_edges = self._region_in_edges(graph, region, data=True)
out_edges = self._region_out_edges(graph, region, data=True)
nodes_set = set()
for node_ in list(region.graph.nodes()):
nodes_set.add(node_)
if node_ is not dummy_endnode:
graph.remove_node(node_)
graph.add_node(region)
for src, _, data in in_edges:
if src not in nodes_set:
graph.add_edge(src, region, **data)
for _, dst, data in out_edges:
if dst not in nodes_set:
graph.add_edge(region, dst, **data)
if frontier:
for frontier_node in frontier:
if frontier_node is not dummy_endnode:
graph.add_edge(region, frontier_node)
if secondary_graph is not None:
self._abstract_acyclic_region(secondary_graph, region, { })
@staticmethod
def _abstract_cyclic_region(graph, loop_nodes, head, normal_entries, abnormal_entries, normal_exit_node,
abnormal_exit_nodes):
region = GraphRegion(head, None, None, None, True)
subgraph = networkx.DiGraph()
region_outedges = [ ]
graph.add_node(region)
for node in loop_nodes:
subgraph.add_node(node)
in_edges = list(graph.in_edges(node, data=True))
out_edges = list(graph.out_edges(node, data=True))
for src, dst, data in in_edges:
if src in normal_entries:
graph.add_edge(src, region, **data)
elif src in abnormal_entries:
data['region_dst_node'] = dst
graph.add_edge(src, region, **data)
elif src in loop_nodes:
subgraph.add_edge(src, dst, **data)
elif src is region:
subgraph.add_edge(head, dst, **data)
else:
assert 0
for src, dst, data in out_edges:
if dst in loop_nodes:
subgraph.add_edge(src, dst, **data)
elif dst is region:
subgraph.add_edge(src, head, **data)
elif dst is normal_exit_node:
region_outedges.append((node, dst))
graph.add_edge(region, dst, **data)
elif dst in abnormal_exit_nodes:
region_outedges.append((node, dst))
# data['region_src_node'] = src
graph.add_edge(region, dst, **data)
else:
assert 0
subgraph_with_exits = networkx.DiGraph(subgraph)
for src, dst in region_outedges:
subgraph_with_exits.add_edge(src, dst)
region.graph = subgraph
region.graph_with_successors = subgraph_with_exits
if normal_exit_node is not None:
region.successors = [normal_exit_node]
else:
region.successors = [ ]
region.successors += list(abnormal_exit_nodes)
for node in loop_nodes:
graph.remove_node(node)
return region
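# Illustrative sketch, not part of the original module: collapsing a set of loop nodes
# into a single region node while re-pointing entry and exit edges, which is the net
# effect of _abstract_cyclic_region() on the outer graph.
def _demo_collapse_region():
    import networkx as nx
    g = nx.DiGraph([("pre", "h"), ("h", "b"), ("b", "h"), ("b", "post")])
    loop_nodes, region = {"h", "b"}, "REGION(h)"
    g.add_node(region)
    for src, dst in list(g.edges()):
        if src not in loop_nodes and dst in loop_nodes:
            g.add_edge(src, region)   # external entry re-pointed at the region
        elif src in loop_nodes and dst not in loop_nodes:
            g.add_edge(region, dst)   # exit edge now leaves the region
    g.remove_nodes_from(loop_nodes)
    return sorted(g.edges())  # [('REGION(h)', 'post'), ('pre', 'REGION(h)')]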
@staticmethod
def _region_in_edges(graph, region, data=False):
return list(graph.in_edges(region.head, data=data))
@staticmethod
def _region_out_edges(graph, region, data=False):
out_edges = [ ]
for node in region.graph.nodes():
out_ = graph.out_edges(node, data=data)
for _, dst, data_ in out_:
if dst in region.graph:
continue
out_edges.append((region, dst, data_))
return out_edges
def _remove_node(self, graph, node): # pylint:disable=no-self-use
in_edges = [ (src, dst, data) for (src, dst, data) in graph.in_edges(node, data=True) if not src is node ]
out_edges = [ (src, dst, data) for (src, dst, data) in graph.out_edges(node, data=True) if not dst is node ]
if len(in_edges) <= 1 and len(out_edges) <= 1:
# it forms a region by itself :-)
new_node = None
else:
new_node = MultiNode([ node ])
graph.remove_node(node)
if new_node is not None:
for src, _, data in in_edges:
graph.add_edge(src, new_node, **data)
for _, dst, data in out_edges:
graph.add_edge(new_node, dst, **data)
def _merge_nodes(self, graph, node_a, node_b, force_multinode=False): # pylint:disable=no-self-use
in_edges = list(graph.in_edges(node_a, data=True))
out_edges = list(graph.out_edges(node_b, data=True))
if not force_multinode and len(in_edges) <= 1 and len(out_edges) <= 1:
# it forms a region by itself :-)
new_node = None
else:
new_node = MultiNode([ node_a, node_b ])
graph.remove_node(node_a)
graph.remove_node(node_b)
if new_node is not None:
graph.add_node(new_node)
for src, _, data in in_edges:
if src is node_b:
src = new_node
graph.add_edge(src, new_node, **data)
for _, dst, data in out_edges:
if dst is node_a:
dst = new_node
graph.add_edge(new_node, dst, **data)
assert not node_a in graph
assert not node_b in graph
def _absorb_node(self, graph, node_mommy, node_kiddie, force_multinode=False): # pylint:disable=no-self-use
in_edges_mommy = graph.in_edges(node_mommy, data=True)
out_edges_mommy = graph.out_edges(node_mommy, data=True)
out_edges_kiddie = graph.out_edges(node_kiddie, data=True)
if not force_multinode and len(in_edges_mommy) <= 1 and len(out_edges_kiddie) <= 1:
# it forms a region by itself :-)
new_node = None
else:
new_node = MultiNode([node_mommy, node_kiddie])
graph.remove_node(node_mommy)
graph.remove_node(node_kiddie)
if new_node is not None:
graph.add_node(new_node)
for src, _, data in in_edges_mommy:
if src == node_kiddie:
src = new_node
graph.add_edge(src, new_node, **data)
for _, dst, data in out_edges_mommy:
if dst == node_kiddie:
continue
if dst == node_mommy:
dst = new_node
graph.add_edge(new_node, dst, **data)
for _, dst, data in out_edges_kiddie:
if dst == node_mommy:
dst = new_node
graph.add_edge(new_node, dst, **data)
assert not node_mommy in graph
assert not node_kiddie in graph
@staticmethod
def _dbg_block_list(blocks):
return [(hex(b.addr) if hasattr(b, 'addr') else repr(b)) for b in blocks]
register_analysis(RegionIdentifier, 'RegionIdentifier')
|
py | 7dfc580adeb309e598b416c52cf464fba652e44c | '''
__repr__
Called by the repr() built-in function and by string conversions (reverse quotes)
to compute the "official" string representation of an object. If at all possible,
this should look like a valid Python expression that could be used to recreate
an object with the same value (given an appropriate environment).
__str__
Called by the str() built-in function and by the print statement to compute the
"informal" string representation of an object.
Notes:
The __str__ is intended to be as human-readable as possible,
whereas the __repr__ should aim to be something that could be used to recreate the object,
although it often won't be exactly how it was created, as in this case.
It's also not unusual for both __str__ and __repr__ to return the same value
(certainly for built-in types).
'''
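# Illustrative addition, not part of the original snippet: a minimal class following
# the convention described above -- __repr__ unambiguous and (ideally) evaluable,
# __str__ human-readable.
class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __repr__(self):
        # aims to be a valid expression that recreates the object
        return 'Point(%r, %r)' % (self.x, self.y)

    def __str__(self):
        # aims to be readable
        return '(%s, %s)' % (self.x, self.y)
# repr(Point(1, 2)) -> 'Point(1, 2)'    str(Point(1, 2)) -> '(1, 2)'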
# EXAMPLE 1: str vs repr
# ==============================================================================
# The goal of __repr__ is to be unambiguous
# The goal of __str__ is to be readable
import datetime
import pytz
a = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)
b = str(a)
print
print 'str(a): {}'.format(str(a))
print 'str(b): {}'.format(str(b))
print
print 'repr(a): {}'.format(repr(a))
print 'repr(b): {}'.format(repr(b))
print
|
py | 7dfc5892731d83cf1a6db0b624b0d9733862db97 | #!/usr/bin/env python
import os
import sys
import re
isochores = []
input = open(sys.argv[1], 'r') # output from IsoSegmenter
inputList = input.read()
seq = inputList.splitlines()
for i in range(1, len(seq)):
isochores.append ( seq[i].split(',') )
reads = []
input2 = open(sys.argv[2], 'r') # output from REAL
inputList2 = input2.read()
seq2 = inputList2.splitlines()
for i in range(1, len(seq2)):
reads.append ( seq2[i].split() )
counter = 0
no = 0
for i in range(0, len(isochores)):
if ( isochores[i][3] != "gap" ):
counter = 0
for j in range(0, len(reads)):
start = float(reads[j][9])
end = float(reads[j][9]) + float(reads[j][6])
if start > float(isochores[i][1]):
break
if end > float(isochores[i][0]):
if float(reads[j][9]) < float(isochores[i][0]) and (float(reads[j][9]) + float(reads[j][6]) > float(isochores[i][0])) and ( ( float(reads[j][9]) + float(reads[j][6]) ) - float(isochores[i][0]) ) > ( float(reads[j][9]) + float(reads[j][6]) )/2:
counter = counter +1
elif float(reads[j][9]) > float(isochores[i][0]) and ( ( float(reads[j][9]) + float(reads[j][6]) ) < float(isochores[i][1]) ):
counter = counter +1
elif float(reads[j][9]) < float(isochores[i][1]) and (float(reads[j][9]) + float(reads[j][6]) )> float(isochores[i][1]) and float(isochores[i][1]) - float(reads[j][9]) > ( float(reads[j][9]) + float(reads[j][6]) )/2:
counter=counter+1
print( counter )
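# Illustrative sketch, not part of the original script: one plausible reading of the
# overlap test above -- count a read when more than half of its length falls inside
# the isochore interval. Names are made up for the example.
def read_mostly_in_isochore(read_start, read_len, iso_start, iso_end):
    read_end = read_start + read_len
    overlap = min(read_end, iso_end) - max(read_start, iso_start)
    return overlap > read_len / 2.0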
|
py | 7dfc59571ef06e583feecf0220b1955050c9adb3 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
"""
Layer that transforms VNC config objects to database representation
"""
from __future__ import absolute_import
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import str
from builtins import object
from past.utils import old_div
from cfgm_common.zkclient import ZookeeperClient, IndexAllocator
from gevent import monkey
monkey.patch_all()
import gevent
import gevent.event
import time
from pprint import pformat
import socket
from netaddr import IPNetwork, IPAddress
from .context import get_request
from cfgm_common.uve.vnc_api.ttypes import *
from cfgm_common import ignore_exceptions
from cfgm_common.exceptions import ResourceExhaustionError
from cfgm_common.exceptions import ResourceExistsError
from cfgm_common.exceptions import ResourceOutOfRangeError
from cfgm_common.vnc_cassandra import VncCassandraClient
from cfgm_common.vnc_kombu import VncKombuClient
from cfgm_common.utils import cgitb_hook
from cfgm_common.utils import shareinfo_from_perms2
from cfgm_common import vnc_greenlets
from cfgm_common import SGID_MIN_ALLOC
from cfgm_common import VNID_MIN_ALLOC
from . import utils
import copy
from cfgm_common import jsonutils as json
import uuid
import datetime
import os
from .provision_defaults import *
from cfgm_common.exceptions import *
from .vnc_quota import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from sandesh_common.vns import constants
from .sandesh.traces.ttypes import DBRequestTrace, MessageBusNotifyTrace
import functools
import sys
from io import StringIO
def get_trace_id():
try:
req_id = get_request().headers.get(
'X-Request-Id', gevent.getcurrent().trace_request_id)
except AttributeError:
req_id = 'req-%s' % str(uuid.uuid4())
gevent.getcurrent().trace_request_id = req_id
return req_id
# end get_trace_id
@ignore_exceptions
def trace_msg(trace_objs=[], trace_name='', sandesh_hdl=None, error_msg=None):
for trace_obj in trace_objs:
if error_msg:
trace_obj.error = error_msg
trace_obj.trace_msg(name=trace_name, sandesh=sandesh_hdl)
# end trace_msg
class VncServerCassandraClient(VncCassandraClient):
# Useragent datastore keyspace + tables (used by neutron plugin currently)
_USERAGENT_KEYSPACE_NAME = constants.USERAGENT_KEYSPACE_NAME
_USERAGENT_KV_CF_NAME = 'useragent_keyval_table'
@classmethod
def get_db_info(cls):
db_info = VncCassandraClient.get_db_info() + \
[(cls._USERAGENT_KEYSPACE_NAME, [cls._USERAGENT_KV_CF_NAME])]
return db_info
# end get_db_info
def __init__(self, db_client_mgr, cass_srv_list, reset_config, db_prefix,
cassandra_credential, walk, obj_cache_entries,
obj_cache_exclude_types, debug_obj_cache_types,
log_response_time=None, ssl_enabled=False, ca_certs=None,
pool_size=20):
self._db_client_mgr = db_client_mgr
keyspaces = self._UUID_KEYSPACE.copy()
keyspaces[self._USERAGENT_KEYSPACE_NAME] = {
self._USERAGENT_KV_CF_NAME: {}}
super(VncServerCassandraClient, self).__init__(
cass_srv_list, db_prefix, keyspaces, None, self.config_log,
generate_url=db_client_mgr.generate_url, reset_config=reset_config,
credential=cassandra_credential, walk=walk,
obj_cache_entries=obj_cache_entries,
obj_cache_exclude_types=obj_cache_exclude_types,
debug_obj_cache_types=debug_obj_cache_types,
log_response_time=log_response_time, ssl_enabled=ssl_enabled,
ca_certs=ca_certs)
# end __init__
def config_log(self, msg, level):
self._db_client_mgr.config_log(msg, level)
# end config_log
def prop_collection_update(self, obj_type, obj_uuid, updates):
obj_class = self._db_client_mgr.get_resource_class(obj_type)
bch = self._obj_uuid_cf.batch()
for oper_param in updates:
oper = oper_param['operation']
prop_name = oper_param['field']
if prop_name in obj_class.prop_list_fields:
if oper == 'add':
prop_elem_val = oper_param['value']
prop_elem_pos = oper_param.get('position') or str(uuid.uuid4())
self._add_to_prop_list(bch, obj_uuid,
prop_name, prop_elem_val, prop_elem_pos)
elif oper == 'modify':
prop_elem_val = oper_param['value']
prop_elem_pos = oper_param['position']
# modify is practically an insert so use add
self._add_to_prop_list(bch, obj_uuid,
prop_name, prop_elem_val, prop_elem_pos)
elif oper == 'delete':
prop_elem_pos = oper_param['position']
self._delete_from_prop_list(bch, obj_uuid,
prop_name, prop_elem_pos)
elif prop_name in obj_class.prop_map_fields:
key_name = obj_class.prop_map_field_key_names[prop_name]
if oper == 'set':
prop_elem_val = oper_param['value']
position = prop_elem_val[key_name]
self._set_in_prop_map(bch, obj_uuid,
prop_name, prop_elem_val, position)
elif oper == 'delete':
position = oper_param['position']
self._delete_from_prop_map(bch, obj_uuid,
prop_name, position)
# end for all updates
self.update_last_modified(bch, obj_type, obj_uuid)
bch.send()
# end prop_collection_update
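# Illustrative sketch, not part of the original module: the shape of the `updates`
# argument that prop_collection_update() iterates over, inferred from the code above.
# Field names, values and positions are made up for the example; the map key field
# ('key_name' here) depends on the object schema.
_EXAMPLE_PROP_COLLECTION_UPDATES = [
    # list property: append an element, position generated if not supplied
    {'operation': 'add', 'field': 'example_prop_list', 'value': 'elem-value'},
    # list property: overwrite the element stored at an explicit position
    {'operation': 'modify', 'field': 'example_prop_list',
     'value': 'new-elem-value', 'position': 'pos-uuid'},
    # list property: drop the element at the given position
    {'operation': 'delete', 'field': 'example_prop_list', 'position': 'pos-uuid'},
    # map property: set/replace the entry keyed by the map's key field
    {'operation': 'set', 'field': 'example_prop_map',
     'value': {'key_name': 'k1', 'data': 'v1'}},
    # map property: remove the entry at the given key
    {'operation': 'delete', 'field': 'example_prop_map', 'position': 'k1'},
]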
def ref_update(self, obj_type, obj_uuid, ref_obj_type, ref_uuid,
ref_data, operation, id_perms, relax_ref_for_delete=False):
bch = self._obj_uuid_cf.batch()
if operation == 'ADD':
self._create_ref(bch, obj_type, obj_uuid, ref_obj_type, ref_uuid,
ref_data)
if relax_ref_for_delete:
self._relax_ref_for_delete(bch, obj_uuid, ref_uuid)
elif operation == 'DELETE':
self._delete_ref(bch, obj_type, obj_uuid, ref_obj_type, ref_uuid)
else:
pass
self.update_last_modified(bch, obj_type, obj_uuid, id_perms)
bch.send()
# end ref_update
def ref_relax_for_delete(self, obj_uuid, ref_uuid):
bch = self._obj_uuid_cf.batch()
self._relax_ref_for_delete(bch, obj_uuid, ref_uuid)
bch.send()
# end ref_relax_for_delete
def _relax_ref_for_delete(self, bch, obj_uuid, ref_uuid):
send = False
if bch is None:
send = True
bch = self._obj_uuid_cf.batch()
self._cassandra_driver.insert(ref_uuid, {'relaxbackref:%s' % (obj_uuid):
json.dumps(None)},
batch=bch)
if send:
bch.send()
# end _relax_ref_for_delete
def get_relaxed_refs(self, obj_uuid):
relaxed_cols = self._cassandra_driver.get(
self._OBJ_UUID_CF_NAME, obj_uuid,
start='relaxbackref:', finish='relaxbackref;')
if not relaxed_cols:
return []
return [col.split(':')[1] for col in relaxed_cols]
# end get_relaxed_refs
def is_latest(self, id, tstamp):
id_perms = self.uuid_to_obj_perms(id)
if id_perms['last_modified'] == tstamp:
return True
else:
return False
# end is_latest
def uuid_to_obj_dict(self, id):
obj_cols = self._cassandra_driver.get(self._OBJ_UUID_CF_NAME, id)
if not obj_cols:
raise NoIdError(id)
return obj_cols
# end uuid_to_obj_dict
def uuid_to_obj_perms(self, id):
return self._cassandra_driver.get_one_col(self._OBJ_UUID_CF_NAME,
id,
'prop:id_perms')
# end uuid_to_obj_perms
# fetch perms2 for an object
def uuid_to_obj_perms2(self, id):
return self._cassandra_driver.get_one_col(self._OBJ_UUID_CF_NAME,
id,
'prop:perms2')
# end uuid_to_obj_perms2
def useragent_kv_store(self, key, value):
columns = {'value': value}
self.add(self._USERAGENT_KV_CF_NAME, key, columns)
# end useragent_kv_store
def useragent_kv_retrieve(self, key):
if key:
if isinstance(key, list):
rows = self._cassandra_driver.multiget(self._USERAGENT_KV_CF_NAME, key)
return [rows[row].get('value') for row in rows]
else:
row = self._cassandra_driver.get(self._USERAGENT_KV_CF_NAME, key)
if not row:
raise NoUserAgentKey
return row.get('value')
else: # no key specified, return entire contents
kv_list = []
for ua_key, ua_cols in self._cassandra_driver.get_range(
self._USERAGENT_KV_CF_NAME):
kv_list.append({'key': ua_key, 'value': ua_cols.get('value')})
return kv_list
# end useragent_kv_retrieve
def useragent_kv_delete(self, key):
if not self.delete(self._USERAGENT_KV_CF_NAME, key):
raise NoUserAgentKey
# end useragent_kv_delete
# end class VncServerCassandraClient
class VncServerKombuClient(VncKombuClient):
def __init__(self, db_client_mgr, rabbit_ip, rabbit_port,
rabbit_user, rabbit_password, rabbit_vhost, rabbit_ha_mode,
host_ip, rabbit_health_check_interval, **kwargs):
self._db_client_mgr = db_client_mgr
self._sandesh = db_client_mgr._sandesh
listen_port = db_client_mgr.get_server_port()
q_name = 'vnc_config.%s-%s' % (socket.getfqdn(host_ip),
listen_port)
super(VncServerKombuClient, self).__init__(
rabbit_ip, rabbit_port, rabbit_user, rabbit_password, rabbit_vhost,
rabbit_ha_mode, q_name, self._dbe_subscribe_callback,
self.config_log, heartbeat_seconds=rabbit_health_check_interval,
**kwargs)
# end __init__
def config_log(self, msg, level):
self._db_client_mgr.config_log(msg, level)
# end config_log
@ignore_exceptions
def _generate_msgbus_notify_trace(self, oper_info):
req_id = oper_info.get('request-id',
'req-%s' %(str(uuid.uuid4())))
gevent.getcurrent().trace_request_id = req_id
notify_trace = MessageBusNotifyTrace(request_id=req_id)
notify_trace.operation = oper_info.get('oper', '')
notify_trace.body = json.dumps(oper_info)
return notify_trace
# end _generate_msgbus_notify_trace
def _dbe_subscribe_callback(self, oper_info):
self._db_client_mgr.wait_for_resync_done()
try:
msg = "Notification Message: %s" %(pformat(oper_info))
self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
trace = self._generate_msgbus_notify_trace(oper_info)
self._db_client_mgr.dbe_uve_trace(**oper_info)
if oper_info['oper'] == 'CREATE':
self._dbe_create_notification(oper_info)
elif oper_info['oper'] == 'UPDATE':
self._dbe_update_notification(oper_info)
elif oper_info['oper'] == 'DELETE':
self._dbe_delete_notification(oper_info)
else:
return
trace_msg([trace], 'MessageBusNotifyTraceBuf', self._sandesh)
except Exception:
string_buf = StringIO()
cgitb_hook(file=string_buf, format="text")
errmsg = string_buf.getvalue()
self.config_log(string_buf.getvalue(), level=SandeshLevel.SYS_ERR)
trace_msg([trace], name='MessageBusNotifyTraceBuf',
sandesh=self._sandesh, error_msg=errmsg)
# end _dbe_subscribe_callback
def dbe_publish(self, oper, obj_type, obj_id, fq_name, obj_dict=None,
extra_dict=None):
req_id = get_trace_id()
oper_info = {
'request-id': req_id,
'oper': oper,
'type': obj_type,
'uuid': obj_id,
'fq_name': fq_name,
}
if obj_dict is not None:
oper_info['obj_dict'] = obj_dict
if extra_dict is not None:
oper_info['extra_dict'] = extra_dict
self.publish(oper_info)
def _dbe_create_notification(self, obj_info):
obj_type = obj_info['type']
obj_uuid = obj_info['uuid']
try:
r_class = self._db_client_mgr.get_resource_class(obj_type)
ok, result = r_class.dbe_create_notification(
self._db_client_mgr,
obj_uuid,
obj_info.get('obj_dict'),
)
if not ok:
if result[0] == 404 and obj_uuid in result[1]:
raise NoIdError(obj_uuid)
else:
raise VncError(result)
except NoIdError as e:
# if NoIdError is for obj itself (as opposed to say for parent
# or ref), ignore notification
if e._unknown_id == obj_uuid:
msg = ("Create notification ignored as resource %s '%s' does "
"not exists anymore" % (obj_type, obj_uuid))
self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
return
except Exception as e:
err_msg = ("Failed in dbe_create_notification: " + str(e))
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
raise
# end _dbe_create_notification
def _dbe_update_notification(self, obj_info):
obj_type = obj_info['type']
obj_uuid = obj_info['uuid']
extra_dict = obj_info.get('extra_dict')
try:
r_class = self._db_client_mgr.get_resource_class(obj_type)
ok, result = r_class.dbe_update_notification(obj_uuid, extra_dict)
if not ok:
if result[0] == 404 and obj_uuid in result[1]:
raise NoIdError(obj_uuid)
else:
raise VncError(result)
except NoIdError as e:
# if NoIdError is for obj itself (as opposed to say for parent
# or ref), ignore notification
if e._unknown_id == obj_uuid:
msg = ("Update notification ignored as resource %s '%s' does "
"not exists anymore" % (obj_type, obj_uuid))
self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
return
except Exception as e:
msg = "Failure in dbe_update_notification: " + str(e)
self.config_log(msg, level=SandeshLevel.SYS_ERR)
raise
# end _dbe_update_notification
def _dbe_delete_notification(self, obj_info):
obj_type = obj_info['type']
obj_uuid = obj_info['uuid']
obj_dict = obj_info['obj_dict']
db_client_mgr = self._db_client_mgr
db_client_mgr._object_db.cache_uuid_to_fq_name_del(obj_uuid)
try:
r_class = self._db_client_mgr.get_resource_class(obj_type)
ok, result = r_class.dbe_delete_notification(obj_uuid, obj_dict)
if not ok:
if result[0] == 404 and obj_uuid in result[1]:
raise NoIdError(obj_uuid)
else:
raise VncError(result)
except NoIdError as e:
# if NoIdError is for obj itself (as opposed to say for parent
# or ref), ignore notification
if e._unknown_id == obj_uuid:
msg = ("Delete notification ignored as resource %s '%s' does "
"not exists anymore" % (obj_type, obj_uuid))
self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
return
except Exception as e:
msg = "Failure in dbe_delete_notification: " + str(e)
self.config_log(msg, level=SandeshLevel.SYS_ERR)
raise
# end _dbe_delete_notification
# end class VncServerKombuClient
class VncZkClient(object):
_SUBNET_PATH = "/api-server/subnets"
_FQ_NAME_TO_UUID_PATH = "/fq-name-to-uuid"
_MAX_SUBNET_ADDR_ALLOC = 65535
_VN_ID_ALLOC_PATH = "/id/virtual-networks/"
_VN_MAX_ID = 1 << 24
_VPG_ID_ALLOC_PATH = "/id/virtual-port-group/"
_VPG_MIN_ID = 0
_VPG_MAX_ID = (1 << 16) - 1
_SG_ID_ALLOC_PATH = "/id/security-groups/id/"
_SG_MAX_ID = 1 << 32
_TAG_ID_ALLOC_ROOT_PATH = "/id/tags"
_TAG_TYPE_ID_ALLOC_PATH = "%s/types/" % _TAG_ID_ALLOC_ROOT_PATH
_TAG_VALUE_ID_ALLOC_PATH = "%s/values/%%s/" % _TAG_ID_ALLOC_ROOT_PATH
_TAG_TYPE_MAX_ID = (1 << 16) - 1
_TAG_TYPE_RESERVED_SIZE = 255
_TAG_VALUE_MAX_ID = (1 << 16) - 1
_AE_ID_ALLOC_PATH = "/id/aggregated-ethernet/%s/"
_AE_MAX_ID = (1 << 7) - 1
_SUB_CLUSTER_ID_ALLOC_PATH = "/id/sub-clusters/id/"
_SUB_CLUSTER_MAX_ID_2_BYTES = (1 << 16) - 1
_SUB_CLUSTER_MAX_ID_4_BYTES = (1 << 32) - 1
def __init__(self, instance_id, zk_server_ip, host_ip, reset_config, db_prefix,
sandesh_hdl, log_response_time=None):
self._db_prefix = db_prefix
if db_prefix:
client_pfx = db_prefix + '-'
zk_path_pfx = db_prefix
else:
client_pfx = ''
zk_path_pfx = ''
client_name = '%sapi-%s' %(client_pfx, instance_id)
self._subnet_path = zk_path_pfx + self._SUBNET_PATH
self._fq_name_to_uuid_path = zk_path_pfx + self._FQ_NAME_TO_UUID_PATH
_vn_id_alloc_path = zk_path_pfx + self._VN_ID_ALLOC_PATH
_sg_id_alloc_path = zk_path_pfx + self._SG_ID_ALLOC_PATH
_tag_type_id_alloc_path = zk_path_pfx + self._TAG_TYPE_ID_ALLOC_PATH
_vpg_id_alloc_path = zk_path_pfx + self._VPG_ID_ALLOC_PATH
self._tag_value_id_alloc_path = zk_path_pfx + self._TAG_VALUE_ID_ALLOC_PATH
self._ae_id_alloc_path = zk_path_pfx + self._AE_ID_ALLOC_PATH
self._zk_path_pfx = zk_path_pfx
self._sandesh = sandesh_hdl
self._reconnect_zk_greenlet = None
while True:
try:
self._zk_client = ZookeeperClient(client_name, zk_server_ip,
host_ip, self._sandesh,
log_response_time=log_response_time)
# set the lost callback to always reconnect
self._zk_client.set_lost_cb(self.reconnect_zk)
break
except gevent.event.Timeout as e:
pass
if reset_config:
self._zk_client.delete_node(self._subnet_path, True)
self._zk_client.delete_node(self._fq_name_to_uuid_path, True)
self._zk_client.delete_node(_vn_id_alloc_path, True)
self._zk_client.delete_node(_vpg_id_alloc_path, True)
self._zk_client.delete_node(_sg_id_alloc_path, True)
self._zk_client.delete_node(
zk_path_pfx + self._TAG_ID_ALLOC_ROOT_PATH, True)
self._subnet_allocators = {}
self._ae_id_allocator = {}
# Initialize the Aggregated Ethernet allocator
self._vpg_id_allocator = IndexAllocator(self._zk_client,
_vpg_id_alloc_path,
self._VPG_MAX_ID)
# Initialize the virtual network ID allocator
self._vn_id_allocator = IndexAllocator(self._zk_client,
_vn_id_alloc_path,
self._VN_MAX_ID)
# Initialize the security group ID allocator
self._sg_id_allocator = IndexAllocator(self._zk_client,
_sg_id_alloc_path,
self._SG_MAX_ID)
# 0 is not a valid sg id any more. So, if it was previously allocated,
# delete it and reserve it
if self._sg_id_allocator.read(0) != '__reserved__':
self._sg_id_allocator.delete(0)
self._sg_id_allocator.reserve(0, '__reserved__')
# Initialize tag type ID allocator
self._tag_type_id_allocator = IndexAllocator(
self._zk_client,
_tag_type_id_alloc_path,
size=self._TAG_TYPE_MAX_ID,
start_idx=self._TAG_TYPE_RESERVED_SIZE,
)
# Initialize the tag value ID allocator for pre-defined tag types.
# One allocator per tag type
self._tag_value_id_allocator = {
type_name: IndexAllocator(
self._zk_client,
self._tag_value_id_alloc_path % type_name,
self._TAG_VALUE_MAX_ID,
) for type_name in list(constants.TagTypeNameToId.keys())}
# Initialize the sub-cluster ID allocator
self._sub_cluster_id_allocator = IndexAllocator(
self._zk_client,
zk_path_pfx + self._SUB_CLUSTER_ID_ALLOC_PATH,
start_idx=1,
size=self._SUB_CLUSTER_MAX_ID_4_BYTES)
def master_election(self, path, func, *args):
self._zk_client.master_election(
self._zk_path_pfx + path, os.getpid(),
func, *args)
# end master_election
def quota_counter(self, path, max_count=sys.maxsize, default=0):
return self._zk_client.quota_counter(self._zk_path_pfx + path,
max_count, default)
def quota_counter_exists(self, path):
return self._zk_client.exists(path)
def delete_quota_counter(self, path):
self._zk_client.delete_node(path, recursive=True)
def _reconnect_zk(self):
self._zk_client.connect()
self._reconnect_zk_greenlet = None
# end
def reconnect_zk(self):
if self._reconnect_zk_greenlet is None:
self._reconnect_zk_greenlet =\
vnc_greenlets.VncGreenlet("VNC ZK Reconnect",
self._reconnect_zk)
# end
def change_subnet_allocator(self, subnet,
subnet_alloc_list, alloc_unit):
allocator = self._subnet_allocators[subnet]
allocator.reallocate(
new_alloc_list=[{'start': old_div(x['start'],alloc_unit),
'end':old_div(x['end'],alloc_unit)}
for x in subnet_alloc_list])
# end
def create_subnet_allocator(self, subnet, subnet_alloc_list,
addr_from_start, should_persist,
start_subnet, size, alloc_unit):
# TODO handle subnet resizing change, ignore for now
if subnet not in self._subnet_allocators:
if addr_from_start is None:
addr_from_start = False
self._subnet_allocators[subnet] = IndexAllocator(
self._zk_client, self._subnet_path+'/'+subnet+'/',
size=old_div(size,alloc_unit), start_idx=old_div(start_subnet,alloc_unit),
reverse=not addr_from_start,
alloc_list=[{'start': old_div(x['start'],alloc_unit), 'end':old_div(x['end'],alloc_unit)}
for x in subnet_alloc_list],
max_alloc=old_div(self._MAX_SUBNET_ADDR_ALLOC,alloc_unit))
# end create_subnet_allocator
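# Illustrative sketch, not part of the original module: how alloc_unit scales the
# index space. With a 256-address subnet and alloc_unit=4 the allocator manages 64
# block indices, and each allocated index maps back to the first address of a
# 4-address block.
def _demo_alloc_unit(size=256, alloc_unit=4):
    num_blocks = size // alloc_unit         # 64 allocatable units
    index = 10                              # an index the allocator might return
    first_addr_offset = index * alloc_unit  # 40: offset of the block's first address
    return num_blocks, first_addr_offset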
def delete_subnet_allocator(self, subnet, notify=True):
if subnet in self._subnet_allocators:
self._subnet_allocators.pop(subnet, None)
if not notify:
# ZK stores the subnet lock under a two-level folder:
# <vn fq_name string>:<subnet prefix>/<subnet prefix len>
# As we prevent subnets from overlapping on the same network, the first
# folder can contain only one prefix-length folder, so we can safely
# remove the first folder recursively.
prefix, _, _ = subnet.rpartition('/')
prefix_path = "%s/%s/" % (self._subnet_path, prefix)
IndexAllocator.delete_all(self._zk_client, prefix_path)
def _get_subnet_allocator(self, subnet):
return self._subnet_allocators.get(subnet)
# end _get_subnet_allocator
def subnet_is_addr_allocated(self, subnet, addr):
allocator = self._get_subnet_allocator(subnet)
return allocator.read(addr)
# end subnet_is_addr_allocated
def subnet_set_in_use(self, subnet, addr):
allocator = self._get_subnet_allocator(subnet)
allocator.set_in_use(addr)
# end subnet_set_in_use
def subnet_reset_in_use(self, subnet, addr):
allocator = self._get_subnet_allocator(subnet)
allocator.reset_in_use(addr)
# end subnet_reset_in_use
def subnet_reserve_req(self, subnet, addr, value):
allocator = self._get_subnet_allocator(subnet)
return allocator.reserve(addr, value)
# end subnet_reserve_req
def subnet_alloc_count(self, subnet):
allocator = self._get_subnet_allocator(subnet)
return allocator.get_alloc_count()
# end subnet_alloc_count
def subnet_alloc_req(self, subnet, value=None, alloc_pools=None,
alloc_unit=1):
allocator = self._get_subnet_allocator(subnet)
if alloc_pools:
alloc_list=[{'start': old_div(x['start'],alloc_unit), 'end':old_div(x['end'],alloc_unit)}
for x in alloc_pools]
else:
alloc_list = []
try:
return allocator.alloc(value=value, pools=alloc_list)
except ResourceExhaustionError:
return None
# end subnet_alloc_req
def subnet_free_req(self, subnet, addr):
allocator = self._get_subnet_allocator(subnet)
if allocator:
allocator.delete(addr)
# end subnet_free_req
def create_fq_name_to_uuid_mapping(self, obj_type, fq_name, id):
fq_name_str = ':'.join(fq_name)
zk_path = self._fq_name_to_uuid_path+'/%s:%s' %(obj_type, fq_name_str)
self._zk_client.create_node(zk_path, id)
# end create_fq_name_to_uuid_mapping
def get_fq_name_to_uuid_mapping(self, obj_type, fq_name):
fq_name_str = ':'.join(fq_name)
zk_path = self._fq_name_to_uuid_path+'/%s:%s' %(obj_type, fq_name_str)
obj_uuid, znode_stat = self._zk_client.read_node(
zk_path, include_timestamp=True)
return obj_uuid, znode_stat.ctime
# end get_fq_name_to_uuid_mapping
def delete_fq_name_to_uuid_mapping(self, obj_type, fq_name):
fq_name_str = ':'.join(fq_name)
zk_path = self._fq_name_to_uuid_path+'/%s:%s' %(obj_type, fq_name_str)
self._zk_client.delete_node(zk_path)
# end delete_fq_name_to_uuid_mapping
def is_connected(self):
return self._zk_client.is_connected()
# end is_connected
def alloc_vn_id(self, fq_name_str, id=None):
# If ID provided, it's a notify allocation, just lock allocated ID in
# memory
if id is not None:
if self.get_vn_from_id(id) is not None:
self._vn_id_allocator.set_in_use(id - VNID_MIN_ALLOC)
return id
elif fq_name_str is not None:
return self._vn_id_allocator.alloc(fq_name_str) + VNID_MIN_ALLOC
def free_vn_id(self, id, fq_name_str, notify=False):
if id is not None and id - VNID_MIN_ALLOC < self._VN_MAX_ID:
# If fq_name associated to the allocated ID does not correspond to
# freed resource fq_name, keep zookeeper lock
allocated_fq_name_str = self.get_vn_from_id(id)
if (allocated_fq_name_str is not None and
allocated_fq_name_str != fq_name_str):
return
if notify:
# If notify, the ZK allocation already removed, just remove
# lock in memory
self._vn_id_allocator.reset_in_use(id - VNID_MIN_ALLOC)
else:
self._vn_id_allocator.delete(id - VNID_MIN_ALLOC)
def alloc_vxlan_id(self, fq_name_str, id, notify=False):
if notify:
if self.get_vn_from_id(id) is not None:
self._vn_id_allocator.set_in_use(id - VNID_MIN_ALLOC)
return id
elif fq_name_str is not None:
allocated_fq_name_str = self.get_vn_from_id(id)
if (allocated_fq_name_str is not None and
allocated_fq_name_str == fq_name_str):
return id
return self._vn_id_allocator.reserve(id - VNID_MIN_ALLOC, fq_name_str)
def free_vxlan_id(self, id, fq_name_str, notify=False):
if id is not None and id - VNID_MIN_ALLOC < self._VN_MAX_ID:
# If fq_name associated to the allocated ID does not correspond to
# freed resource fq_name, keep zookeeper lock
allocated_fq_name_str = self.get_vn_from_id(id)
if (allocated_fq_name_str is not None and
allocated_fq_name_str != fq_name_str):
return
if notify:
# If notify, the ZK allocation already removed, just remove
# lock in memory
self._vn_id_allocator.reset_in_use(id - VNID_MIN_ALLOC)
else:
self._vn_id_allocator.delete(id - VNID_MIN_ALLOC)
def get_vn_from_id(self, id):
if id is not None and id - VNID_MIN_ALLOC < self._VN_MAX_ID:
return self._vn_id_allocator.read(id - VNID_MIN_ALLOC)
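# Illustrative sketch, not part of the original module: the offset convention used by
# the VN id allocator above. Zookeeper stores zero-based indices; the API-visible
# virtual-network id is that index shifted by VNID_MIN_ALLOC. The constant's actual
# value comes from cfgm_common; 4096 below is only a stand-in for illustration.
def _demo_vn_id_offset(vnid_min_alloc=4096):
    zk_index = 5                             # what IndexAllocator.alloc() returns
    api_vn_id = zk_index + vnid_min_alloc    # what callers see and store
    assert api_vn_id - vnid_min_alloc == zk_index
    return api_vn_id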
def alloc_vpg_id(self, fq_name_str, id=None):
# If ID provided, it's a notify allocation, just lock allocated ID in
# memory
if id is not None:
if self.get_vpg_from_id(id) is not None:
self._vpg_id_allocator.set_in_use(id - self._VPG_MIN_ID)
return id
elif fq_name_str is not None:
return self._vpg_id_allocator.alloc(fq_name_str) + self._VPG_MIN_ID
def free_vpg_id(self, id, fq_name_str, notify=False):
if id is not None and id - self._VPG_MIN_ID < self._VPG_MAX_ID:
# If fq_name associated to the allocated ID does not correspond to
# freed resource fq_name, keep zookeeper lock
allocated_fq_name_str = self.get_vpg_from_id(id)
if (allocated_fq_name_str is not None and
allocated_fq_name_str != fq_name_str):
return
if notify:
# If notify, the ZK allocation already removed, just remove
# lock in memory
self._vpg_id_allocator.reset_in_use(id - self._VPG_MIN_ID)
else:
self._vpg_id_allocator.delete(id - self._VPG_MIN_ID)
def get_vpg_from_id(self, id):
if id is not None and id - self._VPG_MIN_ID < self._VPG_MAX_ID:
return self._vpg_id_allocator.read(id - self._VPG_MIN_ID)
def alloc_sg_id(self, fq_name_str, id=None):
# If ID provided, it's a notify allocation, just lock allocated ID in
# memory
if id is not None:
if self.get_sg_from_id(id) is not None:
self._sg_id_allocator.set_in_use(id - SGID_MIN_ALLOC)
return id
elif fq_name_str is not None:
try:
return self._sg_id_allocator.reserve(
id - SGID_MIN_ALLOC, fq_name_str) + SGID_MIN_ALLOC
except ResourceExistsError:
return self._sg_id_allocator.alloc(
fq_name_str) + SGID_MIN_ALLOC
elif fq_name_str is not None:
return self._sg_id_allocator.alloc(fq_name_str) + SGID_MIN_ALLOC
def free_sg_id(self, id, fq_name_str, notify=False):
if id is not None and id > SGID_MIN_ALLOC and id < self._SG_MAX_ID:
# If fq_name associated to the allocated ID does not correspond to
# freed resource fq_name, keep zookeeper lock
allocated_fq_name_str = self.get_sg_from_id(id)
if (allocated_fq_name_str is not None and
allocated_fq_name_str != fq_name_str):
return
if notify:
# If notify, the ZK allocation already removed, just remove
# lock in memory
self._sg_id_allocator.reset_in_use(id - SGID_MIN_ALLOC)
else:
self._sg_id_allocator.delete(id - SGID_MIN_ALLOC)
def get_sg_from_id(self, id):
if id is not None and id > SGID_MIN_ALLOC and id < self._SG_MAX_ID:
return self._sg_id_allocator.read(id - SGID_MIN_ALLOC)
def alloc_tag_type_id(self, type_str, id=None):
# If ID provided, it's a notify allocation, just lock allocated ID in
# memory
if id is not None:
if self.get_tag_type_from_id(id) is not None:
self._tag_type_id_allocator.set_in_use(id)
return id
elif type_str is not None:
return self._tag_type_id_allocator.alloc(type_str)
def free_tag_type_id(self, id, type_str, notify=False):
if id is not None and id < self._TAG_TYPE_MAX_ID:
# If tag type name associated to the allocated ID does not
# correspond to freed tag type name, keep zookeeper lock
allocated_type_str = self.get_tag_type_from_id(id)
if (allocated_type_str is not None and
allocated_type_str != type_str):
return
if notify:
# If notify, the ZK allocation already removed, just remove
# lock in memory
self._tag_type_id_allocator.reset_in_use(id)
else:
IndexAllocator.delete_all(
self._zk_client, self._tag_value_id_alloc_path % type_str)
self._tag_type_id_allocator.delete(id)
self._tag_value_id_allocator.pop(type_str, None)
def get_tag_type_from_id(self, id):
if id is not None and id < self._TAG_TYPE_MAX_ID:
return self._tag_type_id_allocator.read(id)
def alloc_tag_value_id(self, type_str, fq_name_str, id=None):
tag_value_id_allocator = self._tag_value_id_allocator.setdefault(
type_str,
IndexAllocator(
self._zk_client,
self._tag_value_id_alloc_path % type_str,
self._TAG_VALUE_MAX_ID,
),
)
# If ID provided, it's a notify allocation, just lock allocated ID in
# memory
if id is not None:
if tag_value_id_allocator.read(id) is not None:
tag_value_id_allocator.set_in_use(id)
return id
elif fq_name_str is not None:
return tag_value_id_allocator.alloc(fq_name_str)
def free_tag_value_id(self, type_str, id, fq_name_str, notify=False):
tag_value_id_allocator = self._tag_value_id_allocator.setdefault(
type_str,
IndexAllocator(
self._zk_client,
self._tag_value_id_alloc_path % type_str,
self._TAG_VALUE_MAX_ID,
),
)
if id is not None and id < self._TAG_VALUE_MAX_ID:
# If tag value associated to the allocated ID does not correspond to
# freed tag value, keep zookeeper lock
if fq_name_str != tag_value_id_allocator.read(id):
return
if notify:
# If notify, the ZK allocation already removed, just remove
# lock in memory
tag_value_id_allocator.reset_in_use(id)
else:
tag_value_id_allocator.delete(id)
def get_tag_value_from_id(self, type_str, id):
if id is not None and id < self._TAG_VALUE_MAX_ID:
return self._tag_value_id_allocator.setdefault(
type_str,
IndexAllocator(
self._zk_client,
self._tag_value_id_alloc_path % type_str,
self._TAG_VALUE_MAX_ID,
),
).read(id)
def alloc_ae_id(self, phy_rtr_name, fq_name_str, id=None):
ae_id_allocator = self._ae_id_allocator.setdefault(
phy_rtr_name,
IndexAllocator(
self._zk_client,
self._ae_id_alloc_path % phy_rtr_name,
self._AE_MAX_ID,
),
)
# If ID provided, it's a notify allocation, just lock allocated ID in
# memory
if id is not None:
if ae_id_allocator.read(id) is not None:
ae_id_allocator.set_in_use(id)
return id
elif fq_name_str is not None:
return ae_id_allocator.alloc(fq_name_str)
def free_ae_id(self, phy_rtr_name, id, fq_name_str, notify=False):
ae_id_allocator = self._ae_id_allocator.setdefault(
phy_rtr_name,
IndexAllocator(
self._zk_client,
self._ae_id_alloc_path % phy_rtr_name,
self._AE_MAX_ID,
),
)
if id is not None and id < self._AE_MAX_ID:
# If fq_name associated to the allocated ID does not correspond to
# freed tag value, keep zookeeper lock
if fq_name_str != ae_id_allocator.read(id):
return
if notify:
# If notify, the ZK allocation already removed, just remove
# lock in memory
ae_id_allocator.reset_in_use(id)
else:
ae_id_allocator.delete(id)
def _get_sub_cluster_from_id(self, sub_cluster_id):
return self._sub_cluster_id_allocator.read(sub_cluster_id)
def get_last_sub_cluster_allocated_id(self):
return self._sub_cluster_id_allocator.get_last_allocated_id()
def alloc_sub_cluster_id(self, asn, fq_name_str, sub_cluster_id=None):
if asn > 0xFFFF:
pool = {'start': 1, 'end': self._SUB_CLUSTER_MAX_ID_2_BYTES}
else:
pool = {'start': 1, 'end': self._SUB_CLUSTER_MAX_ID_4_BYTES}
if sub_cluster_id is None:
return self._sub_cluster_id_allocator.alloc(fq_name_str, [pool])
allocated_id = self._sub_cluster_id_allocator.reserve(
sub_cluster_id, fq_name_str, [pool])
# reserve returns none if requested ID is out of the allocation range
if not allocated_id:
raise ResourceOutOfRangeError(
sub_cluster_id,
self._sub_cluster_id_allocator._start_idx,
self._sub_cluster_id_allocator._start_idx +\
self._sub_cluster_id_allocator._size - 1)
return allocated_id
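# Illustrative sketch, not part of the original module: the ASN-dependent pool rule
# applied by alloc_sub_cluster_id() above -- with a 4-byte global ASN the sub-cluster
# id must fit in 2 bytes, otherwise the full 4-byte range is available.
def _demo_sub_cluster_pool(asn):
    two_bytes, four_bytes = (1 << 16) - 1, (1 << 32) - 1
    return {'start': 1, 'end': two_bytes if asn > 0xFFFF else four_bytes}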
def free_sub_cluster_id(self, sub_cluster_id, fq_name_str, notify=False):
# If fq_name associated to the allocated ID does not correspond to
# freed resource fq_name, keep zookeeper lock
allocated_fq_name_str = self._get_sub_cluster_from_id(sub_cluster_id)
if allocated_fq_name_str and allocated_fq_name_str != fq_name_str:
return
if notify:
# If notify, the ZK allocation already removed, just remove
# lock in memory
self._sub_cluster_id_allocator.reset_in_use(sub_cluster_id)
else:
self._sub_cluster_id_allocator.delete(sub_cluster_id)
class VncDbClient(object):
def __init__(self, api_svr_mgr, db_srv_list, rabbit_servers, rabbit_port,
rabbit_user, rabbit_password, rabbit_vhost, rabbit_ha_mode,
host_ip, reset_config=False, zk_server_ip=None,
db_prefix='', db_credential=None, obj_cache_entries=0,
obj_cache_exclude_types=None, debug_obj_cache_types=None,
db_engine='cassandra', cassandra_use_ssl=False,
cassandra_ca_certs=None, **kwargs):
self._db_engine = db_engine
self._api_svr_mgr = api_svr_mgr
self._sandesh = api_svr_mgr._sandesh
self._UVEMAP = {
"virtual_network" : ("ObjectVNTable", False),
"virtual_machine" : ("ObjectVMTable", False),
"virtual_machine_interface" : ("ObjectVMITable", False),
"service_instance" : ("ObjectSITable", False),
"virtual_router" : ("ObjectVRouter", True),
"analytics_node" : ("ObjectCollectorInfo", True),
"analytics_snmp_node" : ("ObjectAnalyticsSNMPInfo", True),
"analytics_alarm_node" : ("ObjectAnalyticsAlarmInfo", True),
"database_node" : ("ObjectDatabaseInfo", True),
"config_database_node" : ("ObjectConfigDatabaseInfo", True),
"config_node" : ("ObjectConfigNode", True),
"service_chain" : ("ServiceChain", False),
"physical_router" : ("ObjectPRouter", True),
"bgp_router": ("ObjectBgpRouter", True),
"tag" : ("ObjectTagTable", False),
"project" : ("ObjectProjectTable", False),
"firewall_policy" : ("ObjectFirewallPolicyTable", False),
"firewall_rule" : ("ObjectFirewallRuleTable", False),
"address_group" : ("ObjectAddressGroupTable", False),
"service_group" : ("ObjectServiceGroupTable", False),
"application_policy_set" : ("ObjectApplicationPolicySetTable", False),
}
self._db_resync_done = gevent.event.Event()
self.log_cassandra_response_time = functools.partial(self.log_db_response_time, "CASSANDRA")
self.log_zk_response_time = functools.partial(self.log_db_response_time, "ZK")
msg = "Connecting to zookeeper on %s" % (zk_server_ip)
self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
if db_engine == 'cassandra':
self._zk_db = VncZkClient(api_svr_mgr.get_worker_id(), zk_server_ip, host_ip,
reset_config, db_prefix, self.config_log,
log_response_time=self.log_zk_response_time)
def db_client_init():
msg = "Connecting to database on %s" % (db_srv_list)
self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
if api_svr_mgr.get_worker_id() == 0:
walk = False # done as part of db_resync()
else:
walk = True
self._object_db = VncServerCassandraClient(
self, db_srv_list, reset_config, db_prefix,
db_credential, walk, obj_cache_entries,
obj_cache_exclude_types, debug_obj_cache_types,
self.log_cassandra_response_time,
ssl_enabled=cassandra_use_ssl, ca_certs=cassandra_ca_certs)
self._zk_db.master_election("/api-server-election", db_client_init)
else:
msg = ("Contrail API server does not support database backend "
"'%s'" % db_engine)
raise NotImplementedError(msg)
health_check_interval = api_svr_mgr.get_rabbit_health_check_interval()
if api_svr_mgr.get_worker_id() > 0:
health_check_interval = 0.0
self._msgbus = VncServerKombuClient(self, rabbit_servers,
rabbit_port, rabbit_user, rabbit_password,
rabbit_vhost, rabbit_ha_mode, host_ip,
health_check_interval, **kwargs)
def log_db_response_time(self, db, response_time, oper, level=SandeshLevel.SYS_DEBUG):
        # Convert the timedelta to microseconds (days and seconds must both be
        # scaled before adding the microsecond remainder).
        response_time_in_usec = (
            (response_time.days * 24 * 60 * 60 + response_time.seconds)
            * 1000000 + response_time.microseconds)
# Create latency stats object
try:
req_id = get_trace_id()
except Exception as e:
req_id = "NO-REQUESTID"
stats = VncApiLatencyStats(
operation_type=oper,
application=db,
response_time_in_usec=response_time_in_usec,
response_size=0,
identifier=req_id,
)
stats_log = VncApiLatencyStatsLog(
level=level,
node_name="issu-vm6",
api_latency_stats=stats,
sandesh=self._sandesh)
        stats_log.send(sandesh=self._sandesh)
def _update_default_quota(self):
""" Read the default quotas from the configuration
and update it in the project object if not already
updated.
"""
default_quota = QuotaHelper.default_quota
proj_id = self.fq_name_to_uuid('project',
['default-domain', 'default-project'])
try:
(ok, result) = self.dbe_read('project', proj_id)
except NoIdError as e:
ok = False
result = 'Project Not Found: %s' %(proj_id)
if not ok:
self.config_log("Updating default quota failed: %s." %(result),
level=SandeshLevel.SYS_ERR)
return
proj_dict = result
proj_dict['quota'] = default_quota
self.dbe_update('project', proj_id, proj_dict)
# end _update_default_quota
def get_api_server(self):
return self._api_svr_mgr
# end get_api_server
def db_resync(self):
# Read contents from cassandra and perform DB update if required
start_time = datetime.datetime.utcnow()
self._object_db.walk(self._dbe_resync)
self.config_log("Cassandra DB walk completed.",
level=SandeshLevel.SYS_INFO)
self._update_default_quota()
end_time = datetime.datetime.utcnow()
msg = "Time elapsed in resyncing db: %s" % (str(end_time - start_time))
self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
self._db_resync_done.set()
# end db_resync
def wait_for_resync_done(self):
self._db_resync_done.wait()
# end wait_for_resync_done
def db_check(self):
# Read contents from cassandra and report any read exceptions
check_results = self._object_db.walk(self._dbe_check)
return check_results
# end db_check
def db_read(self):
# Read contents from cassandra
read_results = self._object_db.walk(self._dbe_read)
return read_results
# end db_check
def _uuid_to_longs(self, id):
msb_id = id.int >> 64
lsb_id = id.int & ((1 << 64) - 1)
return {'uuid_mslong': msb_id, 'uuid_lslong': lsb_id}
# end _uuid_to_longs
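    # Worked example (added for illustration; not part of the original source):
    #   _uuid_to_longs(uuid.UUID('00000000-0000-0001-0000-000000000002'))
    #   -> {'uuid_mslong': 1, 'uuid_lslong': 2}
    # i.e. the 128-bit UUID is split into its top and bottom 64 bits, the form
    # that match_uuid() below compares against id_perms['uuid'].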
def set_uuid(self, obj_type, obj_dict, id, do_lock=True):
if do_lock:
# set the mapping from name to uuid in zk to ensure single creator
fq_name = obj_dict['fq_name']
try:
self._zk_db.create_fq_name_to_uuid_mapping(obj_type, fq_name,
str(id))
except ResourceExistsError as rexist:
# see if stale and if so delete stale
_, ctime = self._zk_db.get_fq_name_to_uuid_mapping(
obj_type, fq_name)
epoch_msecs = ctime
try:
self._object_db.uuid_to_fq_name(str(id))
# not stale
raise ResourceExistsError(fq_name, str(id), 'cassandra')
except NoIdError:
lock_msecs = float(time.time()*1000 - epoch_msecs)
stale_msecs_cfg = 1000 * float(
self._api_svr_mgr.get_args().stale_lock_seconds)
if (lock_msecs < stale_msecs_cfg):
# not stale, race in create
raise rexist
# stale, release old and create new lock
msg = 'Releasing stale lock(%s sec) for %s %s' \
%(float(lock_msecs)/1000, obj_type, fq_name)
self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
self._zk_db.delete_fq_name_to_uuid_mapping(
obj_type, fq_name)
self._zk_db.create_fq_name_to_uuid_mapping(
obj_type, fq_name, str(id))
# end do_lock
# set uuid in id_perms
obj_dict['id_perms']['uuid'] = self._uuid_to_longs(id)
obj_dict['uuid'] = str(id)
return True
# end set_uuid
def _alloc_set_uuid(self, obj_type, obj_dict):
id = uuid.uuid4()
ok = self.set_uuid(obj_type, obj_dict, id)
return (ok, obj_dict['uuid'])
# end _alloc_set_uuid
def match_uuid(self, obj_dict, obj_uuid):
new_uuid = self._uuid_to_longs(uuid.UUID(obj_uuid))
return (new_uuid == obj_dict['id_perms']['uuid'])
# end match_uuid
def update_subnet_uuid(self, subnets):
updated = False
if subnets is None:
return updated
for subnet in subnets:
if subnet.get('subnet_uuid'):
continue
subnet_uuid = str(uuid.uuid4())
subnet['subnet_uuid'] = subnet_uuid
updated = True
return updated
# end update_subnet_uuid
def update_bgp_router_type(self, obj_dict):
""" Sets router_type property based on the vendor property only
if router_type is not set.
"""
router_params = obj_dict['bgp_router_parameters']
if 'router_type' not in router_params:
router_type = 'router'
if router_params['vendor'] == 'contrail':
router_type = 'control-node'
router_params.update({'router_type': router_type})
obj_uuid = obj_dict.get('uuid')
self._object_db.object_update('bgp_router', obj_uuid, obj_dict)
# end update_bgp_router_type
def iip_check_subnet(self, iip_dict, ipam_subnet, sn_uuid):
pfx = ipam_subnet['subnet']['ip_prefix']
pfx_len = ipam_subnet['subnet']['ip_prefix_len']
cidr = '%s/%s' % (pfx, pfx_len)
if (IPAddress(iip_dict['instance_ip_address']) in
IPNetwork(cidr)):
iip_dict['subnet_uuid'] = sn_uuid
self._object_db.object_update('instance_ip',
iip_dict['uuid'], iip_dict)
return True
return False
# end iip_check_subnet
def iip_update_subnet_uuid(self, iip_dict):
""" Set the subnet uuid as instance-ip attribute """
for vn_ref in iip_dict.get('virtual_network_refs', []):
(ok, results) = self._object_db.object_read(
'virtual_network', [vn_ref['uuid']],
field_names=['network_ipam_refs'])
if not ok:
return
vn_dict = results[0]
ipam_refs = vn_dict.get('network_ipam_refs', [])
# if iip is from the subnet in ipam['attr'],
# update valid subnet_uuid in iip object
flat_ipam_uuid_list = []
flat_sn_uuid_list = []
for ipam in ipam_refs:
ipam_subnets = ipam['attr']['ipam_subnets']
for ipam_subnet in ipam_subnets:
sn_uuid = ipam_subnet['subnet_uuid']
if 'subnet' not in ipam_subnet or\
ipam_subnet['subnet'] is None:
# Ipam subnet info need not have ip/prefix info,
# instead they could hold the uuid of subnet info.
# collect flat ipam uuid and ref subnet_uuid
# which is on vn-->ipam link
flat_ipam_uuid = ipam['uuid']
flat_ipam_uuid_list.append(flat_ipam_uuid)
flat_sn_uuid_list.append(sn_uuid)
continue
if self.iip_check_subnet(iip_dict, ipam_subnet, sn_uuid):
return
# resync subnet_uuid if iip is from flat subnet
            if len(flat_ipam_uuid_list) == 0:
return
# read ipam objects which are flat-ipam
(ok, result) = self._object_db.object_read('network_ipam',
flat_ipam_uuid_list)
if not ok:
return
for ipam_dict, subnet_uuid in zip(result, flat_sn_uuid_list):
ipam_subnets_dict = ipam_dict.get('ipam_subnets') or {}
                ipam_subnets = ipam_subnets_dict.get('subnets') or []
for ipam_subnet in ipam_subnets:
if self.iip_check_subnet(iip_dict, ipam_subnet,
subnet_uuid):
return
def _sub_cluster_upgrade(self, obj_dict):
if obj_dict.get('sub_cluster_id'):
return
sub_cluster_id = self._zk_db.alloc_sub_cluster_id(
            # The original line referenced the undefined name 'cls'; the API
            # server manager is assumed to expose the global ASN needed here.
            self._api_svr_mgr.global_autonomous_system,
':'.join(obj_dict['fq_name']))
self._object_db.object_update(
'sub_cluster',
obj_dict['uuid'],
{'sub_cluster_id': sub_cluster_id})
def _dbe_resync(self, obj_type, obj_uuids):
obj_class = cfgm_common.utils.obj_type_to_vnc_class(obj_type, __name__)
obj_fields = list(obj_class.prop_fields) + list(obj_class.ref_fields)
if obj_type == 'project':
obj_fields.append('logical_routers')
(ok, obj_dicts) = self._object_db.object_read(
obj_type, obj_uuids, field_names=obj_fields)
uve_trace_list = []
for obj_dict in obj_dicts:
try:
obj_uuid = obj_dict['uuid']
uve_trace_list.append(("RESYNC", obj_type, obj_uuid, obj_dict))
if obj_type == 'virtual_network':
# TODO remove backward compat (use RT instead of VN->LR ref)
for router in obj_dict.get('logical_router_refs', []):
self._object_db._delete_ref(None,
obj_type,
obj_uuid,
'logical_router',
router['uuid'])
do_update = False
if 'network_ipam_refs' in obj_dict:
ipam_refs = obj_dict['network_ipam_refs']
for ipam in ipam_refs:
vnsn = ipam['attr']
ipam_subnets = vnsn['ipam_subnets']
                            if self.update_subnet_uuid(ipam_subnets):
                                do_update = True
# set is_provider_network property as True
# for ip-fabric network
if obj_dict['fq_name'][-1] == 'ip-fabric' and \
not obj_dict.get('is_provider_network', False):
do_update = True
obj_dict['is_provider_network'] = True
if do_update:
self._object_db.object_update(
'virtual_network', obj_uuid, obj_dict)
elif obj_type == 'virtual_machine_interface':
device_owner = obj_dict.get('virtual_machine_interface_device_owner')
li_back_refs = obj_dict.get('logical_interface_back_refs', [])
if not device_owner and li_back_refs:
obj_dict['virtual_machine_interface_device_owner'] = 'PhysicalRouter'
self._object_db.object_update('virtual_machine_interface',
obj_uuid, obj_dict)
elif obj_type == 'physical_router':
# Encrypt PR pwd if not already done
if obj_dict.get('physical_router_user_credentials') and \
obj_dict.get('physical_router_user_credentials',
{}).get('password'):
dict_password = obj_dict.get(
'physical_router_user_credentials',
{}).get('password')
encryption_type = obj_dict.get(
'physical_router_encryption_type', 'none')
if dict_password is not None and \
encryption_type == 'none':
encrypt_pwd = utils.encrypt_password(
obj_dict['uuid'], dict_password)
obj_dict[
'physical_router_user_credentials'][
'password'] = encrypt_pwd
obj_dict[
'physical_router_encryption_type'] = 'local'
self._object_db.object_update(
'physical_router',
obj_uuid, obj_dict)
elif obj_type == 'fabric':
# No longer using fabric credentials, so remove
if obj_dict.get('fabric_credentials'):
obj_dict['fabric_credentials'] = {}
self._object_db.object_update('fabric',obj_uuid,
obj_dict)
elif obj_type == 'access_control_list':
if not obj_dict.get('access_control_list_hash'):
rules = obj_dict.get('access_control_list_entries')
if rules:
rules_obj = AclEntriesType(params_dict=rules)
obj_dict['access_control_list_hash'] = hash(rules_obj)
self._object_db.object_update('access_control_list',
obj_uuid, obj_dict)
elif obj_type == 'global_system_config':
if (obj_dict['fq_name'][0] == 'default-global-system-config' and
'enable_4byte_as' not in obj_dict):
obj_dict['enable_4byte_as'] = False
self._object_db.object_update('global_system_config',
obj_uuid, obj_dict)
elif obj_type == 'project':
self._api_svr_mgr.create_singleton_entry(
ApplicationPolicySet(parent_obj=Project(**obj_dict),
all_applications=True),
)
vxlan_routing = obj_dict.get('vxlan_routing', False)
logical_routers = obj_dict.get('logical_routers', [])
lr_uuid_list = [lr['uuid'] for lr in logical_routers]
if lr_uuid_list:
(ok, lr_list) = self._object_db.object_read(
'logical_router',
obj_uuids=lr_uuid_list,
field_names=['logical_router_type',
'virtual_network_refs'])
for lr in lr_list:
if 'logical_router_type' not in lr:
self._object_db.object_update(
'logical_router',
lr['uuid'],
{'logical_router_type': 'vxlan-routing'
if vxlan_routing else 'snat-routing'})
int_vn_uuid = None
for vn_ref in lr['virtual_network_refs']:
if (vn_ref.get('attr', {}).get(
'logical_router_virtual_network_type') ==
'InternalVirtualNetwork'):
int_vn_uuid = vn_ref.get('uuid')
if int_vn_uuid is not None:
int_vn_display_name = 'LR::%s' % lr['fq_name'][-1]
self._object_db.object_update(
'virtual_network',
int_vn_uuid,
{'display_name': int_vn_display_name})
if vxlan_routing:
obj_dict['vxlan_routing'] = False
self._object_db.object_update('project',
obj_uuid, obj_dict)
elif obj_type == 'floating_ip':
if not obj_dict.get('project_refs'):
project_fq_name = obj_dict['fq_name'][:2]
ref = {'to': project_fq_name, 'attr': None}
obj_dict['project_refs'] = [ref]
self._object_db.object_update('floating_ip',
obj_uuid, obj_dict)
# create new perms if upgrading
perms2 = obj_dict.get('perms2')
update_obj = False
if perms2 is None:
perms2 = self.update_perms2(obj_uuid)
update_obj = True
elif perms2['owner'] is None:
perms2['owner'] = 'cloud-admin'
update_obj = True
if (obj_dict.get('is_shared') == True and
perms2.get('global_access', 0) == 0):
perms2['global_access'] = PERMS_RWX
update_obj = True
if obj_type == 'domain' and len(perms2.get('share') or []) == 0:
update_obj = True
perms2 = self.enable_domain_sharing(obj_uuid, perms2)
if update_obj:
obj_dict['perms2'] = perms2
self._object_db.object_update(obj_type, obj_uuid, obj_dict)
if (obj_type == 'bgp_router' and
'bgp_router_parameters' in obj_dict and
'router_type' not in obj_dict['bgp_router_parameters']):
self.update_bgp_router_type(obj_dict)
if obj_type == 'instance_ip' and 'subnet_uuid' not in obj_dict:
self.iip_update_subnet_uuid(obj_dict)
if obj_type == 'sub_cluster':
self._sub_cluster_upgrade(obj_dict)
except Exception as e:
tb = cfgm_common.utils.detailed_traceback()
self.config_log(tb, level=SandeshLevel.SYS_ERR)
continue
# end for all objects
# Send UVEs resync with a pool of workers
uve_workers = gevent.pool.Group()
def format_args_for_dbe_uve_trace(args):
return self.dbe_uve_trace(*args)
uve_workers.map(format_args_for_dbe_uve_trace, uve_trace_list)
# end _dbe_resync
def _dbe_check(self, obj_type, obj_uuids):
for obj_uuid in obj_uuids:
try:
(ok, obj_dict) = self._object_db.object_read(obj_type, [obj_uuid])
except Exception as e:
return {'uuid': obj_uuid, 'type': obj_type, 'error': str(e)}
# end _dbe_check
def _dbe_read(self, obj_type, obj_uuids):
results = []
for obj_uuid in obj_uuids:
try:
(ok, obj_dict) = self._object_db.object_read(obj_type, [obj_uuid])
result_dict = obj_dict[0]
result_dict['type'] = obj_type
result_dict['uuid'] = obj_uuid
results.append(result_dict)
except Exception as e:
self.config_object_error(
obj_uuid, None, obj_type, '_dbe_read:cassandra_read', str(e))
continue
return results
# end _dbe_read
@ignore_exceptions
def _generate_db_request_trace(self, oper, obj_type, obj_id, obj_dict):
req_id = get_trace_id()
body = dict(obj_dict)
body['type'] = obj_type
body['uuid'] = obj_id
db_trace = DBRequestTrace(request_id=req_id)
db_trace.operation = oper
db_trace.body = json.dumps(body)
return db_trace
# end _generate_db_request_trace
# Public Methods
# Returns created uuid
def dbe_alloc(self, obj_type, obj_dict, uuid_requested=None):
try:
if uuid_requested:
obj_uuid = uuid_requested
ok = self.set_uuid(obj_type, obj_dict,
uuid.UUID(uuid_requested), False)
else:
(ok, obj_uuid) = self._alloc_set_uuid(obj_type, obj_dict)
except ResourceExistsError as e:
return (False, (409, str(e)))
return (True, obj_dict['uuid'])
# end dbe_alloc
def dbe_uve_trace(self, oper, type, uuid, obj_dict=None, **kwargs):
if type not in self._UVEMAP:
return
if obj_dict is None:
try:
obj_class = self.get_resource_class(type)
fields = list(obj_class.prop_fields) + list(obj_class.ref_fields)
(ok, obj_dict) = self.dbe_read(type, uuid, obj_fields=fields)
if not ok:
return
except NoIdError:
return
if type == 'bgp_router':
if (obj_dict.get('bgp_router_parameters', {}).get('router_type') !=
'control-node'):
return
oper = oper.upper()
req_id = get_trace_id()
if 'fq_name' not in obj_dict:
obj_dict['fq_name'] = self.uuid_to_fq_name(uuid)
obj_json = {k: json.dumps(obj_dict[k]) for k in obj_dict or {}}
db_trace = DBRequestTrace(request_id=req_id)
db_trace.operation = oper
db_trace.body = "name=%s type=%s value=%s" % (obj_dict['fq_name'],
type,
json.dumps(obj_dict))
uve_table, global_uve = self._UVEMAP[type]
if global_uve:
uve_name = obj_dict['fq_name'][-1]
else:
uve_name = ':'.join(obj_dict['fq_name'])
contrail_config = ContrailConfig(name=uve_name,
elements=obj_json,
deleted=oper=='DELETE')
contrail_config_msg = ContrailConfigTrace(data=contrail_config,
table=uve_table,
sandesh=self._sandesh)
contrail_config_msg.send(sandesh=self._sandesh)
trace_msg([db_trace], 'DBUVERequestTraceBuf', self._sandesh)
def dbe_trace(oper):
def wrapper(func):
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
trace = self._generate_db_request_trace(oper, *args)
try:
ret = func(self, *args, **kwargs)
trace_msg([trace], 'DBRequestTraceBuf', self._sandesh)
return ret
except Exception as e:
trace_msg([trace], 'DBRequestTraceBuf',
self._sandesh, error_msg=str(e))
raise
return wrapped
return wrapper
# dbe_trace
# create/update indexes if object is shared
def build_shared_index(oper):
def wrapper1(func):
@functools.wraps(func)
def wrapper2(self, *args, **kwargs):
obj_type, obj_id, obj_dict = args
# fetch current share information to identify what might have changed
try:
cur_perms2 = self.uuid_to_obj_perms2(obj_id)
except Exception as e:
cur_perms2 = self.get_default_perms2()
# don't build sharing indexes if operation (create/update) failed
(ok, result) = func(self, *args, **kwargs)
if not ok:
return (ok, result)
# many updates don't touch perms2
new_perms2 = obj_dict.get('perms2', None)
if not new_perms2:
return (ok, result)
share_perms = new_perms2.get('share',
cur_perms2.get('share') or [])
global_access = new_perms2.get('global_access',
cur_perms2.get('global_access',
0))
# msg = 'RBAC: BSL perms new %s, cur %s' % (new_perms2, cur_perms2)
# self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
# change in global access?
if cur_perms2.get('global_access', 0) != global_access:
if global_access:
self._object_db.set_shared(obj_type, obj_id, rwx = global_access)
else:
self._object_db.del_shared(obj_type, obj_id)
# change in shared list? Construct temporary sets to compare
cur_shared_list = set(
item['tenant'] + ':' + str(item['tenant_access'])
for item in cur_perms2.get('share') or [])
new_shared_list = set(
item['tenant'] + ':' + str(item['tenant_access'])
for item in share_perms or [])
if cur_shared_list == new_shared_list:
return (ok, result)
# delete sharing if no longer in shared list
for share_info in cur_shared_list - new_shared_list:
# sharing information => [share-type, uuid, rwx bits]
(share_type, share_id, share_perms) = shareinfo_from_perms2(share_info)
self._object_db.del_shared(obj_type, obj_id,
share_id=share_id, share_type=share_type)
# share this object with specified tenants
for share_info in new_shared_list - cur_shared_list:
# sharing information => [share-type, uuid, rwx bits]
(share_type, share_id, share_perms) = shareinfo_from_perms2(share_info)
self._object_db.set_shared(obj_type, obj_id,
share_id = share_id, share_type = share_type, rwx = int(share_perms))
return (ok, result)
return wrapper2
return wrapper1
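    # Note (added for clarity; not part of the original source): the entries in
    # cur_shared_list/new_shared_list above are strings of the form
    # "<tenant-spec>:<tenant_access>"; for example, a domain share created by
    # enable_domain_sharing() later in this class yields
    # "domain:<domain-uuid>:<rwx-bits>", which shareinfo_from_perms2() is
    # expected to split back into (share_type, share_id, rwx).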
@dbe_trace('create')
@build_shared_index('create')
def dbe_create(self, obj_type, obj_uuid, obj_dict):
(ok, result) = self._object_db.object_create(obj_type, obj_uuid,
obj_dict)
if ok:
# publish to msgbus
self._msgbus.dbe_publish('CREATE', obj_type, obj_uuid,
obj_dict['fq_name'], obj_dict=obj_dict)
self._dbe_publish_update_implicit(obj_type, result)
return (ok, result)
# end dbe_create
# input id is uuid
def dbe_read(self, obj_type, obj_id, obj_fields=None,
ret_readonly=False):
try:
(ok, cassandra_result) = self._object_db.object_read(
obj_type, [obj_id], obj_fields, ret_readonly=ret_readonly)
except NoIdError as e:
# if NoIdError is for obj itself (as opposed to say for parent
# or ref), let caller decide if this can be handled gracefully
# by re-raising
if e._unknown_id == obj_id:
raise
return (False, str(e))
return (ok, cassandra_result[0])
# end dbe_read
def dbe_count_children(self, obj_type, obj_id, child_type):
try:
(ok, cassandra_result) = self._object_db.object_count_children(
obj_type, obj_id, child_type)
except NoIdError as e:
return (False, str(e))
return (ok, cassandra_result)
# end dbe_count_children
def dbe_get_relaxed_refs(self, obj_id):
return self._object_db.get_relaxed_refs(obj_id)
# end dbe_get_relaxed_refs
def dbe_is_latest(self, obj_id, tstamp):
try:
is_latest = self._object_db.is_latest(obj_id, tstamp)
return (True, is_latest)
except Exception as e:
return (False, str(e))
# end dbe_is_latest
def _dbe_publish_update_implicit(self, obj_type, uuid_list):
for ref_uuid in uuid_list:
try:
ref_fq_name = self.uuid_to_fq_name(ref_uuid)
self._msgbus.dbe_publish('UPDATE-IMPLICIT', obj_type,
ref_uuid, ref_fq_name)
except NoIdError:
# ignore if the object disappeared
pass
# end _dbe_publish_update_implicit
@dbe_trace('update')
@build_shared_index('update')
def dbe_update(self, obj_type, obj_uuid, new_obj_dict,
attr_to_publish=None):
(ok, result) = self._object_db.object_update(obj_type, obj_uuid,
new_obj_dict)
if ok:
try:
# publish to message bus (rabbitmq)
fq_name = self.uuid_to_fq_name(obj_uuid)
self._msgbus.dbe_publish('UPDATE', obj_type, obj_uuid, fq_name,
extra_dict=attr_to_publish)
self._dbe_publish_update_implicit(obj_type, result)
except NoIdError as e:
# Object might have disappeared after the update. Return Success
# to the user.
return (ok, result)
return (ok, result)
# end dbe_update
def _owner_id(self):
env = get_request().headers.environ
domain_id = env.get('HTTP_X_DOMAIN_ID')
if domain_id:
domain_id = str(uuid.UUID(domain_id))
project_id = env.get('HTTP_X_PROJECT_ID')
if project_id:
project_id = str(uuid.UUID(project_id))
return domain_id, project_id
def dbe_list(self, obj_type, parent_uuids=None, back_ref_uuids=None,
obj_uuids=None, is_count=False, filters=None,
paginate_start=None, paginate_count=None, is_detail=False,
field_names=None, include_shared=False):
def collect_shared(owned_fq_name_uuids=None, start=None, count=None):
shared_result = []
# include objects shared with tenant
domain_id, project_id = self._owner_id()
shares = self.get_shared_objects(obj_type, project_id, domain_id)
if start is not None:
# pick only ones greater than marker
shares = sorted(shares, key=lambda uuid_perm: uuid_perm[0])
shares = [(_uuid, _perms) for _uuid, _perms in shares
if _uuid > start]
owned_objs = set([obj_uuid for (fq_name, obj_uuid) in
owned_fq_name_uuids or []])
collected = 0
marker = None
for obj_uuid, obj_perm in shares:
# skip owned objects already included in results
if obj_uuid in owned_objs:
continue
try:
fq_name = self.uuid_to_fq_name(obj_uuid)
shared_result.append((fq_name, obj_uuid))
collected += 1
if count is not None and collected >= count:
marker = obj_uuid
break
except NoIdError:
# uuid no longer valid. Deleted?
pass
return shared_result, marker
# end collect_shared
if paginate_start is None:
(ok, result, ret_marker) = self._object_db.object_list(
obj_type, parent_uuids=parent_uuids,
back_ref_uuids=back_ref_uuids, obj_uuids=obj_uuids,
count=is_count, filters=filters,
paginate_start=paginate_start,
paginate_count=paginate_count)
if not ok or is_count:
return (ok, result, None)
if include_shared:
result.extend(collect_shared(result)[0])
elif not paginate_start.startswith('shared:'):
            # Finish listing non-shared items before moving on to shared ones;
            # otherwise items can be missed, since a single sorted order is not
            # maintained across the two collections (owned vs shared with tenant).
(ok, result, ret_marker) = self._object_db.object_list(
obj_type, parent_uuids=parent_uuids,
back_ref_uuids=back_ref_uuids, obj_uuids=obj_uuids,
count=is_count, filters=filters,
paginate_start=paginate_start,
paginate_count=paginate_count)
if not ok or is_count:
return (ok, result, None)
if ret_marker is None and include_shared:
# transition to collect shared objects
return (True, [], 'shared:0')
else: # paginated and non-shared already visited
result, marker = collect_shared(
start=paginate_start.split(':')[-1],
count=paginate_count)
if marker is None:
ret_marker = None
else:
ret_marker = 'shared:%s' %(marker)
if is_detail:
cls = cfgm_common.utils.obj_type_to_vnc_class(obj_type, __name__)
obj_fields = list(cls.prop_fields) + list(cls.ref_fields)
else:
obj_fields = []
if field_names:
obj_fields.extend(field_names)
if not obj_fields:
return (True, [{'uuid': obj_uuid, 'fq_name': fq_name}
for fq_name, obj_uuid in result], ret_marker)
obj_ids_list = [obj_uuid for _, obj_uuid in result]
try:
ok, read_result = self._object_db.object_read(
obj_type, obj_ids_list, obj_fields, ret_readonly=True)
except NoIdError as e:
ok = False
read_result = str(e)
if not ok:
return ok, read_result, None
return ok, read_result, ret_marker
# end dbe_list
@dbe_trace('delete')
def dbe_delete(self, obj_type, obj_uuid, obj_dict):
(ok, result) = self._object_db.object_delete(obj_type, obj_uuid)
if ok:
# publish to message bus (rabbitmq)
self._msgbus.dbe_publish('DELETE', obj_type, obj_uuid,
obj_dict['fq_name'], obj_dict=obj_dict)
self._dbe_publish_update_implicit(obj_type, result)
# finally remove mapping in zk
self.dbe_release(obj_type, obj_dict['fq_name'])
return ok, result
# end dbe_delete
def dbe_release(self, obj_type, obj_fq_name):
self._zk_db.delete_fq_name_to_uuid_mapping(obj_type, obj_fq_name)
# end dbe_release
def dbe_oper_publish_pending(self):
return self._msgbus.num_pending_messages()
# end dbe_oper_publish_pending
def useragent_kv_store(self, key, value):
self._object_db.useragent_kv_store(key, value)
# end useragent_kv_store
def useragent_kv_retrieve(self, key):
return self._object_db.useragent_kv_retrieve(key)
# end useragent_kv_retrieve
def useragent_kv_delete(self, key):
return self._object_db.useragent_kv_delete(key)
# end useragent_kv_delete
def subnet_is_addr_allocated(self, subnet, addr):
return self._zk_db.subnet_is_addr_allocated(subnet, addr)
# end subnet_is_addr_allocated
def subnet_set_in_use(self, subnet, addr):
return self._zk_db.subnet_set_in_use(subnet, addr)
# end subnet_set_in_use
def subnet_reset_in_use(self, subnet, addr):
return self._zk_db.subnet_reset_in_use(subnet, addr)
# end subnet_reset_in_use
def subnet_alloc_count(self, subnet):
return self._zk_db.subnet_alloc_count(subnet)
# end subnet_alloc_count
def subnet_alloc_req(self, subnet, value=None, alloc_pools=None,
alloc_unit=1):
return self._zk_db.subnet_alloc_req(subnet, value, alloc_pools,
alloc_unit)
# end subnet_alloc_req
def subnet_reserve_req(self, subnet, addr=None, value=None):
return self._zk_db.subnet_reserve_req(subnet, addr, value)
# end subnet_reserve_req
def subnet_free_req(self, subnet, addr):
return self._zk_db.subnet_free_req(subnet, addr)
# end subnet_free_req
def subnet_change_allocator(self, subnet,
subnet_alloc_list, alloc_unit):
return self._zk_db.change_subnet_allocator(subnet,
subnet_alloc_list,
alloc_unit)
# end subnet_change_allocator
def subnet_create_allocator(self, subnet, subnet_alloc_list,
addr_from_start, should_persist,
start_subnet, size, alloc_unit):
return self._zk_db.create_subnet_allocator(
subnet, subnet_alloc_list, addr_from_start,
should_persist, start_subnet, size, alloc_unit)
# end subnet_create_allocator
def subnet_delete_allocator(self, subnet, notify=True):
return self._zk_db.delete_subnet_allocator(subnet, notify)
# end subnet_delete_allocator
def uuid_vnlist(self):
return self._object_db.uuid_vnlist()
# end uuid_vnlist
def fq_name_to_uuid(self, obj_type, fq_name):
obj_uuid = self._object_db.fq_name_to_uuid(obj_type, fq_name)
return obj_uuid
# end fq_name_to_uuid
def uuid_to_fq_name(self, obj_uuid):
return self._object_db.uuid_to_fq_name(obj_uuid)
# end uuid_to_fq_name
def uuid_to_obj_type(self, obj_uuid):
return self._object_db.uuid_to_obj_type(obj_uuid)
# end uuid_to_obj_type
def uuid_to_obj_dict(self, obj_uuid):
return self._object_db.uuid_to_obj_dict(obj_uuid)
# end uuid_to_obj_dict
def uuid_to_obj_perms(self, obj_uuid):
return self._object_db.uuid_to_obj_perms(obj_uuid)
# end uuid_to_obj_perms
def prop_collection_get(self, obj_type, obj_uuid, obj_fields, position):
(ok, cassandra_result) = self._object_db.prop_collection_read(
obj_type, obj_uuid, obj_fields, position)
return ok, cassandra_result
# end prop_collection_get
def prop_collection_update(self, obj_type, obj_uuid, updates,
attr_to_publish=None):
if not updates:
return
self._object_db.prop_collection_update(obj_type, obj_uuid, updates)
fq_name = self.uuid_to_fq_name(obj_uuid)
self._msgbus.dbe_publish('UPDATE', obj_type, obj_uuid, fq_name,
extra_dict=attr_to_publish)
return True, ''
# end prop_collection_update
def ref_update(self, obj_type, obj_uuid, ref_obj_type, ref_uuid, ref_data,
operation, id_perms, attr_to_publish=None,
relax_ref_for_delete=False):
self._object_db.ref_update(obj_type, obj_uuid, ref_obj_type,
ref_uuid, ref_data, operation, id_perms,
relax_ref_for_delete)
fq_name = self.uuid_to_fq_name(obj_uuid)
self._msgbus.dbe_publish('UPDATE', obj_type, obj_uuid, fq_name,
extra_dict=attr_to_publish)
if obj_type == ref_obj_type:
self._dbe_publish_update_implicit(obj_type, [ref_uuid])
return True, ''
# ref_update
def ref_relax_for_delete(self, obj_uuid, ref_uuid):
self._object_db.ref_relax_for_delete(obj_uuid, ref_uuid)
# end ref_relax_for_delete
def uuid_to_obj_perms2(self, obj_uuid):
return self._object_db.uuid_to_obj_perms2(obj_uuid)
# end uuid_to_obj_perms2
def get_resource_class(self, type):
return self._api_svr_mgr.get_resource_class(type)
# end get_resource_class
def get_default_perms2(self):
return self._api_svr_mgr._get_default_perms2()
# Helper routines for REST
def generate_url(self, resource_type, obj_uuid):
return self._api_svr_mgr.generate_url(resource_type, obj_uuid)
# end generate_url
def config_object_error(self, id, fq_name_str, obj_type,
operation, err_str):
self._api_svr_mgr.config_object_error(
id, fq_name_str, obj_type, operation, err_str)
# end config_object_error
def config_log(self, msg, level):
self._api_svr_mgr.config_log(msg, level)
# end config_log
def get_server_port(self):
return self._api_svr_mgr.get_server_port()
# end get_server_port
# return all objects shared with us (tenant)
# useful for collections
def get_shared_objects(self, obj_type, tenant_uuid, domain_uuid):
shared = []
# specifically shared with us
if tenant_uuid:
l1 = self._object_db.get_shared(obj_type, share_id=tenant_uuid,
share_type='tenant')
if l1:
shared.extend(l1)
# shared at domain level
if domain_uuid:
l1 = self._object_db.get_shared(obj_type, share_id=domain_uuid,
share_type='domain')
if l1:
shared.extend(l1)
# globally shared
l2 = self._object_db.get_shared(obj_type)
if l2:
shared.extend(l2)
return shared
# end get_shared_objects
def reset(self):
self._msgbus.reset()
# end reset
def get_worker_id(self):
return self._api_svr_mgr.get_worker_id()
# end get_worker_id
# Insert new perms. Called on startup when walking DB
def update_perms2(self, obj_uuid):
perms2 = copy.deepcopy(Provision.defaults.perms2)
perms2_json = json.dumps(perms2, default=lambda o: dict((k, v)
for k, v in o.__dict__.items()))
perms2 = json.loads(perms2_json)
return perms2
def enable_domain_sharing(self, obj_uuid, perms2):
share_item = {
'tenant': 'domain:%s' % obj_uuid,
'tenant_access': cfgm_common.DOMAIN_SHARING_PERMS
}
perms2.setdefault('share', []).append(share_item)
return perms2
# end class VncDbClient
|
py | 7dfc5b5b0f310ea8af7c16183db50811796e2b3f | #!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Annotation library for drawing overlays on the raspberry pi's camera preview.
Annotations include bounding boxes, text overlays, and points.
Annotations support partial opacity, however only with respect to the content in
the preview. A transparent fill value will cover up previously drawn overlay
under it, but not the camera content under it. A color of None can be given,
which will then not cover up overlay content drawn under the region.
Note: Overlays do not persist through to the storage layer, so images saved from
the camera will not contain overlays.
"""
import time
from PIL import Image, ImageDraw
import picamera
def _round_to_bit(value, power):
"""Rounds the given value to the next multiple of 2^power.
Args:
value: int to be rounded.
power: power of two which the value should be rounded up to.
Returns:
    the result of value rounded to the next multiple of 2^power.
"""
return (((value - 1) >> power) + 1) << power
def _round_buffer_dims(dims):
"""Appropriately rounds the given dimensions for image overlaying.
  The overlay buffer width must be rounded up to the next multiple of 32, and
  the height to the next multiple of 16."""
return (_round_to_bit(dims[0], 5), _round_to_bit(dims[1], 4))
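# Illustrative helper (added for this write-up; not part of the original library):
# demonstrates the rounding contract documented above without any camera hardware.
def _example_round_buffer_dims():
  """Return the padded buffer size for a hypothetical 351x561 preview."""
  assert _round_to_bit(33, 5) == 64                    # next multiple of 2**5
  assert _round_buffer_dims((351, 561)) == (352, 576)  # width -> 32s, height -> 16s
  return _round_buffer_dims((351, 561))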
# TODO(namiller): Add an annotator for images.
class Annotator:
"""Utility for managing annotations on the camera preview.
Args:
camera: picamera.PiCamera camera object to overlay on top of.
bg_color: PIL.ImageColor (with alpha) for the background of the overlays.
default_color: PIL.ImageColor (with alpha) default for the drawn content.
"""
def __init__(self, camera, bg_color=None, default_color=None,
dimensions=None):
self._dims = dimensions if dimensions else camera.resolution
self._buffer_dims = _round_buffer_dims(self._dims)
self._buffer = Image.new('RGBA', self._buffer_dims)
self._overlay = camera.add_overlay(
self._buffer.tobytes(), format='rgba', layer=3, size=self._buffer_dims)
self._draw = ImageDraw.Draw(self._buffer)
self._bg_color = bg_color if bg_color else (0, 0, 0, 0xA0)
self._default_color = default_color if default_color else (0xFF, 0, 0, 0xFF)
# MMALPort has a bug in enable.wrapper, where it always calls
# self._pool.send_buffer(block=False) regardless of the port direction.
# This is in contrast to setup time when it only calls
# self._pool.send_all_buffers(block=False)
# if self._port[0].type == mmal.MMAL_PORT_TYPE_OUTPUT.
# Because of this bug updating an overlay once will log a MMAL_EAGAIN
    # error every update. This is safe to ignore as the user is driving
# the renderer input port with calls to update() that dequeue buffers
# and sends them to the input port (so queue is empty on when
# send_all_buffers(block=False) is called from wrapper).
# As a workaround, monkey patch MMALPortPool.send_buffer and
# silence the "error" if thrown by our overlay instance.
original_send_buffer = picamera.mmalobj.MMALPortPool.send_buffer
def silent_send_buffer(zelf, **kwargs):
try:
original_send_buffer(zelf, **kwargs)
except picamera.exc.PiCameraMMALError as error:
# Only silence MMAL_EAGAIN for our target instance.
our_target = self._overlay.renderer.inputs[0].pool == zelf
if not our_target or error.status != 14:
raise error
picamera.mmalobj.MMALPortPool.send_buffer = silent_send_buffer
def update(self):
"""Updates the contents of the overlay."""
self._overlay.update(self._buffer.tobytes())
def stop(self):
"""Removes the overlay from the screen."""
self._draw.rectangle((0, 0) + self._dims, fill=0)
self.update()
def clear(self):
"""Clears the contents of the overlay - leaving only the plain background.
"""
self._draw.rectangle((0, 0) + self._dims, fill=self._bg_color)
def bounding_box(self, rect, outline=None, fill=None):
"""Draws a bounding box around the specified rectangle.
Args:
rect: (x1, y1, x2, y2) rectangle to be drawn - where (x1,y1) and (x2, y2)
are opposite corners of the desired rectangle.
outline: PIL.ImageColor with which to draw the outline (defaults to the
configured default_color).
      fill: PIL.ImageColor with which to fill the rectangle (defaults to None,
        which will not cover up drawings under the region).
"""
outline = self._default_color if outline is None else outline
self._draw.rectangle(rect, fill=fill, outline=outline)
# TODO(namiller): Add a font size parameter and load a truetype font.
def text(self, location, text, color=None):
"""Draws the given text at the given location.
Args:
location: (x,y) point at which to draw the text (upper left corner).
text: string to be drawn.
color: PIL.ImageColor to draw the string in (defaults to default_color).
"""
color = self._default_color if color is None else color
self._draw.text(location, text, fill=color)
def point(self, location, radius=1, color=None):
"""Draws a point of the given size at the given location.
Args:
location: (x,y) center of the point to be drawn.
radius: the radius of the point to be drawn.
color: The color to draw the point in (defaults to default_color).
"""
color = self._default_color if color is None else color
self._draw.ellipse(
(location[0] - radius, location[1] - radius, location[0] + radius,
location[1] + radius),
fill=color)
def _main():
"""Example usage of the annotator utility.
Demonstrates setting up a camera preview, drawing slowly moving/intersecting
animations over it, and clearing the overlays."""
with picamera.PiCamera() as camera:
# Resolution can be arbitrary.
camera.resolution = (351, 561)
camera.start_preview()
annotator = Annotator(camera)
for i in range(10):
annotator.clear()
annotator.bounding_box(
(20, 20, 70, 70), outline=(0, 0xFF, 0, 0xFF), fill=0)
annotator.bounding_box((10 * i, 10, 10 * i + 50, 60))
annotator.bounding_box(
(80, 0, 130, 50), outline=(0, 0, 0xFF, 0xFF), fill=0)
annotator.text((100, 100), 'Hello World')
annotator.point((10, 100), radius=5)
annotator.update()
time.sleep(1)
annotator.stop()
time.sleep(10)
if __name__ == '__main__':
_main()
|
py | 7dfc5d96aa2723753b2bb4a117dc8881d6e0adfb | def main():
print("Square star pattern")
number = int(input("Input= "))
for i in range(number):
print(" * " * number)
if __name__ == '__main__':
main()
|
py | 7dfc5e79a56aa9fa08d4890e14276ee67c49fb45 | # -*- coding: utf-8 -*-
'''
.. versionadded:: 2016.3.0
This is a queue with postgres as the backend. It uses the jsonb store to
store information for queues.
:depends: python-psycopg2
To enable this queue, the following needs to be configured in your master
config. These are the defaults:
.. code-block:: yaml
queue.pgjsonb.host: 'salt'
queue.pgjsonb.user: 'salt'
queue.pgjsonb.pass: 'salt'
queue.pgjsonb.db: 'salt'
queue.pgjsonb.port: 5432
Use the following Pg database schema:
.. code-block:: sql
CREATE DATABASE salt WITH ENCODING 'utf-8';
--
-- Table structure for table `salt`
--
DROP TABLE IF EXISTS salt;
    CREATE TABLE salt(
id SERIAL PRIMARY KEY,
data jsonb NOT NULL
);
.. code-block:: bash
salt-run queue.insert test '{"name": "redis", "host": "172.16.0.8", "port": 6379}' backend=pgjsonb
salt-run queue.process_queue test all backend=pgjsonb
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from contextlib import contextmanager
import sys
# import salt libs
import salt.utils.json
from salt.ext import six
from salt.exceptions import SaltInvocationError, SaltMasterError
try:
import psycopg2
HAS_PG = True
except ImportError:
HAS_PG = False
import logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pgjsonb'
def __virtual__():
if HAS_PG is False:
return False
return __virtualname__
@contextmanager
def _conn(commit=False):
'''
    Return a postgres cursor
'''
defaults = {'host': 'localhost',
'user': 'salt',
'password': 'salt',
'dbname': 'salt',
'port': 5432}
conn_kwargs = {}
for key, value in defaults.items():
conn_kwargs[key] = __opts__.get('queue.{0}.{1}'.format(__virtualname__, key), value)
try:
conn = psycopg2.connect(**conn_kwargs)
except psycopg2.OperationalError as exc:
raise SaltMasterError('pgjsonb returner could not connect to database: {exc}'.format(exc=exc))
cursor = conn.cursor()
try:
yield cursor
except psycopg2.DatabaseError as err:
error = err.args
sys.stderr.write(six.text_type(error))
cursor.execute("ROLLBACK")
raise err
else:
if commit:
cursor.execute("COMMIT")
else:
cursor.execute("ROLLBACK")
finally:
conn.close()
def _list_tables(cur):
cmd = "select relname from pg_class where relkind='r' and relname !~ '^(pg_|sql_)';"
log.debug('SQL Query: %s', cmd)
cur.execute(cmd)
result = cur.fetchall()
return [x[0] for x in result]
def _create_table(cur, queue):
cmd = 'CREATE TABLE {0}(id SERIAL PRIMARY KEY, '\
'data jsonb NOT NULL)'.format(queue)
log.debug('SQL Query: %s', cmd)
cur.execute(cmd)
return True
def _list_items(queue):
'''
Private function to list contents of a queue
'''
with _conn() as cur:
cmd = 'SELECT data FROM {0}'.format(queue)
log.debug('SQL Query: %s', cmd)
cur.execute(cmd)
contents = cur.fetchall()
return contents
def list_queues():
'''
Return a list of Salt Queues on the Salt Master
'''
with _conn() as cur:
queues = _list_tables(cur)
return queues
def list_items(queue):
'''
List contents of a queue
'''
itemstuple = _list_items(queue)
items = [item[0] for item in itemstuple]
return items
def list_length(queue):
'''
Provide the number of items in a queue
'''
items = _list_items(queue)
return len(items)
def _queue_exists(queue):
'''
Does this queue exist
:param queue: Name of the queue
:type str
:return: True if this queue exists and
False otherwise
:rtype bool
'''
return queue in list_queues()
def handle_queue_creation(queue):
if not _queue_exists(queue):
with _conn(commit=True) as cur:
log.debug('Queue %s does not exist. Creating', queue)
_create_table(cur, queue)
else:
log.debug('Queue %s already exists.', queue)
def insert(queue, items):
'''
Add an item or items to a queue
'''
handle_queue_creation(queue)
with _conn(commit=True) as cur:
if isinstance(items, dict):
items = salt.utils.json.dumps(items)
cmd = str('''INSERT INTO {0}(data) VALUES('{1}')''').format(queue, items) # future lint: disable=blacklisted-function
log.debug('SQL Query: %s', cmd)
try:
cur.execute(cmd)
except psycopg2.IntegrityError as esc:
return ('Item already exists in this queue. '
'postgres error: {0}'.format(esc))
if isinstance(items, list):
items = [(salt.utils.json.dumps(el),) for el in items]
cmd = str("INSERT INTO {0}(data) VALUES (%s)").format(queue) # future lint: disable=blacklisted-function
log.debug('SQL Query: %s', cmd)
try:
cur.executemany(cmd, items)
except psycopg2.IntegrityError as esc:
return ('One or more items already exists in this queue. '
'postgres error: {0}'.format(esc))
return True
def delete(queue, items):
'''
Delete an item or items from a queue
'''
with _conn(commit=True) as cur:
if isinstance(items, dict):
cmd = str("""DELETE FROM {0} WHERE data = '{1}'""").format( # future lint: disable=blacklisted-function
queue,
salt.utils.json.dumps(items))
log.debug('SQL Query: %s', cmd)
cur.execute(cmd)
return True
if isinstance(items, list):
items = [(salt.utils.json.dumps(el),) for el in items]
cmd = 'DELETE FROM {0} WHERE data = %s'.format(queue)
log.debug('SQL Query: %s', cmd)
cur.executemany(cmd, items)
return True
def pop(queue, quantity=1, is_runner=False):
'''
    Pop one or more (or all) items from the queue and return them.
'''
cmd = 'SELECT id, data FROM {0}'.format(queue)
if quantity != 'all':
try:
quantity = int(quantity)
except ValueError as exc:
error_txt = ('Quantity must be an integer or "all".\n'
'Error: "{0}".'.format(exc))
raise SaltInvocationError(error_txt)
cmd = ''.join([cmd, ' LIMIT {0};'.format(quantity)])
log.debug('SQL Query: %s', cmd)
items = []
with _conn(commit=True) as cur:
cur.execute(cmd)
result = cur.fetchall()
if len(result) > 0:
ids = [six.text_type(item[0]) for item in result]
items = [item[1] for item in result]
idlist = "','".join(ids)
del_cmd = '''DELETE FROM {0} WHERE id IN ('{1}');'''.format(
queue, idlist)
log.debug('SQL Query: %s', del_cmd)
cur.execute(del_cmd)
return items
|
py | 7dfc5fe7b48790825f5784ca8956028cbaaac9a8 | import urllib.request
import urllib.parse
import json
def http_request(url, query=None, method=None, headers={}, data=None):
"""Perform an HTTP request and return the associated response."""
parts = vars(urllib.parse.urlparse(url))
if query:
parts['query'] = urllib.parse.urlencode(query)
url = urllib.parse.ParseResult(**parts).geturl()
r = urllib.request.Request(url=url, method=method, headers=headers,
data=data)
with urllib.request.urlopen(r) as resp:
msg, resp = resp.info(), resp.read()
if msg.get_content_type() == 'application/json':
resp = json.loads(resp.decode('utf-8'))
return msg, resp
if __name__ == '__main__':
msg, resp = http_request(
'https://httpbin.org/get',
query={
'a': 'Hello',
'b': 'World'
}
)
print(msg.get_content_type(), resp)
msg, resp = http_request('https://httpbin.org/bytes/16')
print(msg.get_content_type(), resp)
msg, resp = http_request('https://httpbin.org/post', method='POST',
data='This is my posted data!'.encode('ascii'),
headers={'Content-Type': 'text/plain'})
print(msg.get_content_type(), resp) |
py | 7dfc601489b619510d895c7b91ad05649b11fe89 | from flask import Flask, session, render_template
from . import (
routes_main, routes_browse, routes_api, routes_file_based,
routes_auth, routes_cells, routes_markdown,
default_config)
from .flask_static_digest import FlaskStaticDigest
flask_static_digest = FlaskStaticDigest()
def render_react_error(code, title):
return render_template('pages/base_react.html',
flask_data={'errorCode': code},
title=title), code
def bad_request(e):
'''A 400 means the request to the API failed.'''
return render_react_error(400, 'Bad Request')
def not_found(e):
'''A 404 means Flask routing failed.'''
return render_react_error(404, 'Page Not Found')
def unauthorized(e):
'''A 401 probably means Globus credentials have expired.'''
# Go ahead and clear the flask session for the user.
# Without this, the button still says "Logout", as if they were still logged in.
# We check group membership on login, which is a distinct 401,
# with its own template.
session.clear()
return render_react_error(401, 'Unauthorized')
def forbidden(e):
return render_react_error(403, 'Forbidden')
def gateway_timeout(e):
'''A 504 means the API has timed out.'''
return render_react_error(504, 'Gateway Timeout')
def any_other_error(e):
'''
In debug mode, we will still fall back to the interactive debugger.
https://flask.palletsprojects.com/en/2.0.x/errorhandling/#unhandled-exceptions
'''
return render_react_error(500, 'Internal Server Error')
def create_app(testing=False):
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(default_config.DefaultConfig)
if testing:
app.config['TESTING'] = True
else:
# We should not load the gitignored app.conf during tests.
app.config.from_pyfile('app.conf')
flask_static_digest.init_app(app)
app.register_blueprint(routes_main.blueprint)
app.register_blueprint(routes_browse.blueprint)
app.register_blueprint(routes_api.blueprint)
app.register_blueprint(routes_file_based.blueprint)
app.register_blueprint(routes_cells.blueprint)
app.register_blueprint(routes_auth.blueprint)
app.register_blueprint(routes_markdown.blueprint)
app.register_error_handler(400, bad_request)
app.register_error_handler(401, unauthorized)
app.register_error_handler(403, forbidden)
app.register_error_handler(404, not_found)
app.register_error_handler(504, gateway_timeout)
app.register_error_handler(500, any_other_error)
@app.context_processor
def inject_template_globals():
return {
'is_authenticated': session.get('is_authenticated'),
'groups_token': session.get('groups_token'),
'user_email': session.get('user_email')
}
@app.before_request
def set_default_groups_token():
if 'groups_token' not in session:
session.update(
groups_token='',
user_email='',
is_authenticated=False)
return app
app = create_app()
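# Usage note (added for illustration): tests would typically build an isolated app
# through the factory rather than importing the module-level instance above, e.g.
#   client = create_app(testing=True).test_client()
# which enables TESTING and skips loading the gitignored app.conf.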
|
py | 7dfc60ae52ac9d30c6c7071e41390bbe70534496 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Original data format:
# data separation: \n
# data format: [url]\t[gdid]\t[raw_json_content]\n
# [raw_json_content] type: raw string of json
# , e.g., [content_dict] = json.loads(raw_json_content)
# [content_dict] type: json
# [content_dict] info:
# click_query = content_dict["click_query_list"]
# present_click_query = content_dict["present_click_query_list"]
# query separation: |
# query format: [click_cnt]\t[query]\n
"""
#! NOTE: construct_web_data.py must be run before this script
""" file_ls: [src/tgt]-[train/valid/test].txt
# (coarsely processed data format)
# src: list of raw json data: dict[field:value]
# tgt: list of raw json data: list of keyphrases
"""
"""
Pre-process Data / features files and build vocabulary
$ python preprocess.py -config config/preprocess/config-preprocess-keyphrase-kp20k.yml
"""
import codecs
import glob
import os
import shutil
import sys
import gc
import torch
from functools import partial
from collections import Counter, defaultdict
from onmt.utils.logging import init_logger, logger
from onmt.utils.misc import split_corpus
import onmt.inputters as inputters
import onmt.opts as opts
from onmt.utils.parse import ArgumentParser
from onmt.inputters.inputter import _build_fields_vocab,\
_load_vocab
def check_existing_pt_files(opt):
""" Check if there are existing .pt files to avoid overwriting them """
pattern = opt.save_data + '.{}*.pt'
for t in ['train', 'valid']:
path = pattern.format(t)
if glob.glob(path):
sys.stderr.write("Please backup existing pt files: %s, "
"to avoid overwriting them!\n" % path)
sys.exit(1)
def build_save_dataset(corpus_type, fields, src_reader, tgt_reader, opt):
assert corpus_type in ['train', 'valid']
if corpus_type == 'train':
counters = defaultdict(Counter)
srcs = opt.train_src
tgts = opt.train_tgt
ids = opt.train_ids
else:
srcs = [opt.valid_src]
tgts = [opt.valid_tgt]
ids = [None]
logger.info(opt)
for src, tgt, maybe_id in zip(srcs, tgts, ids):
logger.info("Reading source and target files: %s %s." % (src, tgt))
src_shards = split_corpus(src, opt.shard_size)
tgt_shards = split_corpus(tgt, opt.shard_size)
shard_pairs = zip(src_shards, tgt_shards)
dataset_paths = []
if (corpus_type == "train" or opt.filter_valid) and tgt is not None:
filter_pred = partial(
inputters.filter_example, use_src_len=opt.data_type == "text",
max_src_len=opt.src_seq_length, max_tgt_len=opt.tgt_seq_length)
else:
filter_pred = None
if corpus_type == "train":
existing_fields = None
if opt.src_vocab != "":
try:
logger.info("Using existing vocabulary...")
existing_fields = torch.load(opt.src_vocab)
except torch.serialization.pickle.UnpicklingError:
logger.info("Building vocab from text file...")
src_vocab, src_vocab_size = _load_vocab(
opt.src_vocab, "src", counters,
opt.src_words_min_frequency)
else:
src_vocab = None
if opt.tgt_vocab != "":
tgt_vocab, tgt_vocab_size = _load_vocab(
opt.tgt_vocab, "tgt", counters,
opt.tgt_words_min_frequency)
else:
tgt_vocab = None
for i, (src_shard, tgt_shard) in enumerate(shard_pairs):
assert len(src_shard) == len(tgt_shard)
logger.info("Building shard %d." % i)
# @memray: to be different from normal datasets
dataset = inputters.str2dataset[opt.data_type](
fields,
readers=([src_reader, tgt_reader]
if tgt_reader else [src_reader]),
data=([("src", src_shard), ("tgt", tgt_shard)]
if tgt_reader else [("src", src_shard)]),
dirs=([opt.src_dir, None]
if tgt_reader else [opt.src_dir]),
sort_key=inputters.str2sortkey[opt.data_type],
filter_pred=filter_pred
)
if corpus_type == "train" and existing_fields is None:
for ex in dataset.examples:
for name, field in fields.items():
try:
f_iter = iter(field)
except TypeError:
f_iter = [(name, field)]
all_data = [getattr(ex, name, None)]
else:
all_data = getattr(ex, name)
for (sub_n, sub_f), fd in zip(
f_iter, all_data):
has_vocab = (sub_n == 'src' and
src_vocab is not None) or \
(sub_n == 'tgt' and
tgt_vocab is not None)
if (hasattr(sub_f, 'sequential')
and sub_f.sequential and not has_vocab):
val = fd
if opt.data_type=='keyphrase' and sub_n == 'tgt':
# in this case, val is a list of phrases (list of strings (words))
for v in val:
counters[sub_n].update(v)
else:
counters[sub_n].update(val)
if maybe_id:
shard_base = corpus_type + "_" + maybe_id
else:
shard_base = corpus_type
data_path = "{:s}.{:s}.{:d}.pt".\
format(opt.save_data, shard_base, i)
dataset_paths.append(data_path)
logger.info(" * saving %sth %s data shard to %s. %d examples"
% (i, corpus_type, data_path, len(dataset.examples)))
dataset.save(data_path)
del dataset.examples
gc.collect()
del dataset
gc.collect()
if corpus_type == "train":
vocab_path = opt.save_data + '.vocab.pt'
if existing_fields is None:
fields = _build_fields_vocab(
fields, counters, opt.data_type,
opt.share_vocab, opt.vocab_size_multiple,
opt.src_vocab_size, opt.src_words_min_frequency,
opt.tgt_vocab_size, opt.tgt_words_min_frequency)
else:
fields = existing_fields
torch.save(fields, vocab_path)
def build_save_vocab(train_dataset, fields, opt):
fields = inputters.build_vocab(
train_dataset, fields, opt.data_type, opt.share_vocab,
opt.src_vocab, opt.src_vocab_size, opt.src_words_min_frequency,
opt.tgt_vocab, opt.tgt_vocab_size, opt.tgt_words_min_frequency,
vocab_size_multiple=opt.vocab_size_multiple
)
vocab_path = opt.save_data + '.vocab.pt'
torch.save(fields, vocab_path)
def count_features(path):
"""
path: location of a corpus file with whitespace-delimited tokens and
│-delimited features within the token
returns: the number of features in the dataset
"""
with codecs.open(path, "r", "utf-8") as f:
first_tok = f.readline().split(None, 1)[0]
return len(first_tok.split(u"│")) - 1
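# Example (added for illustration): for a first token like "walk│VB│O",
# count_features() reports 2, since features are │-delimited within the token.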
def main(opt):
ArgumentParser.validate_preprocess_args(opt)
torch.manual_seed(opt.seed)
if not(opt.overwrite):
check_existing_pt_files(opt)
init_logger(opt.log_file)
shutil.copy2(opt.config, os.path.dirname(opt.log_file))
logger.info(opt)
logger.info("Extracting features...")
src_nfeats = 0
tgt_nfeats = 0
for src, tgt in zip(opt.train_src, opt.train_tgt):
src_nfeats += count_features(src) if opt.data_type == 'text' \
else 0
tgt_nfeats += count_features(tgt) # tgt always text so far
logger.info(" * number of source features: %d." % src_nfeats)
logger.info(" * number of target features: %d." % tgt_nfeats)
logger.info("Building `Fields` object...")
fields = inputters.get_fields(
opt.data_type,
src_nfeats,
tgt_nfeats,
dynamic_dict=opt.dynamic_dict,
src_truncate=opt.src_seq_length_trunc,
tgt_truncate=opt.tgt_seq_length_trunc)
src_reader = inputters.str2reader[opt.data_type].from_opt(opt)
tgt_reader = inputters.str2reader[opt.data_type].from_opt(opt)
logger.info("Building & saving training data...")
build_save_dataset(
'train', fields, src_reader, tgt_reader, opt)
if opt.valid_src and opt.valid_tgt:
logger.info("Building & saving validation data...")
build_save_dataset('valid', fields, src_reader, tgt_reader, opt)
def _get_parser():
parser = ArgumentParser(description='preprocess.py')
opts.config_opts(parser)
opts.preprocess_opts(parser)
return parser
if __name__ == "__main__":
parser = _get_parser()
opt = parser.parse_args()
main(opt)
|
py | 7dfc61875a5d95739970d66227653f872fb0ef2d | from __future__ import print_function
import pdb, os, sys, shutil
import numpy as np
#import RunATMODef, OptimiseDef, SampleDef, TransmissionDef, EmissionDef, Utils
from . import RunATMODef, TransmissionDef, EmissionDef, Utils
class ATMO():
"""
"""
def __init__( self ):
"""
ATMO object.
"""
self.executable = '' # ATMO executable path
self.nice = None # nice rating to set execution priority (optional)
self.infile_path = '' # ATMO input file path
# PARAM: Parameters for ATMO
self.Debug = 1 # Debug=1 prints additional information to screen to help with debugging
self.fout = 'pt.ncdf' # name of the output pressure-temperature file
self.fin = 'temp.ncdf' # name of the input pressure-temperature file; leave this
# undefined to start from an isothermal profile
# Equation of state parameters:
self.gamma = 0. #
# GRID: Grid parameters
self.pmin = 1e-6 # the minimum initial pressure
self.pmax = 1e3 # the maximum initial pressure
self.taumin = 1e-6 # the minimum optical depth
self.taumax = 1e3 # the maximum optical depth
self.logg = 2.69 # the log of the gravity
self.teff = 100. # if irradiation is included, this represents the internal
# temperature of the planet; if irradiation is not included,
# it represents the effective temperature of the atmosphere
self.Tinit = 100. # need to check what this is for
self.isothermal = False # need to check how this works
self.ndepth = 50. # the number of levels
self.Rp = 0.995 # the planetary radius (in Jupiter radii)
self.pp_Rp = 1e-3 # pressure at the radius Rp (in bar)
self.nfreq = 250 # the number of frequency points used in the radiation scheme
self.nkmix = 30 # number of k-coefficients for gas mixture
self.nband = 32 # the number of bands to use
self.nband_std = 32 # the band used to define the optical depth grid
self.corr_k = True # flag to use the correlated-k method (if false, line-by-line is used)
self.numax = 5e6 # upper limit on the wave number
# CHEMISTRY: Chemistry parameters
self.chem = 'eq' # flag to choose which type of chemistry to use; 'ana' - analytical,
# 'eq' - equilibrium, 'neq' - non-equilibrium and 'cst' holds the
# abundances constant (+ 'man' for manual abundances)
self.abundances = {} # dictionary containing abundances for molecules to be used
# if chem=='man'
self.MdH = 0. # metallicity of the atmosphere, log base 10. [M/H] = 0 for the Sun.
# [M/H] = 2 is 100x solar
self.COratio = 0. # The carbon-oxygen ratio. A default solar value ([C/O] ~ 0.56) is
# assumed if COratio = 0. E.g. a ~2x solar value would be COratio = 1.2.
self.fAin = 'chem_dummy.ncdf' # the name of the input chemistry file; if running 'ana' or
# 'eq' this should be the chem_dummy file, otherwise it
# should be a pre-calculated equilibrium chemistry file
self.fAeqout = 'chem_eq.ncdf' # the name of the output equilibrium chemistry file
self.fAneqout = 'chem_neq.ncdf' # the name of the output non-equilibrium chemistry file
self.fcoeff = '../../chem/coeff_NASA_sc.dat' #
self.print_chem = True #
# CHEM_NEQ: Non-equilibrium chemistry parameters
self.mixing = False # flag to turn vertical mixing on/off
self.photochem = False # flag to turn photochemistry on/off
self.kzzcst = 1e9 # value of the Kzz term, if kzzcst = 0. then the value is read from 'fin'
self.nmol_eq = 107 #
self.tmax = 1e12 # the maximum integration time
self.dtmax = 1e10 # the maximum time step
self.rate_limiter = True # flag to turn on the rate limiter
self.Nmin = 1e-100 # the minimum number density
self.atol = 1e-10 # the absolute tolerance of the solver
# RADTRANS: Radiative transfer parameters
self.nrays = 16 # the number of rays
self.scatter = True # flag to turn on scattering
self.irrad = True # flag to turn on irradiation
self.firad = '' # name of the irradiation input file
self.rstar = 0.749 # the radius of the star in solar radii
self.rorbit = 0.0559 # the semi-major axis of the planet in astronomical units
self.murad = 0.5 # the inclination angle of the irradiation
self.fred = 0.5 # the amount the irradiation is reduced at the top of the atmosphere;
# e.g. fred = 0.5 for efficient horizontal redistribution
self.ftrans_spec = '' # name of the output transmission spectrum
self.fspectrum = '' # name of the output emission spectrum
self.fcfout = '' # name of the output normalised contribution function file which is
# a function of wavenumber and pressure
# OPACITY: Opacity parameters
self.opacity = 'default' # If set to 'default', uses nkap to set opacity sources.
# Alternatively, can be a list of molecules,
# e.g. opacity = [ 'H2', 'He', 'H2O', 'CO2', 'TiO' ]
self.nkap = 6 # the number of molecules used for opacities (note: ordering is hard-coded)
# Addition of each opacity source along with the previous ones, with increase
# in value of nkap are as follows:
# 1) H2 2) He 3) H2O 4) CO2 5) CO 6) CH4 7) NH3 8) Na 9) K 10) Li
# 11) Rb 12) Cs 13) TiO 14) VO 15) FeH 16) PH3 17) H2S 18) HCN
# 19) C2H2 20) SO2
self.art_haze = None # the variable to input artificial haze/scattering.
# (e.g. art_haze = 1 means one times standard rayleigh scattering,
# 2 means two times and so on. Default = 1)
self.cloud = False # flag to turn on cloud deck (True or False), default is False
        self.cloud_top = 1 # integer representing cloud top (minimum is 1 - top of the atmosphere,
# maximum is ndepth lowest level in the atmosphere). Default is 1.
self.cloud_bottom = 20 # integer representing cloud bottom (cloud_bottom should be greater
# than cloud_top). Default is 20. To check which pressure levels
# these layers correspond to, see your input p-t profile file.
self.cloud_strength = 1 # multiplication factor to increase cloud deck scattering opacity
# relative to molecular hydrogen scattering opacity.
# (e.g. 1x, 2x, 10x, 50x etc.). Default is 1
self.kap_smooth = True # smooths the opacities, important for convergence at the top of the
# atmosphere
self.kerkap_smooth = 2 # smoothing kernel for opacities
# SOLVER: ATMO solver parameters
self.solve_hydro = True # flag to solve hydrostatic equation
self.solve_energy = True # flag to solve energy equation
self.minstep = 1e-4 # minimum step of the solver
self.maxstep = 9e-1 # maximum step of the solver
self.accuracy = 1e-1 # tolerance of the solver
self.psurf = 1e-6 # the pressure at the upper boundary of the model (i.e. minimum pressure)
self.print_err = False #
self.transmission_spectrum = False # calculate transmission spectrum
self.surface_spectrum = False # calculate emission spectrum
self.hydrostatic = True # assume the PT profile is already in hydrostatic balance
self.calc_cf = True # set to True to obtain contribution function otherwise False
# CONVECTION: Convection parameters
self.alpha = 0. # the mixing length for convection
return None
def RunATMO( self ):
"""
Runs the ATMO solver given the input file.
"""
RunATMODef.Main( self )
return None
def ComputeOpacities( self, species=[ 'H2O', 'CO', 'CH4' ], odir='.' ):
"""
"""
Utils.ComputeOpacities( self, species=species, odir=odir )
return None
def ReadChem( self, ncdf_fpath='' ):
"""
"""
Utils.ReadChem( self, ncdf_fpath=ncdf_fpath )
return None
def ReadPT( self, ncdf_fpath='' ):
"""
"""
Utils.ReadPT( self, ncdf_fpath=ncdf_fpath )
return None
def PlotPT( self, ofigpath='' ):
"""
"""
Utils.PlotPT( self, ofigpath=ofigpath )
return None
def ReadTransmissionModel( self, ncdf_fpath='' ):
"""
"""
Utils.ReadTransmissionModel( self, ncdf_fpath=ncdf_fpath )
return None
def ReadEmissionModel( self, ncdf_fpath='' ):
"""
"""
Utils.ReadEmissionModel( self, ncdf_fpath=ncdf_fpath )
return None
def PlotTransmissionModel( self, ofigpath='', xscale='log' ):
"""
"""
Utils.PlotTransmissionModel( self, ofigpath=ofigpath, xscale=xscale )
return None
def Transmission( self ):
TransmissionDef.Main( self )
return None
def Emission( self ):
EmissionDef.Main( self )
return None
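# Minimal usage sketch (illustrative; the executable and file paths below are placeholders,
# not part of the original module):
#   atmo = ATMO()
#   atmo.executable = '/path/to/atmo.x'
#   atmo.infile_path = 'input.in'
#   atmo.RunATMO()        # run the solver for the configured input file
#   atmo.Transmission()   # then compute a transmission spectrum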
|
py | 7dfc627bec320ee6f5b656aaaecc98ff090f1e8f | import pandas as pd
def get_shop_timestring(timestamp:pd.Timestamp) -> str:
# Return timestamp in format expected by Shop
return timestamp.strftime('%Y%m%d%H%M%S')
def get_shop_datetime(time_string:str, time_zone_name:str) -> pd.Timestamp:
time_format = '%Y%m%d%H%M%S'
time_string = time_string[0:14]
time_string_len = len(time_string)
# Handle the following cases '%Y%m%d%H%M%S', '%Y%m%d%H%M', %Y%m%d%H' and %Y%m%d'
missing_digits = 14 - time_string_len
relevant_time_format_len = len(time_format) - missing_digits
# Make sure format string does not end with "%". These cases will still fail, but return more intelligible errors
if relevant_time_format_len % 2 == 1:
relevant_time_format_len -= 1
# Return timestamp using format string inferred from input time_string
relevant_time_format = time_format[0:relevant_time_format_len]
timestamp = pd.to_datetime(time_string, format=relevant_time_format)
if len(time_zone_name) > 0:
timestamp = timestamp.tz_localize(time_zone_name)
return timestamp
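# Illustrative usage (not part of the original module; the timezone and values are placeholders).
# A 10-digit string only carries year/month/day/hour, so the inferred format is '%Y%m%d%H'.
if __name__ == '__main__':
    print(get_shop_timestring(pd.Timestamp('2021-07-06 12:00')))  # '20210706120000'
    print(get_shop_datetime('2021070612', 'Europe/Oslo'))         # 2021-07-06 12:00:00+02:00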
|
py | 7dfc62bf66e7a6bc71e93a9007e87f2a0e0528a1 | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from dataclasses import dataclass
from pathlib import Path, PurePath
from typing import Mapping, cast
from pants.core.util_rules import subprocess_environment
from pants.core.util_rules.subprocess_environment import SubprocessEnvironmentVars
from pants.engine import process
from pants.engine.engine_aware import EngineAwareReturnType
from pants.engine.environment import Environment
from pants.engine.process import BinaryPath
from pants.engine.rules import collect_rules, rule
from pants.option.global_options import GlobalOptions
from pants.option.subsystem import Subsystem
from pants.python import binaries as python_binaries
from pants.python.binaries import PythonBinary, PythonBootstrap
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.memo import memoized_method
from pants.util.ordered_set import OrderedSet
from pants.util.strutil import create_path_env_var
class PexRuntimeEnvironment(Subsystem):
options_scope = "pex"
help = "How Pants uses Pex to run Python subprocesses."
@classmethod
def register_options(cls, register):
super().register_options(register)
# TODO(#9760): We'll want to deprecate this in favor of a global option which allows for a
# per-process override.
register(
"--executable-search-paths",
advanced=True,
type=list,
default=["<PATH>"],
metavar="<binary-paths>",
help=(
"The PATH value that will be used by the PEX subprocess and any subprocesses it "
'spawns.\n\nThe special string "<PATH>" will expand to the contents of the PATH '
"env var."
),
)
register(
"--verbosity",
advanced=True,
type=int,
default=0,
help=(
"Set the verbosity level of PEX logging, from 0 (no logging) up to 9 (max logging)."
),
)
register(
"--venv-use-symlinks",
advanced=True,
type=bool,
default=False,
help=(
"When possible, use venvs whose site-packages directories are populated with"
"symlinks.\n\nEnabling this can save space in the `--named-caches-dir` directory "
"and lead to slightly faster execution times for Pants Python goals. Some "
"distributions do not work with symlinked venvs though, so you may not be able to "
"enable this optimization as a result."
),
)
@memoized_method
def path(self, env: Environment) -> tuple[str, ...]:
def iter_path_entries():
for entry in self.options.executable_search_paths:
if entry == "<PATH>":
path = env.get("PATH")
if path:
yield from path.split(os.pathsep)
else:
yield entry
return tuple(OrderedSet(iter_path_entries()))
@property
def verbosity(self) -> int:
level = cast(int, self.options.verbosity)
if level < 0 or level > 9:
raise ValueError("verbosity level must be between 0 and 9")
return level
@property
def venv_use_symlinks(self) -> bool:
return cast(bool, self.options.venv_use_symlinks)
class PythonExecutable(BinaryPath, EngineAwareReturnType):
"""The BinaryPath of a Python executable."""
def message(self) -> str:
return f"Selected {self.path} to run PEXes with."
@classmethod
def from_python_binary(cls, python_binary: PythonBinary) -> PythonExecutable:
"""Converts from PythonBinary to PythonExecutable.
The PythonBinary type is a singleton representing the Python that is used for script
execution by `@rule`s. On the other hand, there may be multiple PythonExecutables, since
they are subject to a user's interpreter constraints.
"""
return cls(path=python_binary.path, fingerprint=python_binary.fingerprint)
@dataclass(frozen=True)
class PexEnvironment(EngineAwareReturnType):
path: tuple[str, ...]
interpreter_search_paths: tuple[str, ...]
subprocess_environment_dict: FrozenDict[str, str]
named_caches_dir: PurePath
bootstrap_python: PythonExecutable | None = None
venv_use_symlinks: bool = False
_PEX_ROOT_DIRNAME = "pex_root"
def level(self) -> LogLevel:
return LogLevel.DEBUG if self.bootstrap_python else LogLevel.WARN
def message(self) -> str:
if not self.bootstrap_python:
return (
"No bootstrap Python executable could be found from the option "
"`interpreter_search_paths` in the `[python]` scope. Will attempt to run "
"PEXes directly."
)
return f"Selected {self.bootstrap_python.path} to bootstrap PEXes with."
def in_sandbox(self, *, working_directory: str | None) -> CompletePexEnvironment:
pex_root = PurePath(".cache") / self._PEX_ROOT_DIRNAME
return CompletePexEnvironment(
_pex_environment=self,
pex_root=pex_root,
_working_directory=PurePath(working_directory) if working_directory else None,
append_only_caches=FrozenDict({self._PEX_ROOT_DIRNAME: str(pex_root)}),
)
def in_workspace(self) -> CompletePexEnvironment:
# N.B.: When running in the workspace the engine doesn't offer an append_only_caches
# service to setup a symlink to our named cache for us. As such, we point the PEX_ROOT
# directly at the underlying append only cache in that case to re-use results there and
# to keep the workspace from being dirtied by the creation of a new Pex cache rooted
# there.
pex_root = self.named_caches_dir / self._PEX_ROOT_DIRNAME
return CompletePexEnvironment(
_pex_environment=self,
pex_root=pex_root,
_working_directory=None,
append_only_caches=FrozenDict(),
)
def venv_site_packages_copies_option(self, use_copies: bool) -> str:
if self.venv_use_symlinks and not use_copies:
return "--no-venv-site-packages-copies"
return "--venv-site-packages-copies"
@rule(desc="Prepare environment for running PEXes", level=LogLevel.DEBUG)
async def find_pex_python(
python_bootstrap: PythonBootstrap,
python_binary: PythonBinary,
pex_runtime_env: PexRuntimeEnvironment,
subprocess_env_vars: SubprocessEnvironmentVars,
global_options: GlobalOptions,
) -> PexEnvironment:
return PexEnvironment(
path=pex_runtime_env.path(python_bootstrap.environment),
interpreter_search_paths=tuple(python_bootstrap.interpreter_search_paths()),
subprocess_environment_dict=subprocess_env_vars.vars,
# TODO: This path normalization is duplicated with `engine_initializer.py`. How can we do
# the normalization only once, via the options system?
named_caches_dir=Path(global_options.options.named_caches_dir).resolve(),
bootstrap_python=PythonExecutable.from_python_binary(python_binary),
venv_use_symlinks=pex_runtime_env.venv_use_symlinks,
)
@dataclass(frozen=True)
class CompletePexEnvironment:
_pex_environment: PexEnvironment
pex_root: PurePath
_working_directory: PurePath | None
append_only_caches: FrozenDict[str, str]
_PEX_ROOT_DIRNAME = "pex_root"
@property
def interpreter_search_paths(self) -> tuple[str, ...]:
return self._pex_environment.interpreter_search_paths
def create_argv(
self, pex_filepath: str, *args: str, python: PythonExecutable | None = None
) -> tuple[str, ...]:
pex_relpath = (
os.path.relpath(pex_filepath, self._working_directory)
if self._working_directory
else pex_filepath
)
python = python or self._pex_environment.bootstrap_python
if python:
return (python.path, pex_relpath, *args)
if os.path.basename(pex_relpath) == pex_relpath:
return (f"./{pex_relpath}", *args)
return (pex_relpath, *args)
def environment_dict(self, *, python_configured: bool) -> Mapping[str, str]:
"""The environment to use for running anything with PEX.
If the Process is run with a pre-selected Python interpreter, set `python_configured=True`
to avoid PEX from trying to find a new interpreter.
"""
d = dict(
PATH=create_path_env_var(self._pex_environment.path),
PEX_IGNORE_RCFILES="true",
PEX_ROOT=os.path.relpath(self.pex_root, self._working_directory)
if self._working_directory
else str(self.pex_root),
**self._pex_environment.subprocess_environment_dict,
)
# NB: We only set `PEX_PYTHON_PATH` if the Python interpreter has not already been
# pre-selected by Pants. Otherwise, Pex would inadvertently try to find another interpreter
# when running PEXes. (Creating a PEX will ignore this env var in favor of `--python-path`.)
if not python_configured:
d["PEX_PYTHON_PATH"] = create_path_env_var(self.interpreter_search_paths)
return d
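# Usage sketch (illustrative, not part of the original module): given a resolved
# PexEnvironment `pex_env`, a sandboxed invocation is assembled roughly as
#   complete = pex_env.in_sandbox(working_directory=None)
#   argv = complete.create_argv("my.pex", "--version")
#   env = complete.environment_dict(python_configured=True)
# where `env` supplies PATH, PEX_ROOT and PEX_IGNORE_RCFILES to the subprocess.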
def rules():
return [
*collect_rules(),
*process.rules(),
*subprocess_environment.rules(),
*python_binaries.rules(),
]
|
py | 7dfc63af239e98ed9b1c8697c9bf6eb50825cbdb | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = models.CharField(_('Name of User'), blank=True, max_length=255, null=True)
firstName = models.CharField(max_length=70, null=True)
lastName = models.CharField(max_length=70, null=True)
USER_TYPES = (
('D', 'Dirty'),
('W', 'Washr')
)
type = models.CharField(max_length=1, choices=USER_TYPES, null=True)
def __str__(self):
return self.username
def get_absolute_url(self):
return reverse('users:detail', kwargs={'username': self.username})
class Addresses(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
address = models.CharField(max_length=200, null=True)
city = models.TextField(null=True)
state = models.TextField(null=True)
zipCode = models.TextField(null=True)
locationName = models.CharField(max_length=50, null=True)
class PaymentAccount(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
email = models.EmailField(null=True)
|
py | 7dfc63f93db27ef8baab234f0bf6e57fd9338950 | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteApplicationEndpointResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'request_id': 'str'
}
attribute_map = {
'request_id': 'request_id'
}
def __init__(self, request_id=None):
"""DeleteApplicationEndpointResponse - a model defined in huaweicloud sdk"""
super(DeleteApplicationEndpointResponse, self).__init__()
self._request_id = None
self.discriminator = None
if request_id is not None:
self.request_id = request_id
@property
def request_id(self):
"""Gets the request_id of this DeleteApplicationEndpointResponse.
        The unique request ID.
:return: The request_id of this DeleteApplicationEndpointResponse.
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this DeleteApplicationEndpointResponse.
        The unique request ID.
:param request_id: The request_id of this DeleteApplicationEndpointResponse.
:type: str
"""
self._request_id = request_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteApplicationEndpointResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 7dfc643afcb9d2a4645d9490e9fdc2208f4f9ab9 | from .run_focusDB import get_coverage, downsample, make_riboseed_cmd, sralist,\
pob, referenceNotGoodEnoughError, check_riboSeed_outcome, riboSeedError, \
riboSeedUnsuccessfulError
import os
import shutil
import unittest
import subprocess
import sys
import logging as logger
from pathlib import Path
from nose.tools.nontrivial import with_setup
class coverageTests(unittest.TestCase):
""" tests for coverage and downsample functions in run_all.py
"""
def setUp(self):
self.test_dir = os.path.join(os.path.dirname(__file__),
"downsample_test_result")
self.data_dir = os.path.join(os.path.dirname(__file__), "test_data", "")
self.readsgunzipd1 = os.path.join(self.data_dir, "test_reads1.fq")
self.readsgzipd1 = os.path.join(self.data_dir, "test_reads1.fq.gz")
self.readsgunzipd2 = os.path.join(self.data_dir, "test_reads1.fq")
self.readsgzipd2 = os.path.join(self.data_dir, "test_reads1.fq.gz")
self.downsample_dir = os.path.join(self.test_dir, "downsampled")
def tearDown(self):
"tear down test fixtures"
for dir in [self.test_dir, self.downsample_dir]:
if os.path.exists(dir):
shutil.rmtree(dir)
@unittest.skipIf("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
"skipping this test on travis.CI")
def test_coverage(self):
#genome is from NC_011750.1 ~5132068bp at 10X coverage
#reads are generated from this, under 16db/py16db/generator.py
reads1 = self.readsgunzipd1
reads2 = self.readsgunzipd2
test_result = get_coverage(approx_length=5132068, fastq1=reads1, fastq2=reads2, read_length=150, logger=logger)
print(test_result)
assert round(1.00, 2) == round(test_result, 2)
return()
@unittest.skipIf("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
"skipping this test on travis.CI")
def test_downsample_PE(self):
#genome is built from NC_011750.1 ~5132068bp at 10X coverage
#os.makedirs(self.downsample_dir, )
reads1, reads2 = downsample(
read_length=150,
approx_length=5132068,
fastq1=self.readsgunzipd1,
fastq2=self.readsgunzipd2,
destination=self.downsample_dir,
mincoverage=.5,
maxcoverage=2,
run=True,
logger=logger)
down_cov1 = get_coverage(read_length=150, approx_length=5132068, fastq1=reads1, fastq2=reads2, logger=logger)
print(down_cov1)
# down_cov2 = get_coverage(read_length=150, approx_length=5132068, fastq1=reads2, fastq2=reads2, logger=logger)
assert round(1.0110460344640795, 1) == round(down_cov1, 1)
# assert 2.0110460344640795 == down_cov2
@unittest.skipIf("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
"skipping this test on travis.CI")
def test_downsample_SE(self):
reads1, reads2 = downsample(
read_length=150,
approx_length=5132068,
fastq1=self.readsgunzipd1,
fastq2=self.readsgunzipd2,
destination=self.downsample_dir,
mincoverage=.5,
maxcoverage=2,
run=True,
logger=logger)
down_cov = get_coverage(read_length=150, approx_length=5132068, fastq1=reads1, fastq2=reads2, logger=logger)
print(down_cov)
assert round(1.00, 2 ) == round(down_cov, 2)
class sralistTest(unittest.TestCase):
def setUp(self):
self.sralist = os.path.join(os.path.dirname(__file__), "test_data", "test_sralist.txt")
def test_sra_list(self):
test_result = sralist(list=self.sralist)
print(test_result)
assert test_result == ["ERX3310125", "ERX3289350", "ERX3289335", "SRX2141371"]
class riboseedCmdTests(unittest.TestCase):
    """ tests for make_riboseed_cmd in run_focusDB.py
    """
def setUp(self):
self.test_dir = os.path.join(os.path.dirname(__file__),
"riboSeed")
self.data_dir = os.path.join(os.path.dirname(__file__), "test_data", "")
self.readsgunzipd1 = os.path.join(self.data_dir, "test_reads1.fq")
# self.readsgzipd1 = os.path.join(self.data_dir, "test_reads1.fq.gz")
self.readsgunzipd2 = os.path.join(self.data_dir, "test_reads2.fq")
# self.readsgzipd2 = os.path.join(self.data_dir, "test_reads2.fq.gz")
self.sra = os.path.join(os.path.dirname(__file__), "test_data",
"ecoli", "NC_011750.1.fna")
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir)
def tearDown(self):
"tear down test fixtures"
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir)
@unittest.skipIf("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
"skipping this test on travis.CI")
def test_riboseed(self):
readsf = self.readsgunzipd1
readsr = self.readsgunzipd2
output_dir = self.test_dir
        os.makedirs(output_dir, exist_ok=True)
sra = (self.sra)
test_result = make_riboseed_cmd(sra=sra, readsf=readsf,
readsr=readsr, cores="4", threads="1",
subassembler="spades",
memory=8, just_seed=True,
sge=False,
skip_control=True,
output=output_dir, logger=logger)
target_cmd = "ribo run -r /Users/alexandranolan/Desktop/16db/py16db/test_data/ecoli/NC_011750.1.fna -F /Users/alexandranolan/Desktop/16db/py16db/test_data/test_reads1.fq -R /Users/alexandranolan/Desktop/16db/py16db/test_data/test_reads2.fq --cores 4 --threads 1 -v 1 -o /Users/alexandranolan/Desktop/16db/py16db/riboSeed --serialize --subassembler spades --just_seed --skip_control --stages none --memory 8"
for part in range(len(target_cmd.split(" "))):
if part not in [3, 5, 7, 15]:
print(test_result.split(" ")[part] )
print(target_cmd.split(" ")[part] )
assert test_result.split(" ")[part] == target_cmd.split(" ")[part]
class bestrefTest(unittest.TestCase):
""" test for pob function in run_all.py
"""
def setUp(self):
self.test_dir = os.path.join(os.path.dirname(__file__),
"pob_test_result", "")
self.out_dir = os.path.join(self.test_dir, "plentyofbugs")
self.data_dir = os.path.join(os.path.dirname(__file__), "test_data")
self.plasmids_dir = os.path.join(self.data_dir, "ecoli", "")
self.readsgunzipd = os.path.join(self.data_dir, "test_reads1.fq")
self.readsgzipd = os.path.join(self.data_dir, "test_reads1.fq.gz")
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir)
if os.path.exists(os.path.join(self.plasmids_dir, "reference.msh")):
os.remove(os.path.join(self.plasmids_dir, "reference.msh"))
def tearDown(self):
"tear down test fixtures"
shutil.rmtree(self.test_dir)
@unittest.skipIf("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
"skipping this test on travis.CI")
def test_pob(self):
plasmids = (self.plasmids_dir)
reads = (self.readsgunzipd)
os.makedirs(self.test_dir)
output_dir= (self.out_dir)
with self.assertRaises(referenceNotGoodEnoughError):
bad_test_result = pob(genomes_dir=plasmids, readsf=reads, output_dir=output_dir, maxdist=.05, logger=logger)
test_result = pob(genomes_dir=plasmids, readsf=reads, output_dir=output_dir + "2", maxdist=.3, logger=logger)
print(test_result)
assert round(0.295981, 2) == round(test_result[1], 2)
class parseRiboSeedTest(unittest.TestCase):
def setUp(self):
self.test_dir = os.path.join(os.path.dirname(__file__),
"test_parse_riboSeed", "")
self.status_file = os.path.join(self.test_dir, "tmp_status")
# make test directories for all (common) possible outcomes:
for d in [os.path.join(self.test_dir, x) for x in
["fast_pass", "fast_fail", "full_pass", "full_fail"]]:
for sd in [os.path.join(d, "seed", y) for y in
["final_long_reads", "final_de_fere_novo_assembly"]]:
if not os.path.exists(sd):
os.makedirs(sd)
# "fast_pass", "final_long_reads", "riboSeedContigs.fasta""
# these lines are long, deal with it
Path(os.path.join(self.test_dir, "fast_pass", "seed", "final_long_reads", "riboSeedContigs.fasta")).touch()
# dont write one for fail
Path(os.path.join(self.test_dir, "full_pass", "seed", "final_long_reads", "riboSeedContigs.fasta")).touch()
Path(os.path.join(self.test_dir, "full_pass", "seed", "final_de_fere_novo_assembly", "contigs.fasta")).touch()
Path(os.path.join(self.test_dir, "full_fail", "seed", "final_long_reads", "riboSeedContigs.fasta")).touch()
# dont write final contigs for fail
def test_check_riboSeed_outcome_baderror(self):
with self.assertRaises(riboSeedError):
contigs = check_riboSeed_outcome(
ribodir=os.path.join(
self.test_dir, "thisdirdoesntevenlikeexistyouknow?"),
status_file=self.status_file)
def test_check_riboSeed_outcome_fast_nosuccess(self):
with self.assertRaises(riboSeedUnsuccessfulError):
contigs = check_riboSeed_outcome(
ribodir=os.path.join(self.test_dir, "fast_fail"),
status_file=self.status_file)
def test_check_riboSeed_outcome_full_nosuccess(self):
contigs = check_riboSeed_outcome(
ribodir=os.path.join(self.test_dir, "full_fail"),
status_file=self.status_file)
self.assertEquals(contigs["full"], None)
def test_check_riboSeed_outcome_contigs(self):
contigs = check_riboSeed_outcome(
ribodir=os.path.join(self.test_dir, "fast_pass"),
status_file=self.status_file)
self.assertEquals(contigs["full"], None)
self.assertTrue(contigs["fast"] is not None)
def test_check_riboSeed_outcome_full_fail(self):
contigs = check_riboSeed_outcome(
ribodir=os.path.join(self.test_dir, "full_fail"),
status_file=self.status_file)
self.assertEquals(contigs["full"], None)
self.assertTrue(contigs["fast"] is not None)
|
py | 7dfc649ca4a9daccbca937288a2710a3b555a3e1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from netests.constants import NOT_SET
from netests.tools.cli import parse_textfsm
from netests.protocols.cdp import CDP, ListCDP
from netests.mappings import mapping_sys_capabilities
def _ios_cdp_ssh_converter(
hostname: str,
cmd_output,
options={}
) -> ListCDP:
cmd_output = parse_textfsm(
content=cmd_output,
template_file="cisco_ios_show_cdp_neighbors_detail.textfsm"
)
cdp_neighbors_lst = ListCDP(
cdp_neighbors_lst=list()
)
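    # Each parsed row follows the textfsm template's column order; the indices below assume
    # [dest_host, mgmt_ip, platform, remote_port, local_port, software_version, capabilities].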
for nei in cmd_output:
neighbor_type_lst = list()
if isinstance(nei[6], list):
for sys_capability in nei[6]:
neighbor_type_lst.append(
mapping_sys_capabilities(
str(sys_capability).capitalize()
)
)
else:
neighbor_type_lst.append(nei[6])
cdp_neighbors_lst.cdp_neighbors_lst.append(
CDP(
local_name=hostname,
local_port=nei[4] if nei[4] != '' else NOT_SET,
neighbor_mgmt_ip=nei[1] if nei[1] != '' else NOT_SET,
neighbor_name=nei[0] if nei[0] != '' else NOT_SET,
neighbor_port=nei[3] if nei[3] != '' else NOT_SET,
neighbor_os=nei[5] if nei[5] != '' else NOT_SET,
neighbor_type=neighbor_type_lst,
options=options
)
)
return cdp_neighbors_lst
|
py | 7dfc64be72950b49735c0a7f7867c6cd45df6cbf | #################################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Dmitry Sovetov
#
# https://github.com/dmsovetov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#################################################################################
from collections import namedtuple
import os
import tempfile
# Define named tuple for environment configuration (unused: shadowed by the Configuration class below)
Configuration = namedtuple('Configuration', ['home', 'cmake', 'android', 'emscripten'])
# Environment variable that points to a Dreemchest home directory
DREEMCHEST_HOME = 'DREEMCHEST_HOME'
# Environemt variable that points to a CMake folder used by Dreemchest
DREEMCHEST_CMAKE = 'DREEMCHEST_CMAKE'
# Environment variable that points to a CMake bin folder
DREEMCHEST_CMAKE_BIN = 'DREEMCHEST_CMAKE_BIN'
# Environment variable that points to Android SDK used by Dreemchest
DREEMCHEST_ANDROID = 'DREEMCHEST_ANDROID'
# Environment variable that points to Emscripten SDK used by Dreemchest
DREEMCHEST_EMSCRIPTEN = 'DREEMCHEST_EMSCRIPTEN'
class Configuration:
"""An active environment configuration"""
def __init__(self):
# Load home directory
if DREEMCHEST_HOME not in os.environ.keys():
raise Exception("'%s' environment variable should point to a Dreemchest home directory." % DREEMCHEST_HOME)
        self._home = os.environ[DREEMCHEST_HOME]
# Load CMake directory
if DREEMCHEST_CMAKE_BIN not in os.environ.keys():
raise Exception("'%s' environment variable should point to a CMake directory." % DREEMCHEST_CMAKE_BIN)
self._cmake = os.environ[DREEMCHEST_CMAKE_BIN]
# Load Android SDK directory
self._android = None
if DREEMCHEST_ANDROID in os.environ.keys():
self._android = os.environ[DREEMCHEST_ANDROID]
# Load Emscripten SDK directory
self._emscripten = None
if DREEMCHEST_EMSCRIPTEN in os.environ.keys():
self._emscripten = os.environ[DREEMCHEST_EMSCRIPTEN]
@property
def home(self):
"""Returns the Dreemchest home directory"""
return self._home
@property
def cmake(self):
"""Returns CMake home directory"""
return self._cmake
@property
def emscripten(self):
"""Returns the Emscripten SDK home directory"""
return self._emscripten
@property
def emscripten_toolchain(self):
if self.emscripten is None:
return None
return os.path.join(self.emscripten, 'cmake', 'Modules', 'Platform', 'Emscripten.cmake')
@property
def ios_toolchain(self):
"""Returns an iOS toolchain file"""
return os.path.join(self.home, 'CMake', 'Toolchains', 'iOS.cmake')
@property
def android_toolchain(self):
"""Returns an Android toolchain file"""
return os.path.join(self.home, 'CMake', 'Toolchains', 'Android.cmake')
@property
def android(self):
"""Returns the Android SDK home directory"""
return self._android
@property
def android_ndk(self):
"""Returns the Android NDK home directory"""
return os.path.join(self.android, 'ndk-bundle')
@property
def dependencies(self):
"""Returns a directory where all precompiled dependencies are stored"""
return os.path.join(self.build_dir, 'Dependencies')
@property
def build_dir(self):
"""Returns a build directory"""
return os.path.join(self.home, 'Build')
@property
def prebuilt(self):
"""Returns an prebuilt directory path"""
return os.path.join(self.build_dir, 'Prebuilt')
@property
def externals(self):
"""Returns externals source directory"""
return os.path.join(self.home, 'Externals')
@property
def source(self):
"""Returns engine source directory"""
return os.path.join(self.home, 'Source')
@property
def projects(self):
"""Returns the Projects directory path"""
return os.path.join(self.home, 'Projects')
@property
def bootstrap_temp_dir(self):
"""Returns a temporary directory where to store all intermediate artifacts for bootstrap process"""
return os.path.join(tempfile.gettempdir(), 'Bootstrap')
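# Usage sketch (illustrative; the directory values are placeholders): with the environment
# prepared, load() returns a ready Configuration instance, e.g.
#   os.environ[DREEMCHEST_HOME] = '/path/to/dreemchest'
#   os.environ[DREEMCHEST_CMAKE_BIN] = '/path/to/cmake/bin'
#   cfg = load()
#   print(cfg.build_dir, cfg.ios_toolchain)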
def load():
"""Loads an active configuration from environment"""
return Configuration() |
py | 7dfc6505b492c1c3d6247528c5cd60563ce9afaf | import nox
@nox.session
def tests(session):
session.install('pytest')
session.run('pytest') |
py | 7dfc6509639d2eaf93171be935651ef09ea2a3db | from bitcoinnano.i18n import _
fullname = _('Email')
description = _("Send and receive payment request with an email account")
available_for = ['qt']
|
py | 7dfc6594266acb055c1ea13594cb47a0324846e2 | #coding=utf-8
lang_en = {
'SMTP_TIME_OUT':'SMTP server connection timed out',
'MAIL_SENT':'Email sent',
'MAILGUN_MISS':'Mailgun settings are missing',
    'MAILGUN_EXECPTION':'{error} exception occurred while handling your request',
'MAILGUN_ERR_SET':'Mailgun settings are incorrect',
'MAIL_MSG_FORM':'Message from {0}',
'MAIL_MSG_TEXT':'Please click the following link to confirm your email address for {ctf_name}: {url}/{token}\n\nYour Team-Token is [{team_token}] Please save it',
'MAIL_MSG_FORGOT':'Did you initiate a password reset? Click the following link to reset your password:\n\n{0}/{1}\n\n',
'INVIDE_USER_PASS':'Your username or password is incorrect',
'INVIDE_USER':'[{date}] {ip} - submitted invalid account information',
'INVIDE_PASS':'[{date}] {ip} - submitted invalid password for {username}',
'USER_LOGINED':'[{date}] {ip} - {username} logged in',
'USER_REGISTRED':'[{date}] {ip} - {username} registered with {email}',
'USER_REG_SUCCESS':"You've successfully registered for {}",
'USER_REG_WARN':'[{date}] {ip} - {username} registered (UNCONFIRMED) with {email}',
'INVIDE_EMAIL':'Please enter a valid email address',
'TOO_SHORT_TEAM':'Pick a longer team name',
'TOO_LONG_PASS':'Pick a shorter password',
'TOO_SHORT_PASS':'Pick a longer password',
'EMAIL_HAVE_USE':'Email has already been used',
'EMAIL_NOT_TEAM':'Team name cannot be an email address',
'TEAM_EXIST':'That team name is already taken',
'FORGOT_PASS_NOTICE':'If that account exists you will receive an email, please check your inbox',
'EMAIL_NOT_CONFIG':'Email could not be sent due to server misconfiguration',
'PASS_HAVE_RESET':'[{date}] {ip} - successful password reset for {username}',
'LINK_EXPIRED':'Your link has expired',
'INVIDE_RESET_TOKEN':'Your reset token is invalid',
'EMAIL_CF_SENT':'Your confirmation email has been resent!',
'EMAIL_CF_RESENT':'[{date}] {ip} - {username} initiated a confirmation email resend',
'USER_HAVE_CM':'[{date}] {ip} - {username} confirmed their account',
'NOT_ENOUGH_POINT':'Not enough points',
'HIT_FOR':'Hint for {}',
'IS_PAUSED':'{} is paused',
'NOT_START':'{} has not started yet',
'HAS_END':'{} has ended',
'SUBMIT_TOO_FAST':"You're submitting keys too fast. Slow down.",
'ZERO_CAN_TRY':"You have 0 tries remaining",
'MANY_CAN_TRY':'{} You have {} {} remaining.',
'HAVE_SOLVE':'You already solved this',
'MUST_LOGGED':'You must be logged in to solve a challenge',
'SCORE_HIDDEN':'Scores are currently hidden',
'MISS_URL_ROUTE':'Missing URL route',
'SEARCH_ERROR1':'Your ID search term is not numeric',
'NEED_TEAM_NAME':'The team requires a name',
'NEED_TEAM_EMAIL':'The team requires an email',
'NEED_TEAM_PASS':'The team requires a password',
'INVIDE_LINK_FORMAT':'Websites must start with http:// or https://',
'MISS_INFO':"Missing information",
'T_CORRECT':"Correct",
'T_INCORRECT':"Incorrect",
'PASS_NOT_MATCH':"Your old password doesn't match what we have."
}
lang_zh_cn = {
'SMTP_TIME_OUT':'连接SMTP服务器超时,请检查端口是否被禁用',
'MAIL_SENT':'邮件已发送',
'MAILGUN_MISS':'Mailgun未设置',
'MAILGUN_EXECPTION':'请求时发生错误 {error}',
'MAILGUN_ERR_SET':'Mailgun 设置错误',
'MAIL_MSG_FORM':'您有一封来自 {0} 的邮件',
'MAIL_MSG_TEXT':'请点击下面的链接确认您在 {ctf_name} 的邮箱: {url}/{token}\n\n您的 Team-Token 是 [{team_token}] 请妥善保存',
'MAIL_MSG_FORGOT':'确定重置密码? 请点击下面的链接重置您的密码:\n\n{0}/{1}\n\n',
'INVIDE_USER_PASS':'用户名或密码错误',
'INVIDE_USER':'[{date}] {ip} - submitted invalid account information',
'INVIDE_PASS':'[{date}] {ip} - submitted invalid password for {username}',
'USER_LOGINED':'[{date}] {ip} - {username} logged in',
'USER_REGISTRED':'[{date}] {ip} - {username} registered with {email}',
'USER_REG_SUCCESS':"您已经成功注册 {}",
'USER_REG_WARN':'[{date}] {ip} - {username} registered (UNCONFIRMED) with {email}',
'INVIDE_EMAIL':'请输入一个有效的邮箱地址',
'TOO_SHORT_TEAM':'战队名太短',
'TOO_LONG_PASS':'密码太长',
'TOO_SHORT_PASS':'密码太短',
'EMAIL_HAVE_USE':'邮箱地址已被使用',
'EMAIL_NOT_TEAM':'战队名不能是邮箱地址',
'TEAM_EXIST':'战队名已经被抢先注册了',
'FORGOT_PASS_NOTICE':'如果账户信息填写正确,您将会收到一条邮件',
'EMAIL_NOT_CONFIG':'邮箱系统未配置,请联系管理员',
'PASS_HAVE_RESET':'[{date}] {ip} - successful password reset for {username}',
'LINK_EXPIRED':'您的链接已经过期了哦',
'INVIDE_RESET_TOKEN':'您的重置口令不对哦',
'EMAIL_CF_SENT':'您的验证邮件已经重新发送!',
'EMAIL_CF_RESENT':'[{date}] {ip} - {username} initiated a confirmation email resend',
'USER_HAVE_CM':'[{date}] {ip} - {username} confirmed their account',
'NOT_ENOUGH_POINT':'没有足够的分值',
'HIT_FOR':'Hint for {}',
'IS_PAUSED':'{} 已经暂停',
'NOT_START':'{} 还没开始',
'HAS_END':'{} 已经停止',
'SUBMIT_TOO_FAST':"神了,这么快,再等会再提交吧!",
'ZERO_CAN_TRY':"您的尝试次数已用完",
'MANY_CAN_TRY':'{} 您还剩 {} {} 机会.',
'HAVE_SOLVE':'您已经解决该问题了',
'MUST_LOGGED':'必须登陆才可以答题哦',
'SCORE_HIDDEN':'当前分数已隐藏',
'MISS_URL_ROUTE':'Missing URL route',
'SEARCH_ERROR1':'ID是一个数值哦',
'NEED_TEAM_NAME':'战队怎么能没名字呢?',
'NEED_TEAM_EMAIL':'战队怎么能没邮箱呢?',
'NEED_TEAM_PASS':'战队怎么能没密码呢?',
'INVIDE_LINK_FORMAT':'网址必须以 http:// 或 https:// 开头',
'MISS_INFO':"信息丢失",
'T_CORRECT':"Correct",
'T_INCORRECT':"Incorrect",
'PASS_NOT_MATCH':"原密码错误"
}
langs = {'en': lang_en,'zh_cn':lang_zh_cn}
|
py | 7dfc65f1be81a0e2759d0c07636216f24deb500f | import yaml
import pathlib
import numpy as np
from scipy.optimize import bisect
from scipy.interpolate import interp1d, interp2d, griddata, Rbf
PARACRINE = 0.23
INTRINSIC = 1-PARACRINE
class Cell(object):
def __init__(self, type):
data = pathlib.Path(__file__).parent / "parameters.yaml"
with open(data, "r") as stream:
try:
par = yaml.safe_load(stream)[type]
except yaml.YAMLError as exc:
print(exc)
self.par = par
self.mitos = 1
# GLYCOLYSIS
def J_G6P(self, G):
J_max, K_m = self.par["J_max"], self.par["K_m"]
return J_max * G**2 / (K_m**2 + G**2)
def J_ATP_gly(self, G):
return 2*self.J_G6P(G)
def J_NADH_gly(self, G):
return 2*self.J_G6P(G)
def J_pyr(self, G):
return 2*self.J_G6P(G)
def J_ATP_NADH_gly(self, G):
p_L = self.par["p_L"]
return 1.5*(self.J_NADH_gly(G)-p_L*self.J_pyr(G))
# TCA CYCLE
def J_NADH_pyr(self, G):
p_L, p_TCA = self.par["p_L"], self.par["p_TCA"]
return 5*p_TCA*(1-p_L)*self.J_pyr(G)
def J_ATP_NADH_pyr(self, G):
return 2.5*self.J_NADH_pyr(G)
# OXYGEN INPUT
def J_O2_G(self, G):
p_L = self.par["p_L"]
return 0.5*(self.J_NADH_pyr(G)+self.J_NADH_gly(G)-p_L*self.J_pyr(G))
def J_O2(self, G):
J_O2_0, k_O2 = self.par["J_O2_0"], self.par["k_O2"]
alpha = k_O2*self.J_G6P(G) + J_O2_0
J_O2_1, K_m_O2 = self.par["J_O2_1"], self.par["K_m_O2"]
n_O2 = self.par["n_O2"]
beta = J_O2_1*self.J_G6P(G)**n_O2/(K_m_O2**n_O2+self.J_G6P(G)**n_O2)
return alpha + beta
# BETA-OXIDATION
def J_NADH_FFA(self, G):
return 2*(self.J_O2(G)-self.J_O2_G(G))
def J_ATP_NADH_FFA(self, G):
        return 2.3*self.J_NADH_FFA(G)  # changed from 2.5
# OXIDATIVE ATP PRODUCTION
def J_ATP_ox(self, G):
sum = self.J_ATP_NADH_gly(G)
sum += self.J_ATP_NADH_pyr(G)
sum += self.J_ATP_NADH_FFA(G)
return self.mitos*sum
# ATP PRODUCTION/HYDROLYSIS
def J_ATP(self, G):
return self.J_ATP_gly(G) + self.J_ATP_ox(G)
def J_ATPase(self, ATP):
k_ATPase, K_m_ATPase = self.par["k_ATPase"], self.par["K_m_ATPase"]
return k_ATPase*ATP/(K_m_ATPase + ATP)
# AXP CONCENTRATIONS
def ATP(self, G):
k_ATPase, K_m_ATPase = self.par["k_ATPase"], self.par["K_m_ATPase"]
return K_m_ATPase*self.J_ATP(G)/(k_ATPase-self.J_ATP(G))
def ADP(self, G):
A_tot = self.par["A_tot"]
return A_tot - self.ATP(G)
def RAT(self, G):
return self.ATP(G)/self.ADP(G)
# KATP CHANNEL CONDUCTANCE
def g_K_ATP(self, G):
ag_K_ATP = self.par["ag_K_ATP"]
MgADP = 0.165*self.ADP(G)
ADP3 = 0.135*self.ADP(G)
ATP4 = 0.05*self.ATP(G)
up = 0.08*(1+2*MgADP/17)+0.89*(MgADP/17)**2
down = (1+MgADP/17)**2*(1+ADP3/26+ATP4/1)
return ag_K_ATP*up/down
def g_K_ATP_vec(self):
return np.vectorize(self.g_K_ATP)
# Get glucose from gKATP
def glucose(self, gkatp):
f = lambda g: self.g_K_ATP(g)-gkatp
try:
result = bisect(f, 0, 100)
except ValueError as e:
if str(e) != "f(a) and f(b) must have different signs":
raise ValueError(e)
            if gkatp > self.g_K_ATP(0):
                # the requested conductance exceeds the zero-glucose maximum, so it is unreachable
                result = np.nan
            else:
                # the conductance never falls to the target within the 0-100 search range
                result = np.inf
return result
def glucose_vec(self, gKATP):
return np.vectorize(self.glucose)(gKATP)
# -------------------------- HORMONE SECRETION -------------------------- #
def f_RS(self, gKATP):
g_s_2, n_s = self.par["g_s_2"], self.par["n_s"]
return (g_s_2**n_s)/(g_s_2**n_s + gKATP**n_s)
def RS(self, gKATP):
RS_0, g_K_ATP_ms = self.par["RS_0"], self.par["g_K_ATP_ms"]
return (1 - RS_0)*self.f_RS(gKATP)/self.f_RS(g_K_ATP_ms) + RS_0
class Beta(Cell):
def __init__(self):
super(Beta, self).__init__("beta")
def norm_data(data):
return (data-np.min(data))/(np.max(data)-np.min(data))
class Alpha(Cell):
def __init__(self):
super(Alpha, self).__init__("alpha")
self.beta_mitos = 1
cAMP_sAC_path = pathlib.Path(__file__).parent / "cAMP_sAC.txt"
self.cAMP_sAC_data = np.loadtxt(cAMP_sAC_path).T
self.cAMP_sAC_data[1] = norm_data(self.cAMP_sAC_data[1])
cAMP_tmAC_path = pathlib.Path(__file__).parent / "cAMP_tmAC.txt"
self.cAMP_tmAC_data = np.loadtxt(cAMP_tmAC_path).T
self.cAMP_tmAC_data[1] = norm_data(self.cAMP_tmAC_data[1])
mesh_data = pathlib.Path(__file__).parent / "mesh.txt"
self.mesh_data = np.loadtxt(mesh_data).T
self.mesh_data[2] /= np.max(self.mesh_data[2])
# --------------------------------- cAMP -------------------------------- #
def cAMP_sAC_interpolation(self, g):
try:
return interp1d(*self.cAMP_sAC_data)(g)
except ValueError:
return 0
def cAMP_tmAC_interpolation(self, g):
try:
return interp1d(*self.cAMP_tmAC_data)(g)
except ValueError:
return 0
# ------------------------------- secretion ----------------------------- #
def mesh_interpolation(self, gKATP, fcAMP):
assert np.array(gKATP >= 0).all() and np.array(gKATP <= 0.4).all()
assert np.array(fcAMP >= 0).all() and np.array(fcAMP <= 1).all()
x, y, z = self.mesh_data
sparse_points = np.stack([x, y], -1)
result = griddata(sparse_points, z, (gKATP, fcAMP))
return result |
py | 7dfc661202783a3f838d8d15e2a747a6608c71e2 | __all__ = [
"base",
"hio",
"wcm",
"bt",
"sock",
"flash",
"bt_host",
"traffic_agent"
]
|
py | 7dfc66bcad282a24831798b7130f3da80be6efd0 | """
Fatture in Cloud API v2 - API Reference
    Connect your software with Fatture in Cloud, the invoicing platform chosen by more than 400.000 businesses in Italy. The Fatture in Cloud API is based on REST and makes it possible to interact with the user's data, subject to prior authorization via the OAuth2 protocol. # noqa: E501
The version of the OpenAPI document: 2.0.9
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import json
import sys
import unittest
import datetime
import fattureincloud_python_sdk
from functions import json_serial
from functions import create_from_json
from fattureincloud_python_sdk.model.issued_document import IssuedDocument
from fattureincloud_python_sdk.model.issued_document_type import IssuedDocumentType
globals()['IssuedDocument'] = IssuedDocument
from fattureincloud_python_sdk.model.get_existing_issued_document_totals_request import GetExistingIssuedDocumentTotalsRequest
class TestGetExistingIssuedDocumentTotalsRequest(unittest.TestCase):
"""GetExistingIssuedDocumentTotalsRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetExistingIssuedDocumentTotalsRequest(self):
"""Test GetExistingIssuedDocumentTotalsRequest"""
model = GetExistingIssuedDocumentTotalsRequest(
data=IssuedDocument(
id=1,
type=IssuedDocumentType("invoice"),
number=1,
numeration="/A",
date=datetime.datetime.strptime("2022-01-01", '%Y-%m-%d').date(),
year=1,
subject="subject_example",
visible_subject="visible_subject_example",
rc_center="rc_center_example",
notes="notes_example",
rivalsa=0.0,
cassa=0.0,
cassa_taxable=0.0,
amount_cassa_taxable=3.14,
cassa2=0.0,
cassa2_taxable=0.0,
amount_cassa2_taxable=3.14,
global_cassa_taxable=0.0,
amount_global_cassa_taxable=3.14
)
)
expected_json = "{\"data\": {\"id\": 1, \"type\": \"invoice\", \"number\": 1, \"numeration\": \"/A\", \"date\": \"2022-01-01\", \"year\": 1, \"subject\": \"subject_example\", \"visible_subject\": \"visible_subject_example\", \"rc_center\": \"rc_center_example\", \"notes\": \"notes_example\", \"rivalsa\": 0.0, \"cassa\": 0.0, \"cassa_taxable\": 0.0, \"amount_cassa_taxable\": 3.14, \"cassa2\": 0.0, \"cassa2_taxable\": 0.0, \"amount_cassa2_taxable\": 3.14, \"global_cassa_taxable\": 0.0, \"amount_global_cassa_taxable\": 3.14}}"
actual_json = json.dumps(model.to_dict(), default=json_serial)
assert actual_json == expected_json
if __name__ == '__main__':
unittest.main()
|
py | 7dfc671affbf2f631b5e2d2dfcda1dcd4d3c9657 | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_0
from isi_sdk_8_2_0.models.cluster_retry_last_action_item import ClusterRetryLastActionItem # noqa: E501
from isi_sdk_8_2_0.rest import ApiException
class TestClusterRetryLastActionItem(unittest.TestCase):
"""ClusterRetryLastActionItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testClusterRetryLastActionItem(self):
"""Test ClusterRetryLastActionItem"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_0.models.cluster_retry_last_action_item.ClusterRetryLastActionItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 7dfc6741bbf4ba056eaebb76c0787726c70cad2a | """setuptools script to install sarctransform"""
from __future__ import unicode_literals
# based on https://github.com/pypa/sampleproject/blob/3b73bd9433d031f0873a6cbc5bd04cea2e3407cb/setup.py
# which is linked from https://packaging.python.org/guides/distributing-packages-using-setuptools/#setup-py
from setuptools import setup, find_packages
import codecs
import os
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
# copied from https://github.com/pypa/pip/blob/edda92720385d55e7600019bfe96e2f325b58fcc/setup.py#L11
# and https://packaging.python.org/guides/single-sourcing-package-version/
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
# intentionally *not* adding an encoding option to open, See:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
with codecs.open(os.path.join(here, rel_path), "r") as fp:
return fp.read()
# copied from https://github.com/pypa/pip/blob/edda92720385d55e7600019bfe96e2f325b58fcc/setup.py#L11
# and https://packaging.python.org/guides/single-sourcing-package-version/
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
# __version__ = "0.9"
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
name="sarctransform",
version=get_version("__init__.py"),
description="sarctransform",
long_description=long_description,
long_description_content_type="text/markdown",
package_dir={"": "."},
packages=[""], # find_packages(where='.'), # Required
python_requires=">=3.2, <4",
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
"dbf ~= 0.99.0",
"dbfread ~= 2.0.7",
"xlrd ~= 2.0.1",
# "google-cloud-bigquery ~= 2.3.1",
# "googleads ~= 26.0.0",
# "pandas ~= 1.0.0",
# "jinja2",
# "pymysql",
# "python-dateutil",
],
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
extras_require={ # Optional
"dev": [],
"test": [],
},
# If there are data files included in your packages that need to be
# installed, specify them here.
package_data={}, # Optional
data_files=[], # Optional
# https://setuptools.readthedocs.io/en/latest/setuptools.html#automatic-script-creation
entry_points={ # Optional
"console_scripts": [
"sarctransform = sarctransform:main",
],
},
# project_urls={} # Optional
)
|
py | 7dfc6778f3c064b04e75aa8f6da377e7014382c1 | from authtools.admin import NamedUserAdmin
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from reflective.employees.admin import EmployeeStackedAdmin
from reflective.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(NamedUserAdmin):
inlines = [EmployeeStackedAdmin]
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (("User", {"fields": ("name",)}),) + auth_admin.UserAdmin.fieldsets
list_display = ["username", "name", "is_superuser"]
list_display_links = ('username', 'name',)
search_fields = ["name"]
|
py | 7dfc6b7b50ad1449ac9ce9182f2139b2687493ce | import argparse
import sys
import os
from distance_utils import time_series_twed
from scipy.spatial.distance import squareform
import itertools
import pickle
parser = argparse.ArgumentParser(
description='Calculate distances of subsequences')
parser.add_argument('--input_dir', required=True, type=str)
parser.add_argument('--output_dir', required=True, type=str)
parser.add_argument('--dataset', required=True, type=str)
parser.add_argument('--n_samples', required=True, type=int)
parser.add_argument('--time_window', type=int, default=250)
parser.add_argument('--time_step', type=int, default=10)
parser.add_argument('--n_parts', type=int)
args = parser.parse_args(sys.argv[1:])
input_dir = args.input_dir
output_dir = args.output_dir
dataset = args.dataset
n_samples = args.n_samples
time_window = args.time_window
time_step = args.time_step
n_parts = args.n_parts
input_filename = 'sample_{0}_{1}_{2}_{3}.pkl'.format(dataset, n_samples,
time_window, time_step)
output_filename = 'twed_{0}_{1}_{2}_{3}.pkl'.format(dataset, n_samples,
time_window, time_step)
input_path = os.path.join(input_dir, input_filename)
output_path = os.path.join(output_dir, output_filename)
print('Loading data...')
with open(input_path, 'rb') as f:
subsequences = pickle.load(f)
print('DONE')
print('getting ids...')
ids = [s.id for s in subsequences]
print('DONE')
distances = []
print('building distance matrix...')
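# Each part file holds one slice of the condensed pairwise-distance vector; concatenating the
# slices in order and calling scipy's squareform() rebuilds the full symmetric matrix.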
for i in range(n_parts):
input_path = output_path + '.part{}of{}'.format(i, n_parts)
with open(input_path, 'rb') as f:
distances += pickle.load(f)
distance_matrix = squareform(distances)
print('DONE')
output = {'ids': ids, 'distances': distance_matrix}
print('Writing output...')
with open(output_path, 'wb') as f:
pickle.dump(output, f, protocol=4)
print('DONE') |
py | 7dfc6b876b8be142574fbe765dfc9d4ab95c2fe3 | """
Prepare eo3 metadata for USGS Landsat Level 1 data.
Input dataset paths can be directories or tar files.
"""
import logging
import os
import re
import tarfile
import tempfile
import uuid
from datetime import datetime
from pathlib import Path
from typing import List, Optional, Union, Iterable, Dict, Tuple, Callable, Generator
import click
import rasterio
from eodatasets3 import serialise, utils, DatasetAssembler, IfExists
from eodatasets3.properties import FileFormat
from eodatasets3.ui import PathPath
_COPYABLE_MTL_FIELDS = {}
_COPYABLE_MTL_FIELDS["C1"] = [
(
"metadata_file_info",
(
"landsat_scene_id",
"landsat_product_id",
"station_id",
"processing_software_version",
),
),
(
"product_metadata",
("data_type", "ephemeris_type", "wrs_path", "wrs_row", "collection_category"),
),
(
"image_attributes",
(
"ground_control_points_version",
"ground_control_points_model",
"geometric_rmse_model_x",
"geometric_rmse_model_y",
"ground_control_points_verify",
"geometric_rmse_verify",
),
),
("projection_parameters", ("scan_gap_interpolation",)),
]
_COPYABLE_MTL_FIELDS["C2"] = [
(
"level1_processing_record",
(
"landsat_scene_id",
"landsat_product_id",
"processing_software_version",
"collection_category",
"ground_control_points_version",
"ground_control_points_model",
"geometric_rmse_model_x",
"geometric_rmse_model_y",
),
),
(
"image_attributes",
(
"station_id",
"wrs_path",
"wrs_row",
# "ground_control_points_verify", # not in the test data for C1 or C2
# "geometric_rmse_verify", # not in the test data for C1 or C2
),
),
# not in the test data for C1 or C2
# ("level1_projection_parameters", ("scan_gap_interpolation",)),
]
# Static namespace to generate uuids for datacube indexing
USGS_UUID_NAMESPACE = uuid.UUID("276af61d-99f8-4aa3-b2fb-d7df68c5e28f")
LANDSAT_OLI_TIRS_BAND_ALIASES = {
"1": "coastal_aerosol",
"2": "blue",
"3": "green",
"4": "red",
"5": "nir",
"6": "swir_1",
"7": "swir_2",
"8": "panchromatic",
"9": "cirrus",
"10": "lwir_1",
"11": "lwir_2",
"quality": "quality",
}
LANDSAT_xTM_BAND_ALIASES = {
"1": "blue",
"2": "green",
"3": "red",
"4": "nir",
"5": "swir_1",
"6": "tir",
"6_vcid_1": "tir_1",
"6_vcid_2": "tir_2",
"7": "swir_2",
"8": "panchromatic",
"quality": "quality",
}
MTL_PAIRS_RE = re.compile(r"(\w+)\s=\s(.*)")
LANDSAT_MTL_MAP = {
"C1": {
"product_contents_cn": "metadata_file_info",
"product_contents_of": "product_metadata",
"product_contents_fn": "product_metadata",
"image_attributes": "product_metadata",
"level1_processing_record": "metadata_file_info",
"level1_projection_parameters": "projection_parameters",
},
"C2": {
"product_contents_cn": "product_contents",
"product_contents_of": "level1_processing_record",
"product_contents_fn": "product_contents",
"image_attributes": "image_attributes",
"level1_processing_record": "level1_processing_record",
"level1_projection_parameters": "level1_projection_parameters",
},
}
def get_band_alias_mappings(sat: str, instrument: str) -> Dict[str, str]:
"""
    Load the band-name aliases for referencing Landsat 8, Landsat 7 or Landsat 5 bands.
    Landsat 7 and Landsat 5 share the same band names.
>>> get_band_alias_mappings('landsat-8', 'OLI_TIRS') == LANDSAT_OLI_TIRS_BAND_ALIASES
True
>>> get_band_alias_mappings('landsat-8', 'OLI') == LANDSAT_OLI_TIRS_BAND_ALIASES
True
>>> get_band_alias_mappings('landsat-5', 'TM') == LANDSAT_xTM_BAND_ALIASES
True
>>> get_band_alias_mappings('landsat-5', 'TM') == LANDSAT_xTM_BAND_ALIASES
True
>>> get_band_alias_mappings('aqua', 'MODIS') == LANDSAT_xTM_BAND_ALIASES
Traceback (most recent call last):
...
NotImplementedError: Unexpected satellite. Only landsat handled currently. Got 'aqua'
>>> get_band_alias_mappings('landsat-5', 'MSS') == LANDSAT_xTM_BAND_ALIASES
Traceback (most recent call last):
...
NotImplementedError: Landsat version not yet supported: 'landsat-5', 'MSS'
"""
if not sat.startswith("landsat-"):
raise NotImplementedError(
f"Unexpected satellite. Only landsat handled currently. Got {sat!r}"
)
landsat_number = int(sat.split("-")[1])
if landsat_number == 8:
return LANDSAT_OLI_TIRS_BAND_ALIASES
if landsat_number in (4, 5, 7) and instrument.endswith("TM"):
return LANDSAT_xTM_BAND_ALIASES
raise NotImplementedError(
f"Landsat version not yet supported: {sat!r}, {instrument!r}"
)
def get_mtl_content(acquisition_path: Path, root_element=None) -> Tuple[Dict, str, str]:
"""
Find MTL file for the given path. It could be a directory or a tar file.
It will return the MTL parsed as a dict and its filename.
"""
def iter_tar_members(tp: tarfile.TarFile) -> Generator[tarfile.TarInfo, None, None]:
"""
        This is a lazy alternative to TarFile.getmembers() that reads one tar member at a time.
        We're reading the MTL file, which is almost always the first entry in the tar, and then
        closing it, so this avoids scanning through the entire tar.
"""
member = tp.next()
while member is not None:
yield member
member = tp.next()
if not acquisition_path.exists():
raise RuntimeError("Missing path '{}'".format(acquisition_path))
if acquisition_path.is_file() and tarfile.is_tarfile(str(acquisition_path)):
with tarfile.open(str(acquisition_path), "r") as tp:
for member in iter_tar_members(tp):
if "_MTL" in member.name:
with tp.extractfile(member) as fp:
mtl_doc, file_root_element = read_mtl(fp)
return mtl_doc, file_root_element, member.name
else:
raise RuntimeError(
"MTL file not found in {}".format(str(acquisition_path))
)
else:
paths = list(acquisition_path.rglob("*_MTL.txt"))
if not paths:
raise RuntimeError("No MTL file")
if len(paths) > 1:
raise RuntimeError(
f"Multiple MTL files found in given acq path {acquisition_path}"
)
[path] = paths
with path.open("r") as fp:
mtl_doc, file_root_element = read_mtl(fp, root_element)
return mtl_doc, file_root_element, path.name
def read_mtl(fp: Iterable[Union[str, bytes]], root_element=None) -> Tuple[Dict, str]:
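    """Parse an open MTL file into a dict.

    Returns the subtree under ``root_element`` (defaulting to the first group found)
    along with the name of that root element.
    """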
def _parse_value(s: str) -> Union[int, float, str]:
"""
>>> _parse_value("asdf")
'asdf'
>>> _parse_value("123")
123
>>> _parse_value("3.14")
3.14
"""
s = s.strip('"')
for parser in [int, float]:
try:
return parser(s)
except ValueError:
pass
return s
def _parse_group(
lines: Iterable[Union[str, bytes]],
key_transform: Callable[[str], str] = lambda s: s.lower(),
) -> dict:
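        """Recursively parse GROUP/END_GROUP blocks into nested dicts."""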
tree = {}
for line in lines:
# If line is bytes-like convert to str
if isinstance(line, bytes):
line = line.decode("utf-8")
match = MTL_PAIRS_RE.findall(line)
if match:
key, value = match[0]
if key == "GROUP":
tree[key_transform(value)] = _parse_group(lines)
elif key == "END_GROUP":
break
else:
tree[key_transform(key)] = _parse_value(value)
return tree
tree = _parse_group(fp)
if root_element is None:
root_element = list(tree.keys())[0]
return tree[root_element], root_element
def _iter_bands_paths(product_doc: Dict) -> Generator[Tuple[str, str], None, None]:
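    """Yield (usgs_band_id, file_path) pairs from an MTL product-contents section."""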
prefix = "file_name_band_"
for name, filepath in product_doc.items():
if not name.startswith(prefix):
continue
usgs_band_id = name[len(prefix) :]
yield usgs_band_id, filepath
def prepare_and_write(
ds_path: Path,
output_yaml_path: Path,
source_telemetry: Path = None,
    # TODO: Can we infer producer automatically? This is bound to cause mistakes otherwise.
producer="usgs.gov",
) -> Tuple[uuid.UUID, Path]:
"""
Prepare an eo3 metadata file for a Level1 dataset.
Input dataset path can be a folder or a tar file.
"""
mtl_doc, root_element, mtl_filename = get_mtl_content(ds_path)
if not mtl_doc:
raise ValueError(f"No MTL file found for {ds_path}")
collection_key = "C2" if root_element == "landsat_metadata_file" else "C1"
coll_map = LANDSAT_MTL_MAP[collection_key]
usgs_collection_number = mtl_doc[coll_map["product_contents_cn"]].get(
"collection_number"
)
if usgs_collection_number is None:
raise NotImplementedError(
"Dataset has no collection number: pre-collection data is not supported."
)
data_format = mtl_doc[coll_map["product_contents_of"]]["output_format"]
if data_format.upper() != "GEOTIFF":
raise NotImplementedError(f"Only GTiff currently supported, got {data_format}")
file_format = FileFormat.GeoTIFF
# Assumed below.
projection_params = mtl_doc[coll_map["level1_projection_parameters"]]
if (
"grid_cell_size_thermal" in projection_params
and "grid_cell_size_reflective" in projection_params
and (
projection_params["grid_cell_size_reflective"]
!= projection_params["grid_cell_size_thermal"]
)
):
raise NotImplementedError("reflective and thermal have different cell sizes")
ground_sample_distance = min(
value
for name, value in projection_params.items()
if name.startswith("grid_cell_size_")
)
with DatasetAssembler(
metadata_path=output_yaml_path,
dataset_location=ds_path,
        # Deterministic ID based on USGS's product id (which changes when the scene is reprocessed by them)
dataset_id=uuid.uuid5(
USGS_UUID_NAMESPACE,
mtl_doc[coll_map["level1_processing_record"]]["landsat_product_id"],
),
naming_conventions="dea",
if_exists=IfExists.Overwrite,
) as p:
if source_telemetry:
# Only GA's data has source telemetry...
assert producer == "ga.gov.au"
p.add_source_path(source_telemetry)
p.platform = mtl_doc[coll_map["image_attributes"]]["spacecraft_id"]
p.instrument = mtl_doc[coll_map["image_attributes"]]["sensor_id"]
p.product_family = "level1"
p.producer = producer
p.datetime = "{}T{}".format(
mtl_doc[coll_map["image_attributes"]]["date_acquired"],
mtl_doc[coll_map["image_attributes"]]["scene_center_time"],
)
if collection_key == "C2":
p.processed = mtl_doc["level1_processing_record"]["date_product_generated"]
p.properties["landsat:data_type"] = mtl_doc["level1_processing_record"][
"processing_level"
]
else:
p.processed = mtl_doc["metadata_file_info"]["file_date"]
p.properties["odc:file_format"] = file_format
p.properties["eo:gsd"] = ground_sample_distance
cloud_cover = mtl_doc["image_attributes"]["cloud_cover"]
# Cloud cover is -1 when missing (such as TIRS-only data)
if cloud_cover != -1:
p.properties["eo:cloud_cover"] = cloud_cover
p.properties["eo:sun_azimuth"] = mtl_doc["image_attributes"]["sun_azimuth"]
p.properties["eo:sun_elevation"] = mtl_doc["image_attributes"]["sun_elevation"]
p.properties["landsat:collection_number"] = usgs_collection_number
for section, fields in _COPYABLE_MTL_FIELDS[collection_key]:
for field in fields:
value = mtl_doc[section].get(field)
if value is not None:
p.properties[f"landsat:{field}"] = value
p.region_code = f"{p.properties['landsat:wrs_path']:03d}{p.properties['landsat:wrs_row']:03d}"
org_collection_number = utils.get_collection_number(
p.producer, p.properties["landsat:collection_number"]
)
p.dataset_version = f"{org_collection_number}.0.{p.processed:%Y%m%d}"
# NRT product?
# Category is one of: T1, T2 or RT ('real time')
if p.properties["landsat:collection_category"] == "RT":
p.properties["dea:dataset_maturity"] = "nrt"
band_aliases = get_band_alias_mappings(p.platform, p.instrument)
for usgs_band_id, file_location in _iter_bands_paths(
mtl_doc[coll_map["product_contents_fn"]]
):
p.note_measurement(
band_aliases[usgs_band_id],
file_location,
relative_to_dataset_location=True,
expand_valid_data=usgs_band_id != "quality",
)
if collection_key == "C2":
p.note_measurement(
band_aliases["quality"],
mtl_doc[coll_map["product_contents_fn"]]["file_name_quality_l1_pixel"],
relative_to_dataset_location=True,
expand_valid_data=False,
)
p.add_accessory_file("metadata:landsat_mtl", Path(mtl_filename))
return p.done()
@click.command(help=__doc__)
@click.option(
"--output-base",
help="Write metadata files into a directory instead of alongside each dataset",
required=False,
type=PathPath(exists=True, writable=True, dir_okay=True, file_okay=False),
)
@click.option(
"--source",
"source_telemetry",
help="Path to the source telemetry data for all of the provided datasets"
"(either the folder or metadata file)",
required=False,
type=PathPath(exists=True),
)
@click.option(
"--producer",
help="Organisation that produced the data: probably either 'ga.gov.au' or 'usgs.gov'.",
required=False,
default="usgs.gov",
)
@click.argument(
"datasets", type=PathPath(exists=True, readable=True, writable=False), nargs=-1
)
@click.option(
"--overwrite-existing/--skip-existing",
is_flag=True,
default=False,
help="Overwrite if exists (otherwise skip)",
)
@click.option(
"--newer-than",
type=serialise.ClickDatetime(),
default=None,
help="Only process files newer than this date",
)
def main(
output_base: Optional[Path],
datasets: List[Path],
overwrite_existing: bool,
producer: str,
source_telemetry: Optional[Path],
newer_than: datetime,
):
logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO
)
with rasterio.Env():
for ds in datasets:
if output_base:
output = output_base.joinpath(
*utils.subfolderise(_dataset_region_code(ds))
)
output.mkdir(parents=True, exist_ok=True)
else:
# Alongside the dataset itself.
output = ds.absolute().parent
ds_path = _normalise_dataset_path(Path(ds).absolute())
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(ds)
create_date = datetime.utcfromtimestamp(ctime)
if newer_than and (create_date <= newer_than):
logging.info(
"Creation time {} older than start date {:%Y-%m-%d %H:%M} ...SKIPPING {}".format(
newer_than - create_date, newer_than, ds_path.name
)
)
continue
logging.info("Processing %s", ds_path)
output_yaml = output / "{}.odc-metadata.yaml".format(_dataset_name(ds_path))
if output_yaml.exists():
if not overwrite_existing:
logging.info("Output exists: skipping. %s", output_yaml)
continue
logging.info("Output exists: overwriting %s", output_yaml)
output_uuid, output_path = prepare_and_write(
ds_path,
output_yaml,
producer=producer,
source_telemetry=source_telemetry,
)
logging.info("Wrote dataset %s to %s", output_uuid, output_path)
def _normalise_dataset_path(input_path: Path) -> Path:
"""
Dataset path should be either the direct imagery folder (mtl+bands) or a tar path.
Translate other inputs (example: the MTL path) to one of the two.
>>> tmppath = Path(tempfile.mkdtemp())
>>> ds_path = tmppath.joinpath('LE07_L1GT_104078_20131209_20161119_01_T1')
>>> ds_path.mkdir()
>>> mtl_path = ds_path / 'LC08_L1TP_090084_20160121_20170405_01_T1_MTL.txt'
>>> mtl_path.write_text('<mtl content>')
13
>>> _normalise_dataset_path(ds_path).relative_to(tmppath).as_posix()
'LE07_L1GT_104078_20131209_20161119_01_T1'
>>> _normalise_dataset_path(mtl_path).relative_to(tmppath).as_posix()
'LE07_L1GT_104078_20131209_20161119_01_T1'
>>> tar_path = tmppath / 'LS_L1GT.tar.gz'
>>> tar_path.write_text('fake tar')
8
>>> _normalise_dataset_path(tar_path).relative_to(tmppath).as_posix()
'LS_L1GT.tar.gz'
>>> _normalise_dataset_path(Path(tempfile.mkdtemp()))
Traceback (most recent call last):
...
ValueError: No MTL files within input path .... Not a dataset?
"""
input_path = normalise_nci_symlinks(input_path)
if input_path.is_file():
if ".tar" in input_path.suffixes:
return input_path
input_path = input_path.parent
mtl_files = list(input_path.rglob("*_MTL.txt"))
if not mtl_files:
raise ValueError(
"No MTL files within input path '{}'. Not a dataset?".format(input_path)
)
if len(mtl_files) > 1:
raise ValueError(
"Multiple MTL files in a single dataset (got path: {})".format(input_path)
)
return input_path
def normalise_nci_symlinks(input_path: Path) -> Path:
"""
If it's an NCI lustre path, always use the symlink (`/g/data`) rather than specific drives (eg. `/g/data2`).
>>> normalise_nci_symlinks(Path('/g/data2/v10/some/dataset.tar')).as_posix()
'/g/data/v10/some/dataset.tar'
>>> normalise_nci_symlinks(Path('/g/data1a/v10/some/dataset.tar')).as_posix()
'/g/data/v10/some/dataset.tar'
>>> # Don't change other paths!
>>> normalise_nci_symlinks(Path('/g/data/v10/some/dataset.tar')).as_posix()
'/g/data/v10/some/dataset.tar'
>>> normalise_nci_symlinks(Path('/Users/testuser/unrelated-path.yaml')).as_posix()
'/Users/testuser/unrelated-path.yaml'
"""
match = re.match(r"^/g/data[0-9a-z]+/(.*)", str(input_path))
if not match:
return input_path
[offset] = match.groups()
return Path("/g/data/" + offset)
def _dataset_name(ds_path: Path) -> str:
"""
>>> _dataset_name(Path("example/LE07_L1GT_104078_20131209_20161119_01_T1.tar.gz"))
'LE07_L1GT_104078_20131209_20161119_01_T1'
>>> _dataset_name(Path("example/LE07_L1GT_104078_20131209_20161119_01_T1.tar"))
'LE07_L1GT_104078_20131209_20161119_01_T1'
>>> _dataset_name(Path("example/LE07_L1GT_104078_20131209_20161119_01_T2"))
'LE07_L1GT_104078_20131209_20161119_01_T2'
"""
# This is a little simpler than before :)
return ds_path.stem.split(".")[0]
def _dataset_region_code(ds_path: Path) -> str:
"""
>>> _dataset_region_code(Path("example/LE07_L1GT_104078_20131209_20161119_01_T1.tar.gz"))
'104078'
>>> _dataset_region_code(Path("example/LE07_L1GT_104078_20131209_20161119_01_T1.tar"))
'104078'
>>> _dataset_region_code(Path("example/LE07_L1GT_104078_20131209_20161119_01_T2"))
'104078'
"""
return _dataset_name(ds_path).split("_")[2]
if __name__ == "__main__":
main()
|
py | 7dfc6bb3ef1ed8b8ec6d3a4213e1eff63e847c6d | from .clusters import ClustersClient
from .dataset_client import DatasetsClient
from .dataset_tag_client import DatasetTagsClient
from .dataset_version_client import DatasetVersionsClient
from .deployment_client import DeploymentsClient
from .experiment_client import ExperimentsClient
from .hyperparameter_client import HyperparameterJobsClient
from .job_client import JobsClient
from .machine_types_client import MachineTypesClient
from .machines_client import MachinesClient
from .model_client import ModelsClient
from .notebook_client import NotebooksClient
from .project_client import ProjectsClient
from .secret_client import SecretsClient
from .storage_provider_client import StorageProvidersClient
from .sdk_client import SdkClient
from .tensorboards_client import TensorboardClient
from .workflow_client import WorkflowsClient
|
py | 7dfc6ee4ccd172ae18d1a85dd43a6c88a7b672de | from typing import Tuple, FrozenSet
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
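# Convenience wrappers: build <, >=, > and -> terms from the available <=, not and or primitives.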
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
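    """Encode the benchmark transition system and LTL property.

    Returns (curr-to-next symbol map, init, trans, ltl) as msat terms.
    """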
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
# r' = r
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
# i >= l -> i' = 0 & l' = l + 1 & !inc_i'
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
# (G F inc_i) -> ! G F r > i
G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
r_gt_i = msat_make_gt(menv, r, i)
n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
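    """Return a frozenset of Hint objects over the symbols i, r, l and inc_i."""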
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.GE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1)))
h_i = Hint("h_i0", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc = Location(env, mgr.GE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r0", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc = Location(env, inc_i)
loc.set_progress(0, x_inc_i)
h_inc = Hint("h_inc0", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.LE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Minus(i, n1)))
h_i = Hint("h_i1", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc = Location(env, mgr.LE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Minus(r, n1)))
h_r = Hint("h_r1", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc = Location(env, mgr.LE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Minus(l, n1)))
h_l = Hint("h_l1", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i2", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r2", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l2", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc2", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0),
stutterT=mgr.Equals(x_i, mgr.Plus(i, l)))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i3", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0), mgr.GE(i, n0),
stutterT=mgr.Equals(x_r, mgr.Plus(r, i)))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r3", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0), mgr.GE(r, n0),
stutterT=mgr.Equals(x_l, mgr.Plus(l, r)))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l3", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc3", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(2, mgr.Equals(x_i, i))
loc2 = Location(env, mgr.GE(i, n0))
loc2.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i4", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1, loc2])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
loc2 = Location(env, mgr.GE(r, n0))
loc2.set_progress(0, mgr.Equals(x_r, r))
h_r = Hint("h_r4", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1, loc2])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(2, mgr.Equals(x_l, l))
loc2 = Location(env, mgr.GE(l, n0))
loc2.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l4", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1, loc2])
res.append(h_l)
return frozenset(res)
|
py | 7dfc6ee748e7e6f97d94606e6d71e4ee0591f71a | import typing
from random import random
import strawberry
from sanic import Sanic
from strawberry.file_uploads import Upload
from strawberry.sanic.views import GraphQLView as BaseGraphQLView
def create_app(**kwargs):
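    """Build a Sanic app exposing a Strawberry GraphQL schema with file-upload mutations."""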
@strawberry.input
class FolderInput:
files: typing.List[Upload]
@strawberry.type
class Query:
hello: str = "strawberry"
@strawberry.type
class Mutation:
@strawberry.mutation
def read_text(self, text_file: Upload) -> str:
return text_file.read().decode()
@strawberry.mutation
def read_files(self, files: typing.List[Upload]) -> typing.List[str]:
contents = []
for file in files:
contents.append(file.read().decode())
return contents
@strawberry.mutation
def read_folder(self, folder: FolderInput) -> typing.List[str]:
contents = []
for file in folder.files:
contents.append(file.read().decode())
return contents
schema = strawberry.Schema(query=Query, mutation=Mutation)
class GraphQLView(BaseGraphQLView):
def get_root_value(self):
return Query()
app = Sanic(f"test_{int(random()*1000)}")
app.add_route(
GraphQLView.as_view(schema=schema, graphiql=kwargs.get("graphiql", True)),
"/graphql",
)
return app
|
py | 7dfc701679cc1c6fcc52f92daeb237eecb8352c1 | """Module for testing Coding DNA Substitution Validator."""
import unittest
from variation.validators import CodingDNASubstitution
from variation.classifiers import CodingDNASubstitutionClassifier
from .validator_base import ValidatorBase
class TestCodingDNASubstitutionValidator(ValidatorBase, unittest.TestCase):
"""A class to test the Coding DNA Substitution Validator."""
def validator_instance(self):
"""Return coding DNA substitution instance."""
return CodingDNASubstitution(*self.params)
def classifier_instance(self):
"""Return the coding DNA substitution classifier instance."""
return CodingDNASubstitutionClassifier()
def fixture_name(self):
"""Return the fixture name for coding DNA substitution."""
return "coding_dna_substitution"
|
py | 7dfc711c4b6da3fb3620604d1e989e07e374c6d1 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *  # provides Package, version(), depends_on() and the install helpers

import os.path as osp
class VisionaryAsic(Package):
"""Visionary ASIC-specific environment. For now this serves as a minimum
software environment for our ASIC-related CI jobs (e.g. building bitfiles)
in the ASIC container. It is not to be used with the visionary software
stack (for now)."""
homepage = ''
# some random tarball, to make `spack fetch --dependencies visionary-defaults` work
url = 'https://github.com/electronicvisions/spack/archive/v0.8.tar.gz'
# This is only a dummy tarball (see difference between version numbers)
# TODO: as soon as a MetaPackage-concept has been merged, please update this package
version('1.0', '372ce038842f20bf0ae02de50c26e85d', url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz')
depends_on('tcl-osys@890eafffbda95b58a472a2005c3cb9e90fd22ff6')
def install(self, spec, prefix):
mkdirp(prefix.etc)
# store a copy of this package.
filename = osp.basename(osp.dirname(__file__)) # gives name of parent folder
install(__file__, join_path(prefix.etc, filename + '.py'))
# we could create some filesystem view here?
|
py | 7dfc714810284f38f5d15fa5dcc515a46a4c9f92 | VERSION = (0, 1, 3)
def get_version():
"""Return the VERSION as a string, e.g. for VERSION == (0, 1, 0),
return '0.1.0'.
"""
return '.'.join(map(str, VERSION))
__version__ = get_version() |
py | 7dfc7165921665e8c6d05740816cdf7d32916eb8 | pkgname = "Cython"
ignore = ["*[A-Za-z]*"]
|
py | 7dfc724bf9d5ce352bb8ce0418870bc4faea1aa0 | #!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import TYPE_CHECKING, Dict, List, Union, Tuple, Sequence, Optional, Type
from functools import partial
from electrum_ltc.plugin import BasePlugin, hook, Device, DeviceMgr, DeviceInfo
from electrum_ltc.i18n import _
from electrum_ltc.bitcoin import is_address, opcodes
from electrum_ltc.util import bfh, versiontuple, UserFacingException
from electrum_ltc.transaction import TxOutput, Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_ltc.bip32 import BIP32Node
from electrum_ltc.storage import get_derivation_used_for_hw_device_encryption
from electrum_ltc.keystore import Xpub, Hardware_KeyStore
if TYPE_CHECKING:
import threading
from electrum_ltc.wallet import Abstract_Wallet
from electrum_ltc.base_wizard import BaseWizard
class HW_PluginBase(BasePlugin):
keystore_class: Type['Hardware_KeyStore']
libraries_available: bool
# define supported library versions: minimum_library <= x < maximum_library
minimum_library = (0, )
maximum_library = (float('inf'), )
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.device = self.keystore_class.device
self.keystore_class.plugin = self
self._ignore_outdated_fw = False
def is_enabled(self):
return True
def device_manager(self) -> 'DeviceMgr':
return self.parent.device_manager
def create_device_from_hid_enumeration(self, d: dict, *, product_key) -> 'Device':
# Older versions of hid don't provide interface_number
interface_number = d.get('interface_number', -1)
usage_page = d['usage_page']
id_ = d['serial_number']
if len(id_) == 0:
id_ = str(d['path'])
id_ += str(interface_number) + str(usage_page)
device = Device(path=d['path'],
interface_number=interface_number,
id_=id_,
product_key=product_key,
usage_page=usage_page,
transport_ui_string='hid')
return device
@hook
def close_wallet(self, wallet: 'Abstract_Wallet'):
for keystore in wallet.get_keystores():
if isinstance(keystore, self.keystore_class):
self.device_manager().unpair_xpub(keystore.xpub)
if keystore.thread:
keystore.thread.stop()
def scan_and_create_client_for_device(self, *, device_id: str, wizard: 'BaseWizard') -> 'HardwareClientBase':
devmgr = self.device_manager()
client = wizard.run_task_without_blocking_gui(
task=partial(devmgr.client_by_id, device_id))
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
client.handler = self.create_handler(wizard)
return client
def setup_device(self, device_info: DeviceInfo, wizard: 'BaseWizard', purpose) -> 'HardwareClientBase':
"""Called when creating a new wallet or when using the device to decrypt
an existing wallet. Select the device to use. If the device is
uninitialized, go through the initialization process.
Runs in GUI thread.
"""
raise NotImplementedError()
def get_client(self, keystore: 'Hardware_KeyStore', force_pair: bool = True, *,
devices: Sequence['Device'] = None,
allow_user_interaction: bool = True) -> Optional['HardwareClientBase']:
devmgr = self.device_manager()
handler = keystore.handler
client = devmgr.client_for_keystore(self, handler, keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
return client
def show_address(self, wallet: 'Abstract_Wallet', address, keystore: 'Hardware_KeyStore' = None):
pass # implemented in child classes
def show_address_helper(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not is_address(address):
keystore.handler.show_error(_('Invalid Viacoin Address'))
return False
if not wallet.is_mine(address):
keystore.handler.show_error(_('Address not in wallet.'))
return False
if type(keystore) != self.keystore_class:
return False
return True
def get_library_version(self) -> str:
"""Returns the version of the 3rd party python library
for the hw wallet. For example '0.9.0'
Returns 'unknown' if library is found but cannot determine version.
Raises 'ImportError' if library is not found.
Raises 'LibraryFoundButUnusable' if found but there was some problem (includes version num).
"""
raise NotImplementedError()
def check_libraries_available(self) -> bool:
def version_str(t):
return ".".join(str(i) for i in t)
try:
# this might raise ImportError or LibraryFoundButUnusable
library_version = self.get_library_version()
# if no exception so far, we might still raise LibraryFoundButUnusable
if (library_version == 'unknown'
or versiontuple(library_version) < self.minimum_library
or versiontuple(library_version) >= self.maximum_library):
raise LibraryFoundButUnusable(library_version=library_version)
except ImportError:
return False
except LibraryFoundButUnusable as e:
library_version = e.library_version
self.libraries_available_message = (
_("Library version for '{}' is incompatible.").format(self.name)
+ '\nInstalled: {}, Needed: {} <= x < {}'
.format(library_version, version_str(self.minimum_library), version_str(self.maximum_library)))
self.logger.warning(self.libraries_available_message)
return False
return True
def get_library_not_available_message(self) -> str:
if hasattr(self, 'libraries_available_message'):
message = self.libraries_available_message
else:
message = _("Missing libraries for {}.").format(self.name)
message += '\n' + _("Make sure you install it with python3")
return message
def set_ignore_outdated_fw(self):
self._ignore_outdated_fw = True
def is_outdated_fw_ignored(self) -> bool:
return self._ignore_outdated_fw
def create_client(self, device: 'Device',
handler: Optional['HardwareHandlerBase']) -> Optional['HardwareClientBase']:
raise NotImplementedError()
def get_xpub(self, device_id: str, derivation: str, xtype, wizard: 'BaseWizard') -> str:
raise NotImplementedError()
def create_handler(self, window) -> 'HardwareHandlerBase':
# note: in Qt GUI, 'window' is either an ElectrumWindow or an InstallWizard
raise NotImplementedError()
class HardwareClientBase:
handler = None # type: Optional['HardwareHandlerBase']
def __init__(self, *, plugin: 'HW_PluginBase'):
self.plugin = plugin
def device_manager(self) -> 'DeviceMgr':
return self.plugin.device_manager()
def is_pairable(self) -> bool:
raise NotImplementedError()
def close(self):
raise NotImplementedError()
def timeout(self, cutoff) -> None:
pass
def is_initialized(self) -> bool:
"""True if initialized, False if wiped."""
raise NotImplementedError()
def label(self) -> Optional[str]:
"""The name given by the user to the device.
Note: labels are shown to the user to help distinguish their devices,
and they are also used as a fallback to distinguish devices programmatically.
So ideally, different devices would have different labels.
"""
# When returning a constant here (i.e. not implementing the method in the way
# it is supposed to work), make sure the return value is in electrum.plugin.PLACEHOLDER_HW_CLIENT_LABELS
return " "
def get_soft_device_id(self) -> Optional[str]:
"""An id-like string that is used to distinguish devices programmatically.
This is a long term id for the device, that does not change between reconnects.
This method should not prompt the user, i.e. no user interaction, as it is used
during USB device enumeration (called for each unpaired device).
Stored in the wallet file.
"""
# This functionality is optional. If not implemented just return None:
return None
def has_usable_connection_with_device(self) -> bool:
raise NotImplementedError()
def get_xpub(self, bip32_path: str, xtype) -> str:
raise NotImplementedError()
def request_root_fingerprint_from_device(self) -> str:
# digitalbitbox (at least) does not reveal xpubs corresponding to unhardened paths
# so ask for a direct child, and read out fingerprint from that:
child_of_root_xpub = self.get_xpub("m/0'", xtype='standard')
root_fingerprint = BIP32Node.from_xkey(child_of_root_xpub).fingerprint.hex().lower()
return root_fingerprint
def get_password_for_storage_encryption(self) -> str:
# note: using a different password based on hw device type is highly undesirable! see #5993
derivation = get_derivation_used_for_hw_device_encryption()
xpub = self.get_xpub(derivation, "standard")
password = Xpub.get_pubkey_from_xpub(xpub, ()).hex()
return password
def device_model_name(self) -> Optional[str]:
"""Return the name of the model of this device, which might be displayed in the UI.
E.g. for Trezor, "Trezor One" or "Trezor T".
"""
return None
class HardwareHandlerBase:
"""An interface between the GUI and the device handling logic for handling I/O."""
win = None
device: str
def get_wallet(self) -> Optional['Abstract_Wallet']:
if self.win is not None:
if hasattr(self.win, 'wallet'):
return self.win.wallet
def get_gui_thread(self) -> Optional['threading.Thread']:
if self.win is not None:
if hasattr(self.win, 'gui_thread'):
return self.win.gui_thread
def update_status(self, paired: bool) -> None:
pass
def query_choice(self, msg: str, labels: Sequence[str]) -> Optional[int]:
raise NotImplementedError()
def yes_no_question(self, msg: str) -> bool:
raise NotImplementedError()
def show_message(self, msg: str, on_cancel=None) -> None:
raise NotImplementedError()
def show_error(self, msg: str, blocking: bool = False) -> None:
raise NotImplementedError()
def finished(self) -> None:
pass
def get_word(self, msg: str) -> str:
raise NotImplementedError()
def get_passphrase(self, msg: str, confirm: bool) -> Optional[str]:
raise NotImplementedError()
def get_pin(self, msg: str, *, show_strength: bool = True) -> str:
raise NotImplementedError()
def is_any_tx_output_on_change_branch(tx: PartialTransaction) -> bool:
return any([txout.is_change for txout in tx.outputs()])
def trezor_validate_op_return_output_and_get_data(output: TxOutput) -> bytes:
validate_op_return_output(output)
script = output.scriptpubkey
if not (script[0] == opcodes.OP_RETURN and
script[1] == len(script) - 2 and script[1] <= 75):
raise UserFacingException(_("Only OP_RETURN scripts, with one constant push, are supported."))
return script[2:]
def validate_op_return_output(output: TxOutput, *, max_size: int = None) -> None:
script = output.scriptpubkey
if script[0] != opcodes.OP_RETURN:
raise UserFacingException(_("Only OP_RETURN scripts are supported."))
if max_size is not None and len(script) > max_size:
raise UserFacingException(_("OP_RETURN payload too large." + "\n"
+ f"(scriptpubkey size {len(script)} > {max_size})"))
if output.value != 0:
raise UserFacingException(_("Amount for OP_RETURN output must be zero."))
def get_xpubs_and_der_suffixes_from_txinout(tx: PartialTransaction,
txinout: Union[PartialTxInput, PartialTxOutput]) \
-> List[Tuple[str, List[int]]]:
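    """Pair each pubkey's xpub (looked up in the tx's global xpubs by fingerprint)
    with its derivation-path suffix below that xpub's depth.
    """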
xfp_to_xpub_map = {xfp: bip32node for bip32node, (xfp, path)
in tx.xpubs.items()} # type: Dict[bytes, BIP32Node]
xfps = [txinout.bip32_paths[pubkey][0] for pubkey in txinout.pubkeys]
try:
xpubs = [xfp_to_xpub_map[xfp] for xfp in xfps]
except KeyError as e:
raise Exception(f"Partial transaction is missing global xpub for "
f"fingerprint ({str(e)}) in input/output") from e
xpubs_and_deriv_suffixes = []
for bip32node, pubkey in zip(xpubs, txinout.pubkeys):
xfp, path = txinout.bip32_paths[pubkey]
der_suffix = list(path)[bip32node.depth:]
xpubs_and_deriv_suffixes.append((bip32node.to_xpub(), der_suffix))
return xpubs_and_deriv_suffixes
def only_hook_if_libraries_available(func):
# note: this decorator must wrap @hook, not the other way around,
# as 'hook' uses the name of the function it wraps
def wrapper(self: 'HW_PluginBase', *args, **kwargs):
if not self.libraries_available: return None
return func(self, *args, **kwargs)
return wrapper
class LibraryFoundButUnusable(Exception):
def __init__(self, library_version='unknown'):
self.library_version = library_version
class OutdatedHwFirmwareException(UserFacingException):
def text_ignore_old_fw_and_continue(self) -> str:
suffix = (_("The firmware of your hardware device is too old. "
"If possible, you should upgrade it. "
"You can ignore this error and try to continue, however things are likely to break.") + "\n\n" +
_("Ignore and continue?"))
if str(self):
return str(self) + "\n\n" + suffix
else:
return suffix
|
py | 7dfc726ef7786fdcacca5c07e3993f7ca8a209fd | # -*- coding: utf-8 -*-
"""CUBI+Snakemake wrapper code for quality filter for wgs_mei_filtration.
"""
# TODO: works for trios, singletons, or if only one parent available but NOT FOR MORE COMPLICATED CASES
import os
from snakemake import shell
__author__ = "Manuel Holtgrewe <[email protected]>"
shell.executable("/bin/bash")
base_dir = os.path.dirname(os.path.realpath(__file__))
shell(
r"""
set -x
# Load library with helper functions.
source {base_dir}/../../wgs_sv_filtration/funcs.sh
# Get name and number of index, father, and mother.
index={snakemake.wildcards.index_library}
father=$(awk '($2 == "'$index'") {{ print $3; }}' {snakemake.input.ped})
mother=$(awk '($2 == "'$index'") {{ print $4; }}' {snakemake.input.ped})
index_no=$(get_index {snakemake.input.vcf} "$index")
father_no=$(get_index {snakemake.input.vcf} "$father")
mother_no=$(get_index {snakemake.input.vcf} "$mother")
# Perform the actual filtration
lr_var={snakemake.config[step_config][wgs_mei_filtration][thresholds][conservative][lr_var]}
lr_ref={snakemake.config[step_config][wgs_mei_filtration][thresholds][conservative][lr_ref]}
case "{snakemake.wildcards.thresholds}" in
conservative*)
# Build base filter expression for conservative case.
exp="(LR[${{index_no}}] >= $lr_var)"
if [[ -n "$father_no" ]]; then
exp+="&& ("
exp+="(GT[$father_no] == \"alt\" && LR[$father_no] > $lr_var)"
exp+="|| (GT[$father_no] == \"ref\" && LR[$father_no] < $lr_ref)"
exp+=")"
fi
if [[ -n "$mother_no" ]]; then
exp+="&& ("
exp+="(GT[$mother_no] == \"alt\" && LR[$mother_no] > $lr_var)"
exp+="|| (GT[$mother_no] == \"ref\" && LR[$mother_no] < $lr_ref)"
exp+=")"
fi
bcftools view \
-i "$exp" \
-O z \
-o {snakemake.output.vcf} \
{snakemake.input.vcf}
;;
*)
cp {snakemake.input.vcf} {snakemake.output.vcf}
;;
esac
tabix -f {snakemake.output.vcf}
pushd $(dirname {snakemake.output.vcf})
md5sum $(basename {snakemake.output.vcf}) >$(basename {snakemake.output.vcf}).md5
md5sum $(basename {snakemake.output.tbi}) >$(basename {snakemake.output.tbi}).md5
"""
)
|
py | 7dfc72c7f92bd0f1af3a2bf916748b5c01b53a48 | import csv
import pickle
import numpy as np
from scipy.signal import butter, lfilter, savgol_filter, savgol_coeffs, filtfilt
import matplotlib.pylab as plt
from random import shuffle
import datetime
import load
import visualization
from scipy import signal
from processing import resample, savitzky_golay_filter, adhoc_features
from learning import unsupervised_features, supervised_features, clf_cross_validation
from sklearn import svm
from sklearn import neighbors
import warnings
warnings.filterwarnings(
action="ignore", module="scipy", message="^internal gelsd")
def classification_unsupervised_features(data, labels, config):
# test parameters
parameters = {
'feature_type': 'kinematics', #'uist'
'granularity': 'sample', #'gesture'
'dimension_reduction': 'pca', #'None', 'pca'
'dimension_reduction_args': {'n_components': 3},
'classifier': 'svm'
}
# build features
feature_type = parameters['feature_type']
features_, labels_ = adhoc_features(data, labels, feat_type=feature_type, granularity=parameters['granularity'])
# unsupervised dimension reduction
method = parameters['dimension_reduction']
if method == 'None':
feats = {}
if parameters['feature_type'] == 'kinematics':
feats['projection'] = features_[:,2:]
else:
feats['projection'] = features_
pkl_fn = 'clf_instructions_{}.pkl'.format(feature_type)
else:
if parameters['feature_type'] == 'kinematics':
features_ = features_[:,2:]
method_args = parameters['dimension_reduction_args']
feats = unsupervised_features(features_, method=method, args=method_args)
argument_string = ''
for k in method_args.keys():
argument_string += '{}'.format(k)+'='+'{}'.format(method_args[k])+'_'
pkl_fn = 'clf_instructions_{}_{}_{}.pkl'.format(feature_type,method,argument_string)
print('writing features')
pickle.dump({'features': feats}, open(pkl_fn, 'wb'))
pickle.dump(np.transpose(feats['mapping']), open('mapping.pkl', 'wb'))
print(feats['mapping'].shape)
# classification
X = feats['projection']
Y = np.array([config['conditions'].index(c) + 1 for c in labels_[:, 0]])
scores, confmat = clf_cross_validation(X, Y, clf=parameters['classifier'])
print('Scores: {:.2f}%'.format(np.mean(scores)*100.0))
# store test
pickle.dump({'scores': scores, 'confmat': confmat, 'features': feats}, open(pkl_fn, 'wb'))
# def classification_unsupervised_features_by_words_participants(data, labels, config):
# # build features
# # @feat_type uist: spd, curvature
# # @feat_type dyn: pos, vel, acc, jerk
# features_, labels_ = adhoc_features(
# data, labels, feat_type='dyn', average=False)
# # unsupervised dimension reduction
# # features_ = unsupervised_features(features_, method='pca', args={'n_components': 2})
# # supervised dimension reduction
# # Y = np.array([config['conditions'].index(c) + 1 for c in labels_[:, 0]])
# # features_ = supervised_features(features_, Y, method='lda')
# # classification
# # @clf: knn, svm, (to add: lstm)
# labels_int = np.array([config['conditions'].index(c) + 1 for c in labels_[:, 0]])
# parts = np.unique(labels_[:,2])
# for w in config['words']:
# all_scores = []
# for p in parts:
# idx_p = np.where(labels_[:,2] == p)[0]
# idx_w = np.where(labels_[:,3] == w)[0]
# idx = list(set(idx_p) & set(idx_w))
# X = features_[idx,:]
# Y = labels_int[idx]
# scores, confmat = clf_cross_validation(X, Y, clf='knn')
# all_scores.append(scores)
# all_scores = np.array(all_scores).T
# plt.errorbar(np.arange(all_scores.shape[1])+1, np.mean(all_scores, axis=0), yerr=np.std(all_scores, axis=0), label=w)
# lgd = plt.legend(bbox_to_anchor=(1.2,1.0))
# plt.xlabel('Participants')
# plt.ylabel('Mean instruction classification')
# plt.savefig('clf_inst_words_parts.pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
# plt.show()
# def sample_wise_classification(data, labels, config):
# features_, labels_ = adhoc_features(
# data, labels, feat_type='dyn', average=False)
# parts = np.unique(labels_[:,2])
# labels_int = np.array([config['conditions'].index(c) + 1 for c in labels_[:, 0]])
# for p in parts:
# for w in config['words']:
# idx_p = sorted(np.where(labels_[:,2] == p)[0])
# idx_w = sorted(np.where(labels_[:,3] == w)[0])
# idx = sorted(list(set(idx_p) & set(idx_w)))
# X = features_[idx, 2:]
# Y = labels_int[idx]
# clf = svm.SVC()
# clf.fit(X, Y)
# y_pred = clf.predict(X)
# for c in np.unique(Y):
# idx_c = sorted(np.where(Y == c)[0])
# gestures = []
# gest_tmp = []
# predictions = []
# pred_tmp = []
# gest_tmp.append(X[idx_c[0],:2])
# pred_tmp.append(y_pred[idx_c[0]])
# for i in range(1,len(idx_c)):
# if idx_c[i]-idx_c[i-1]>1:
# gestures.append(gest_tmp)
# gest_tmp = []
# predictions.append(pred_tmp)
# pred_tmp = []
# gest_tmp.append(X[idx_c[i],:2])
# pred_tmp.append(y_pred[idx_c[i]])
# gestures.append(gest_tmp)
# predictions.append(pred_tmp)
# print(c,len(gestures), len(gestures[0]))
# plt.plot(predictions[0])
# plt.show()
# # print(len(y_pred))
if __name__ == "__main__":
# load all the data and experiment configuration
data, labels = load.get_data()
config = load.get_config()
classification_unsupervised_features(data, labels, config)
# load.clf_accuracies(data, labels)
# classify_instructions_by_words_participants(data, labels, config)
# labels_ = np.array(labels_)
# for p in range(1,10):
# idx = np.where(labels_[:,2] == 'p{}'.format(p))[0]
# X, Y = features_[idx,:], labels_[idx,:]
# Y_int = np.array([config['conditions'].index(c)+1 for c in Y[:,0]])
# X_t = learn_representation_static(data=X, labels=Y_int, mthd='lda')
# visualization.vis_dataset_2d(X_t, Y_int)
# viz_inter_participant_var(data, labels)
# dataset_stats(labels)
# gbl_gesture_variability(data, labels)
# accuracies = clf_accuracies(data, labels)
# variability_word_cond(data, labels)
|
py | 7dfc744c0aadfe4abe00325f9c9ac8169db552c1 | import sys
from uuoskit import test_helper
test_helper.run_test()
|
py | 7dfc74904a7af490f72388c3bbc26a4e368e8117 | # Apache License, Version 2.0
#
# Compare renders or screenshots against reference versions and generate
# a HTML report showing the differences, for regression testing.
import glob
import os
import pathlib
import shutil
import subprocess
import sys
import time
from . import global_report
class COLORS_ANSI:
RED = '\033[00;31m'
GREEN = '\033[00;32m'
ENDC = '\033[0m'
class COLORS_DUMMY:
RED = ''
GREEN = ''
ENDC = ''
COLORS = COLORS_DUMMY
def print_message(message, type=None, status=''):
if type == 'SUCCESS':
print(COLORS.GREEN, end="")
elif type == 'FAILURE':
print(COLORS.RED, end="")
status_text = ...
if status == 'RUN':
status_text = " RUN "
elif status == 'OK':
status_text = " OK "
elif status == 'PASSED':
status_text = " PASSED "
elif status == 'FAILED':
status_text = " FAILED "
else:
status_text = status
if status_text:
print("[{}]" . format(status_text), end="")
print(COLORS.ENDC, end="")
print(" {}" . format(message))
sys.stdout.flush()
def blend_list(dirpath):
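    # Recursively yield the paths of all .blend files under dirpath.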
for root, dirs, files in os.walk(dirpath):
for filename in files:
if filename.lower().endswith(".blend"):
filepath = os.path.join(root, filename)
yield filepath
def test_get_name(filepath):
filename = os.path.basename(filepath)
return os.path.splitext(filename)[0]
def test_get_images(output_dir, filepath, reference_dir):
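    # Return (old reference, report copy of reference, new render, diff) image paths for a test.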
testname = test_get_name(filepath)
dirpath = os.path.dirname(filepath)
old_dirpath = os.path.join(dirpath, reference_dir)
old_img = os.path.join(old_dirpath, testname + ".png")
ref_dirpath = os.path.join(output_dir, os.path.basename(dirpath), "ref")
ref_img = os.path.join(ref_dirpath, testname + ".png")
os.makedirs(ref_dirpath, exist_ok=True)
if os.path.exists(old_img):
shutil.copy(old_img, ref_img)
new_dirpath = os.path.join(output_dir, os.path.basename(dirpath))
os.makedirs(new_dirpath, exist_ok=True)
new_img = os.path.join(new_dirpath, testname + ".png")
diff_dirpath = os.path.join(output_dir, os.path.basename(dirpath), "diff")
os.makedirs(diff_dirpath, exist_ok=True)
diff_img = os.path.join(diff_dirpath, testname + ".diff.png")
return old_img, ref_img, new_img, diff_img
class Report:
__slots__ = (
'title',
'output_dir',
'reference_dir',
'idiff',
'pixelated',
'verbose',
'update',
'failed_tests',
'passed_tests',
'compare_tests',
'compare_engines'
)
def __init__(self, title, output_dir, idiff):
self.title = title
self.output_dir = output_dir
self.reference_dir = 'reference_renders'
self.idiff = idiff
self.compare_engines = None
self.pixelated = False
self.verbose = os.environ.get("BLENDER_VERBOSE") is not None
self.update = os.getenv('BLENDER_TEST_UPDATE') is not None
if os.environ.get("BLENDER_TEST_COLOR") is not None:
global COLORS, COLORS_ANSI
COLORS = COLORS_ANSI
self.failed_tests = ""
self.passed_tests = ""
self.compare_tests = ""
os.makedirs(output_dir, exist_ok=True)
def set_pixelated(self, pixelated):
self.pixelated = pixelated
def set_reference_dir(self, reference_dir):
self.reference_dir = reference_dir
def set_compare_engines(self, engine, other_engine):
self.compare_engines = (engine, other_engine)
def run(self, dirpath, render_cb):
# Run tests and output report.
dirname = os.path.basename(dirpath)
ok = self._run_all_tests(dirname, dirpath, render_cb)
self._write_data(dirname)
self._write_html()
if self.compare_engines:
self._write_html(comparison=True)
return ok
def _write_data(self, dirname):
# Write intermediate data for single test.
outdir = os.path.join(self.output_dir, dirname)
os.makedirs(outdir, exist_ok=True)
filepath = os.path.join(outdir, "failed.data")
pathlib.Path(filepath).write_text(self.failed_tests)
filepath = os.path.join(outdir, "passed.data")
pathlib.Path(filepath).write_text(self.passed_tests)
if self.compare_engines:
filepath = os.path.join(outdir, "compare.data")
pathlib.Path(filepath).write_text(self.compare_tests)
def _write_html(self, comparison=False):
# Gather intermediate data for all tests.
if comparison:
failed_data = []
passed_data = sorted(glob.glob(os.path.join(self.output_dir, "*/compare.data")))
else:
failed_data = sorted(glob.glob(os.path.join(self.output_dir, "*/failed.data")))
passed_data = sorted(glob.glob(os.path.join(self.output_dir, "*/passed.data")))
failed_tests = ""
passed_tests = ""
for filename in failed_data:
filepath = os.path.join(self.output_dir, filename)
failed_tests += pathlib.Path(filepath).read_text()
for filename in passed_data:
filepath = os.path.join(self.output_dir, filename)
passed_tests += pathlib.Path(filepath).read_text()
tests_html = failed_tests + passed_tests
# Write html for all tests.
if self.pixelated:
image_rendering = 'pixelated'
else:
image_rendering = 'auto'
failed = len(failed_tests) > 0
if failed:
message = "<p>Run <tt>BLENDER_TEST_UPDATE=1 ctest</tt> to create or update reference images for failed tests.</p>"
else:
message = ""
if comparison:
title = "Render Test Compare"
columns_html = "<tr><th>Name</th><th>%s</th><th>%s</th>" % self.compare_engines
else:
title = self.title
columns_html = "<tr><th>Name</th><th>New</th><th>Reference</th><th>Diff</th>"
html = """
<html>
<head>
<title>{title}</title>
<style>
img {{ image-rendering: {image_rendering}; width: 256px; background-color: #000; }}
img.render {{
background-color: #fff;
background-image:
-moz-linear-gradient(45deg, #eee 25%, transparent 25%),
-moz-linear-gradient(-45deg, #eee 25%, transparent 25%),
-moz-linear-gradient(45deg, transparent 75%, #eee 75%),
-moz-linear-gradient(-45deg, transparent 75%, #eee 75%);
background-image:
-webkit-gradient(linear, 0 100%, 100% 0, color-stop(.25, #eee), color-stop(.25, transparent)),
-webkit-gradient(linear, 0 0, 100% 100%, color-stop(.25, #eee), color-stop(.25, transparent)),
-webkit-gradient(linear, 0 100%, 100% 0, color-stop(.75, transparent), color-stop(.75, #eee)),
-webkit-gradient(linear, 0 0, 100% 100%, color-stop(.75, transparent), color-stop(.75, #eee));
-moz-background-size:50px 50px;
background-size:50px 50px;
-webkit-background-size:50px 51px; /* override value for shitty webkit */
background-position:0 0, 25px 0, 25px -25px, 0px 25px;
}}
table td:first-child {{ width: 256px; }}
</style>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css">
</head>
<body>
<div class="container">
<br/>
<h1>{title}</h1>
{message}
<br/>
<table class="table table-striped">
<thead class="thead-default">
{columns_html}
</thead>
{tests_html}
</table>
<br/>
</div>
</body>
</html>
""" . format(title=title,
message=message,
image_rendering=image_rendering,
tests_html=tests_html,
columns_html=columns_html)
filename = "report.html" if not comparison else "compare.html"
filepath = os.path.join(self.output_dir, filename)
pathlib.Path(filepath).write_text(html)
print_message("Report saved to: " + pathlib.Path(filepath).as_uri())
# Update global report
link_name = "Renders" if not comparison else "Comparison"
global_output_dir = os.path.dirname(self.output_dir)
global_failed = failed if not comparison else None
global_report.add(global_output_dir, self.title, link_name, filepath, global_failed)
def _relative_url(self, filepath):
relpath = os.path.relpath(filepath, self.output_dir)
return pathlib.Path(relpath).as_posix()
def _write_test_html(self, testname, filepath, error):
name = test_get_name(filepath)
name = name.replace('_', ' ')
old_img, ref_img, new_img, diff_img = test_get_images(self.output_dir, filepath, self.reference_dir)
status = error if error else ""
tr_style = """ style="background-color: #f99;" """ if error else ""
new_url = self._relative_url(new_img)
ref_url = self._relative_url(ref_img)
diff_url = self._relative_url(diff_img)
test_html = """
<tr{tr_style}>
<td><b>{name}</b><br/>{testname}<br/>{status}</td>
<td><img src="{new_url}" onmouseover="this.src='{ref_url}';" onmouseout="this.src='{new_url}';" class="render"></td>
<td><img src="{ref_url}" onmouseover="this.src='{new_url}';" onmouseout="this.src='{ref_url}';" class="render"></td>
<td><img src="{diff_url}"></td>
</tr>""" . format(tr_style=tr_style,
name=name,
testname=testname,
status=status,
new_url=new_url,
ref_url=ref_url,
diff_url=diff_url)
if error:
self.failed_tests += test_html
else:
self.passed_tests += test_html
if self.compare_engines:
ref_url = os.path.join("..", self.compare_engines[1], new_url)
test_html = """
<tr{tr_style}>
<td><b>{name}</b><br/>{testname}<br/>{status}</td>
<td><img src="{new_url}" onmouseover="this.src='{ref_url}';" onmouseout="this.src='{new_url}';" class="render"></td>
<td><img src="{ref_url}" onmouseover="this.src='{new_url}';" onmouseout="this.src='{ref_url}';" class="render"></td>
</tr>""" . format(tr_style=tr_style,
name=name,
testname=testname,
status=status,
new_url=new_url,
ref_url=ref_url)
self.compare_tests += test_html
def _diff_output(self, filepath, tmp_filepath):
old_img, ref_img, new_img, diff_img = test_get_images(self.output_dir, filepath, self.reference_dir)
# Create reference render directory.
old_dirpath = os.path.dirname(old_img)
os.makedirs(old_dirpath, exist_ok=True)
# Copy temporary to new image.
if os.path.exists(new_img):
os.remove(new_img)
if os.path.exists(tmp_filepath):
shutil.copy(tmp_filepath, new_img)
if os.path.exists(ref_img):
# Diff images test with threshold.
command = (
self.idiff,
"-fail", "0.016",
"-failpercent", "1",
ref_img,
tmp_filepath,
)
try:
subprocess.check_output(command)
failed = False
except subprocess.CalledProcessError as e:
if self.verbose:
print_message(e.output.decode("utf-8"))
failed = e.returncode != 1
else:
if not self.update:
return False
failed = True
if failed and self.update:
# Update reference image if requested.
shutil.copy(new_img, ref_img)
shutil.copy(new_img, old_img)
failed = False
# Generate diff image.
command = (
self.idiff,
"-o", diff_img,
"-abs", "-scale", "16",
ref_img,
tmp_filepath
)
try:
subprocess.check_output(command)
except subprocess.CalledProcessError as e:
if self.verbose:
print_message(e.output.decode("utf-8"))
return not failed
def _run_tests(self, filepaths, render_cb):
# Run all tests together for performance, since Blender
# startup time is a significant factor.
tmp_filepaths = []
for filepath in filepaths:
testname = test_get_name(filepath)
print_message(testname, 'SUCCESS', 'RUN')
tmp_filepaths.append(os.path.join(self.output_dir, "tmp_" + testname))
run_errors = render_cb(filepaths, tmp_filepaths)
errors = []
for error, filepath, tmp_filepath in zip(run_errors, filepaths, tmp_filepaths):
if not error:
if os.path.getsize(tmp_filepath) == 0:
error = "VERIFY"
elif not self._diff_output(filepath, tmp_filepath):
error = "VERIFY"
if os.path.exists(tmp_filepath):
os.remove(tmp_filepath)
errors.append(error)
testname = test_get_name(filepath)
if not error:
print_message(testname, 'SUCCESS', 'OK')
else:
if error == "SKIPPED":
print_message("Skipped after previous render caused error")
elif error == "NO_ENGINE":
print_message("Can't perform tests because the render engine failed to load!")
elif error == "NO_START":
print_message('Cannot perform tests because Blender fails to start.',
'Make sure INSTALL target was run.')
elif error == 'VERIFY':
print_message("Rendered result is different from reference image")
else:
print_message("Unknown error %r" % error)
print_message(testname, 'FAILURE', 'FAILED')
return errors
def _run_all_tests(self, dirname, dirpath, render_cb):
passed_tests = []
failed_tests = []
all_files = list(blend_list(dirpath))
all_files.sort()
print_message("Running {} tests from 1 test case." .
format(len(all_files)),
'SUCCESS', "==========")
time_start = time.time()
errors = self._run_tests(all_files, render_cb)
for filepath, error in zip(all_files, errors):
testname = test_get_name(filepath)
if error:
if error == "NO_ENGINE":
return False
elif error == "NO_START":
return False
failed_tests.append(testname)
else:
passed_tests.append(testname)
self._write_test_html(dirname, filepath, error)
time_end = time.time()
elapsed_ms = int((time_end - time_start) * 1000)
print_message("")
print_message("{} tests from 1 test case ran. ({} ms total)" .
format(len(all_files), elapsed_ms),
'SUCCESS', "==========")
print_message("{} tests." .
format(len(passed_tests)),
'SUCCESS', 'PASSED')
if failed_tests:
print_message("{} tests, listed below:" .
format(len(failed_tests)),
'FAILURE', 'FAILED')
failed_tests.sort()
for test in failed_tests:
print_message("{}" . format(test), 'FAILURE', "FAILED")
return not bool(failed_tests)
|
py | 7dfc757970091c6cb802898998ecb837929e25c9 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import sys
from pyspark.sql import Row
from pyspark.sql.functions import udf, input_file_name
from pyspark.testing.sqlutils import ReusedSQLTestCase
class FunctionsTests(ReusedSQLTestCase):
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
with self.tempView("temp"):
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr(u"a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_sampleby(self):
df = self.sc.parallelize([Row(a=i, b=(i % 3)) for i in range(100)]).toDF()
sampled = df.stat.sampleBy(u"b", fractions={0: 0.5, 1: 0.5}, seed=0)
self.assertTrue(sampled.count() == 35)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov(u"a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab(u"a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
self.assertTrue(row[1], 1)
self.assertTrue(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
assert sum(diff) == len(a), "%s not close to %s" % (a, c)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot("a", u"b")).collect())
assert_close([math.hypot(i, 2) for i in range(10)],
df.select(functions.hypot("a", 2)).collect())
assert_close([math.hypot(i, 2) for i in range(10)],
df.select(functions.hypot(df.a, 2)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_string_functions(self):
from pyspark.sql import functions
from pyspark.sql.functions import col, lit, _string_functions
df = self.spark.createDataFrame([['nick']], schema=['name'])
self.assertRaisesRegexp(
TypeError,
"must be the same type",
lambda: df.select(col('name').substr(0, lit(1))))
if sys.version_info.major == 2:
self.assertRaises(
TypeError,
lambda: df.select(col('name').substr(long(0), long(1))))
for name in _string_functions.keys():
self.assertEqual(
df.select(getattr(functions, name)("name")).first()[0],
df.select(getattr(functions, name)(col("name"))).first()[0])
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, "1").alias('b')).collect()
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_dayofweek(self):
from pyspark.sql.functions import dayofweek
dt = datetime.datetime(2017, 11, 6)
df = self.spark.createDataFrame([Row(date=dt)])
row = df.select(dayofweek(df.date)).first()
self.assertEqual(row[0], 2)
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
for f in ["a", u"a"]:
aq = df.stat.approxQuantile(f, [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", u"b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile((u"a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_sort_with_nulls_order(self):
from pyspark.sql import functions
df = self.spark.createDataFrame(
[('Tom', 80), (None, 60), ('Alice', 50)], ["name", "height"])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Alice'), Row(name=u'Tom')])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_last('name')).collect(),
[Row(name=u'Alice'), Row(name=u'Tom'), Row(name=None)])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Tom'), Row(name=u'Alice')])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_last('name')).collect(),
[Row(name=u'Tom'), Row(name=u'Alice'), Row(name=None)])
def test_input_file_name_reset_for_rdd(self):
rdd = self.sc.textFile('python/test_support/hello/hello.txt').map(lambda x: {'data': x})
df = self.spark.createDataFrame(rdd, "data STRING")
df.select(input_file_name().alias('file')).collect()
non_file_df = self.spark.range(100).select(input_file_name())
results = non_file_df.collect()
self.assertTrue(len(results) == 100)
# [SPARK-24605]: if everything was properly reset after the last job, this should return
# empty string rather than the file read in the last job.
for result in results:
self.assertEqual(result[0], '')
if __name__ == "__main__":
import unittest
from pyspark.sql.tests.test_functions import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
py | 7dfc76c8c789822413d8bd794a7c4356963b7e3c | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v1.proto.services import keyword_plan_idea_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_keyword__plan__idea__service__pb2
class KeywordPlanIdeaServiceStub(object):
"""Proto file describing the keyword plan idea service.
Service to generate keyword ideas.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GenerateKeywordIdeas = channel.unary_unary(
'/google.ads.googleads.v1.services.KeywordPlanIdeaService/GenerateKeywordIdeas',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_keyword__plan__idea__service__pb2.GenerateKeywordIdeasRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_keyword__plan__idea__service__pb2.GenerateKeywordIdeaResponse.FromString,
)
class KeywordPlanIdeaServiceServicer(object):
"""Proto file describing the keyword plan idea service.
Service to generate keyword ideas.
"""
def GenerateKeywordIdeas(self, request, context):
"""Returns a list of keyword ideas.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_KeywordPlanIdeaServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GenerateKeywordIdeas': grpc.unary_unary_rpc_method_handler(
servicer.GenerateKeywordIdeas,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_keyword__plan__idea__service__pb2.GenerateKeywordIdeasRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_keyword__plan__idea__service__pb2.GenerateKeywordIdeaResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v1.services.KeywordPlanIdeaService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
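# Hedged usage sketch (illustration only; not part of the generated code).
# It shows how a client could drive the stub defined above. The channel
# target is an assumed placeholder, and a real Google Ads call would use an
# authenticated secure channel plus populated request fields (customer id,
# seed keywords, and so on).
def _example_generate_keyword_ideas(target='localhost:50051'):
    channel = grpc.insecure_channel(target)  # assumed plain-text endpoint
    stub = KeywordPlanIdeaServiceStub(channel)
    # Empty request for illustration; populate it before a real call.
    request = google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_keyword__plan__idea__service__pb2.GenerateKeywordIdeasRequest()
    # Unary-unary RPC returning a GenerateKeywordIdeaResponse message.
    return stub.GenerateKeywordIdeas(request)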
|
py | 7dfc7714ea3ae4a3787619a5f86bb0cc84a1f6a5 | # -*- coding: utf-8 -*-
""" Utility Module
This module contains utility functions. Current implementation only has the
exponential utility function.
"""
__author__ = "Juan Carlos Chacon-Hurtado"
__credits__ = ["Juan Carlos Chacon-Hurtado", "Lisa Scholten"]
__license__ = "MIT"
__version__ = "0.1.0"
__maintainer__ = "Juan Carlos Chacon-Hurtado"
__email__ = "[email protected]"
__status__ = "Development"
__last_update__ = "01-07-2019"
import numpy as np
from numba import cuda, guvectorize, vectorize
import matplotlib.pyplot as plt
import math
#%%
# @guvectorize(['void(float64[:],float64[:],float64[:])',
# 'void(float32[:],float32[:],float32[:])'],
# '(m),(m)->(m)',
# target='cuda'
# )
# def exponential(v, r, out):
# '''Calculates the exponential utility function
# Parameters
# ----------
# v : float, ndarray
# Array containing the normalised values
# r : float, ndarray
# Exponent parameter
# returns
# out : ndarray
# Utility values
# Note
# ----
# This is executed as a vectorized function
# '''
# if r == 0:
# out[:] = v
# else:
# out[:] = (1.0 - np.exp(-r*v)) / (1.0 - np.exp(-r))
#%%
# define a device function
@cuda.jit('float32(float32, float32, float32)', device=True, inline=True)
def cu_device_fn(x, y, z):
return x ** y / z
# define a ufunc that calls our device function
@vectorize(['float32(float32, float32, float32)'], target='cuda')
def cu_ufunc(x, y, z):
return cu_device_fn(x, y, z)
#%%
@cuda.jit('float64(float64, float64)', device=True, inline=True)
def _dev_exponential(v, r):
'''Calculates the exponential utility function
Parameters
----------
v : float, ndarray
Array containing the normalised values
r : float, ndarray
Exponent parameter
returns
out : ndarray
Utility values
Note
----
This is executed as a CUDA device function, called from the vectorized wrapper below
'''
# if r == 0.0:
# out = v
# else:
# out = (1.0 - math.exp(-r*v)) / (1.0 - math.exp(-r))
return (1.0 - math.exp(-r*v)) / (1.0 - math.exp(-r))
#%%
@vectorize('float64(float64,float64)',
target='cuda')
def exponential(v, r):
return _dev_exponential(v, r)
#%%
# def _exponential(v, r):
# if v > 1.0 or v < 0.0:
# _msj = ('Values passed to the utility function should be ranked '
# 'normalised between 0 and 1')
# RuntimeWarning(_msj)
# if r == 0:
# out = v
# else:
# out = (1.0 - np.exp(-r*v)) / (1.0 - np.exp(-r))
# return out
# _vec_exp = np.vectorize(_exponential)
# return _vec_exp(v, r)
#example
x = np.linspace(0,1,50000000)
c = np.random.uniform(-20, 20, 50000000)
# c = 3.0*np.ones_like(x)
out = np.empty_like(x)
out = exponential(x, c)
# plt.plot(out)
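#%%
# Hedged cross-check sketch (assumption: verification helper only, not part of
# the module's intended API). It re-implements the same exponential utility
# formula on the CPU with NumPy so the CUDA ufunc above can be spot-checked
# on a small sample without going through the GPU path.
def _exponential_numpy(v, r):
    '''CPU reference of (1 - exp(-r*v)) / (1 - exp(-r)); returns v where r == 0.'''
    v = np.asarray(v, dtype=np.float64)
    r = np.asarray(r, dtype=np.float64)
    with np.errstate(divide='ignore', invalid='ignore'):
        res = (1.0 - np.exp(-r * v)) / (1.0 - np.exp(-r))
    return np.where(r == 0.0, v, res)
# Example spot check against the CUDA result computed above:
# np.testing.assert_allclose(out[:1000], _exponential_numpy(x[:1000], c[:1000]), rtol=1e-6)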
|
py | 7dfc77fa9f2ffb35059ebfe95e64407fa188e055 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 8 17:47:30 2019
@author: autol
"""
#%%
print('''
Recommender system test based on user ratings
MovieLens 1m dataset (ml-1m)
''')
import pandas as pd
import numpy as np
from matrix_fun import svdk,Prox,obj2,Frob1
import time
#%% Read the data files
'''
- UserIDs range between 1 and 6040
- MovieIDs range between 1 and 3952
- Ratings are made on a 5-star scale (whole-star ratings only)
'''
ratings_list = [i.strip().split("::") for i in open('./ml-1m/ratings.dat', 'r').readlines()]
users_list = [i.strip().split("::") for i in open('./ml-1m/users.dat', 'r').readlines()]
movies_list = [i.strip().split("::") for i in open('./ml-1m/movies.txt', 'r',
encoding='utf-8').readlines() if i.strip()]
#%% Convert to DataFrames
# Based on ratings only; user profile information is not used
ratings_df = pd.DataFrame(ratings_list,
columns = ['UserID', 'MovieID', 'Rating', 'Timestamp']).astype(np.int)
movies_df = pd.DataFrame(movies_list,
columns = ['MovieID', 'Title', 'Genres']).drop(['Genres'],axis=1)
movies_df['MovieID'] = movies_df['MovieID'].astype(np.int)
df = ratings_df.pivot(index = 'UserID', columns ='MovieID', values = 'Rating').fillna(np.nan) # .fillna(0)
# Rating matrix size: users * movies = 6040 * 3706; the movie dimension differs because some movies may not be in the movie catalogue
df.shape
#%%
# 截取大小 使用抽样方法
ui_size = 1000
rstat = 232
dfs = df.sample(n=ui_size, replace=0, random_state=rstat) \
.sample(n=ui_size, axis=1, random_state=rstat) # frac=1./10,
dfs.shape
# Or use the full dataset
#dfs = df.copy()
def toR_df(R,dfs):
df = pd.DataFrame(R, columns=dfs.columns).set_index(dfs.index)
return df
#%% Method 1: one-shot rank-k SVD reconstruction of the mean-filled matrix
k = 2
R = dfs.copy()
time1 = time.time()
Z = R.fillna(R.mean(axis=0)).fillna(0)
Z = Z.values;Z
X = R.values
xnas = np.isnan(X)
U, d, Vt = np.linalg.svd(Z,full_matrices=0)
U, d, Vt = U[:,:k],d[:k],Vt[:k,:]
(U, d, Vt)
Z = U @ np.diag(d) @ Vt;Z
#X[xnas] = Z[xnas]
time2 = time.time()
print('All Running time: %s Seconds'%(time2-time1))
R_df = toR_df(Z,dfs)
R_df.shape
#%% Method 2: iterative soft-impute with soft-thresholded singular values
λ = 1
k = 2
R = dfs.copy()
X = R.values
Z = R.fillna(R.mean(axis=0)).fillna(0)
Z = Z.values;Z
xnas = np.isnan(X) ; n,m = X.shape
nz = n*m-xnas.sum()
time1 = time.time()
svdZ = svdk(Z,k)
for i in range(20):
svdZ0 = svdZ
d = Prox(svdZ[1],λ) # proximal soft-thresholding of the singular values
Z1 = svdZ[0] @ np.diag(d) @ svdZ[2]
Z[xnas] = Z1[xnas]
svdZ = svdk(Z,k)
d1 = Prox(svdZ[1],λ)
obj = obj2(Z,Z1,xnas,nz,d1,λ)
ratio = Frob1(svdZ0[0],d,svdZ0[2].T, svdZ[0],d1,svdZ[2].T)
dicts = dict(ik=i,obj='%.3e'%obj,ratio='%.3e'%ratio)
print(dicts)
time2 = time.time()
print('All Running time: %s Seconds'%(time2-time1))
R_df = toR_df(Z,dfs)
R_df.shape
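#%%
# Hedged note (not taken from matrix_fun): a typical soft-thresholding
# proximal operator for this kind of soft-impute iteration shrinks the
# singular values toward zero by λ. The imported Prox may differ in details.
def _soft_threshold_example(d, lam):
    return np.maximum(d - lam, 0.0)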
#%%
#%%
userID = int(dfs.sample(n=1).index.values) # ,random_state=435
print('User', userID) # pick an arbitrary user
rm,mse = recommend_movies(R_df, userID, movies_df,ratings_df)
#%%
def recommend_movies(R_df, userID, movies_df,ratings_df,
n_movies=20, hide_ranks = 0):
u_ranks = R_df.loc[userID].sort_values(ascending=False) # one user ranks
u_ranks = u_ranks[u_ranks!=0]
u_ranks.shape
u_ratings = ratings_df[ratings_df.UserID == userID] # find this user records in all ratings
u_ratings.shape
u_m_ratings = u_ratings.merge(movies_df, how = 'inner',on = 'MovieID').sort_values(['Rating'], ascending=False)
u_m_ratings.shape
# print(u_m_ratings[['Title', 'Rating']][:3])
M = movies_df
# if hide_ranks:
# M = movies_df[~movies_df['MovieID'].isin(u_m_ratings['MovieID'])] # new except movies_df
# Re-index the predicted rankings for the chosen user
u_ranks = pd.DataFrame(u_ranks).reset_index().apply(pd.to_numeric).rename(columns = {userID: 'Ranks'})
u_ranks.shape
u_m_ranks = (M.merge(u_ranks,how = 'left',on='MovieID').
sort_values('Ranks',ascending = False).reset_index(drop=True)).dropna()
u_m_ranks.shape
u_m_rranks = u_m_ratings[['MovieID','Rating']].merge(u_m_ranks,how = 'outer',on='MovieID')[['MovieID','Title','Rating', 'Ranks']] #[:20]
u_m_rranks = u_m_rranks.dropna(subset=['Ranks'])
u_m_rranks.shape
RR_old = u_m_rranks[u_m_rranks['MovieID'].isin(u_m_ratings['MovieID'])]
# Used to filter out movies the user has already rated
RR_new = u_m_rranks[~u_m_rranks['MovieID'].isin(u_m_ratings['MovieID'])]
D = RR_old[['Rating','Ranks']].values
E = RR_new[['Title','Ranks']].values[:n_movies]
print('ratings: \n ',E)
a,b = D[:,0],D[:,1]
mse = np.linalg.norm(a-b)**2/len(a)
print('MSE against the existing ratings:', mse)
return E,mse
#%%
#%% Read/write the predicted rating matrix
# R_df.to_csv('svdk.csv',header=1, index=1)
R_df = pd.read_csv('svdk.csv',index_col=0)
R_df.columns.set_names('MovieID',inplace=True)
R_df.index
R_df.columns
|
py | 7dfc787edb55e4129233173e5b5e085baf21866d | # Test suite for the Sequence Object of the broadband package
#
# Horribly unfinished.
#
# The strategy is the same as for the BluePrint test suite: we cook up some
# sequences and try to break them. If we can't, everything is prolly OK
import pytest
import broadbean as bb
from broadbean.sequence import (SequenceCompatibilityError,
SequenceConsistencyError, Sequence)
from broadbean.tools import makeVaryingSequence, repeatAndVarySequence
ramp = bb.PulseAtoms.ramp
sine = bb.PulseAtoms.sine
@pytest.fixture
def protosequence1():
SR = 1e9
th = bb.BluePrint()
th.insertSegment(0, ramp, args=(0, 0), name='ramp', dur=10e-6)
th.insertSegment(1, ramp, args=(1, 1), name='ramp', dur=5e-6)
th.insertSegment(2, ramp, args=(0, 0), name='ramp', dur=10e-6)
th.setSR(SR)
wiggle1 = bb.BluePrint()
wiggle1.insertSegment(0, sine, args=(4e6, 0.5, 0), dur=25e-6)
wiggle1.setSR(SR)
wiggle2 = bb.BluePrint()
wiggle2.insertSegment(0, sine, args=(8e6, 0.5, 0), dur=25e-6)
wiggle2.setSR(SR)
elem1 = bb.Element()
elem1.addBluePrint(1, th)
elem1.addBluePrint(2, wiggle1)
elem2 = bb.Element()
elem2.addBluePrint(1, th)
elem2.addBluePrint(2, wiggle2)
seq = Sequence()
seq.addElement(1, elem1)
seq.addElement(2, elem2)
seq.setSR(SR)
seq.setChannelAmplitude(1, 2)
seq.setChannelOffset(1, 0)
seq.setChannelAmplitude(2, 2)
seq.setChannelOffset(2, 0)
seq.setSequencingTriggerWait(1, 1)
seq.setSequencingEventJumpTarget(1, 1)
seq.setSequencingGoto(1, 1)
seq.setSequencingTriggerWait(2, 1)
seq.setSequencingEventJumpTarget(2, 1)
seq.setSequencingGoto(2, 1)
return seq
@pytest.fixture
def protosequence2():
SR = 1e9
saw = bb.BluePrint()
saw.insertSegment(0, ramp, args=(0, 100e-3), dur=11e-6)
saw.insertSegment(1, 'waituntil', args=(25e-6))
saw.setSR(SR)
lineandwiggle = bb.BluePrint()
lineandwiggle.insertSegment(0, 'waituntil', args=(11e-6))
lineandwiggle.insertSegment(1, sine, args=(10e6, 50e-6, 10e-6), dur=14e-6)
lineandwiggle.setSR(SR)
elem1 = bb.Element()
elem1.addBluePrint(1, saw)
elem1.addBluePrint(2, lineandwiggle)
elem2 = bb.Element()
elem2.addBluePrint(2, saw)
elem2.addBluePrint(1, lineandwiggle)
seq = Sequence()
seq.setSR(SR)
seq.addElement(1, elem1)
seq.addElement(2, elem2)
seq.setChannelAmplitude(1, 1.5)
seq.setChannelOffset(1, 0)
seq.setChannelAmplitude(2, 1)
seq.setChannelOffset(2, 0)
seq.setSequencingTriggerWait(1, 0)
seq.setSequencingTriggerWait(2, 1)
seq.setSequencingNumberOfRepetitions(1, 2)
seq.setSequencingEventJumpTarget(1, 0)
seq.setSequencingEventJumpTarget(2, 0)
seq.setSequencingGoto(1, 2)
seq.setSequencingGoto(2, 1)
return seq
@pytest.fixture
def badseq_missing_pos():
SR = 1e9
saw = bb.BluePrint()
saw.insertSegment(0, ramp, args=(0, 100e-3), dur=11e-6)
saw.insertSegment(1, 'waituntil', args=(25e-6))
saw.setSR(SR)
lineandwiggle = bb.BluePrint()
lineandwiggle.insertSegment(0, 'waituntil', args=(11e-6))
lineandwiggle.insertSegment(1, sine, args=(10e6, 50e-6, 10e-6), dur=14e-6)
lineandwiggle.setSR(SR)
elem1 = bb.Element()
elem1.addBluePrint(1, saw)
elem1.addBluePrint(2, lineandwiggle)
elem2 = bb.Element()
elem2.addBluePrint(2, saw)
elem2.addBluePrint(1, lineandwiggle)
seq = Sequence()
seq.setSR(SR)
seq.addElement(1, elem1)
seq.addElement(3, elem2) # <--- A gap in the sequence
seq.setChannelAmplitude(1, 1.5)
seq.setChannelOffset(1, 0)
seq.setChannelAmplitude(2, 1)
seq.setChannelOffset(2, 0)
seq.setSequencingTriggerWait(3, 1)
seq.setSequencingNumberOfRepetitions(1, 2)
seq.setSequencingGoto(1, 2)
seq.setSequencingGoto(3, 1)
# seq.setSequenceSettings(1, 0, 2, 0, 2)
# seq.setSequenceSettings(2, 1, 1, 0, 1)
return seq
@pytest.fixture
def squarepulse_baseelem():
SR = 1e6
basebp = bb.BluePrint()
basebp.insertSegment(0, ramp, (0, 0), dur=0.5e-4)
basebp.insertSegment(1, ramp, (1, 1), dur=1e-4, name='varyme')
basebp.insertSegment(2, 'waituntil', 5e-4)
basebp.setSR(SR)
baseelem = bb.Element()
baseelem.addBluePrint(1, basebp)
return baseelem
##################################################
# INIT and dunderdunder part
@pytest.mark.parametrize('attribute', [('_data'), ('_sequencing'),
('_awgspecs'), ('_meta')])
def test_copy_positively(protosequence1, attribute):
new_seq = protosequence1.copy()
attr1 = new_seq.__getattribute__(attribute)
attr2 = protosequence1.__getattribute__(attribute)
assert attr1 == attr2
def test_copy_negatively_01(protosequence1):
new_seq = protosequence1.copy()
new_seq.setSequencingTriggerWait(1, 0)
new_seq.setSequencingNumberOfRepetitions(1, 1)
new_seq.setSequencingEventJumpTarget(1, 1)
new_seq.setSequencingGoto(1, 1)
assert new_seq != protosequence1
def test_copy_negatively_02(protosequence1):
new_seq = protosequence1.copy()
new_seq.setChannelAmplitude(1, 1.9)
assert new_seq != protosequence1
def test_copy_negatively_03(protosequence1):
new_seq = protosequence1.copy()
new_seq.element(1).changeArg(2, 'sine', 'freq', 1e6)
assert new_seq != protosequence1
def test_copy_and_eq(protosequence1):
new_seq = protosequence1.copy()
assert new_seq == protosequence1
def test_addition_fail_vrange(protosequence1, protosequence2):
with pytest.raises(SequenceCompatibilityError):
protosequence1 + protosequence2
def test_addition_fail_position(protosequence1, badseq_missing_pos):
with pytest.raises(SequenceConsistencyError):
protosequence1 + badseq_missing_pos
def test_addition_data(protosequence1, protosequence2):
protosequence2.setChannelAmplitude(1, 2)
protosequence2.setChannelOffset(1, 0)
protosequence2.setChannelAmplitude(2, 2)
protosequence2.setChannelOffset(2, 0)
newseq = protosequence1 + protosequence2
expected_data = {1: protosequence1.element(1),
2: protosequence1.element(2),
3: protosequence2.element(1),
4: protosequence2.element(2)}
assert newseq._data == expected_data
def test_addition_sequencing1(protosequence1, protosequence2):
protosequence2.setChannelAmplitude(1, 2)
protosequence2.setChannelOffset(1, 0)
protosequence2.setChannelAmplitude(2, 2)
protosequence2.setChannelOffset(2, 0)
newseq = protosequence1 + protosequence2
expected_sequencing = {1: {'twait': 1, 'nrep': 1, 'jump_target': 1,
'goto': 1, 'jump_input': 0},
2: {'twait': 1, 'nrep': 1, 'jump_target': 1,
'goto': 1, 'jump_input': 0},
3: {'twait': 0, 'nrep': 2, 'jump_target': 0,
'goto': 4, 'jump_input': 0},
4: {'twait': 1, 'nrep': 1, 'jump_target': 0,
'goto': 3, 'jump_input': 0}}
assert newseq._sequencing == expected_sequencing
def test_addition_sequencing2(protosequence1, protosequence2):
protosequence2.setChannelAmplitude(1, 2)
protosequence2.setChannelOffset(1, 0)
protosequence2.setChannelAmplitude(2, 2)
protosequence2.setChannelOffset(2, 0)
newseq = protosequence2 + protosequence1
expected_sequencing = {3: {'twait': 1, 'nrep': 1, 'jump_target': 3,
'goto': 3, 'jump_input': 0},
4: {'twait': 1, 'nrep': 1, 'jump_target': 3,
'goto': 3, 'jump_input': 0},
1: {'twait': 0, 'nrep': 2, 'jump_target': 0,
'goto': 2, 'jump_input': 0},
2: {'twait': 1, 'nrep': 1, 'jump_target': 0,
'goto': 1, 'jump_input': 0}}
assert newseq._sequencing == expected_sequencing
def test_addition_awgspecs(protosequence1, protosequence2):
protosequence2.setChannelAmplitude(1, 2)
protosequence2.setChannelOffset(1, 0)
protosequence2.setChannelAmplitude(2, 2)
protosequence2.setChannelOffset(2, 0)
newseq = protosequence1 + protosequence2
assert newseq._awgspecs == protosequence1._awgspecs
def test_addition_data_with_empty(protosequence1):
newseq = Sequence()
newseq._awgspecs = protosequence1._awgspecs
newseq = newseq + protosequence1
assert newseq._data == protosequence1._data
def test_add_subsequence_raises(protosequence1, squarepulse_baseelem):
# raise if a non-Sequence object is added
with pytest.raises(ValueError):
protosequence1.addSubSequence(1, squarepulse_baseelem)
seq = Sequence()
seq.addElement(1, squarepulse_baseelem)
seq.setSR(squarepulse_baseelem.SR)
mainseq = Sequence()
mainseq.setSR(seq.SR/2)
# raise if the subsequence sample rate does not match the main seq. SR
with pytest.raises(ValueError):
mainseq.addSubSequence(1, seq)
mainseq.setSR(seq.SR)
mainseq.addSubSequence(1, seq)
doublemainseq = Sequence()
doublemainseq.setSR(seq.SR)
with pytest.raises(ValueError):
doublemainseq.addSubSequence(1, mainseq)
##################################################
# AWG settings
def test_setSR(protosequence1):
protosequence1.setSR(1.2e9)
assert protosequence1._awgspecs['SR'] == 1.2e9
##################################################
# Highest level sequence variers
@pytest.mark.parametrize('channels, names, args, iters',
[([1], ['varyme'], ['start', 'stop'], [0.9, 1.0, 1.1]),
([1, 1], ['varyme', 'ramp'], ['start', 'start'], [(1,), (1,2)]),
([1], ['varyme'], ['crazyarg'], [0.9, 1.0, 1.1])])
def test_makeVaryingSequence_fail(squarepulse_baseelem, channels, names,
args, iters):
with pytest.raises(ValueError):
makeVaryingSequence(squarepulse_baseelem, channels,
names, args, iters)
@pytest.mark.parametrize('seqpos, argslist', [(1, [(0, 0), 2*(1,), (5e-4,)]),
(2, [(0, 0), 2*(1.2,), (5e-4,)]),
(3, [(0, 0), 2*(1.3,), (5e-4,)])])
def test_makeVaryingSequence(squarepulse_baseelem, seqpos, argslist):
channels = [1, 1]
names = ['varyme', 'varyme']
args = ['start', 'stop']
iters = 2*[[1, 1.2, 1.3]]
sequence = makeVaryingSequence(squarepulse_baseelem, channels,
names, args, iters)
assert sequence._data[seqpos]._data[1]['blueprint']._argslist == argslist
def test_repeatAndVarySequence_length(protosequence1):
poss = [1]
channels = [1]
names = ['ramp']
args = ['start']
iters = [[1, 1.1, 1.2]]
newseq = repeatAndVarySequence(protosequence1, poss, channels, names,
args, iters)
expected_l = len(iters[0])*protosequence1.length_sequenceelements
assert newseq.length_sequenceelements == expected_l
def test_repeatAndVarySequence_awgspecs(protosequence1):
poss = (1,)
channels = [1]
names = ['ramp']
args = ['stop']
iters = [[1, 0.9, 0.8]]
newseq = repeatAndVarySequence(protosequence1, poss, channels, names,
args, iters)
assert newseq._awgspecs == protosequence1._awgspecs
def test_repeatAndVarySequence_fail_inputlength1(protosequence1):
poss = (1, 2)
channels = [1]
names = ['ramp']
args = ['start']
iters = [(1, 0.2, 0.3)]
with pytest.raises(ValueError):
repeatAndVarySequence(protosequence1, poss,
channels, names, args, iters)
def test_repeatAndVarySequence_fail_inputlength2(protosequence1):
poss = (1, 2)
channels = [1, 1]
names = ['ramp', 'ramp']
args = ['start', 'stop']
iters = [(1, 0.2, 0.3), (1, 0.2)]
with pytest.raises(ValueError):
repeatAndVarySequence(protosequence1, poss,
channels, names, args, iters)
def test_repeatAndVarySequence_fail_consistency(protosequence1,
squarepulse_baseelem):
protosequence1.addElement(5, squarepulse_baseelem)
print(protosequence1.checkConsistency())
poss = (1,)
channels = [1]
names = ['ramp']
args = ['start']
iters = [(1, 0.2, 0.3)]
with pytest.raises(SequenceConsistencyError):
repeatAndVarySequence(protosequence1, poss,
channels, names, args, iters)
@pytest.mark.parametrize('pos', [2, 4, 6])
def test_repeatAndVarySequence_same_elements(protosequence1, pos):
poss = (1,)
channels = [1]
names = ['ramp']
args = ['start']
iters = [(1, 0.2, 0.3)]
newseq = repeatAndVarySequence(protosequence1, poss, channels,
names, args, iters)
assert newseq.element(pos) == protosequence1.element(2)
|
py | 7dfc7898af17c856b75b9ec824a9bce4641b6419 | import os
from pure import virtual_env, colors, constants
def test_virtual_env_raw_name_is_empty_when_deactivated():
os.unsetenv('VIRTUAL_ENV')
if 'VIRTUAL_ENV' in os.environ: # when running tests in a virtualenv
del os.environ['VIRTUAL_ENV']
assert virtual_env.raw() == constants.NOTHING
def test_virtual_env_segment_text_is_empty_when_deactivated():
os.unsetenv('VIRTUAL_ENV')
if 'VIRTUAL_ENV' in os.environ: # when running tests in a virtualenv
del os.environ['VIRTUAL_ENV']
colors.load_theme()
assert virtual_env.segment() == {'text': '', 'style': colors.style('mute')}
def test_virtual_env_raw_name_is_set_when_activated():
os.environ['VIRTUAL_ENV'] = '/path/to/virtual/env'
assert virtual_env.raw() == 'env'
def test_virtual_env_segment_text_is_set_when_activated():
os.environ['VIRTUAL_ENV'] = '/path/to/virtual/env'
colors.load_theme()
assert virtual_env.segment() == {'text': 'env', 'style': colors.style('mute')}
|
py | 7dfc78f82431f20bb8c2c01e77600d2fd2e22ba8 | import gocamping_api as ga
# import koreatour_api as ka
import make_sigungucode as ms
import pandas as pd
pd.set_option('display.max_row', 500)
pd.set_option('display.max_columns', 100)
if __name__ == '__main__':
s0 = ga.GocampingApi()
# s1 = ka.KoreaTourApi()
sgg = ms.Sigungucode()
df = s0.gocampingAPI()
# df = s1.festivalAPI(20210701)
# df = s1.tourspotAPI(900, 12)
# df = s1.tourlistAPI(10)
df = sgg.make_sigungucode(df)
sgg.final_check_save('camp_api_info', df) # pass the filename you want to save as
"""아래 함수 sigungucode 처리 불필요, 실행시 바로 csv 저장"""
# s1.tour_estiDecoAPI(20210701, 20210707)
# s1.visitors_API('metco', 20210601, 20210603) #region_type: metco (metropolitan city), locgo (local government)
py | 7dfc7924b80e32bc458048b740f2a56dd92c5960 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Travis Yates
"""Tests for object_detection.utils.category_util."""
import os
import tensorflow as tf
from object_detection.utils import category_util
class EvalUtilTest(tf.test.TestCase):
def test_load_categories_from_csv_file(self):
csv_data = """
0,"cat"
1,"dog"
2,"bird"
""".strip(' ')
csv_path = os.path.join(self.get_temp_dir(), 'test.csv')
with tf.gfile.Open(csv_path, 'wb') as f:
f.write(csv_data)
categories = category_util.load_categories_from_csv_file(csv_path)
self.assertTrue({'id': 0, 'name': 'cat'} in categories)
self.assertTrue({'id': 1, 'name': 'dog'} in categories)
self.assertTrue({'id': 2, 'name': 'bird'} in categories)
def test_save_categories_to_csv_file(self):
categories = [
{'id': 0, 'name': 'cat'},
{'id': 1, 'name': 'dog'},
{'id': 2, 'name': 'bird'},
]
csv_path = os.path.join(self.get_temp_dir(), 'test.csv')
category_util.save_categories_to_csv_file(categories, csv_path)
saved_categories = category_util.load_categories_from_csv_file(csv_path)
self.assertEqual(saved_categories, categories)
if __name__ == '__main__':
tf.test.main()
|
py | 7dfc79bcf7665aba0631d78d02bcfca75697baf4 | """
WSGI config for moviegraph project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "moviegraph.settings")
application = get_wsgi_application()
|
py | 7dfc7a795562947b4e151d9cb00a5f3e77b4cdeb | from app import app
from flask import request
from werkzeug import secure_filename
from os import path, remove as remove_file
from random import randrange
from time import time
from openpyxl import load_workbook
def allowed_ext(filename, allowed=[".xlsx"]):
# Extension
ext = path.splitext(filename)[1]
# End
return ext in allowed
def stringify(value):
if value:
return bytes(str(value), 'utf-8').decode('utf-8-sig').strip()
else:
return ""
def remove_temp(filepath):
if path.isfile(filepath):
remove_file(filepath)
return True
def get_uploaded_import_wb_file():
# Default output
output = None
# Form name
form_name = "file_import"
# On post
if request.method == "POST":
# Check form
if form_name in request.files:
# Get file
file = request.files[form_name]
filename = file.filename.strip()
is_update = request.form.get("update") == "y"
# Check filename
if not filename == "":
# Check extension
if allowed_ext(filename):
# Path
filename_clean = secure_filename(f"import_{randrange(1000, 9999)}_{int(time())}.xlsx")
save_path = path.join(
app.config.get("PRIVATE_DIR"),
"temp",
filename_clean
)
# Save
file.save(save_path)
# Load file
try:
# Load workbook
wb = load_workbook(save_path)
# End
return (True, save_path, wb, is_update)
except Exception as e:
# Remove file
remove_file(save_path)
# End
return (False, "Terjadi kesalahan saat memuat file")
else:
# End
return (False, "Ekstensi tidak diizinkan, silahkan upload file berekstensi *.xlsx")
else:
# End
return (False, "Silahkan pilih file terlebih dahulu")
else:
# End
return (False, "Gagal menemukan file pada permintaan form")
# End
return output
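# Hedged usage sketch: a hypothetical route showing how the helper above could
# be consumed. The endpoint path, endpoint name and response text are
# assumptions for illustration and are not defined elsewhere in this app.
@app.route("/import-example", methods=["POST"])
def _example_import_route():
    result = get_uploaded_import_wb_file()
    # On failure the helper returns (False, message); on success it returns
    # (True, saved_path, workbook, is_update).
    if not result or not result[0]:
        return (result[1] if result else "No file was processed"), 400
    _, save_path, wb, is_update = result
    sheet_names = wb.sheetnames  # openpyxl workbook sheet names
    remove_temp(save_path)       # clean up the temporary upload
    return "Loaded sheets: %s (update=%s)" % (", ".join(sheet_names), is_update)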
|
py | 7dfc7b9ed9ebee126777b237c73d47df969a0279 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import netaddr
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_2_parser
from ryu.lib import ofctl_utils
LOG = logging.getLogger('ryu.lib.ofctl_v1_2')
DEFAULT_TIMEOUT = 1.0
UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_2)
str_to_int = ofctl_utils.str_to_int
def to_action(dp, dic):
ofp = dp.ofproto
parser = dp.ofproto_parser
action_type = dic.get('type')
return ofctl_utils.to_action(dic, ofp, parser, action_type, UTIL)
def to_actions(dp, acts):
inst = []
actions = []
ofp = dp.ofproto
parser = dp.ofproto_parser
for a in acts:
action = to_action(dp, a)
if action is not None:
actions.append(action)
else:
action_type = a.get('type')
if action_type == 'WRITE_ACTIONS':
write_actions = []
write_acts = a.get('actions')
for act in write_acts:
action = to_action(dp, act)
if action is not None:
write_actions.append(action)
else:
LOG.error('Unknown action type: %s', action_type)
if write_actions:
inst.append(
parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS,
write_actions))
elif action_type == 'CLEAR_ACTIONS':
inst.append(
parser.OFPInstructionActions(ofp.OFPIT_CLEAR_ACTIONS, []))
elif action_type == 'GOTO_TABLE':
table_id = UTIL.ofp_table_from_user(a.get('table_id'))
inst.append(parser.OFPInstructionGotoTable(table_id))
elif action_type == 'WRITE_METADATA':
metadata = str_to_int(a.get('metadata'))
metadata_mask = (str_to_int(a['metadata_mask'])
if 'metadata_mask' in a
else parser.UINT64_MAX)
inst.append(
parser.OFPInstructionWriteMetadata(
metadata, metadata_mask))
else:
LOG.error('Unknown action type: %s', action_type)
if actions:
inst.append(parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions))
return inst
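# Illustrative only: an example of the flat action/instruction dictionaries
# that to_actions() accepts (the format used by ofctl REST-style callers).
# The port and table numbers below are arbitrary sample values.
_EXAMPLE_ACTS = [
    {'type': 'OUTPUT', 'port': 2},                 # plain action -> APPLY_ACTIONS
    {'type': 'WRITE_ACTIONS',                      # nested write-actions instruction
     'actions': [{'type': 'OUTPUT', 'port': 3}]},
    {'type': 'GOTO_TABLE', 'table_id': 1},         # goto-table instruction
]
# to_actions(dp, _EXAMPLE_ACTS) would build OFPInstructionActions and
# OFPInstructionGotoTable objects with dp's parser.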
def action_to_str(act):
action_type = act.cls_action_type
if action_type == ofproto_v1_2.OFPAT_OUTPUT:
port = UTIL.ofp_port_to_user(act.port)
buf = 'OUTPUT:' + str(port)
elif action_type == ofproto_v1_2.OFPAT_COPY_TTL_OUT:
buf = 'COPY_TTL_OUT'
elif action_type == ofproto_v1_2.OFPAT_COPY_TTL_IN:
buf = 'COPY_TTL_IN'
elif action_type == ofproto_v1_2.OFPAT_SET_MPLS_TTL:
buf = 'SET_MPLS_TTL:' + str(act.mpls_ttl)
elif action_type == ofproto_v1_2.OFPAT_DEC_MPLS_TTL:
buf = 'DEC_MPLS_TTL'
elif action_type == ofproto_v1_2.OFPAT_PUSH_VLAN:
buf = 'PUSH_VLAN:' + str(act.ethertype)
elif action_type == ofproto_v1_2.OFPAT_POP_VLAN:
buf = 'POP_VLAN'
elif action_type == ofproto_v1_2.OFPAT_PUSH_MPLS:
buf = 'PUSH_MPLS:' + str(act.ethertype)
elif action_type == ofproto_v1_2.OFPAT_POP_MPLS:
buf = 'POP_MPLS:' + str(act.ethertype)
elif action_type == ofproto_v1_2.OFPAT_SET_QUEUE:
queue_id = UTIL.ofp_queue_to_user(act.queue_id)
buf = 'SET_QUEUE:' + str(queue_id)
elif action_type == ofproto_v1_2.OFPAT_GROUP:
group_id = UTIL.ofp_group_to_user(act.group_id)
buf = 'GROUP:' + str(group_id)
elif action_type == ofproto_v1_2.OFPAT_SET_NW_TTL:
buf = 'SET_NW_TTL:' + str(act.nw_ttl)
elif action_type == ofproto_v1_2.OFPAT_DEC_NW_TTL:
buf = 'DEC_NW_TTL'
elif action_type == ofproto_v1_2.OFPAT_SET_FIELD:
buf = 'SET_FIELD: {%s:%s}' % (act.key, act.value)
else:
buf = 'UNKNOWN'
return buf
def actions_to_str(instructions):
actions = []
for instruction in instructions:
if isinstance(instruction,
ofproto_v1_2_parser.OFPInstructionActions):
if instruction.type == ofproto_v1_2.OFPIT_APPLY_ACTIONS:
for a in instruction.actions:
actions.append(action_to_str(a))
elif instruction.type == ofproto_v1_2.OFPIT_WRITE_ACTIONS:
write_actions = []
for a in instruction.actions:
write_actions.append(action_to_str(a))
if write_actions:
actions.append({'WRITE_ACTIONS': write_actions})
elif instruction.type == ofproto_v1_2.OFPIT_CLEAR_ACTIONS:
actions.append('CLEAR_ACTIONS')
else:
actions.append('UNKNOWN')
elif isinstance(instruction,
ofproto_v1_2_parser.OFPInstructionGotoTable):
table_id = UTIL.ofp_table_to_user(instruction.table_id)
buf = 'GOTO_TABLE:' + str(table_id)
actions.append(buf)
elif isinstance(instruction,
ofproto_v1_2_parser.OFPInstructionWriteMetadata):
buf = ('WRITE_METADATA:0x%x/0x%x' % (instruction.metadata,
instruction.metadata_mask)
if instruction.metadata_mask
else 'WRITE_METADATA:0x%x' % instruction.metadata)
actions.append(buf)
else:
continue
return actions
def to_match(dp, attrs):
convert = {'in_port': UTIL.ofp_port_from_user,
'in_phy_port': str_to_int,
'metadata': ofctl_utils.to_match_masked_int,
'dl_dst': ofctl_utils.to_match_eth,
'dl_src': ofctl_utils.to_match_eth,
'eth_dst': ofctl_utils.to_match_eth,
'eth_src': ofctl_utils.to_match_eth,
'dl_type': str_to_int,
'eth_type': str_to_int,
'dl_vlan': to_match_vid,
'vlan_vid': to_match_vid,
'vlan_pcp': str_to_int,
'ip_dscp': str_to_int,
'ip_ecn': str_to_int,
'nw_proto': str_to_int,
'ip_proto': str_to_int,
'nw_src': ofctl_utils.to_match_ip,
'nw_dst': ofctl_utils.to_match_ip,
'ipv4_src': ofctl_utils.to_match_ip,
'ipv4_dst': ofctl_utils.to_match_ip,
'tp_src': str_to_int,
'tp_dst': str_to_int,
'tcp_src': str_to_int,
'tcp_dst': str_to_int,
'udp_src': str_to_int,
'udp_dst': str_to_int,
'sctp_src': str_to_int,
'sctp_dst': str_to_int,
'icmpv4_type': str_to_int,
'icmpv4_code': str_to_int,
'arp_op': str_to_int,
'arp_spa': ofctl_utils.to_match_ip,
'arp_tpa': ofctl_utils.to_match_ip,
'arp_sha': ofctl_utils.to_match_eth,
'arp_tha': ofctl_utils.to_match_eth,
'ipv6_src': ofctl_utils.to_match_ip,
'ipv6_dst': ofctl_utils.to_match_ip,
'ipv6_flabel': str_to_int,
'icmpv6_type': str_to_int,
'icmpv6_code': str_to_int,
'ipv6_nd_target': ofctl_utils.to_match_ip,
'ipv6_nd_sll': ofctl_utils.to_match_eth,
'ipv6_nd_tll': ofctl_utils.to_match_eth,
'mpls_label': str_to_int,
'mpls_tc': str_to_int}
keys = {'dl_dst': 'eth_dst',
'dl_src': 'eth_src',
'dl_type': 'eth_type',
'dl_vlan': 'vlan_vid',
'nw_src': 'ipv4_src',
'nw_dst': 'ipv4_dst',
'nw_proto': 'ip_proto'}
if attrs.get('dl_type') == ether.ETH_TYPE_ARP or \
attrs.get('eth_type') == ether.ETH_TYPE_ARP:
if 'nw_src' in attrs and 'arp_spa' not in attrs:
attrs['arp_spa'] = attrs['nw_src']
del attrs['nw_src']
if 'nw_dst' in attrs and 'arp_tpa' not in attrs:
attrs['arp_tpa'] = attrs['nw_dst']
del attrs['nw_dst']
kwargs = {}
for key, value in attrs.items():
if key in keys:
# For old field name
key = keys[key]
if key in convert:
value = convert[key](value)
if key == 'tp_src' or key == 'tp_dst':
# TCP/UDP port
conv = {inet.IPPROTO_TCP: {'tp_src': 'tcp_src',
'tp_dst': 'tcp_dst'},
inet.IPPROTO_UDP: {'tp_src': 'udp_src',
'tp_dst': 'udp_dst'}}
ip_proto = attrs.get('nw_proto', attrs.get('ip_proto', 0))
key = conv[ip_proto][key]
kwargs[key] = value
else:
# others
kwargs[key] = value
else:
LOG.error('Unknown match field: %s', key)
return dp.ofproto_parser.OFPMatch(**kwargs)
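# Illustrative only: an example of the flat match dictionary accepted by
# to_match(). Old-style keys (dl_type, nw_proto, nw_dst, tp_dst, ...) are
# rewritten to their OXM names, and tp_dst resolves to tcp_dst because the
# IP protocol is TCP. The values are arbitrary sample data.
_EXAMPLE_MATCH_ATTRS = {
    'in_port': 1,
    'dl_type': 2048,         # 0x0800 -> eth_type (IPv4)
    'nw_proto': 6,           # TCP    -> ip_proto
    'nw_dst': '10.0.0.0/8',  # masked -> ipv4_dst
    'tp_dst': 80,            # mapped -> tcp_dst
}
# to_match(dp, _EXAMPLE_MATCH_ATTRS) returns an OFPMatch built from these.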
def to_match_vid(value):
return ofctl_utils.to_match_vid(value, ofproto_v1_2.OFPVID_PRESENT)
def match_to_str(ofmatch):
keys = {'eth_src': 'dl_src',
'eth_dst': 'dl_dst',
'eth_type': 'dl_type',
'vlan_vid': 'dl_vlan',
'ipv4_src': 'nw_src',
'ipv4_dst': 'nw_dst',
'ip_proto': 'nw_proto',
'tcp_src': 'tp_src',
'tcp_dst': 'tp_dst',
'udp_src': 'tp_src',
'udp_dst': 'tp_dst'}
match = {}
ofmatch = ofmatch.to_jsondict()['OFPMatch']
ofmatch = ofmatch['oxm_fields']
for match_field in ofmatch:
key = match_field['OXMTlv']['field']
if key in keys:
key = keys[key]
mask = match_field['OXMTlv']['mask']
value = match_field['OXMTlv']['value']
if key == 'dl_vlan':
value = match_vid_to_str(value, mask)
elif key == 'in_port':
value = UTIL.ofp_port_to_user(value)
else:
if mask is not None:
value = str(value) + '/' + str(mask)
match.setdefault(key, value)
return match
def match_vid_to_str(value, mask):
return ofctl_utils.match_vid_to_str(
value, mask, ofproto_v1_2.OFPVID_PRESENT)
def get_desc_stats(dp, waiters):
stats = dp.ofproto_parser.OFPDescStatsRequest(dp)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
s = {}
for msg in msgs:
stats = msg.body
s = stats.to_jsondict()[stats.__class__.__name__]
return {str(dp.id): s}
def get_queue_stats(dp, waiters, port=None, queue_id=None):
ofp = dp.ofproto
if port is None:
port = ofp.OFPP_ANY
else:
port = str_to_int(port)
if queue_id is None:
queue_id = ofp.OFPQ_ALL
else:
queue_id = str_to_int(queue_id)
stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, port,
queue_id, 0)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
s = []
for msg in msgs:
stats = msg.body
for stat in stats:
s.append({'port_no': stat.port_no,
'queue_id': stat.queue_id,
'tx_bytes': stat.tx_bytes,
'tx_errors': stat.tx_errors,
'tx_packets': stat.tx_packets})
return {str(dp.id): s}
def get_queue_config(dp, waiters, port=None):
ofp = dp.ofproto
if port is None:
port = ofp.OFPP_ANY
else:
port = UTIL.ofp_port_from_user(str_to_int(port))
stats = dp.ofproto_parser.OFPQueueGetConfigRequest(dp, port)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
prop_type = {
dp.ofproto.OFPQT_MIN_RATE: 'MIN_RATE',
dp.ofproto.OFPQT_MAX_RATE: 'MAX_RATE',
dp.ofproto.OFPQT_EXPERIMENTER: 'EXPERIMENTER',
}
configs = []
for config in msgs:
queue_list = []
for queue in config.queues:
prop_list = []
for prop in queue.properties:
p = {'property': prop_type.get(prop.property, 'UNKNOWN')}
if prop.property == dp.ofproto.OFPQT_MIN_RATE or \
prop.property == dp.ofproto.OFPQT_MAX_RATE:
p['rate'] = prop.rate
elif prop.property == dp.ofproto.OFPQT_EXPERIMENTER:
p['experimenter'] = prop.experimenter
p['data'] = prop.data
prop_list.append(p)
q = {'port': UTIL.ofp_port_to_user(queue.port),
'properties': prop_list,
'queue_id': UTIL.ofp_queue_to_user(queue.queue_id)}
queue_list.append(q)
c = {'port': UTIL.ofp_port_to_user(config.port),
'queues': queue_list}
configs.append(c)
return {str(dp.id): configs}
def get_flow_stats(dp, waiters, flow=None):
flow = flow if flow else {}
table_id = UTIL.ofp_table_from_user(
flow.get('table_id', dp.ofproto.OFPTT_ALL))
out_port = UTIL.ofp_port_from_user(
flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = UTIL.ofp_group_from_user(
flow.get('out_group', dp.ofproto.OFPG_ANY))
cookie = str_to_int(flow.get('cookie', 0))
cookie_mask = str_to_int(flow.get('cookie_mask', 0))
match = to_match(dp, flow.get('match', {}))
# Note: OpenFlow does not allow to filter flow entries by priority,
# but for efficiency, ofctl provides the way to do it.
priority = str_to_int(flow.get('priority', -1))
stats = dp.ofproto_parser.OFPFlowStatsRequest(
dp, table_id, out_port, out_group, cookie, cookie_mask, match)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
flows = []
for msg in msgs:
for stats in msg.body:
if 0 <= priority != stats.priority:
continue
actions = actions_to_str(stats.instructions)
match = match_to_str(stats.match)
s = {'priority': stats.priority,
'cookie': stats.cookie,
'idle_timeout': stats.idle_timeout,
'hard_timeout': stats.hard_timeout,
'actions': actions,
'match': match,
'byte_count': stats.byte_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'packet_count': stats.packet_count,
'table_id': UTIL.ofp_table_to_user(stats.table_id),
'length': stats.length}
flows.append(s)
return {str(dp.id): flows}
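# Illustrative only: an example filter for get_flow_stats(). Every key is
# optional; 'priority' is filtered locally by ofctl (see the note above),
# while the rest go into the OFPFlowStatsRequest. Sample values only.
_EXAMPLE_FLOW_FILTER = {
    'table_id': 0,
    'out_port': 2,
    'cookie': 0,
    'cookie_mask': 0,
    'match': {'in_port': 1, 'dl_type': 2048},
    'priority': 100,
}
# get_flow_stats(dp, waiters, _EXAMPLE_FLOW_FILTER) -> {dpid_str: [flow dicts]}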
def get_aggregate_flow_stats(dp, waiters, flow=None):
flow = flow if flow else {}
table_id = UTIL.ofp_table_from_user(
flow.get('table_id', dp.ofproto.OFPTT_ALL))
out_port = UTIL.ofp_port_from_user(
flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = UTIL.ofp_group_from_user(
flow.get('out_group', dp.ofproto.OFPG_ANY))
cookie = str_to_int(flow.get('cookie', 0))
cookie_mask = str_to_int(flow.get('cookie_mask', 0))
match = to_match(dp, flow.get('match', {}))
stats = dp.ofproto_parser.OFPAggregateStatsRequest(
dp, table_id, out_port, out_group, cookie, cookie_mask, match)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
flows = []
for msg in msgs:
stats = msg.body
s = {'packet_count': stats.packet_count,
'byte_count': stats.byte_count,
'flow_count': stats.flow_count}
flows.append(s)
return {str(dp.id): flows}
def get_table_stats(dp, waiters):
stats = dp.ofproto_parser.OFPTableStatsRequest(dp)
ofp = dp.ofproto
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
oxm_type_convert = {ofp.OFPXMT_OFB_IN_PORT: 'IN_PORT',
ofp.OFPXMT_OFB_IN_PHY_PORT: 'IN_PHY_PORT',
ofp.OFPXMT_OFB_METADATA: 'METADATA',
ofp.OFPXMT_OFB_ETH_DST: 'ETH_DST',
ofp.OFPXMT_OFB_ETH_SRC: 'ETH_SRC',
ofp.OFPXMT_OFB_ETH_TYPE: 'ETH_TYPE',
ofp.OFPXMT_OFB_VLAN_VID: 'VLAN_VID',
ofp.OFPXMT_OFB_VLAN_PCP: 'VLAN_PCP',
ofp.OFPXMT_OFB_IP_DSCP: 'IP_DSCP',
ofp.OFPXMT_OFB_IP_ECN: 'IP_ECN',
ofp.OFPXMT_OFB_IP_PROTO: 'IP_PROTO',
ofp.OFPXMT_OFB_IPV4_SRC: 'IPV4_SRC',
ofp.OFPXMT_OFB_IPV4_DST: 'IPV4_DST',
ofp.OFPXMT_OFB_TCP_SRC: 'TCP_SRC',
ofp.OFPXMT_OFB_TCP_DST: 'TCP_DST',
ofp.OFPXMT_OFB_UDP_SRC: 'UDP_SRC',
ofp.OFPXMT_OFB_UDP_DST: 'UDP_DST',
ofp.OFPXMT_OFB_SCTP_SRC: 'SCTP_SRC',
ofp.OFPXMT_OFB_SCTP_DST: 'SCTP_DST',
ofp.OFPXMT_OFB_ICMPV4_TYPE: 'ICMPV4_TYPE',
ofp.OFPXMT_OFB_ICMPV4_CODE: 'ICMPV4_CODE',
ofp.OFPXMT_OFB_ARP_OP: 'ARP_OP',
ofp.OFPXMT_OFB_ARP_SPA: 'ARP_SPA',
ofp.OFPXMT_OFB_ARP_TPA: 'ARP_TPA',
ofp.OFPXMT_OFB_ARP_SHA: 'ARP_SHA',
ofp.OFPXMT_OFB_ARP_THA: 'ARP_THA',
ofp.OFPXMT_OFB_IPV6_SRC: 'IPV6_SRC',
ofp.OFPXMT_OFB_IPV6_DST: 'IPV6_DST',
ofp.OFPXMT_OFB_IPV6_FLABEL: 'IPV6_FLABEL',
ofp.OFPXMT_OFB_ICMPV6_TYPE: 'ICMPV6_TYPE',
ofp.OFPXMT_OFB_ICMPV6_CODE: 'ICMPV6_CODE',
ofp.OFPXMT_OFB_IPV6_ND_TARGET: 'IPV6_ND_TARGET',
ofp.OFPXMT_OFB_IPV6_ND_SLL: 'IPV6_ND_SLL',
ofp.OFPXMT_OFB_IPV6_ND_TLL: 'IPV6_ND_TLL',
ofp.OFPXMT_OFB_MPLS_LABEL: 'MPLS_LABEL',
ofp.OFPXMT_OFB_MPLS_TC: 'MPLS_TC'}
act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
ofp.OFPAT_POP_VLAN: 'POP_VLAN',
ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
ofp.OFPAT_POP_MPLS: 'POP_MPLS',
ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
ofp.OFPAT_GROUP: 'GROUP',
ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
ofp.OFPAT_SET_FIELD: 'SET_FIELD'}
inst_convert = {ofp.OFPIT_GOTO_TABLE: 'GOTO_TABLE',
ofp.OFPIT_WRITE_METADATA: 'WRITE_METADATA',
ofp.OFPIT_WRITE_ACTIONS: 'WRITE_ACTIONS',
ofp.OFPIT_APPLY_ACTIONS: 'APPLY_ACTIONS',
ofp.OFPIT_CLEAR_ACTIONS: 'CLEAR_ACTIONS',
ofp.OFPIT_EXPERIMENTER: 'EXPERIMENTER'}
table_conf_convert = {
ofp.OFPTC_TABLE_MISS_CONTROLLER: 'TABLE_MISS_CONTROLLER',
ofp.OFPTC_TABLE_MISS_CONTINUE: 'TABLE_MISS_CONTINUE',
ofp.OFPTC_TABLE_MISS_DROP: 'TABLE_MISS_DROP',
ofp.OFPTC_TABLE_MISS_MASK: 'TABLE_MISS_MASK'}
tables = []
for msg in msgs:
stats = msg.body
for stat in stats:
match = []
wildcards = []
write_setfields = []
apply_setfields = []
for k, v in oxm_type_convert.items():
if (1 << k) & stat.match:
match.append(v)
if (1 << k) & stat.wildcards:
wildcards.append(v)
if (1 << k) & stat.write_setfields:
write_setfields.append(v)
if (1 << k) & stat.apply_setfields:
apply_setfields.append(v)
write_actions = []
apply_actions = []
for k, v in act_convert.items():
if (1 << k) & stat.write_actions:
write_actions.append(v)
if (1 << k) & stat.apply_actions:
apply_actions.append(v)
instructions = []
for k, v in inst_convert.items():
if (1 << k) & stat.instructions:
instructions.append(v)
config = []
for k, v in table_conf_convert.items():
if (1 << k) & stat.config:
config.append(v)
s = {'table_id': UTIL.ofp_table_to_user(stat.table_id),
'name': stat.name.decode('utf-8'),
'match': match,
'wildcards': wildcards,
'write_actions': write_actions,
'apply_actions': apply_actions,
'write_setfields': write_setfields,
'apply_setfields': apply_setfields,
'metadata_match': stat.metadata_match,
'metadata_write': stat.metadata_write,
'instructions': instructions,
'config': config,
'max_entries': stat.max_entries,
'active_count': stat.active_count,
'lookup_count': stat.lookup_count,
'matched_count': stat.matched_count}
tables.append(s)
return {str(dp.id): tables}
def get_port_stats(dp, waiters, port=None):
if port is None:
port = dp.ofproto.OFPP_ANY
else:
port = str_to_int(port)
stats = dp.ofproto_parser.OFPPortStatsRequest(
dp, port, 0)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
ports = []
for msg in msgs:
for stats in msg.body:
s = {'port_no': UTIL.ofp_port_to_user(stats.port_no),
'rx_packets': stats.rx_packets,
'tx_packets': stats.tx_packets,
'rx_bytes': stats.rx_bytes,
'tx_bytes': stats.tx_bytes,
'rx_dropped': stats.rx_dropped,
'tx_dropped': stats.tx_dropped,
'rx_errors': stats.rx_errors,
'tx_errors': stats.tx_errors,
'rx_frame_err': stats.rx_frame_err,
'rx_over_err': stats.rx_over_err,
'rx_crc_err': stats.rx_crc_err,
'collisions': stats.collisions}
ports.append(s)
return {str(dp.id): ports}
def get_group_stats(dp, waiters, group_id=None):
if group_id is None:
group_id = dp.ofproto.OFPG_ALL
else:
group_id = str_to_int(group_id)
stats = dp.ofproto_parser.OFPGroupStatsRequest(
dp, group_id, 0)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
groups = []
for msg in msgs:
for stats in msg.body:
bucket_counters = []
for bucket_counter in stats.bucket_counters:
c = {'packet_count': bucket_counter.packet_count,
'byte_count': bucket_counter.byte_count}
bucket_counters.append(c)
g = {'length': stats.length,
'group_id': UTIL.ofp_group_to_user(stats.group_id),
'ref_count': stats.ref_count,
'packet_count': stats.packet_count,
'byte_count': stats.byte_count,
'bucket_stats': bucket_counters}
groups.append(g)
return {str(dp.id): groups}
def get_group_features(dp, waiters):
ofp = dp.ofproto
type_convert = {ofp.OFPGT_ALL: 'ALL',
ofp.OFPGT_SELECT: 'SELECT',
ofp.OFPGT_INDIRECT: 'INDIRECT',
ofp.OFPGT_FF: 'FF'}
cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
ofp.OFPGFC_CHAINING: 'CHAINING',
ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
ofp.OFPAT_POP_VLAN: 'POP_VLAN',
ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
ofp.OFPAT_POP_MPLS: 'POP_MPLS',
ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
ofp.OFPAT_GROUP: 'GROUP',
ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
ofp.OFPAT_SET_FIELD: 'SET_FIELD'}
stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
features = []
for msg in msgs:
feature = msg.body
types = []
for k, v in type_convert.items():
if (1 << k) & feature.types:
types.append(v)
capabilities = []
for k, v in cap_convert.items():
if k & feature.capabilities:
capabilities.append(v)
max_groups = []
for k, v in type_convert.items():
max_groups.append({v: feature.max_groups[k]})
actions = []
for k1, v1 in type_convert.items():
acts = []
for k2, v2 in act_convert.items():
if (1 << k2) & feature.actions[k1]:
acts.append(v2)
actions.append({v1: acts})
f = {'types': types,
'capabilities': capabilities,
'max_groups': max_groups,
'actions': actions}
features.append(f)
return {str(dp.id): features}
def get_group_desc(dp, waiters):
type_convert = {dp.ofproto.OFPGT_ALL: 'ALL',
dp.ofproto.OFPGT_SELECT: 'SELECT',
dp.ofproto.OFPGT_INDIRECT: 'INDIRECT',
dp.ofproto.OFPGT_FF: 'FF'}
stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
descs = []
for msg in msgs:
for stats in msg.body:
buckets = []
for bucket in stats.buckets:
actions = []
for action in bucket.actions:
actions.append(action_to_str(action))
b = {'weight': bucket.weight,
'watch_port': bucket.watch_port,
'watch_group': bucket.watch_group,
'actions': actions}
buckets.append(b)
d = {'type': type_convert.get(stats.type),
'group_id': UTIL.ofp_group_to_user(stats.group_id),
'buckets': buckets}
descs.append(d)
return {str(dp.id): descs}
def get_port_desc(dp, waiters):
stats = dp.ofproto_parser.OFPFeaturesRequest(dp)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
descs = []
for msg in msgs:
stats = msg.ports
for stat in stats.values():
d = {'port_no': UTIL.ofp_port_to_user(stat.port_no),
'hw_addr': stat.hw_addr,
'name': stat.name.decode('utf-8'),
'config': stat.config,
'state': stat.state,
'curr': stat.curr,
'advertised': stat.advertised,
'supported': stat.supported,
'peer': stat.peer,
'curr_speed': stat.curr_speed,
'max_speed': stat.max_speed}
descs.append(d)
return {str(dp.id): descs}
def get_role(dp, waiters, to_user=True):
return ofctl_utils.get_role(dp, waiters, to_user)
def mod_flow_entry(dp, flow, cmd):
cookie = str_to_int(flow.get('cookie', 0))
cookie_mask = str_to_int(flow.get('cookie_mask', 0))
table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0))
idle_timeout = str_to_int(flow.get('idle_timeout', 0))
hard_timeout = str_to_int(flow.get('hard_timeout', 0))
priority = str_to_int(flow.get('priority', 0))
buffer_id = UTIL.ofp_buffer_from_user(
flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
out_port = UTIL.ofp_port_from_user(
flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = UTIL.ofp_group_from_user(
flow.get('out_group', dp.ofproto.OFPG_ANY))
flags = str_to_int(flow.get('flags', 0))
match = to_match(dp, flow.get('match', {}))
inst = to_actions(dp, flow.get('actions', []))
flow_mod = dp.ofproto_parser.OFPFlowMod(
dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
hard_timeout, priority, buffer_id, out_port, out_group,
flags, match, inst)
ofctl_utils.send_msg(dp, flow_mod, LOG)
def mod_group_entry(dp, group, cmd):
type_convert = {'ALL': dp.ofproto.OFPGT_ALL,
'SELECT': dp.ofproto.OFPGT_SELECT,
'INDIRECT': dp.ofproto.OFPGT_INDIRECT,
'FF': dp.ofproto.OFPGT_FF}
type_ = type_convert.get(group.get('type', 'ALL'))
if type_ is None:
LOG.error('Unknown group type: %s', group.get('type'))
group_id = UTIL.ofp_group_from_user(group.get('group_id', 0))
buckets = []
for bucket in group.get('buckets', []):
weight = str_to_int(bucket.get('weight', 0))
watch_port = str_to_int(
bucket.get('watch_port', dp.ofproto.OFPP_ANY))
watch_group = str_to_int(
bucket.get('watch_group', dp.ofproto.OFPG_ANY))
actions = []
for dic in bucket.get('actions', []):
action = to_action(dp, dic)
if action is not None:
actions.append(action)
buckets.append(dp.ofproto_parser.OFPBucket(
weight, watch_port, watch_group, actions))
group_mod = dp.ofproto_parser.OFPGroupMod(
dp, cmd, type_, group_id, buckets)
ofctl_utils.send_msg(dp, group_mod, LOG)
def mod_port_behavior(dp, port_config):
port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0))
hw_addr = str(port_config.get('hw_addr'))
config = str_to_int(port_config.get('config', 0))
mask = str_to_int(port_config.get('mask', 0))
advertise = str_to_int(port_config.get('advertise'))
port_mod = dp.ofproto_parser.OFPPortMod(
dp, port_no, hw_addr, config, mask, advertise)
ofctl_utils.send_msg(dp, port_mod, LOG)
def set_role(dp, role):
r = UTIL.ofp_role_from_user(role.get('role', dp.ofproto.OFPCR_ROLE_EQUAL))
role_request = dp.ofproto_parser.OFPRoleRequest(dp, r, 0)
ofctl_utils.send_msg(dp, role_request, LOG)
# NOTE(jkoelker) Alias common functions
send_experimenter = ofctl_utils.send_experimenter
|
py | 7dfc7c3ad74199a20e4de129d503c50b7d4de3e5 | # This file is distributed under the University of Illinois Open Source License. See LICENSE.TXT for details
###############################################################################
# This file contains driver test information for DXIL operations #
###############################################################################
from hctdb import *
import xml.etree.ElementTree as ET
import argparse
parser = argparse.ArgumentParser(description="contains information about dxil op test cases.")
parser.add_argument('mode', help="'gen-xml' or 'info'")
g_db_dxil = None
def get_db_dxil():
global g_db_dxil
if g_db_dxil is None:
g_db_dxil = db_dxil()
return g_db_dxil
"""
This class represents a test case used for driver testing of DXIL instructions.
DXIL instructions and test cases are two disjoint sets: each instruction can have multiple test cases,
and each test case can cover multiple DXIL instructions, so the two sets form a bipartite graph.
test_name: test case identifier; must be unique for each test case
insts: list of DXIL instructions covered by the test case
validation_type: validation type for the test
    epsilon: absolute difference check
    ulp: units in last place check
    relative: relative error check
validation_tolerance: tolerance value for the given validation type
input_lists: testing inputs, one list per shader input
output_lists: expected outputs for each input list
shader_target: shader model target used for testing
shader_text: HLSL source used to test the DXIL op
"""
class test_case(object):
def __init__(self, test_name, insts, validation_type, validation_tolerance,
input_lists, output_lists, shader_target, shader_text, **kwargs):
self.test_name = test_name
self.validation_type = validation_type
self.validation_tolerance = validation_tolerance
self.input_lists = input_lists
self.output_lists = output_lists
self.shader_target = shader_target
self.shader_text = shader_text
self.insts = insts # list of instructions each test case cover
self.warp_version = -1 # known warp version that works
self.shader_arguments = ""
for k,v in kwargs.items():
setattr(self, k, v)
# Wrapper for each DXIL instruction
class inst_node(object):
def __init__(self, inst):
self.inst = inst
self.test_cases = [] # list of test_case
def add_test_case(test_name, inst_names, validation_type, validation_tolerance,
input_lists, output_lists, shader_target, shader_text, **kwargs):
insts = []
for inst_name in inst_names:
assert (inst_name in g_instruction_nodes)
insts += [g_instruction_nodes[inst_name].inst]
case = test_case(test_name, insts, validation_type,
validation_tolerance, input_lists, output_lists,
shader_target, shader_text, **kwargs)
g_test_cases[test_name] = case
# update instruction nodes
for inst_name in inst_names:
g_instruction_nodes[inst_name].test_cases += [case]
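# Illustrative usage (hypothetical op names, not part of the real test table): registering
# a case named 'FooOp' that covers the DXIL 'Foo' instruction would look like
#   add_test_case('FooOp', ['Foo'], 'Epsilon', 0.0008, [['0', '1']], [['0', '1']],
#                 'cs_6_0', get_shader_text("unary float", "foo"))
# which records the case in g_test_cases and appends it to
# g_instruction_nodes['Foo'].test_cases, forming the bipartite graph described above.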
def add_test_case_int(test_name, inst_names, validation_type, validation_tolerance,
input_lists, output_lists, shader_key, shader_op_name, **kwargs):
add_test_case(test_name, inst_names, validation_type, validation_tolerance,
input_lists, output_lists, "cs_6_0", get_shader_text(shader_key, shader_op_name), **kwargs)
input_lists_16, output_lists_16 = input_lists, output_lists
if "input_16" in kwargs:
input_lists_16 = kwargs["input_16"]
if "output_16" in kwargs:
output_lists_16 = kwargs["output_16"]
add_test_case(test_name + "Bit16", inst_names, validation_type, validation_tolerance,
input_lists_16, output_lists_16, "cs_6_2", get_shader_text(shader_key.replace("int","int16_t"), shader_op_name),
shader_arguments="-enable-16bit-types", **kwargs)
def add_test_case_float_half(test_name, inst_names, validation_type, validation_tolerance,
float_input_lists, float_output_lists, shader_key, shader_op_name, **kwargs):
add_test_case(test_name, inst_names, validation_type, validation_tolerance,
float_input_lists, float_output_lists, "cs_6_0", get_shader_text(shader_key, shader_op_name), **kwargs)
    # if the half test cases differ from the float input/output lists, use those lists instead for the half tests
half_input_lists, half_output_lists, half_validation_type, half_validation_tolerance = float_input_lists, float_output_lists, validation_type, validation_tolerance
if "half_inputs" in kwargs:
half_input_lists = kwargs["half_inputs"]
if "half_outputs" in kwargs:
half_output_lists = kwargs["half_outputs"]
if "half_validation_type" in kwargs:
half_validation_type = kwargs["half_validation_type"]
if "half_validation_tolerance" in kwargs:
half_validation_tolerance = kwargs["half_validation_tolerance"]
# skip relative error test check for half for now
if validation_type != "Relative":
add_test_case(test_name + "Half", inst_names, half_validation_type, half_validation_tolerance,
half_input_lists, half_output_lists, "cs_6_2",
get_shader_text(shader_key.replace("float","half"), shader_op_name), shader_arguments="-enable-16bit-types", **kwargs)
def add_test_case_denorm(test_name, inst_names, validation_type, validation_tolerance, input_lists,
output_lists_ftz, output_lists_preserve, shader_target, shader_text, **kwargs):
add_test_case(test_name + "FTZ", inst_names, validation_type, validation_tolerance, input_lists,
output_lists_ftz, shader_target, shader_text, shader_arguments="-denorm ftz")
add_test_case(test_name + "Preserve", inst_names, validation_type, validation_tolerance, input_lists,
output_lists_preserve, shader_target, shader_text, shader_arguments="-denorm preserve")
    # "any" mode may legally produce either the "preserve" or the "ftz" result, so both output lists are accepted; validation must also accept zero for denormal outputs.
add_test_case(test_name + "Any", inst_names, validation_type, validation_tolerance, input_lists,
output_lists_preserve + output_lists_ftz, shader_target, shader_text, shader_arguments="-denorm any")
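# For example, a single add_test_case_denorm('FAddDenorm', ...) call (see the denorm tests
# further below) registers three cases, 'FAddDenormFTZ', 'FAddDenormPreserve', and
# 'FAddDenormAny', which differ only in the -denorm shader argument and in which output
# list(s) are accepted as correct.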
g_shader_texts = {
"unary int": ''' struct SUnaryIntOp {
int input;
int output;
};
RWStructuredBuffer<SUnaryIntOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SUnaryIntOp l = g_buf[GI];
l.output = %s(l.input);
g_buf[GI] = l;
};''',
"unary int16_t": ''' struct SUnaryInt16Op {
int16_t input;
int16_t output;
};
RWStructuredBuffer<SUnaryInt16Op> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SUnaryInt16Op l = g_buf[GI];
l.output = %s(l.input);
g_buf[GI] = l;
};''',
"unary uint": ''' struct SUnaryUintOp {
uint input;
uint output;
};
RWStructuredBuffer<SUnaryUintOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SUnaryUintOp l = g_buf[GI];
l.output = %s(l.input);
g_buf[GI] = l;
};''',
"unary uint16_t": ''' struct SUnaryUint16Op {
uint16_t input;
uint16_t output;
};
RWStructuredBuffer<SUnaryUint16Op> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SUnaryUint16Op l = g_buf[GI];
l.output = %s(l.input);
g_buf[GI] = l;
};''',
"unary float": ''' struct SUnaryFPOp {
float input;
float output;
};
RWStructuredBuffer<SUnaryFPOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SUnaryFPOp l = g_buf[GI];
l.output = %s(l.input);
g_buf[GI] = l;
};''',
"unary float bool": ''' struct SUnaryFPOp {
float input;
float output;
};
RWStructuredBuffer<SUnaryFPOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SUnaryFPOp l = g_buf[GI];
if (%s(l.input))
l.output = 1;
else
l.output = 0;
g_buf[GI] = l;
};''',
"unary half": ''' struct SUnaryFPOp {
float16_t input;
float16_t output;
};
RWStructuredBuffer<SUnaryFPOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SUnaryFPOp l = g_buf[GI];
l.output = %s(l.input);
g_buf[GI] = l;
};''',
"unary half bool": ''' struct SUnaryFPOp {
float16_t input;
float16_t output;
};
RWStructuredBuffer<SUnaryFPOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SUnaryFPOp l = g_buf[GI];
if (%s(l.input))
l.output = 1;
else
l.output = 0;
g_buf[GI] = l;
};''',
"binary int": ''' struct SBinaryIntOp {
int input1;
int input2;
int output1;
int output2;
};
RWStructuredBuffer<SBinaryIntOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryIntOp l = g_buf[GI];
l.output1 = l.input1 %s l.input2;
g_buf[GI] = l;
};''',
"binary int16_t": ''' struct SBinaryInt16Op {
int16_t input1;
int16_t input2;
int16_t output1;
int16_t output2;
};
RWStructuredBuffer<SBinaryInt16Op> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryInt16Op l = g_buf[GI];
l.output1 = l.input1 %s l.input2;
g_buf[GI] = l;
};''',
"binary int call": ''' struct SBinaryIntOp {
int input1;
int input2;
int output1;
int output2;
};
RWStructuredBuffer<SBinaryIntOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryIntOp l = g_buf[GI];
l.output1 = %s(l.input1,l.input2);
g_buf[GI] = l;
};''',
"binary int16_t call": ''' struct SBinaryInt16Op {
int16_t input1;
int16_t input2;
int16_t output1;
int16_t output2;
};
RWStructuredBuffer<SBinaryInt16Op> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryInt16Op l = g_buf[GI];
l.output1 = %s(l.input1,l.input2);
g_buf[GI] = l;
};''',
"binary uint": ''' struct SBinaryUintOp {
uint input1;
uint input2;
uint output1;
uint output2;
};
RWStructuredBuffer<SBinaryUintOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryUintOp l = g_buf[GI];
l.output1 = l.input1 %s l.input2;
g_buf[GI] = l;
};''',
"binary uint16_t": ''' struct SBinaryUint16Op {
uint16_t input1;
uint16_t input2;
uint16_t output1;
uint16_t output2;
};
RWStructuredBuffer<SBinaryUint16Op> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryUint16Op l = g_buf[GI];
l.output1 = l.input1 %s l.input2;
g_buf[GI] = l;
};''',
"binary uint call": ''' struct SBinaryUintOp {
uint input1;
uint input2;
uint output1;
uint output2;
};
RWStructuredBuffer<SBinaryUintOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryUintOp l = g_buf[GI];
l.output1 = %s(l.input1,l.input2);
g_buf[GI] = l;
};''',
"binary uint16_t call": ''' struct SBinaryUint16Op {
uint16_t input1;
uint16_t input2;
uint16_t output1;
uint16_t output2;
};
RWStructuredBuffer<SBinaryUint16Op> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryUint16Op l = g_buf[GI];
l.output1 = %s(l.input1,l.input2);
g_buf[GI] = l;
};''',
"binary float": ''' struct SBinaryFPOp {
float input1;
float input2;
float output1;
float output2;
};
RWStructuredBuffer<SBinaryFPOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryFPOp l = g_buf[GI];
l.output1 = l.input1 %s l.input2;
g_buf[GI] = l;
};''',
"binary float call": ''' struct SBinaryFPOp {
float input1;
float input2;
float output1;
float output2;
};
RWStructuredBuffer<SBinaryFPOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryFPOp l = g_buf[GI];
l.output1 = %s(l.input1,l.input2);
g_buf[GI] = l;
};''',
"binary half": ''' struct SBinaryFPOp {
half input1;
half input2;
half output1;
half output2;
};
RWStructuredBuffer<SBinaryFPOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryFPOp l = g_buf[GI];
l.output1 = l.input1 %s l.input2;
g_buf[GI] = l;
};''',
"binary half call": ''' struct SBinaryFPOp {
half input1;
half input2;
half output1;
half output2;
};
RWStructuredBuffer<SBinaryFPOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryFPOp l = g_buf[GI];
l.output1 = %s(l.input1,l.input2);
g_buf[GI] = l;
};''',
"tertiary int": ''' struct STertiaryIntOp {
int input1;
int input2;
int input3;
int output;
};
RWStructuredBuffer<STertiaryIntOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
STertiaryIntOp l = g_buf[GI];
l.output = %s(l.input1, l.input2, l.input3);
g_buf[GI] = l;
};''',
"tertiary int16_t": ''' struct STertiaryInt16Op {
int16_t input1;
int16_t input2;
int16_t input3;
int16_t output;
};
RWStructuredBuffer<STertiaryInt16Op> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
STertiaryInt16Op l = g_buf[GI];
l.output = %s(l.input1, l.input2, l.input3);
g_buf[GI] = l;
};''',
"tertiary uint": ''' struct STertiaryUintOp {
uint input1;
uint input2;
uint input3;
uint output;
};
RWStructuredBuffer<STertiaryUintOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
STertiaryUintOp l = g_buf[GI];
l.output = %s(l.input1, l.input2, l.input3);
g_buf[GI] = l;
};''',
"tertiary uint16_t": ''' struct STertiaryUint16Op {
uint16_t input1;
uint16_t input2;
uint16_t input3;
uint16_t output;
};
RWStructuredBuffer<STertiaryUint16Op> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
STertiaryUint16Op l = g_buf[GI];
l.output = %s(l.input1, l.input2, l.input3);
g_buf[GI] = l;
};''',
"tertiary float": ''' struct STertiaryFloatOp {
float input1;
float input2;
float input3;
float output;
};
RWStructuredBuffer<STertiaryFloatOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
STertiaryFloatOp l = g_buf[GI];
l.output = %s(l.input1, l.input2, l.input3);
g_buf[GI] = l;
};''',
'tertiary half': ''' struct STertiaryHalfOp {
half input1;
half input2;
half input3;
half output;
};
RWStructuredBuffer<STertiaryHalfOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
STertiaryHalfOp l = g_buf[GI];
l.output = %s(l.input1, l.input2, l.input3);
g_buf[GI] = l;
};''',
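# Note: the wave-op templates below call the same op in both branches of the mask test;
# the divergent branch is presumably there to exercise the wave op with different sets of
# active lanes (illustrative interpretation, not stated in the source).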
"wave op int" :''' struct PerThreadData {
uint firstLaneId;
uint laneIndex;
int mask;
int input;
int output;
};
RWStructuredBuffer<PerThreadData> g_sb : register(u0);
[numthreads(8,12,1)]
void main(uint GI : SV_GroupIndex) {
PerThreadData pts = g_sb[GI];
pts.firstLaneId = WaveReadLaneFirst(GI);
pts.laneIndex = WaveGetLaneIndex();
if (pts.mask != 0) {
pts.output = %s(pts.input);
}
else {
pts.output = %s(pts.input);
}
g_sb[GI] = pts;
};''',
"wave op uint" :''' struct PerThreadData {
uint firstLaneId;
uint laneIndex;
int mask;
uint input;
uint output;
};
RWStructuredBuffer<PerThreadData> g_sb : register(u0);
[numthreads(8,12,1)]
void main(uint GI : SV_GroupIndex) {
PerThreadData pts = g_sb[GI];
pts.firstLaneId = WaveReadLaneFirst(GI);
pts.laneIndex = WaveGetLaneIndex();
if (pts.mask != 0) {
pts.output = %s(pts.input);
}
else {
pts.output = %s(pts.input);
}
g_sb[GI] = pts;
};''',
"wave op int count": ''' struct PerThreadData {
uint firstLaneId;
uint laneIndex;
int mask;
int input;
int output;
};
RWStructuredBuffer<PerThreadData> g_sb : register(u0);
[numthreads(8,12,1)]
void main(uint GI : SV_GroupIndex) {
PerThreadData pts = g_sb[GI];
pts.firstLaneId = WaveReadLaneFirst(GI);
pts.laneIndex = WaveGetLaneIndex();
if (pts.mask != 0) {
pts.output = %s(pts.input > 3);
}
else {
pts.output = %s(pts.input > 3);
}
g_sb[GI] = pts;
};'''
}
def get_shader_text(op_type, op_call):
assert(op_type in g_shader_texts)
if op_type.startswith("wave op"):
return g_shader_texts[op_type] % (op_call, op_call)
return g_shader_texts[op_type] % (op_call)
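# For example, get_shader_text("unary float", "sin") substitutes "sin" into the unary
# float template, yielding a compute shader whose core statement is l.output = sin(l.input);
# wave-op templates receive the op name twice, once for each branch of the mask test.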
g_denorm_tests = ["FAddDenormAny", "FAddDenormFTZ", "FAddDenormPreserve",
"FSubDenormAny", "FSubDenormFTZ", "FSubDenormPreserve",
"FMulDenormAny", "FMulDenormFTZ", "FMulDenormPreserve",
"FDivDenormAny", "FDivDenormFTZ", "FDivDenormPreserve",
"FMadDenormAny", "FMadDenormFTZ", "FMadDenormPreserve",
"FAbsDenormAny", "FAbsDenormFTZ", "FAbsDenormPreserve",
"FMinDenormAny", "FMinDenormFTZ", "FMinDenormPreserve",
"FMaxDenormAny", "FMaxDenormFTZ", "FMaxDenormPreserve"]
# This is the collection of test cases for the per-instruction driver tests.
# Warning: when you want to pass in a signed 32-bit integer, make sure to pass
# negative numbers as decimal values rather than in hexadecimal representation;
# for some reason TAEF does not handle the hex form properly.
# For half values, hex is preferable: the test framework reads strings as float values
# and converts them to float16, possibly losing precision, whereas hex values are read as-is.
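# For example, INT32_MIN is passed as the decimal string '-2147483648' rather than
# '0x80000000', while half denormals are passed as hex strings such as '0x03FF'.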
def add_test_cases():
nan = float('nan')
p_inf = float('inf')
n_inf = float('-inf')
p_denorm = float('1e-38')
n_denorm = float('-1e-38')
# Unary Float
add_test_case_float_half('Sin', ['Sin'], 'Epsilon', 0.0008, [[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '-314.16',
'314.16'
]], [[
'NaN', 'NaN', '-0', '-0', '0', '0', 'NaN', '-0.0007346401',
'0.0007346401'
]], "unary float", "sin", half_validation_tolerance=0.003, half_inputs=[[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf',
'0.6279297', '1.255859', '1.884766', '2.511719', '3.140625',
'3.769531', '4.398438', '5.023438', '5.652344', '6.281250'
]], half_outputs=[[
'NaN', 'NaN', '-0', '-0', '0', '0', 'NaN',
'0.58747065', '0.95081574', '0.95111507', '0.58904284', '0.00096773',
'-0.58747751', '-0.95112079', '-0.95201313', '-0.58982444', '-0.00193545'
]])
add_test_case_float_half('Cos', ['Cos'], 'Epsilon', 0.0008, [[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '-314.16',
'314.16'
]], [[
'NaN', 'NaN', '1.0', '1.0', '1.0', '1.0', 'NaN', '0.99999973015',
'0.99999973015'
]], "unary float", "cos", half_validation_tolerance=0.003, half_inputs=[[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf',
'0.6279297', '1.255859', '1.884766', '2.511719', '3.140625',
'3.769531', '4.398438', '5.023438', '5.652344', '6.281250'
]], half_outputs=[[
'NaN', 'NaN', '1.0', '1.0', '1.0', '1.0', 'NaN',
'0.80924553', '0.30975693', '-0.30883664', '-0.80810183', '-0.99999952',
'-0.80924052', '-0.30881903', '0.30605716', '0.80753154', '0.99999809'
]])
add_test_case_float_half('Tan', ['Tan'], 'Epsilon', 0.0008, [[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '-314.16',
'314.16'
]], [[
'NaN', 'NaN', '-0.0', '-0.0', '0.0', '0.0', 'NaN', '-0.000735',
'0.000735'
]], "unary float", "tan", half_validation_tolerance=0.016, half_inputs=[[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf',
'0.6279297', '1.255859', '1.884766', '2.511719', '3.140625',
'3.769531', '4.398438', '5.652344', '6.281250'
]], half_outputs=[[
'NaN', 'NaN', '-0', '-0', '0', '0', 'NaN',
'0.72594857', '3.06955433', '-3.07967043', '-0.72892153', '-0.00096773',
'0.72596157', '3.07986474', '-0.7304042', '-0.00193546'
]])
add_test_case_float_half('Hcos', ['Hcos'], 'Epsilon', 0.0008,
[['NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '1', '-1']], [[
'NaN', 'Inf', '1.0', '1.0', '1.0', '1.0', 'Inf', '1.543081',
'1.543081'
]], "unary float", "cosh", half_validation_type='ulp', half_validation_tolerance=2)
add_test_case_float_half('Hsin', ['Hsin'], 'Epsilon', 0.0008,
[['NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '1', '-1']], [[
'NaN', '-Inf', '0.0', '0.0', '0.0', '0.0', 'Inf', '1.175201',
'-1.175201'
]], "unary float", "sinh")
add_test_case_float_half('Htan', ['Htan'], 'Epsilon', 0.0008,
[['NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '1', '-1']], [[
'NaN', '-1', '-0.0', '-0.0', '0.0', '0.0', '1', '0.761594',
'-0.761594'
]], "unary float", "tanh", warp_version=16202)
add_test_case_float_half('Acos', ['Acos'], 'Epsilon', 0.0008, [[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '1', '-1', '1.5',
'-1.5'
]], [[
'NaN', 'NaN', '1.570796', '1.570796', '1.570796', '1.570796', 'NaN',
'0', '3.1415926', 'NaN', 'NaN'
]], "unary float", "acos")
add_test_case_float_half('Asin', ['Asin'], 'Epsilon', 0.0008, [[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '1', '-1', '1.5',
'-1.5'
]], [[
'NaN', 'NaN', '0.0', '0.0', '0.0', '0.0', 'NaN', '1.570796',
'-1.570796', 'NaN', 'NaN'
]], "unary float", "asin")
add_test_case_float_half('Atan', ['Atan'], 'Epsilon', 0.0008,
[['NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '1', '-1']], [[
'NaN', '-1.570796', '0.0', '0.0', '0.0', '0.0', '1.570796',
'0.785398163', '-0.785398163'
]], "unary float", "atan", warp_version=16202)
add_test_case_float_half('Exp', ['Exp'], 'Relative', 21,
[['NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '-1', '10']],
[['NaN', '0', '1', '1', '1', '1', 'Inf', '0.367879441', '22026.46579']
], "unary float", "exp")
add_test_case_float_half('Frc', ['Frc'], 'Epsilon', 0.0008, [[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '-1', '2.718280',
'1000.599976', '-7.389'
]], [[
'NaN', 'NaN', '0', '0', '0', '0', 'NaN', '0', '0.718280', '0.599976',
'0.611'
]], "unary float", "frac",
half_inputs=[['NaN', '-Inf', '0x03FF', '-0', '0', 'Inf', '-1', '2.719',
'1000.5', '0xC764']],
half_outputs=[[
'NaN', 'NaN', '0x03FF', '0', '0', 'NaN', '0', '0.719', '0.5',
'0x38E1']])
add_test_case_float_half('Log', ['Log'], 'Relative', 21, [[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '-1',
'2.718281828', '7.389056', '100'
]], [[
'NaN', 'NaN', '-Inf', '-Inf', '-Inf', '-Inf', 'Inf', 'NaN', '1.0',
'1.99999998', '4.6051701'
]],"unary float", "log", half_inputs=[[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '-1',
'2.719', '7.39', '100'
]], half_outputs=[[
'NaN', 'NaN', '-Inf', '-Inf', '-Inf', '-Inf', 'Inf', 'NaN', '1.0',
'2', '4.605'
]])
add_test_case_float_half('Sqrt', ['Sqrt'], 'ulp', 1, [[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '-1', '2',
'16.0', '256.0'
]], [[
'NaN', 'NaN', '-0', '-0', '0', '0', 'Inf', 'NaN', '1.41421356237',
'4.0', '16.0'
]], "unary float", "sqrt",
half_inputs=[['NaN', '-Inf', '-denorm', '-0', '0', '0x03FF', 'Inf', '-1', '2', '16.0', '256.0']],
half_outputs=[['NaN', 'NaN', 'NaN', '-0', '0', '0x1FFF', 'Inf', 'NaN', '1.41421', '4.0', '16.0']])
add_test_case_float_half('Rsqrt', ['Rsqrt'], 'ulp', 1, [[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '-1', '16.0',
'256.0', '65536.0'
]], [[
'NaN', 'NaN', '-Inf', '-Inf', 'Inf', 'Inf', '0', 'NaN', '0.25',
'0.0625', '0.00390625'
]], "unary float", "rsqrt", half_inputs=[[
'NaN', '-Inf', '-denorm', '-0', '0', '0x03FF', 'Inf', '-1', '16.0',
'256.0', '0x7bff'
]], half_outputs=[[
'NaN', 'NaN', 'NaN', '-Inf', 'Inf', '0x5801', '0', 'NaN', '0.25',
'0.0625', '0x1C00'
]])
add_test_case_float_half('Round_ne', ['Round_ne'], 'Epsilon', 0, [[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '10.0', '10.4',
'10.5', '10.6', '11.5', '-10.0', '-10.4', '-10.5', '-10.6'
]], [[
'NaN', '-Inf', '-0', '-0', '0', '0', 'Inf', '10.0', '10.0', '10.0',
'11.0', '12.0', '-10.0', '-10.0', '-10.0', '-11.0'
]], "unary float", "round")
add_test_case_float_half('Round_ni', ['Round_ni'], 'Epsilon', 0, [[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '10.0', '10.4',
'10.5', '10.6', '-10.0', '-10.4', '-10.5', '-10.6'
]], [[
'NaN', '-Inf', '-0', '-0', '0', '0', 'Inf', '10.0', '10.0', '10.0',
'10.0', '-10.0', '-11.0', '-11.0', '-11.0'
]], "unary float", "floor", half_inputs=[[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '10.0', '10.4',
'10.5', '10.6', '-10.0', '-10.4', '-10.5', '-10.6'
]], half_outputs=[[
'NaN', '-Inf', '-1', '-0', '0', '0', 'Inf', '10.0', '10.0', '10.0',
'10.0', '-10.0', '-11.0', '-11.0', '-11.0'
]])
add_test_case_float_half('Round_pi', ['Round_pi'], 'Epsilon', 0,
[['NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '10.0', '10.4',
'10.5', '10.6', '-10.0', '-10.4', '-10.5', '-10.6']],
[['NaN', '-Inf', '-0', '-0', '0', '0', 'Inf', '10.0', '11.0', '11.0',
'11.0', '-10.0', '-10.0', '-10.0', '-10.0']], "unary float", "ceil",
half_inputs=[['NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '10.0', '10.4',
'10.5', '10.6', '-10.0', '-10.4', '-10.5', '-10.6']],
half_outputs=[['NaN', '-Inf', '-0', '-0', '0', '1', 'Inf', '10.0', '11.0', '11.0',
'11.0', '-10.0', '-10.0', '-10.0', '-10.0']])
add_test_case_float_half('Round_z', ['Round_z'], 'Epsilon', 0,
[['NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '10.0', '10.4',
'10.5', '10.6', '-10.0', '-10.4', '-10.5', '-10.6']],
[['NaN', '-Inf', '-0', '-0', '0', '0', 'Inf', '10.0', '10.0', '10.0',
'10.0', '-10.0', '-10.0', '-10.0', '-10.0']], "unary float", "trunc")
add_test_case_float_half('IsNaN', ['IsNaN'], 'Epsilon', 0,
[['NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '1.0', '-1.0']
], [['1', '0', '0', '0', '0', '0', '0', '0', '0']], "unary float bool", "isnan")
add_test_case_float_half('IsInf', ['IsInf'], 'Epsilon', 0,
[['NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '1.0', '-1.0']
], [['0', '1', '0', '0', '0', '0', '1', '0', '0']], "unary float bool", "isinf")
add_test_case_float_half('IsFinite', ['IsFinite'], 'Epsilon', 0,
[['NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '1.0', '-1.0']
], [['0', '0', '1', '1', '1', '1', '0', '1', '1']], "unary float bool", "isfinite", warp_version=16202)
add_test_case_float_half('FAbs', ['FAbs'], 'Epsilon', 0,
[['NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '1.0', '-1.0']
], [['NaN', 'Inf', 'denorm', '0', '0', 'denorm', 'Inf', '1', '1']], "unary float", "abs")
# Binary Float
add_test_case('FMin', ['FMin','FMax'], 'epsilon', 0, [[
'-inf', '-inf', '-inf', '-inf', 'inf', 'inf', 'inf', 'inf', 'NaN',
'NaN', 'NaN', 'NaN', '1.0', '1.0', '-1.0', '-1.0', '1.0'
], [
'-inf', 'inf', '1.0', 'NaN', '-inf', 'inf', '1.0', 'NaN', '-inf',
'inf', '1.0', 'NaN', '-inf', 'inf', '1.0', 'NaN', '-1.0'
]], [[
'-inf', '-inf', '-inf', '-inf', '-inf', 'inf', '1.0', 'inf', '-inf',
'inf', '1.0', 'NaN', '-inf', '1.0', '-1.0', '-1.0', '-1.0'
], [
'-inf', 'inf', '1.0', '-inf', 'inf', 'inf', 'inf', 'inf', '-inf',
'inf', '1.0', 'NaN', '1.0', 'inf', '1.0', '-1.0', '1.0'
]], 'cs_6_0', ''' struct SBinaryFPOp {
float input1;
float input2;
float output1;
float output2;
};
RWStructuredBuffer<SBinaryFPOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryFPOp l = g_buf[GI];
l.output1 = min(l.input1, l.input2);
l.output2 = max(l.input1, l.input2);
g_buf[GI] = l;
};''')
add_test_case('FMinHalf', ['FMin','FMax'], 'epsilon', 0, [[
'-inf', '-inf', '-inf', '-inf', 'inf', 'inf', 'inf', 'inf', 'NaN',
'NaN', 'NaN', 'NaN', '1.0', '1.0', '-1.0', '-1.0', '1.0'
], [
'-inf', 'inf', '1.0', 'NaN', '-inf', 'inf', '1.0', 'NaN', '-inf',
'inf', '1.0', 'NaN', '-inf', 'inf', '1.0', 'NaN', '-1.0'
]], [[
'-inf', '-inf', '-inf', '-inf', '-inf', 'inf', '1.0', 'inf', '-inf',
'inf', '1.0', 'NaN', '-inf', '1.0', '-1.0', '-1.0', '-1.0'
], [
'-inf', 'inf', '1.0', '-inf', 'inf', 'inf', 'inf', 'inf', '-inf',
'inf', '1.0', 'NaN', '1.0', 'inf', '1.0', '-1.0', '1.0'
]], 'cs_6_2', ''' struct SBinaryHalfOp {
half input1;
half input2;
half output1;
half output2;
};
RWStructuredBuffer<SBinaryHalfOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryHalfOp l = g_buf[GI];
l.output1 = min(l.input1, l.input2);
l.output2 = max(l.input1, l.input2);
g_buf[GI] = l;
};''', shader_arguments="-enable-16bit-types")
add_test_case_float_half('FAdd', ['FAdd'], 'ulp', 1, [['-1.0', '1.0', '32.5', '1.0000001000'],['4', '5.5', '334.7', '0.5000001000']], [['3.0', '6.5', '367.2', '1.5000002000']],
"binary float", "+")
add_test_case_float_half('FSub', ['FSub'], 'ulp', 1, [['-1.0', '5.5', '32.5', '1.0000001000'],['4', '1.25', '334.7', '0.5000001000']], [['-5', '4.25', '-302.2', '0.5000']],
"binary float", "-")
add_test_case_float_half('FMul', ['FMul'], 'ulp', 1, [['-1.0', '5.5', '1.0000001'],['4', '1.25', '2.0']], [['-4.0', '6.875', '2.0000002']],
"binary float", "*")
add_test_case_float_half('FDiv', ['FDiv'], 'ulp', 1, [['-1.0', '5.5', '1.0000001'],['4', '1.25', '2.0']], [['-0.25', '4.4', '0.50000006']],
"binary float", "/")
# Denorm Binary Float
add_test_case_denorm('FAddDenorm', ['FAdd'], 'ulp', 1,
[['0x007E0000', '0x00200000', '0x007E0000', '0x007E0000'],['0x007E0000','0x00200000', '0x807E0000', '0x800E0000']],
[['0','0', '0', '0']],
[['0x00FC0000','0x00400000', '0', '0x00700000']],
'cs_6_2', get_shader_text("binary float", "+"))
add_test_case_denorm('FSubDenorm', ['FSub'], 'ulp', 1,
[['0x007E0000', '0x007F0000', '0x00FF0000', '0x007A0000'],['0x007E0000', '0x807F0000', '0x00800000', '0']],
[['0x0', '0', '0', '0']],
[['0x0', '0x00FE0000', '0x007F0000', '0x007A0000']],
'cs_6_2', get_shader_text("binary float", "-"))
add_test_case_denorm('FDivDenorm', ['FDiv'], 'ulp', 1,
[['0x007F0000', '0x807F0000', '0x20000000', '0x00800000'],['1', '4', '0x607F0000', '0x40000000']],
[['0', '0', '0', '0']],
[['0x007F0000', '0x801FC000', '0x00101010', '0x00400000']],
'cs_6_2', get_shader_text("binary float", "/"))
add_test_case_denorm('FMulDenorm', ['FMul'], 'ulp', 1,
[['0x00000300', '0x007F0000', '0x007F0000', '0x001E0000', '0x00000300'],['128', '1', '0x007F0000', '20', '0x78000000']],
[['0', '0', '0', '0', '0']],
[['0x00018000','0x007F0000', '0', '0x01960000', '0x32400000']],
'cs_6_2', get_shader_text("binary float", "*"))
# Tertiary Float
add_test_case_float_half('FMad', ['FMad'], 'ulp', 1, [[
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '1.0', '-1.0',
'0', '1', '1.5'
], [
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '1.0', '-1.0',
'0', '1', '10'
], [
'NaN', '-Inf', '-denorm', '-0', '0', 'denorm', 'Inf', '1.0', '-1.0',
'1', '0', '-5.5'
]], [['NaN', 'NaN', '0', '0', '0', '0', 'Inf', '2', '0', '1', '1', '9.5']],
"tertiary float", "mad",
half_inputs=[[
'NaN', '-Inf', '0x03FF', '-0', '0', 'Inf', '1.0', '-1.0',
'0', '1', '1.5'
], [
'NaN', '-Inf', '1', '-0', '0', 'Inf', '1.0', '-1.0',
'0', '1', '10'
], [
'NaN', '-Inf', '0x03FF', '-0', '0', 'Inf', '1.0', '-1.0',
'1', '0', '-5.5'
]],
half_outputs=[['NaN', 'NaN', '0x07FE', '0', '0', 'Inf', '2', '0', '1', '1', '9.5']])
# Denorm Tertiary Float
add_test_case_denorm('FMadDenorm', ['FMad'], 'ulp', 1,
[['0x80780000', '0x80780000', '0x00780000'],
['1', '2', '2'],
['0x80780000', '0x00800000', '0x00800000']],
[['0', '0x00800000', '0x00800000']],
[['0x80F00000', '0x80700000', '0x01380000']],
'cs_6_2', get_shader_text("tertiary float", "mad"))
# Unary Int
int8_min, int8_max = '-128', '127'
int16_min, int16_max = '-32768', '32767'
int32_min, int32_max = '-2147483648', '2147483647'
uint16_max = '65535'
uint32_max = '4294967295'
add_test_case_int('Bfrev', ['Bfrev'], 'Epsilon', 0, [[
int32_min, '-65536', '-8', '-1', '0', '1', '8', '65536',
int32_max
]], [[
'1', '65535', '536870911', '-1', '0', int32_min, '268435456',
'32768', '-2'
]], "unary int", "reversebits",
input_16=[[int16_min, '-256', '-8', '-1', '0', '1', '8', '256', int16_max]],
output_16=[['1', '255', '8191', '-1', '0', int16_min, '4096', '128', '-2']])
    # firstbit_shi (s for signed) returns the bit position of the
    # first 0 from the MSB if the number is negative,
    # otherwise the position of the first 1 from the MSB.
    # All variants of the instruction return ~0 if no match was found.
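    # For example, per the table below, firstbithigh(8) == 3 and firstbithigh(65536) == 16
    # (index of the highest set bit, counted from the LSB), firstbithigh(-8) == 2 (first 0
    # below the sign bits of 0xFFFFFFF8), and firstbithigh(-1) and firstbithigh(0) both
    # yield ~0 (shown as -1).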
add_test_case_int('FirstbitSHi', ['FirstbitSHi'], 'Epsilon', 0, [[
int32_min, '-65536', '-8', '-1', '0', '1', '8', '65536',
int32_max
]], [['30', '15', '2', '-1', '-1', '0', '3', '16', '30']],
"unary int", "firstbithigh",
input_16=[[int16_min, '-256', '-8', '-1', '0', '1', '8', '256', int16_max]],
output_16=[['14', '7', '2', '-1', '-1', '0', '3', '8', '14']])
add_test_case_int('FirstbitLo', ['FirstbitLo'], 'Epsilon', 0, [[
int32_min, '-65536', '-8', '-1', '0', '1', '8', '65536',
int32_max
]], [['31', '16', '3', '0', '-1', '0', '3', '16', '0']],
"unary int", "firstbitlow",
input_16=[[int16_min, '-256', '-8', '-1', '0', '1', '8', '256', int16_max]],
output_16=[['15', '8', '3', '0', '-1', '0', '3', '8', '0']])
# TODO: there is a known bug in countbits when passing in immediate values.
# Fix this later
add_test_case('Countbits', ['Countbits'], 'Epsilon', 0, [[
int32_min, '-65536', '-8', '-1', '0', '1', '8', '65536',
int32_max
]], [['1', '16', '29', '32', '0', '1', '1', '1', '31']],
"cs_6_0", get_shader_text("unary int", "countbits"))
# Unary uint
add_test_case_int('FirstbitHi', ['FirstbitHi'], 'Epsilon', 0,
[['0', '1', '8', '65536', int32_max, uint32_max]],
[['-1', '0', '3', '16', '30', '31']],
"unary uint", "firstbithigh",
input_16=[['0', '1', '8', uint16_max]],
output_16=[['-1', '0', '3', '15']])
# Binary Int
add_test_case_int('IAdd', ['Add'], 'Epsilon', 0,
[[int32_min, '-10', '0', '0', '10', int32_max, '486'],
['0', '10', '-10', '10', '10', '0', '54238']],
[[int32_min, '0', '-10', '10', '20', int32_max, '54724']],
"binary int", "+",
input_16=[[int16_min, '-10', '0', '0', '10', int16_max],
['0', '10', '-3114', '272', '15', '0']],
output_16=[[int16_min, '0', '-3114', '272', '25', int16_max]])
add_test_case_int('ISub', ['Sub'], 'Epsilon', 0,
[[int32_min, '-10', '0', '0', '10', int32_max, '486'],
['0', '10', '-10', '10', '10', '0', '54238']],
[[int32_min, '-20', '10', '-10', '0', int32_max, '-53752']],
"binary int", "-",
input_16=[[int16_min, '-10', '0', '0', '10', int16_max],
['0', '10', '-3114', '272', '15', '0']],
output_16=[[int16_min, '-20', '3114', '-272', '-5', int16_max]])
add_test_case_int('IMax', ['IMax'], 'Epsilon', 0,
[[int32_min, '-10', '0', '0', '10', int32_max],
['0', '10', '-10', '10', '10', '0']],
[['0', '10', '0', '10', '10', int32_max]],
"binary int call", "max",
input_16=[[int16_min, '-10', '0', '0', '10', int16_max],
['0', '10', '-3114', '272', '15', '0']],
output_16=[['0', '10', '0', '272', '15', int16_max]])
add_test_case_int('IMin', ['IMin'], 'Epsilon', 0,
[[int32_min, '-10', '0', '0', '10', int32_max],
['0', '10', '-10', '10', '10', '0']],
[[int32_min, '-10', '-10', '0', '10', '0']],
"binary int call", "min",
input_16=[[int16_min, '-10', '0', '0', '10', int16_max],
['0', '10', '-3114', '272', '15', '0']],
output_16=[[int16_min, '-10', '-3114', '0', '10', '0']])
add_test_case_int('IMul', ['Mul'], 'Epsilon', 0, [
[ int32_min, '-10', '-1', '0', '1', '10', '10000', int32_max, int32_max ],
['-10', '-10', '10', '0', '256', '4', '10001', '0', int32_max]],
[['0', '100', '-10', '0', '256', '40', '100010000', '0', '1']],
"binary int", "*",
input_16=[[ int16_min, '-10', '-1', '0', '1', '10', int16_max],
['-10', '-10', '10', '0', '256', '4', '0']],
output_16=[['0', '100', '-10', '0', '256', '40', '0']])
add_test_case('IDiv', ['SDiv', 'SRem'], 'Epsilon', 0,
[['1', '1', '10', '10000', int32_max, int32_max, '-1'],
['1', '256', '4', '10001', '2', int32_max, '1']],
[['1', '0', '2', '0', '1073741823', '1', '-1'],
['0', '1', '2', '10000', '1', '0', '0']], "cs_6_0",
''' struct SBinaryIntOp {
int input1;
int input2;
int output1;
int output2;
};
RWStructuredBuffer<SBinaryIntOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryIntOp l = g_buf[GI];
l.output1 = l.input1 / l.input2;
l.output2 = l.input1 % l.input2;
g_buf[GI] = l;
};''')
add_test_case_int('Shl', ['Shl'], 'Epsilon', 0,
[['1', '1', '0x1010', '0xa', '-1', '0x12341234', '-1'],
['0', '259', '4', '2', '0', '15', '3']],
[['0x1', '0x8', '0x10100', '0x28', '-1','0x091a0000', '-8']],
"binary int", "<<",
input_16=[['1', '1', '0x0101', '0xa', '-1', '0x1234', '-1'],
['0', '259', '4', '2', '0', '13', '3']],
output_16=[['0x1', '0x8', '0x1010', '0x28', '-1','0x8000', '-8']])
add_test_case_int("LShr", ['LShr'], 'Epsilon', 0,
[['1', '1', '0xffff', '0x7fffffff', '0x70001234', '0x12340ab3', '0x7fffffff'],
['0', '1', '4', '30', '15', '16', '1']],
[['1', '0', '0xfff', '1', '0xe000', '0x1234', '0x3fffffff']],
"binary int", ">>",
input_16=[['1', '1', '0x7fff', '0x7fff'],
['0', '1', '4', '14']],
output_16=[['1', '0', '0x07ff', '1']]
)
add_test_case_int("And", ['And'], 'Epsilon', 0,
[['0x1', '0x01', '0x7fff0000', '0x33333333', '0x137f', '0x12345678', '0xa341', '-1'],
['0x1', '0xf0', '0x0000ffff', '0x22222222', '0xec80', '-1', '0x3471', '-1']],
[['0x1', '0x00', '0x0', '0x22222222', '0x0', '0x12345678', '0x2041', '-1']],
"binary int", "&",
input_16=[['0x1', '0x01', '0x7fff', '0x3333', '0x137f', '0x1234', '0xa341', '-1'],
['0x1', '0xf0', '0x0000', '0x2222', '0xec80', '-1', '0x3471', '-1']],
output_16=[['0x1', '0x00', '0x0', '0x2222', '0x0', '0x1234', '0x2041', '-1']],
)
add_test_case_int("Or", ['Or'], 'Epsilon', 0,
[['0x1', '0x01', '0x7fff0000', '0x11111111', '0x137f', '0x0', '0x12345678', '0xa341', '-1'],
['0x1', '0xf0', '0x0000ffff', '0x22222222', '0xec80', '0x0', '0x00000000', '0x3471', '-1']],
[['0x1', '0xf1', '0x7fffffff', '0x33333333', '0xffff', '0x0', '0x12345678', '0xb771', '-1']],
"binary int", "|",
input_16=[['0x1', '0x01', '0x7fff', '0x3333', '0x137f', '0x1234', '0xa341', '-1'],
['0x1', '0xf0', '0x0000', '0x2222', '0xec80', '0xffff', '0x3471', '-1']],
output_16=[['0x1', '0xf1', '0x7fff', '0x3333', '0xffff', '0xffff', '0xb771', '-1']],
)
add_test_case_int("Xor", ['Xor'], 'Epsilon', 0,
[['0x1', '0x01', '0x7fff0000', '0x11111111', '0x137f', '0x0', '0x12345678', '0xa341', '-1'],
['0x1', '0xf0', '0x0000ffff', '0x22222222', '0xec80', '0x0', '0x00000000', '0x3471', '-1']],
[['0x0', '0xf1', '0x7fffffff', '0x33333333', '0xffff', '0x0', '0x12345678', '0x9730', '0x00000000']],
"binary int", "^",
input_16=[['0x1', '0x01', '0x7fff', '0x1111', '0x137f', '0x0', '0x1234', '0xa341', '-1'],
['0x1', '0xf0', '0x0000', '0x2222', '0xec80', '0x0', '0x0000', '0x3471', '-1']],
output_16=[['0x0', '0xf1', '0x7fff', '0x3333', '0xffff', '0x0', '0x1234', '0x9730', '0x0000']],
)
# Binary Uint
add_test_case_int('UAdd', ['Add'], 'Epsilon', 0,
[['2147483648', '4294967285', '0', '0', '10', int32_max, '486'],
['0', '10', '0', '10', '10', '0', '54238']],
[['2147483648', uint32_max, '0', '10', '20', int32_max, '54724']],
"binary uint", "+",
input_16=[['323', '0xfff5', '0', '0', '10', uint16_max, '486'],
['0', '10', '0', '10', '10', '0', '334']],
output_16=[['323', uint16_max, '0', '10', '20', uint16_max, '820']])
add_test_case_int('USub', ['Sub'], 'Epsilon', 0,
[['2147483648', uint32_max, '0', '0', '30', int32_max, '54724'],
['0', '10', '0', '10', '10', '0', '54238']],
[['2147483648', '4294967285', '0', '4294967286', '20', int32_max, '486']],
"binary uint", "-",
input_16=[['323', uint16_max, '0', '0', '10', uint16_max, '486'],
['0', '10', '0', '10', '10', '0', '334']],
output_16=[['323', '0xfff5', '0', '-10', '0', uint16_max, '152']])
add_test_case_int('UMax', ['UMax'], 'Epsilon', 0,
[['0', '0', '10', '10000', int32_max, uint32_max],
['0', '256', '4', '10001', '0', uint32_max]],
[['0', '256', '10', '10001', int32_max, uint32_max]],
"binary uint call", "max",
input_16=[['0', '0', '10', '10000', int16_max, uint16_max],
['0', '256', '4', '10001', '0', uint16_max]],
output_16=[['0', '256', '10', '10001', int16_max, uint16_max]])
add_test_case_int('UMin', ['UMin'], 'Epsilon', 0,
[['0', '0', '10', '10000', int32_max, uint32_max],
['0', '256', '4', '10001', '0', uint32_max]],
[['0', '0', '4', '10000', '0', uint32_max]],
"binary uint call", "min",
input_16=[['0', '0', '10', '10000', int16_max, uint16_max],
['0', '256', '4', '10001', '0', uint16_max]],
output_16=[['0', '0', '4', '10000', '0', uint16_max]])
add_test_case_int('UMul', ['Mul'], 'Epsilon', 0,
[['0', '1', '10', '10000', int32_max],
['0', '256', '4', '10001', '0']],
[['0', '256', '40', '100010000', '0']],
"binary uint", "*",
input_16=[['0', '0', '10', '100', int16_max],
['0', '256', '4', '101', '0']],
output_16=[['0', '0', '40', '10100', '0']])
add_test_case('UDiv', ['UDiv', 'URem'], 'Epsilon', 0,
[['1', '1', '10', '10000', int32_max, int32_max, '0xffffffff'],
['0', '256', '4', '10001', '0', int32_max, '1']],
[['0xffffffff', '0', '2', '0', '0xffffffff', '1', '0xffffffff'],
['0xffffffff', '1', '2', '10000', '0xffffffff', '0', '0']], 'cs_6_0',
''' struct SBinaryUintOp {
uint input1;
uint input2;
uint output1;
uint output2;
};
RWStructuredBuffer<SBinaryUintOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryUintOp l = g_buf[GI];
l.output1 = l.input1 / l.input2;
l.output2 = l.input1 % l.input2;
g_buf[GI] = l;
};''')
add_test_case('UAddc', ['UAddc'], 'Epsilon', 0,
[['1', '1', '10000', '0x80000000', '0x7fffffff', '0xffffffff'],
['0', '256', '10001', '1', '0x7fffffff', '0x7fffffff']],
[['2', '2', '20000', '0', '0xfffffffe', '0xfffffffe'],
['0', '512', '20002', '3', '0xfffffffe', '0xffffffff']], 'cs_6_0',
''' struct SBinaryUintOp {
uint input1;
uint input2;
uint output1;
uint output2;
};
RWStructuredBuffer<SBinaryUintOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SBinaryUintOp l = g_buf[GI];
uint2 x = uint2(l.input1, l.input2);
uint2 y = AddUint64(x, x);
l.output1 = y.x;
l.output2 = y.y;
g_buf[GI] = l;
};''')
# Tertiary Int
add_test_case_int('IMad', ['IMad'], 'epsilon', 0, [[
'-2147483647', '-256', '-1', '0', '1', '2', '16', int32_max, '1',
'-1', '1', '10'
], ['1', '-256', '-1', '0', '1', '3', '16', '0', '1', '-1', '10', '100'], [
'0', '0', '0', '0', '1', '3', '1', '255', '2147483646', '-2147483647',
'-10', '-2000'
]], [[
'-2147483647', '65536', '1', '0', '2', '9', '257', '255', int32_max,
'-2147483646', '0', '-1000'
]], "tertiary int", "mad",
input_16=[[int16_min, '-256', '-1', '0', '1', '2', '16', int16_max],
['1','8','-1', '0', '1', '3', '16','1'],
['0', '0', '1', '3', '250', '-30', int16_min, '-50']],
output_16=[[int16_min, '-2048', '2', '3', '251', '-24', '-32512', '32717']]
)
add_test_case_int('UMad', ['UMad'], 'epsilon', 0,
[['0', '1', '2', '16', int32_max, '0', '10'], [
'0', '1', '2', '16', '1', '0', '10'
], ['0', '0', '1', '15', '0', '10', '10']],
[['0', '1', '5', '271', int32_max, '10', '110']],
"tertiary uint", "mad",
input_16=[['0', '1', '2', '16', int16_max, '0', '10'], [
'0', '1', '2', '16', '1', '0', '10'
], ['0', '0', '1', '15', '0', '10', '10']],
output_16=[['0', '1', '5', '271', int16_max, '10', '110']],
)
# Dot
add_test_case('Dot', ['Dot2', 'Dot3', 'Dot4'], 'epsilon', 0.008, [[
'NaN,NaN,NaN,NaN', '-Inf,-Inf,-Inf,-Inf',
'-denorm,-denorm,-denorm,-denorm', '-0,-0,-0,-0', '0,0,0,0',
'denorm,denorm,denorm,denorm', 'Inf,Inf,Inf,Inf', '1,1,1,1',
'-10,0,0,10', 'Inf,Inf,Inf,-Inf'
], [
'NaN,NaN,NaN,NaN', '-Inf,-Inf,-Inf,-Inf',
'-denorm,-denorm,-denorm,-denorm', '-0,-0,-0,-0', '0,0,0,0',
'denorm,denorm,denorm,denorm', 'Inf,Inf,Inf,Inf', '1,1,1,1',
'10,0,0,10', 'Inf,Inf,Inf,Inf'
]], [
[nan, p_inf, 0, 0, 0, 0, p_inf, 2, -100, p_inf],
[nan, p_inf, 0, 0, 0, 0, p_inf, 3, -100, p_inf],
[nan, p_inf, 0, 0, 0, 0, p_inf, 4, 0, nan],
], 'cs_6_0', ''' struct SDotOp {
float4 input1;
float4 input2;
float o_dot2;
float o_dot3;
float o_dot4;
};
RWStructuredBuffer<SDotOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SDotOp l = g_buf[GI];
l.o_dot2 = dot(l.input1.xy, l.input2.xy);
l.o_dot3 = dot(l.input1.xyz, l.input2.xyz);
l.o_dot4 = dot(l.input1.xyzw, l.input2.xyzw);
g_buf[GI] = l;
};''')
# Dot2AddHalf
add_test_case('Dot2AddHalf', ['Dot2AddHalf'], 'epsilon', 0.008, [[
'1,2', '1,-2', '1,2', '-1,2', '1,2', '-1,2', '1,2', '-1,-2',
'65504,1', '-65504,1', '1,65504', '1,-65504', 'inf,inf',
'denorm,denorm', '-denorm,-denorm', 'nan,nan'
], [
'3,4', '-3,4', '3,4', '3,-4', '3,4', '-3,4', '3,4', '-3,-4',
'1,65504', '1,-65504', '65504,1', '-65504,1', 'inf,inf',
'denorm,denorm', '-denorm,-denorm', 'nan,nan'
], [
'0', '0', '10', '10', '-5', '-5', '-30', '-30', '0', '0',
'10000000', '-10000000', 'inf', 'denorm', '-denorm',
'nan'
]], [
[11, -11, 21, -1, 6, 6, -19, -19, 131008, -131008, 10131008,
-10131008, p_inf, 0, 0, nan],
], 'cs_6_4', ''' struct SDot2AddHalfOp {
half2 input1;
half2 input2;
float acc;
float result;
};
RWStructuredBuffer<SDot2AddHalfOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SDot2AddHalfOp l = g_buf[GI];
l.result = dot2add(l.input1, l.input2, l.acc);
g_buf[GI] = l;
};''', shader_arguments='-enable-16bit-types')
# Dot4AddI8Packed
add_test_case('Dot4AddI8Packed', ['Dot4AddI8Packed'], 'epsilon', 0, [[
'0x00000102', '0x00000102', '0x00000102', '0x00000102',
'0XFFFFFFFF', '0x80808080', '0x80808080', '0x807F807F',
'0x7F7F7F7F', '0x80808080'
], [
'0x00000304', '0x00000304', '0x00000304', '0x00000304',
'0xFFFFFFFF', '0x01010101', '0x7F7F7F7F', '0x807F807F',
'0x7F7F7F7F', '0x80808080'
], [
'0', '10', '-5', '-30', '0', '0', '0', '0', '0', '0'
]], [
[11, 21, 6, -19, 4, -512, -65024, 65026, 64516, 65536],
], 'cs_6_4', ''' struct SDot4AddI8PackedOp {
dword input1;
dword input2;
int acc;
int result;
};
RWStructuredBuffer<SDot4AddI8PackedOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SDot4AddI8PackedOp l = g_buf[GI];
l.result = dot4add_i8packed(l.input1, l.input2, l.acc);
g_buf[GI] = l;
};''')
# Dot4AddU8Packed
add_test_case('Dot4AddU8Packed', ['Dot4AddU8Packed'], 'epsilon', 0, [[
'0x00000102', '0x00000102', '0x01234567', '0xFFFFFFFF',
'0xFFFFFFFF'
], [
'0x00000304', '0x00000304', '0x23456789', '0xFFFFFFFF',
'0xFFFFFFFF'
], [
'0', '10', '10000', '0', '3000000000'
]], [
[11, 21, 33668, 260100, 3000260100],
], 'cs_6_4', ''' struct SDot4AddU8PackedOp {
dword input1;
dword input2;
dword acc;
dword result;
};
RWStructuredBuffer<SDot4AddU8PackedOp> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SDot4AddU8PackedOp l = g_buf[GI];
l.result = dot4add_u8packed(l.input1, l.input2, l.acc);
g_buf[GI] = l;
};''')
# Quaternary
    # The msad4 intrinsic calls both Bfi and Msad; currently this is the only way to call the Bfi instruction from HLSL.
add_test_case('Bfi', ['Bfi', 'Msad'], 'epsilon', 0,
[["0xA100B2C3", "0x00000000", "0xFFFF01C1", "0xFFFFFFFF"], [
"0xD7B0C372, 0x4F57C2A3", "0xFFFFFFFF, 0x00000000",
"0x38A03AEF, 0x38194DA3", "0xFFFFFFFF, 0x00000000"
], ["1,2,3,4", "1,2,3,4", "0,0,0,0", "10,10,10,10"]],
[['153,6,92,113', '1,2,3,4', '397,585,358,707', '10,265,520,775']],
'cs_6_0', ''' struct SMsad4 {
uint ref;
uint2 source;
uint4 accum;
uint4 result;
};
RWStructuredBuffer<SMsad4> g_buf : register(u0);
[numthreads(8,8,1)]
void main(uint GI : SV_GroupIndex) {
SMsad4 l = g_buf[GI];
l.result = msad4(l.ref, l.source, l.accum);
g_buf[GI] = l;
};''')
# Wave Active Tests
add_test_case('WaveActiveSum', ['WaveActiveOp', 'WaveReadLaneFirst', 'WaveReadLaneAt'], 'Epsilon', 0,
[['1', '2', '3', '4'], ['0'], ['2', '4', '8', '-64']], [],
'cs_6_0', get_shader_text("wave op int", "WaveActiveSum"))
add_test_case('WaveActiveProduct', ['WaveActiveOp', 'WaveReadLaneFirst', 'WaveReadLaneAt'], 'Epsilon', 0,
[['1', '2', '3', '4'], ['0'], ['1', '2', '4', '-64']], [],
'cs_6_0', get_shader_text("wave op int", "WaveActiveProduct"))
add_test_case('WaveActiveCountBits', ['WaveAllBitCount', 'WaveReadLaneFirst', 'WaveReadLaneAt'], 'Epsilon', 0,
[['1', '2', '3', '4'], ['0'], ['1', '10', '-4', '-64'],
['-100', '-1000', '300']], [], 'cs_6_0',
get_shader_text("wave op int count", "WaveActiveCountBits"))
add_test_case('WaveActiveMax', ['WaveActiveOp', 'WaveReadLaneFirst', 'WaveReadLaneAt'], 'Epsilon', 0,
[['1', '2', '3', '4'], ['0'], ['1', '10', '-4', '-64'],
['-100', '-1000', '300']], [], 'cs_6_0',
get_shader_text("wave op int", "WaveActiveMax"))
add_test_case('WaveActiveMin', ['WaveActiveOp', 'WaveReadLaneFirst', 'WaveReadLaneAt'], 'Epsilon', 0,
[['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'], ['0'],
['1', '10', '-4', '-64'], ['-100', '-1000', '300']], [],
'cs_6_0', get_shader_text("wave op int", "WaveActiveMin"))
add_test_case('WaveActiveAllEqual', ['WaveActiveAllEqual'], 'Epsilon', 0,
[['1', '2', '3', '4', '1', '1', '1', '1'], ['3'], ['-10']],
[], 'cs_6_0', get_shader_text("wave op int", "WaveActiveAllEqual"))
add_test_case('WaveActiveAnyTrue', ['WaveAnyTrue', 'WaveReadLaneFirst', 'WaveReadLaneAt'], 'Epsilon', 0,
[['1', '0', '1', '0', '1'], ['1'], ['0']], [], 'cs_6_0',
get_shader_text("wave op int", "WaveActiveAnyTrue"))
add_test_case('WaveActiveAllTrue', ['WaveAllTrue', 'WaveReadLaneFirst', 'WaveReadLaneAt'], 'Epsilon', 0,
[['1', '0', '1', '0', '1'], ['1'], ['1']], [], 'cs_6_0',
get_shader_text("wave op int", "WaveActiveAllTrue"))
add_test_case('WaveActiveUSum', ['WaveActiveOp', 'WaveReadLaneFirst', 'WaveReadLaneAt'], 'Epsilon', 0,
[['1', '2', '3', '4'], ['0'], ['2', '4', '8', '64']], [],
'cs_6_0', get_shader_text("wave op uint", "WaveActiveSum"))
add_test_case('WaveActiveUProduct', ['WaveActiveOp', 'WaveReadLaneFirst', 'WaveReadLaneAt'], 'Epsilon', 0,
[['1', '2', '3', '4'], ['0'], ['1', '2', '4', '64']], [],
'cs_6_0', get_shader_text("wave op uint", "WaveActiveProduct"))
add_test_case('WaveActiveUMax', ['WaveActiveOp', 'WaveReadLaneFirst', 'WaveReadLaneAt'], 'Epsilon', 0,
[['1', '2', '3', '4'], ['0'], ['1', '10', '4', '64']], [],
'cs_6_0', get_shader_text("wave op uint", "WaveActiveMax"))
add_test_case('WaveActiveUMin', ['WaveActiveOp', 'WaveReadLaneFirst', 'WaveReadLaneAt'], 'Epsilon', 0,
[['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'], ['0'],
['1', '10', '4', '64']], [], 'cs_6_0',
get_shader_text("wave op uint", "WaveActiveMin"))
add_test_case('WaveActiveBitOr', ['WaveActiveBit'], 'Epsilon', 0, [[
'0xe0000000', '0x0d000000', '0x00b00000', '0x00070000', '0x0000e000',
'0x00000d00', '0x000000b0', '0x00000007'
], ['0xedb7edb7', '0xdb7edb7e', '0xb7edb7ed', '0x7edb7edb'], [
'0x12481248', '0x24812481', '0x48124812', '0x81248124'
], ['0x00000000', '0xffffffff']], [], 'cs_6_0', get_shader_text("wave op uint", "WaveActiveBitOr"))
add_test_case('WaveActiveBitAnd', ['WaveActiveBit'], 'Epsilon', 0, [[
'0xefffffff', '0xfdffffff', '0xffbfffff', '0xfff7ffff', '0xffffefff',
'0xfffffdff', '0xffffffbf', '0xfffffff7'
], ['0xedb7edb7', '0xdb7edb7e', '0xb7edb7ed', '0x7edb7edb'], [
'0x12481248', '0x24812481', '0x48124812', '0x81248124'
], ['0x00000000', '0xffffffff']], [], 'cs_6_0', get_shader_text("wave op uint", "WaveActiveBitAnd"))
add_test_case('WaveActiveBitXor', ['WaveActiveBit'], 'Epsilon', 0, [[
'0xe0000000', '0x0d000000', '0x00b00000', '0x00070000', '0x0000e000',
'0x00000d00', '0x000000b0', '0x00000007'
], ['0xedb7edb7', '0xdb7edb7e', '0xb7edb7ed', '0x7edb7edb'], [
'0x12481248', '0x24812481', '0x48124812', '0x81248124'
], ['0x00000000', '0xffffffff']], [], 'cs_6_0', get_shader_text("wave op uint", "WaveActiveBitXor"))
add_test_case('WavePrefixCountBits', ['WavePrefixBitCount'], 'Epsilon', 0,
[['1', '2', '3', '4', '5'], ['0'], ['1', '10', '-4', '-64'],
['-100', '-1000', '300']], [], 'cs_6_0',
get_shader_text("wave op int count", "WavePrefixCountBits"))
add_test_case('WavePrefixSum', ['WavePrefixOp'], 'Epsilon', 0,
[['1', '2', '3', '4', '5'], ['0', '1'], ['1', '2', '4', '-64', '128']],
[], 'cs_6_0', get_shader_text("wave op int", "WavePrefixSum"))
add_test_case('WavePrefixProduct', ['WavePrefixOp'], 'Epsilon', 0,
[['1', '2', '3', '4', '5'], ['0', '1'], ['1', '2', '4', '-64', '128']],
[], 'cs_6_0', get_shader_text("wave op int", "WavePrefixProduct"))
add_test_case('WavePrefixUSum', ['WavePrefixOp'], 'Epsilon', 0,
[['1', '2', '3', '4', '5'], ['0', '1'], ['1', '2', '4', '128']], [],
'cs_6_0', get_shader_text("wave op uint", "WavePrefixSum"))
add_test_case('WavePrefixUProduct', ['WavePrefixOp'], 'Epsilon', 0,
[['1', '2', '3', '4', '5'], ['0', '1'], ['1', '2', '4', '128']], [],
'cs_6_0', get_shader_text("wave op uint", "WavePrefixProduct"))
# Generate the XML file for the data-driven execution tests.
# TODO: ElementTree does not generate formatted XML; currently the xml file is checked in
# after being run through the VS Code formatter.
# Implement an xml formatter, or import a formatter library and use that instead.
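# A minimal sketch of one way to address the TODO above using only the standard library
# (illustrative; pretty_print_xml is not part of hctdb_test and is not called by the
# generator below): re-parse the serialized tree with minidom and let it handle indentation.
import xml.dom.minidom

def pretty_print_xml(element, indent="    "):
    # Serialize the ElementTree element, then re-parse and indent it with minidom.
    raw = ET.tostring(element)
    return xml.dom.minidom.parseString(raw).toprettyxml(indent=indent)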
def generate_parameter_types(table, num_inputs, num_outputs, has_known_warp_issue=False):
param_types = ET.SubElement(table, "ParameterTypes")
ET.SubElement(
param_types, "ParameterType", attrib={
"Name": "ShaderOp.Target"
}).text = "String"
ET.SubElement(
param_types, "ParameterType", attrib={
"Name": "ShaderOp.Arguments"
}).text = "String"
ET.SubElement(
param_types, "ParameterType", attrib={
"Name": "ShaderOp.Text"
}).text = "String"
ET.SubElement(
param_types, "ParameterType", attrib={
"Name": "Validation.Type"
}).text = "String"
ET.SubElement(
param_types, "ParameterType", attrib={
"Name": "Validation.Tolerance"
}).text = "double"
for i in range(0, num_inputs):
ET.SubElement(
param_types,
"ParameterType",
attrib={
"Name": 'Validation.Input{}'.format(i + 1),
'Array': 'true'
}).text = "String"
for i in range(0, num_outputs):
ET.SubElement(
param_types,
"ParameterType",
attrib={
"Name": 'Validation.Expected{}'.format(i + 1),
'Array': 'true'
}).text = "String"
if has_known_warp_issue:
ET.SubElement(param_types, "ParameterType", attrib={"Name":"Warp.Version"}).text = "unsigned int"
def generate_parameter_types_wave(table):
param_types = ET.SubElement(table, "ParameterTypes")
ET.SubElement(
param_types, "ParameterType", attrib={
"Name": "ShaderOp.Target"
}).text = "String"
ET.SubElement(
param_types, "ParameterType", attrib={
"Name": "ShaderOp.Text"
}).text = "String"
ET.SubElement(
param_types,
"ParameterType",
attrib={
"Name": "Validation.NumInputSet"
}).text = "String"
ET.SubElement(
param_types,
"ParameterType",
attrib={
"Name": "Validation.InputSet1",
"Array": "true"
}).text = "String"
ET.SubElement(
param_types,
"ParameterType",
attrib={
"Name": "Validation.InputSet2",
"Array": "true"
}).text = "String"
ET.SubElement(
param_types,
"ParameterType",
attrib={
"Name": "Validation.InputSet3",
"Array": "true"
}).text = "String"
ET.SubElement(
param_types,
"ParameterType",
attrib={
"Name": "Validation.InputSet4",
"Array": "true"
}).text = "String"
def generate_parameter_types_msad(table):
param_types = ET.SubElement(table, "ParameterTypes")
ET.SubElement(
param_types, "ParameterType", attrib={
"Name": "ShaderOp.Text"
}).text = "String"
ET.SubElement(
param_types, "ParameterType", attrib={
"Name": "Validation.Tolerance"
}).text = "int"
ET.SubElement(
param_types,
"ParameterType",
attrib={
"Name": "Validation.Input1",
"Array": "true"
}).text = "unsigned int"
ET.SubElement(
param_types,
"ParameterType",
attrib={
"Name": "Validation.Input2",
"Array": "true"
}).text = "String"
ET.SubElement(
param_types,
"ParameterType",
attrib={
"Name": "Validation.Input3",
"Array": "true"
}).text = "String"
ET.SubElement(
param_types,
"ParameterType",
attrib={
"Name": "Validation.Expected1",
"Array": "true"
}).text = "String"
def generate_row(table, case):
row = ET.SubElement(table, "Row", {"Name": case.test_name})
ET.SubElement(row, "Parameter", {
"Name": "Validation.Type"
}).text = case.validation_type
ET.SubElement(row, "Parameter", {
"Name": "Validation.Tolerance"
}).text = str(case.validation_tolerance)
ET.SubElement(row, "Parameter", {
"Name": "ShaderOp.Text"
}).text = case.shader_text
ET.SubElement(row, "Parameter", {
"Name": "ShaderOp.Target"
}).text = case.shader_target
for i in range(len(case.input_lists)):
inputs = ET.SubElement(row, "Parameter", {
"Name": "Validation.Input{}".format(i + 1)
})
for val in case.input_lists[i]:
ET.SubElement(inputs, "Value").text = str(val)
for i in range(len(case.output_lists)):
outputs = ET.SubElement(row, "Parameter", {
"Name": "Validation.Expected{}".format(i + 1)
})
for val in case.output_lists[i]:
ET.SubElement(outputs, "Value").text = str(val)
# Optional parameters
if case.warp_version > 0:
ET.SubElement(row, "Parameter", {"Name":"Warp.Version"}).text = str(case.warp_version)
if case.shader_arguments != "":
ET.SubElement(row, "Parameter", {"Name":"ShaderOp.Arguments"}).text = case.shader_arguments
def generate_row_wave(table, case):
row = ET.SubElement(table, "Row", {"Name": case.test_name})
ET.SubElement(row, "Parameter", {
"Name": "ShaderOp.Name"
}).text = case.test_name
ET.SubElement(row, "Parameter", {
"Name": "ShaderOp.Text"
}).text = case.shader_text
ET.SubElement(row, "Parameter", {
"Name": "Validation.NumInputSet"
}).text = str(len(case.input_lists))
for i in range(len(case.input_lists)):
inputs = ET.SubElement(row, "Parameter", {
"Name": "Validation.InputSet{}".format(i + 1)
})
for val in case.input_lists[i]:
ET.SubElement(inputs, "Value").text = str(val)
def generate_table_for_taef():
with open("..\\..\\tools\\clang\\unittests\\HLSL\\ShaderOpArithTable.xml",
'w') as f:
tree = ET.ElementTree()
root = ET.Element('Data')
# Create tables
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "UnaryFloatOpTable"
}), 1, 1, True)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "BinaryFloatOpTable"
}), 2, 2)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "TertiaryFloatOpTable"
}), 3, 1)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "UnaryHalfOpTable"
}), 1, 1, True)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "BinaryHalfOpTable"
}), 2, 2)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "TertiaryHalfOpTable"
}), 3, 1)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "UnaryIntOpTable"
}), 1, 1)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "BinaryIntOpTable"
}), 2, 2)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "TertiaryIntOpTable"
}), 3, 1)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "UnaryInt16OpTable"
}), 1, 1)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "BinaryInt16OpTable"
}), 2, 2)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "TertiaryInt16OpTable"
}), 3, 1)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "UnaryUintOpTable"
}), 1, 1)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "BinaryUintOpTable"
}), 2, 2)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "TertiaryUintOpTable"
}), 3, 1)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "UnaryUint16OpTable"
}), 1, 1)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "BinaryUint16OpTable"
}), 2, 2)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "TertiaryUint16OpTable"
}), 3, 1)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "DotOpTable"
}), 2, 3)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "Dot2AddHalfOpTable"
}), 3, 1)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "Dot4AddI8PackedOpTable"
}), 3, 1)
generate_parameter_types(
ET.SubElement(root, "Table", attrib={
"Id": "Dot4AddU8PackedOpTable"
}), 3, 1)
generate_parameter_types_msad(
ET.SubElement(root, "Table", attrib={
"Id": "Msad4Table"
}))
generate_parameter_types_wave(
ET.SubElement(
root, "Table", attrib={
"Id": "WaveIntrinsicsActiveIntTable"
}))
generate_parameter_types_wave(
ET.SubElement(
root, "Table", attrib={
"Id": "WaveIntrinsicsActiveUintTable"
}))
generate_parameter_types_wave(
ET.SubElement(
root, "Table", attrib={
"Id": "WaveIntrinsicsPrefixIntTable"
}))
generate_parameter_types_wave(
ET.SubElement(
root, "Table", attrib={
"Id": "WaveIntrinsicsPrefixUintTable"
}))
generate_parameter_types(
ET.SubElement(
root, "Table", attrib={
"Id": "DenormBinaryFloatOpTable"
}), 2, 2) # 2 sets of expected values for any mode
generate_parameter_types(
ET.SubElement(
root, "Table", attrib={
"Id": "DenormTertiaryFloatOpTable"
}), 3, 2)
for case in g_test_cases.values():
cur_inst = case.insts[0]
if cur_inst.is_cast or cur_inst.category.startswith("Unary"):
if "f" in cur_inst.oload_types and not "Half" in case.test_name:
generate_row(
root.find("./Table[@Id='UnaryFloatOpTable']"),
case)
if "h" in cur_inst.oload_types and "Half" in case.test_name:
generate_row(root.find("./Table[@Id='UnaryHalfOpTable']"),case)
if "i" in cur_inst.oload_types and "Bit16" not in case.test_name:
if cur_inst.category.startswith("Unary int"):
generate_row(
root.find("./Table[@Id='UnaryIntOpTable']"),
case)
elif cur_inst.category.startswith("Unary uint"):
generate_row(
root.find("./Table[@Id='UnaryUintOpTable']"),
case)
else:
print("unknown op: " + cur_inst.name)
print(cur_inst.dxil_class)
if "w" in cur_inst.oload_types and "Bit16" in case.test_name:
if cur_inst.category.startswith("Unary int"):
generate_row(
root.find("./Table[@Id='UnaryInt16OpTable']"),
case)
elif cur_inst.category.startswith("Unary uint"):
generate_row(
root.find("./Table[@Id='UnaryUint16OpTable']"),
case)
else:
print("unknown op: " + cur_inst.name)
print(cur_inst.dxil_class)
elif cur_inst.is_binary or cur_inst.category.startswith(
"Binary"):
if "f" in cur_inst.oload_types and not "Half" in case.test_name:
if case.test_name in g_denorm_tests: # for denorm tests
generate_row(
root.find("./Table[@Id='DenormBinaryFloatOpTable']"),
case)
else:
generate_row(
root.find("./Table[@Id='BinaryFloatOpTable']"),
case)
if "h" in cur_inst.oload_types and "Half" in case.test_name:
generate_row(root.find("./Table[@Id='BinaryHalfOpTable']"),case)
if "i" in cur_inst.oload_types and "Bit16" not in case.test_name:
if cur_inst.category.startswith("Binary int"):
if case.test_name in ['UAdd', 'USub', 'UMul']: # Add, Sub, Mul use same operations for int and uint.
generate_row(
root.find("./Table[@Id='BinaryUintOpTable']"),
case)
else:
generate_row(
root.find("./Table[@Id='BinaryIntOpTable']"),
case)
elif cur_inst.category.startswith("Binary uint"):
generate_row(
root.find("./Table[@Id='BinaryUintOpTable']"),
case)
else:
print("unknown op: " + cur_inst.name)
print(cur_inst.dxil_class)
if "w" in cur_inst.oload_types and "Bit16" in case.test_name:
if cur_inst.category.startswith("Binary int"):
if case.test_name in ['UAdd', 'USub', 'UMul']: # Add, Sub, Mul use same operations for int and uint.
generate_row(
root.find("./Table[@Id='BinaryUint16OpTable']"),
case)
else:
generate_row(
root.find("./Table[@Id='BinaryInt16OpTable']"),
case)
elif cur_inst.category.startswith("Binary uint"):
generate_row(
root.find("./Table[@Id='BinaryUint16OpTable']"),
case)
else:
print("unknown op: " + cur_inst.name)
print(cur_inst.dxil_class)
elif cur_inst.category.startswith("Tertiary"):
if "f" in cur_inst.oload_types and not "Half" in case.test_name:
if case.test_name in g_denorm_tests: # for denorm tests
generate_row(
root.find("./Table[@Id='DenormTertiaryFloatOpTable']"),case)
else:
generate_row(
root.find("./Table[@Id='TertiaryFloatOpTable']"),case)
if "h" in cur_inst.oload_types and "Half" in case.test_name:
generate_row(root.find("./Table[@Id='TertiaryHalfOpTable']"),case)
if "i" in cur_inst.oload_types and "Bit16" not in case.test_name:
if cur_inst.category.startswith("Tertiary int"):
generate_row(
root.find("./Table[@Id='TertiaryIntOpTable']"),
case)
elif cur_inst.category.startswith("Tertiary uint"):
generate_row(
root.find(
"./Table[@Id='TertiaryUintOpTable']"),
case)
else:
print("unknown op: " + cur_inst.name)
print(cur_inst.dxil_class)
if "w" in cur_inst.oload_types and "Bit16" in case.test_name:
if cur_inst.category.startswith("Tertiary int"):
generate_row(
root.find("./Table[@Id='TertiaryInt16OpTable']"),
case)
elif cur_inst.category.startswith("Tertiary uint"):
generate_row(
root.find(
"./Table[@Id='TertiaryUint16OpTable']"),
case)
else:
print("unknown op: " + cur_inst.name)
print(cur_inst.dxil_class)
elif cur_inst.category.startswith("Quaternary"):
if cur_inst.name == "Bfi":
generate_row(
root.find("./Table[@Id='Msad4Table']"), case)
else:
print("unknown op: " + cur_inst.name)
print(cur_inst.dxil_class)
elif cur_inst.category == "Dot":
generate_row(root.find("./Table[@Id='DotOpTable']"), case)
elif cur_inst.category == "Dot product with accumulate":
if cur_inst.name == "Dot2AddHalf":
generate_row(root.find("./Table[@Id='Dot2AddHalfOpTable']"), case)
elif cur_inst.name == "Dot4AddI8Packed":
generate_row(root.find("./Table[@Id='Dot4AddI8PackedOpTable']"), case)
elif cur_inst.name == "Dot4AddU8Packed":
generate_row(root.find("./Table[@Id='Dot4AddU8PackedOpTable']"), case)
else:
print("unknown op: " + cur_inst.name)
print(cur_inst.dxil_class)
elif cur_inst.dxil_class in ["WaveActiveOp", "WaveAllOp","WaveActiveAllEqual","WaveAnyTrue","WaveAllTrue"]:
if case.test_name.startswith("WaveActiveU"):
generate_row_wave(
root.find(
"./Table[@Id='WaveIntrinsicsActiveUintTable']"
), case)
else:
generate_row_wave(
root.find(
"./Table[@Id='WaveIntrinsicsActiveIntTable']"),
case)
elif cur_inst.dxil_class == "WaveActiveBit":
generate_row_wave(
root.find(
"./Table[@Id='WaveIntrinsicsActiveUintTable']"),
case)
elif cur_inst.dxil_class == "WavePrefixOp":
if case.test_name.startswith("WavePrefixU"):
generate_row_wave(
root.find(
"./Table[@Id='WaveIntrinsicsPrefixUintTable']"
), case)
else:
generate_row_wave(
root.find(
"./Table[@Id='WaveIntrinsicsPrefixIntTable']"),
case)
else:
print("unknown op: " + cur_inst.name)
print(cur_inst.dxil_class)
tree._setroot(root)
from xml.dom.minidom import parseString
pretty_xml = parseString(ET.tostring(root)).toprettyxml(indent=" ")
f.write(pretty_xml)
print("Saved file at: " + f.name)
f.close()
def print_untested_inst():
lst = []
for name in [node.inst.name for node in g_instruction_nodes.values() if len(node.test_cases) == 0]:
lst += [name]
lst.sort()
print("Untested dxil ops: ")
for name in lst:
print(name)
print("Total uncovered dxil ops: " + str(len(lst)))
print("Total covered dxil ops: " + str(len(g_instruction_nodes)-len(lst)))
# inst name to instruction dict
g_instruction_nodes = {}
# test name to test case dict
g_test_cases = {}
if __name__ == "__main__":
db = get_db_dxil()
for inst in db.instr:
g_instruction_nodes[inst.name] = inst_node(inst)
add_test_cases()
args = vars(parser.parse_args())
mode = args['mode']
if mode == "info":
print_untested_inst()
elif mode == "gen-xml":
generate_table_for_taef()
else:
print("unknown mode: " + mode)
exit(1)
exit(0)
|
py | 7dfc7d358f4cdc34f000be508ba38a5f22fa31fb | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/7/30 9:52
# @Author : cendeavor
# @File : models.py
# @Software: PyCharm
from flask_login import UserMixin
class User(UserMixin):
"""用户类"""
def __init__(self, user):
self.username = user.get("username")
self.password = user.get("password")
# self.password_hash = user.get("password")
self.id = user.get("username") # 为了简化,id == username
def verify_password(self, password):
"""密码验证"""
if self.password != password:
return False
return True
# if self.password_hash is None:
# return False
# return check_password_hash(self.password_hash, password) |
py | 7dfc7d4885e1d722a4a180950e1bfe23a3298e10 | #!/usr/bin/env python
command = oslc("test_variadic_macro.osl")
|
py | 7dfc7f21d4f2ad7ed919e65f172a456dc028b748 | __author__ = 'Riley Crane'
__license__ = 'MIT'
__version__ = '0.1.0'
from placewalk import *
|
py | 7dfc7f2e23a9d94ff289824dbd6bcf1678414c49 | import pytest
import getpass
from django.contrib.auth import get_user_model
from collections import namedtuple
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Group
from django.conf import settings as django_settings
@pytest.fixture
def test_user(request, db):
    """Fixture for a django db user"""
    password = "fake"
    user = get_user_model().objects.create_user(username="testuser",
                                                password=password)
    user.unhashed_password = password
    def fin():
        user.delete()
    request.addfinalizer(fin)
    return user
@pytest.fixture
def test_group(request, db):
    """Fixture for a django test group"""
    group = Group.objects.create(name="test_group")
    def fin():
        group.delete()
    request.addfinalizer(fin)
    return group
@pytest.fixture
def test_permission(request, db):
    """Fixture for a django test permission"""
    content_type = ContentType.objects.get_for_model(get_user_model())
    permission = Permission.objects.create(codename="test_permission",
                                           name="Test Permission",
                                           content_type=content_type)
    def fin():
        permission.delete()
    request.addfinalizer(fin)
    return permission
@pytest.fixture
def patch_authenticate_success(request, db, monkeypatch):
    """Fixture to patch successful authentication"""
    monkeypatch.setattr("requests.sessions.Session.request",
                        lambda *args, **kwargs:
                        namedtuple("Response", ['status_code'])(200))
    monkeypatch.setattr("freeipa_auth.freeipa_utils."
                        "FreeIpaSession._get_user_data",
                        lambda *args: {
                            "givenname": "Test",
                            "sn": "User",
                            "mail": "[email protected]",
                        })
@pytest.fixture
def patch_authenticate_fail(request, db, monkeypatch):
    """Fixture to patch a failed authentication"""
    monkeypatch.setattr("requests.sessions.Session.request",
                        lambda *args, **kwargs:
                        namedtuple("Response", ['status_code'])(401))
@pytest.fixture
def patch_remote_user_groups(request, db, monkeypatch):
    """Fixture to patch remote user groups"""
    monkeypatch.setattr("freeipa_auth.freeipa_utils.FreeIpaSession.groups",
                        ["admin", "test_group",
                         "test_permission"])
@pytest.fixture
def settings(request, db):
    """Fixture to allow for setting overrides per test case"""
    def override(**kwargs):
        for k, v in kwargs.items():
            setattr(django_settings, k, v)
        def fin():
            for k in kwargs:
                delattr(django_settings, k)
        request.addfinalizer(fin)
    django_settings.override = override
    return django_settings
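# Hedged usage sketch (the test and setting name below are illustrative assumptions, not
# taken from the source): override() sets attributes on django.conf.settings for a single
# test, and the finalizer it registers deletes them again afterwards.
#   def test_with_flag_enabled(settings):
#       settings.override(SOME_FEATURE_FLAG=True)
#       ...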
@pytest.fixture
def liveserver_password(request, db):
    """Fixture to use a liveserver password"""
    return getpass.getpass("password: ")
@pytest.fixture
def liveserver(request, db):
    """Fixture to use a liveserver for testing (passed in from command line)"""
    return request.config.getoption('liveserver')
|
py | 7dfc7feca68f27825c7658c9a43aee5052cae8bc | #!/usr/bin/python
def remove_nikud(txt,encoding="utf8"):
"""
Removes nikud from a text
Returns the texts in the given encoding.
If txt is unicode -> returns unicode object.
Python2\3 compatible
"""
alef = 1488
min_nikud = 1416
max_nikud = 1479
try :
nikud_chars = [unichr(i) for i in range(min_nikud,alef)]
non_unicode_type = str
except NameError :#Python3
nikud_chars = [chr(i) for i in range(min_nikud,alef)]
non_unicode_type = bytes
if not isinstance(txt,non_unicode_type):
was_unicode = True
unicode_txt = txt
else :
was_unicode = False
unicode_txt = txt.decode(encoding)
for char in nikud_chars:
unicode_txt = unicode_txt.replace(char,u"")
if not was_unicode:
return unicode_txt.encode(encoding)
else :
return unicode_txt
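# Hedged usage sketch (added for illustration; the sample word is an assumption, not from
# the source). remove_nikud() deletes every code point in range(1416, 1488), i.e. the block
# of Hebrew cantillation marks and vowel points just below alef (U+05D0).
if __name__ == "__main__":
    pointed = u"\u05e9\u05c1\u05b8\u05dc\u05d5\u05b9\u05dd"  # "shalom" written with vowel points
    print(remove_nikud(pointed))  # prints the bare letters, with the points stripped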
|
py | 7dfc80da1069cee62161bcad813607a2c321173c | """ from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been
run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for
details.
'''
from text import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? "'
_special = '-'
_arpabet = [s for s in cmudict.valid_symbols]
symbols = _arpabet
# Export all symbols:
#symbols = [_pad] + list(_special) + list(_punctuation) + _arpabet
|
py | 7dfc8282e02a5f268d80fab52801846e7955d913 | # import the necessary packages
import cv2
import imutils
import numpy as np
from imutils.video import VideoStream
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
# Flast requirements
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
from model.Train import train_model
# Initialize Flask
app = Flask(__name__)
@app.route('/', methods=['POST'])
def detect_and_predict_mask(frame, face_net, mask_net):
    # grab the dimensions of the frame and then construct a blob
    # from it
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224),
                                 (104.0, 177.0, 123.0))
    # pass the blob through the network and obtain the face detections
    face_net.setInput(blob)
    detections = face_net.forward()
    print(detections.shape)
    # initialize our list of faces, their corresponding locations,
    # and the list of predictions from our face mask network
    faces = []
    locs = []
    preds = []
    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the detection
        confidence = detections[0, 0, i, 2]
        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > 0.5:
            # compute the (x, y)-coordinates of the bounding box for
            # the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            # ensure the bounding boxes fall within the dimensions of
            # the frame
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))
            # extract the face ROI, convert it from BGR to RGB channel
            # ordering, resize it to 224x224, and preprocess it
            face = frame[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)
            # add the face and bounding boxes to their respective
            # lists
            faces.append(face)
            locs.append((startX, startY, endX, endY))
    # only make a prediction if at least one face was detected
    if len(faces) > 0:
        # for faster inference we'll make batch predictions on *all*
        # faces at the same time rather than one-by-one predictions
        # in the above `for` loop
        faces = np.array(faces, dtype="float32")
        preds = mask_net.predict(faces, batch_size=32)
    # return a 2-tuple of the face locations and their corresponding
    # predictions so the caller below can unpack and draw them
    return (locs, preds)
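# Note on the returned pair (descriptive comment added for clarity): locs is a list of
# (startX, startY, endX, endY) boxes and preds is a parallel array of [mask, withoutMask]
# probabilities, one row per detected face.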
# load our serialized face detector model from disk
prototxtPath = r"face_detector\deploy.prototxt"
weightsPath = r"face_detector\res10_300x300_ssd_iter_140000.caffemodel"
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
maskNet = load_model("mask_detector.model")
# initialize the video stream
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 800 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=800)
    # detect faces in the frame and determine if they are wearing a
    # face mask or not
    (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)
    # loop over the detected face locations and their corresponding
    # predictions
    for (box, pred) in zip(locs, preds):
        # unpack the bounding box and predictions
        (startX, startY, endX, endY) = box
        (mask, withoutMask) = pred
        # determine the class label and color we'll use to draw
        # the bounding box and text
        label = "Face Mask Detected" if mask > withoutMask else "No Face Mask Detected"
        color = (0, 255, 0) if label == "Face Mask Detected" else (0, 0, 255)
        # include the probability in the label
        label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
        # display the label and bounding box rectangle on the output
        # frame
        cv2.putText(frame, label, (startX, startY - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
app.run(debug=True, host='0.0.0.0')
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
|
py | 7dfc82927433dcc4c6b8f2c51244481dfd3d0d22 | from PIL import Image
def resize(image, w, h, bgcolor="black"):
    ow, oh = image.size
    if w == -1 and h == -1:
        return image
    elif w == -1 and h != -1:
        w = ow * (float(h) / float(oh))
        w = int(w)
        return image.resize((w, h))
    elif w != -1 and h == -1:
        h = oh * (float(w) / float(ow))
        h = int(h)
        return image.resize((w, h))
    else:
        # Fit longest axis
        if ow <= oh:
            nh = h
            nw = (float(nh) / float(oh)) * ow
            nw = int(nw)
            im2 = image.resize((nw, nh))
            wdiff = int((w - nw) / 2.0)
            im = Image.new("RGB", (w, h), bgcolor)
            im.paste(im2, (wdiff, 0))
        else:
            nw = w
            nh = (float(nw) / float(ow)) * oh
            nh = int(nh)
            im2 = image.resize((nw, nh))
            hdiff = int((h - nh) / 2.0)
            im = Image.new("RGB", (w, h), bgcolor)
            im.paste(im2, (0, hdiff))
        return im
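# Hedged usage sketch (added for illustration; the sizes below are assumptions): with one of
# w/h set to -1 the aspect ratio is preserved, and with both set the image is scaled to fit
# and centered on a solid bgcolor canvas of exactly (w, h).
if __name__ == "__main__":
    src = Image.new("RGB", (640, 480), "white")  # stand-in for Image.open("photo.jpg")
    print(resize(src, 320, -1).size)             # (320, 240): width fixed, height follows
    print(resize(src, 200, 200).size)            # (200, 200): letterboxed onto a black canvas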
|
py | 7dfc83deb7165bc90be830a4cafa1ce0dd930968 | import platform
my_system = platform.uname()
print(f"System: {my_system.system}")
print(f"Node Name: {my_system.node}")
print(f"Release: {my_system.release}")
print(f"Version: {my_system.version}")
print(f"Machine: {my_system.machine}")
print(f"Processor: {my_system.processor}")
|
py | 7dfc8535126bcf3af157480cec7a4d5a362f7ae7 | """Builds our static title embedding for each entity.
The output of this is torch saved pt file to be read in by our StaticEmb class.
```
ent_embeddings:
- key: title_static
load_class: StaticEmb
freeze: false # Freeze the projection layer or not
cpu: true
args:
emb_file: <path to saved pt file>
proj: 256
```
"""
import argparse
import os
import torch
import ujson
from tqdm import tqdm
from transformers import BertModel, BertTokenizer
from bootleg.symbols.entity_symbols import EntitySymbols
MAX_LEN = 512
BERT_DIM = 768
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--method", type=str, choices=["avg_title"], default="avg_title"
)
parser.add_argument(
"--entity_dir",
type=str,
default="/dfs/scratch0/lorr1/projects/bootleg/data/wiki_title_0122/entity_db",
help="Path to entities inside data_dir",
)
parser.add_argument(
"--entity_map_dir",
type=str,
default="entity_mappings",
help="Path to entities inside data_dir",
)
parser.add_argument(
"--alias_cand_map",
type=str,
default="alias2qids.json",
help="Path to alias candidate map",
)
parser.add_argument(
"--alias_idx_map",
type=str,
default="alias2id.json",
help="Path to alias candidate map",
)
parser.add_argument(
"--bert_model", type=str, default="bert-base-cased", help="Bert model"
)
parser.add_argument(
"--word_model_cache",
type=str,
default="/dfs/scratch0/lorr1/projects/bootleg-data/embs/pretrained_bert_models",
help="Path to saved model",
)
parser.add_argument(
"--save_file", type=str, required=True, help="Path to save embedding file"
)
parser.add_argument("--batch_size", type=int, default=2056)
parser.add_argument("--cpu", action="store_true")
parser.add_argument("--output_method", default="pt", choices=["pt", "json"])
args = parser.parse_args()
return args
def average_titles(input_ids, embeddings):
num_valid = (input_ids != 0).sum(-1)
return embeddings.sum(1) / num_valid.unsqueeze(1)
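# Shape note (descriptive comment added for clarity): input_ids is [batch, seq_len] with 0 as
# the padding id and embeddings is [batch, seq_len, 768]; the caller has already zeroed the
# padded positions, so dividing the summed vectors by the count of non-zero ids yields the
# mean over real tokens only, shape [batch, 768].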
def build_title_table(cpu, batch_size, model, tokenizer, entity_symbols):
"""Builds the table of the word indices associated with each title."""
entity2avgtitle = torch.zeros(
entity_symbols.num_entities_with_pad_and_nocand, BERT_DIM
)
titles = []
eids = []
for q in tqdm(
entity_symbols.get_all_qids(),
total=len(entity_symbols.get_all_qids()),
desc="Itearting over entities",
):
eids.append(entity_symbols.get_eid(q))
titles.append(entity_symbols.get_title(q))
assert len(eids) == len(titles)
for i in tqdm(range(0, len(titles), batch_size)):
batch_eids = eids[i : i + batch_size]
batch_titles = titles[i : i + batch_size]
batch_inputs = tokenizer(
batch_titles, padding=True, truncation=True, return_tensors="pt"
)
inputs = batch_inputs["input_ids"]
attention_mask = batch_inputs["attention_mask"]
inputs = inputs.to(model.device)
attention_mask = attention_mask.to(model.device)
# model() returns tuple of (last layer of embeddings, pooled output)
with torch.no_grad():
outputs = model(inputs, attention_mask=attention_mask)[0]
assert list(outputs.shape) == [len(batch_titles), inputs.shape[1], BERT_DIM]
outputs[inputs == 0] = 0
assert all(outputs[(1 - attention_mask).bool()].sum(-1) == 0)
entity2avgtitle[batch_eids] = average_titles(inputs, outputs).to("cpu")
return entity2avgtitle
def main():
args = parse_args()
print(ujson.dumps(vars(args), indent=4))
entity_symbols = EntitySymbols.load_from_cache(
os.path.join(args.entity_dir, args.entity_map_dir),
alias_cand_map_file=args.alias_cand_map,
alias_idx_file=args.alias_idx_map,
)
print("DO LOWERCASE IS", "uncased" in args.bert_model)
tokenizer = BertTokenizer.from_pretrained(
args.bert_model,
do_lower_case="uncased" in args.bert_model,
cache_dir=args.word_model_cache,
)
model = BertModel.from_pretrained(
args.bert_model,
cache_dir=args.word_model_cache,
output_attentions=False,
output_hidden_states=False,
)
if torch.cuda.is_available():
model = model.to("cuda")
model.eval()
entity2avgtitle = build_title_table(
args.cpu, args.batch_size, model, tokenizer, entity_symbols
)
save_fold = os.path.dirname(args.save_file)
if len(save_fold) > 0:
if not os.path.exists(save_fold):
os.makedirs(save_fold)
if args.output_method == "pt":
save_obj = (entity_symbols.get_qid2eid(), entity2avgtitle)
torch.save(obj=save_obj, f=args.save_file)
else:
res = {}
for qid in tqdm(entity_symbols.get_all_qids(), desc="Building final json"):
eid = entity_symbols.get_eid(qid)
res[qid] = entity2avgtitle[eid].tolist()
with open(args.save_file, "w") as out_f:
ujson.dump(res, out_f)
print(f"Done!")
if __name__ == "__main__":
main()
|
py | 7dfc854d1b5696e7d5ae2524b5cd096df64bf563 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from supra.Utils.Classes import Constants
consts = Constants()
def getPressure(z):
p = 10*101.325*np.exp(-0.00012*z)*100
# in Pa
return p
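# Sanity check (illustrative): getPressure(0) = 10 * 101.325 * 100 = 101325 Pa, i.e. standard
# sea-level pressure, and the value decays exponentially with a scale height of
# 1 / 0.00012 ~ 8.3 km.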
def anglescan(S, phi, theta, z_profile, vfreq, P_amb, wind=True, debug=True, trace=False, plot=False):
# Originally by Wayne Edwards (Supracenter)
""" Ray-traces from a point given initial launch angles
Arguments:
S: [list] [x, y, z] of initial launch point (Supracenter or Wave-Release point)
phi: [float] initial azimuthal angle of launch [deg] with 0 deg being North and 90 deg being East
theta: [float] initial takeoff angle of launch [deg] with 90 deg being horizontal and 180 deg being vertically down
z_profile: [list] weather profile (n_layers * 4)
[[heights (increasing order) [m], speed of sound [m/s], wind speed [m/s], wind direction [rad] (same angle definition as phi)],
... ]
Keyword Arguments:
wind: [Boolean] if False sets all wind speeds to 0
debug: [Boolean] if True outputs print messages of program status
trace: [Boolean] if True returns (x, y, z, t) coordinates of the ray trace
plot: [Boolean] if True plots the ray trace
Returns:
D: [list] (x, y, z, t) final position and travel time of the raytrace
T: [list] returned if trace is set to True, (x, y, z, t) of all points along the ray-trace
"""
b_const = 1.119e-4
k_const = 2.0e-4
T = z_profile[-1, 1]
P = getPressure(z_profile[-1, 0])
# Azimuths and Wind directions are measured as angles from north, and increasing clockwise to the East
phi = (phi - 90)%360
# Flip coordinate system horizontally
phi = (360 - phi)%360
phi = np.radians(phi)
theta = np.radians(theta)
# Switch to turn off winds
if not wind:
z_profile[:, 2] = 0
# z_profile[:, 1] = 330
# The number of layers in the integration region
n_layers = len(z_profile)
# Slowness, as defined in SUPRACENTER on pg 35, s = 1/c
s = 1.0/z_profile[0:n_layers, 1]
# Elevation for that layer
z = z_profile[0:n_layers, 0]
# Component of wind vector in the direction of phi and phi + pi/2 respectively
u = z_profile[:, 2]*np.sin(z_profile[:, 3])*np.cos(phi) + z_profile[:, 2]*np.cos(z_profile[:, 3])*np.sin(phi)
v = z_profile[:, 2]*np.sin(z_profile[:, 3])*np.cos(phi+np.pi/2) + z_profile[:, 2]*np.cos(z_profile[:, 3])*np.sin(phi+np.pi/2)
s_val = s[n_layers-1]
# ray parameter
p = s_val*np.sin(theta)/(1 + s_val*u[n_layers - 1]*np.sin(theta))
X = 0
Y = 0
#Travel time
t_arrival = 0
if trace:
T = []
T.append([S[0], S[1], S[2], t_arrival])
# ignore negative roots
np.seterr(divide='ignore', invalid='ignore')
### Scan Loop ###
a, b = np.cos(phi), np.sin(phi)
last_z = 0
g = 0
#for i in range(n_layers - 1):
for i in range(n_layers - 1, 0, -1):
s2 = s[i]**2
delz = z[i] - z[i-1]
pres_1 = getPressure(z[i])
pres = getPressure(z[i-1])
# clear old variables
# Wind transformation variables
U = u[i]
V = v[i]
p2 = p/(1 - p*U)
# This term produces nans
A = delz/np.sqrt(s2 - p2**2)
if np.isnan(A).all():
if debug:
print("ANGLESCAN ERROR: All NaNs - rays reflect upwards")
if trace:
return np.array([[np.nan, np.nan, np.nan, np.nan]])
else:
return np.array([np.nan, np.nan, np.nan, np.nan])
# Equation (10)
dx = (p2 + s2*U)*A
X += dx
# Equation (11)
dy = s2*V*A
Y += dy
horizontal_change = np.sqrt((a*dx - b*dy)**2 + (b*dx + a*dy)**2)
angle_of_depression = np.arctan(delz/horizontal_change)
# snell = delz / np.sqrt(delz**2 + (a*dx - b*dy)**2 + (b*dx + a*dy)**2)
# sin(arctan(x)) == x / (sqrt(x^2 + 1))
G = -vfreq**2*k_const/b_const*(np.exp(b_const*z[i]) - np.exp(b_const*z[i-1]))/np.sin(angle_of_depression)/101325
g += G
# Calculate true destination positions (transform back)
#0.0016s
# Winds Disabled
last_z = i - 1
dt = s2/np.sqrt(s2 - p**2/(1 - p*u[i-1])**2)*delz
# If possible, use the ray timing, else just use the distance with the sound speed at that layer
if not np.isnan(dt):
t_arrival += dt
else:
t_arrival += np.sqrt((a*dx - b*dy)**2 + (b*dx + a*dy)**2 + delz**2)*s[i]
if trace:
T.append([S[0] + (a*X - b*Y), S[1] + (b*X + a*Y), z[last_z], t_arrival])
if trace and plot:
tr = np.array(T)
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(tr[:, 0], tr[:, 1], tr[:, 2], c='b')
ax.plot(tr[:, 0], tr[:, 1], tr[:, 2], c='k')
ax.scatter(S[0], S[1], S[2], c='r', marker="*")
ax.scatter(S[0] + (a*X - b*Y), S[1] + (b*X + a*Y), z[last_z], c='g', marker="^")
plt.show()
# if v_tol is not None and h_tol is not None:
# dh = z[last_z] - target[2]
# dx = np.sqrt((S[0] + (a*X - b*Y) - target[0])**2 + (S[1] + (b*X + a*Y) - target[1])**2)
# if dh <= v_tol and dx <= h_tol:
# t_arrival += np.sqrt(dh**2 + dx**2)/310
# Compare these destinations with the desired destination, all imaginary values are "turned rays" and are ignored
# E = np.sqrt(((a*X - b*Y)**2 + (b*X + a*Y)**2 + (z[n_layers - last_z - 1])**2))
D = [S[0] + (a*X - b*Y), S[1] + (b*X + a*Y), z[last_z], t_arrival]
T_0 = z_profile[0, 1]**2/consts.GAMMA/consts.R*consts.M_0
P_0 = getPressure(z_profile[0, 0])
z_2 = z_profile[-1, 0]
z_1 = z_profile[0, 0]
P_2 = getPressure(z_2)
P_1 = getPressure(z_1)
T_2 = z_profile[-1, 1]**2*consts.M_0/consts.GAMMA/consts.R
T_1 = z_profile[1, 1]**2*consts.M_0/consts.GAMMA/consts.R
# needs to be average f_d
f = ((T_0/P_0)**(0.33)*(((P_2/T_2)**(0.33)*z_2 - (P_1/T_1)**(0.33)*z_1)/(z_2 - z_1)) + 1)/2
##########################
return np.array([f, np.exp(g), T, P])
if __name__ == '__main__':
S = np.array([0, 0, 1000])
#takeoff
theta = 135
#azimuth
phi = 0
z_profile = np.array([[ 0, 330, 0, 0],
[500, 330, 0, 0],
[1000, 330, 0, 0]])
D = anglescan(S, phi, theta, z_profile, trace=True, plot=True)
print(D) |
py | 7dfc856e8f6039720072aad39d5c4a1eb440dfaa | #Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file is stored in the variable path.
#Code starts here
# Data Loading
#Reading the file
data=pd.read_csv(path)
#Renaming a column
data.rename(columns={'Total':'Total_Medals'},inplace=True)
#Printing the first five columns
print(data.head(5))
# Summer or Winter
#Creating new column 'Better_Event'
data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'] , 'Summer', 'Winter')
data['Better_Event'] = np.where(data['Total_Summer'] == data['Total_Winter'] , 'Both', data['Better_Event'])
#Finding the value with max count in 'Better_Event' column
better_event=data['Better_Event'].value_counts().index.values[0]
#Printing the better event
print('Better_Event=', better_event)
# Top 10
#Subsetting the dataframe
top_countries=data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
#Dropping the last row
top_countries=top_countries[:-1]
#Function for top 10
def top_ten(data, col):
    #Creating a new list
    country_list=[]
    #Finding the top 10 values of 'col' column
    country_list= list((data.nlargest(10,col)['Country_Name']))
    #Returning the top 10 list
    return country_list
#Calling the function for Top 10 in Summer
top_10_summer=top_ten(top_countries,'Total_Summer')
print("Top 10 Summer:\n",top_10_summer, "\n")
#Calling the function for Top 10 in Winter
top_10_winter=top_ten(top_countries,'Total_Winter')
print("Top 10 Winter:\n",top_10_winter, "\n")
#Calling the function for Top 10 in both the events
top_10=top_ten(top_countries,'Total_Medals')
print("Top 10:\n",top_10, "\n")
#Extracting common country names from all three lists
common=list(set(top_10_summer) & set(top_10_winter) & set(top_10))
print('Common Countries :\n', common, "\n")
# Plotting Top 10
#For Summer
#Creating the dataframe for Summer event
summer_df= data[data['Country_Name'].isin(top_10_summer)]
#Plotting the bar graph
plt.figure(figsize=(20, 6))
plt.bar(summer_df['Country_Name'], summer_df['Total_Summer'])
#Changing the graph title
plt.title('Top 10 Summer')
#Changing the x-axis label
plt.xlabel('Country Name')
#Changing the y-axis label
plt.ylabel('Total Medals')
#For Winter
#Creating the dataframe for Winter event
winter_df=data[data['Country_Name'].isin(top_10_winter)]
#Plotting the bar graph
plt.figure(figsize=(20, 6))
plt.bar(winter_df['Country_Name'], winter_df['Total_Winter'])
#Changing the graph title
plt.title('Top 10 Winter')
#Changing the x-axis label
plt.xlabel('Country Name')
#Changing the y-axis label
plt.ylabel('Total Medals')
#For both the events
#Creating the dataframe for both the events
top_df=data[data['Country_Name'].isin(top_10)]
#Plotting the bar graph
plt.figure(figsize=(20, 6))
plt.bar(top_df['Country_Name'], top_df['Total_Medals'])
#Changing the graph title
plt.title('Top 10')
#Changing the x-axis label
plt.xlabel('Country Name')
#Changing the y-axis label
plt.ylabel('Total Medals')
# Top Performing countries
#For Summer List
#Creating new column 'Golden_Ratio'
summer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer']
#Finding the max value of 'Golden_Ratio' column
summer_max_ratio=max(summer_df['Golden_Ratio'])
#Finding the country associated with the max value of 'Golden_Ratio' column
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
print("Top Summer Country:", summer_country_gold, " with a ratio of %.2f" %summer_max_ratio )
#For Winter List
#Creating new column 'Golden_Ratio'
winter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter']
#Finding the max value of 'Golden_Ratio' column
winter_max_ratio=max(winter_df['Golden_Ratio'])
#Finding the country associated with the max value of 'Golden_Ratio' column
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
print("Top Winter Country:", winter_country_gold, " with a ratio of %.2f" %winter_max_ratio )
#For Overall List
#Creating new column 'Golden_Ratio'
top_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals']
#Finding the max value of 'Golden_Ratio' column
top_max_ratio=max(top_df['Golden_Ratio'])
#Finding the country associated with the max value of 'Golden_Ratio' column
top_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
print("Top Country:", top_country_gold, " with a ratio of %.2f" %top_max_ratio )
# Best in the world
#Removing the last column of the dataframe
data_1=data[:-1]
#Creating a new column 'Total_Points'
data_1['Total_Points']= data_1['Gold_Total']*3 + data_1['Silver_Total']*2 + data_1['Bronze_Total']*1 # Use of position index to handle the ambiguity of having same name columns
#Finding the maximum value of 'Total_Points' column
most_points=max(data_1['Total_Points'])
#Finding the country associated with the max value of the 'Total_Points' column
best_country=data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
print('The maximum points achieved is ', most_points, ' by ', best_country )
# Plot for the best
#Subsetting the dataframe
best=data[data['Country_Name']==best_country]
best.reset_index(drop = True, inplace = True)
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
#Plotting bar plot
best.plot.bar(stacked=True)
#Changing the x-axis label
plt.xlabel('United States')
#Changing the y-axis label
plt.ylabel('Medals Tally')
#Rotating the ticks of X-axis
plt.xticks(rotation=45)
#Updating the graph legend
l=plt.legend()
l.get_texts()[0].set_text('Gold_Total :' + str(best['Gold_Total'].values))
l.get_texts()[1].set_text('Silver_Total :' + str(best['Silver_Total'].values))
l.get_texts()[2].set_text('Bronze_Total :' + str(best['Bronze_Total'].values))
|
py | 7dfc860820c0873b6a3db38d11f7375dde346902 | maior = 0
menor = 0
from datetime import date
for c in range(1, 8):
    nasc = int(input('In what year was person #{} born? '.format(c)))
    atual = date.today().year - nasc
    if atual >= 21:
        maior += 1
    else:
        menor += 1
print('\033[34m{} \033[30mpeople are of legal age and \n\033[34m{} \033[30mpeople are under age.'.format(maior, menor))
|
py | 7dfc8711b61b097f5077cca68965f053e1d6b129 | # coding: utf-8
"""
IriusRisk API
Products API # noqa: E501
OpenAPI spec version: 1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from iriusrisk_python_client_lib.api_client import ApiClient
class CountermeasuresApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post(self, api_token, library_ref, risk_pattern_ref, create_countermeasure_library_request_body, **kwargs): # noqa: E501
"""Creates new countermeasure in a risk pattern # noqa: E501
Creates new countermeasure in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post(api_token, library_ref, risk_pattern_ref, create_countermeasure_library_request_body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_token: Authentication token (required)
:param str library_ref: Reference for library (required)
:param str risk_pattern_ref: Reference for Risk Pattern (required)
:param ControlCommand create_countermeasure_library_request_body: JSON data that contains information of the fields (required)
:return: LibraryControl
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post_with_http_info(api_token, library_ref, risk_pattern_ref, create_countermeasure_library_request_body, **kwargs) # noqa: E501
else:
(data) = self.libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post_with_http_info(api_token, library_ref, risk_pattern_ref, create_countermeasure_library_request_body, **kwargs) # noqa: E501
return data
def libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post_with_http_info(self, api_token, library_ref, risk_pattern_ref, create_countermeasure_library_request_body, **kwargs): # noqa: E501
"""Creates new countermeasure in a risk pattern # noqa: E501
Creates new countermeasure in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post_with_http_info(api_token, library_ref, risk_pattern_ref, create_countermeasure_library_request_body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_token: Authentication token (required)
:param str library_ref: Reference for library (required)
:param str risk_pattern_ref: Reference for Risk Pattern (required)
:param ControlCommand create_countermeasure_library_request_body: JSON data that contains information of the fields (required)
:return: LibraryControl
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_token', 'library_ref', 'risk_pattern_ref', 'create_countermeasure_library_request_body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_token' is set
if ('api_token' not in params or
params['api_token'] is None):
raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post`") # noqa: E501
# verify the required parameter 'library_ref' is set
if ('library_ref' not in params or
params['library_ref'] is None):
raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post`") # noqa: E501
# verify the required parameter 'risk_pattern_ref' is set
if ('risk_pattern_ref' not in params or
params['risk_pattern_ref'] is None):
raise ValueError("Missing the required parameter `risk_pattern_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post`") # noqa: E501
# verify the required parameter 'create_countermeasure_library_request_body' is set
if ('create_countermeasure_library_request_body' not in params or
params['create_countermeasure_library_request_body'] is None):
raise ValueError("Missing the required parameter `create_countermeasure_library_request_body` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_countermeasures_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'library_ref' in params:
path_params['libraryRef'] = params['library_ref'] # noqa: E501
if 'risk_pattern_ref' in params:
path_params['riskPatternRef'] = params['risk_pattern_ref'] # noqa: E501
query_params = []
header_params = {}
if 'api_token' in params:
header_params['api-token'] = params['api_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'create_countermeasure_library_request_body' in params:
body_params = params['create_countermeasure_library_request_body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/libraries/{libraryRef}/riskpatterns/{riskPatternRef}/countermeasures', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LibraryControl', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put(self, api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_countermeasure_threat_library_request_body, **kwargs): # noqa: E501
"""Associates a countermeasure to a threat in a risk pattern. # noqa: E501
Associates a countermeasure to a threat in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_countermeasure_threat_library_request_body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_token: Authentication token (required)
:param str library_ref: Reference for library (required)
:param str risk_pattern_ref: Reference for Risk Pattern (required)
:param str use_case_ref: Reference for Use Case (required)
:param str threat_ref: Reference for Threat (required)
:param AssociateCountermeasureThreatLibraryRequestBody associate_countermeasure_threat_library_request_body: JSON data that contains information of the fields (required)
:return: LibraryControl
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_countermeasure_threat_library_request_body, **kwargs) # noqa: E501
else:
(data) = self.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_countermeasure_threat_library_request_body, **kwargs) # noqa: E501
return data
def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put_with_http_info(self, api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_countermeasure_threat_library_request_body, **kwargs): # noqa: E501
"""Associates a countermeasure to a threat in a risk pattern. # noqa: E501
Associates a countermeasure to a threat in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, associate_countermeasure_threat_library_request_body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_token: Authentication token (required)
:param str library_ref: Reference for library (required)
:param str risk_pattern_ref: Reference for Risk Pattern (required)
:param str use_case_ref: Reference for Use Case (required)
:param str threat_ref: Reference for Threat (required)
:param AssociateCountermeasureThreatLibraryRequestBody associate_countermeasure_threat_library_request_body: JSON data that contains information of the fields (required)
:return: LibraryControl
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_token', 'library_ref', 'risk_pattern_ref', 'use_case_ref', 'threat_ref', 'associate_countermeasure_threat_library_request_body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_token' is set
if ('api_token' not in params or
params['api_token'] is None):
raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put`") # noqa: E501
# verify the required parameter 'library_ref' is set
if ('library_ref' not in params or
params['library_ref'] is None):
raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put`") # noqa: E501
# verify the required parameter 'risk_pattern_ref' is set
if ('risk_pattern_ref' not in params or
params['risk_pattern_ref'] is None):
raise ValueError("Missing the required parameter `risk_pattern_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put`") # noqa: E501
# verify the required parameter 'use_case_ref' is set
if ('use_case_ref' not in params or
params['use_case_ref'] is None):
raise ValueError("Missing the required parameter `use_case_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put`") # noqa: E501
# verify the required parameter 'threat_ref' is set
if ('threat_ref' not in params or
params['threat_ref'] is None):
raise ValueError("Missing the required parameter `threat_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put`") # noqa: E501
# verify the required parameter 'associate_countermeasure_threat_library_request_body' is set
if ('associate_countermeasure_threat_library_request_body' not in params or
params['associate_countermeasure_threat_library_request_body'] is None):
raise ValueError("Missing the required parameter `associate_countermeasure_threat_library_request_body` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_countermeasures_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'library_ref' in params:
path_params['libraryRef'] = params['library_ref'] # noqa: E501
if 'risk_pattern_ref' in params:
path_params['riskPatternRef'] = params['risk_pattern_ref'] # noqa: E501
if 'use_case_ref' in params:
path_params['useCaseRef'] = params['use_case_ref'] # noqa: E501
if 'threat_ref' in params:
path_params['threatRef'] = params['threat_ref'] # noqa: E501
query_params = []
header_params = {}
if 'api_token' in params:
header_params['api-token'] = params['api_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'associate_countermeasure_threat_library_request_body' in params:
body_params = params['associate_countermeasure_threat_library_request_body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/libraries/{libraryRef}/riskpatterns/{riskPatternRef}/usecases/{useCaseRef}/threats/{threatRef}/countermeasures', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LibraryControl', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put(self, api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, weakness_ref, associate_countermeasure_weakness_library_request_body, **kwargs): # noqa: E501
"""Associates a countermeasure to a weakness in a risk pattern. # noqa: E501
Associates a countermeasure to a weakness in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, weakness_ref, associate_countermeasure_weakness_library_request_body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_token: Authentication token (required)
:param str library_ref: Reference for library (required)
:param str risk_pattern_ref: Reference for Risk Pattern (required)
:param str use_case_ref: Reference for Use Case (required)
:param str threat_ref: Reference for Threat (required)
:param str weakness_ref: Reference for Weakness (required)
:param AssociateCountermeasureWeaknessLibraryRequestBody associate_countermeasure_weakness_library_request_body: JSON data that contains information of the fields (required)
:return: LibraryControl
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, weakness_ref, associate_countermeasure_weakness_library_request_body, **kwargs) # noqa: E501
else:
(data) = self.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, weakness_ref, associate_countermeasure_weakness_library_request_body, **kwargs) # noqa: E501
return data
def libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put_with_http_info(self, api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, weakness_ref, associate_countermeasure_weakness_library_request_body, **kwargs): # noqa: E501
"""Associates a countermeasure to a weakness in a risk pattern. # noqa: E501
Associates a countermeasure to a weakness in a risk pattern. Conditions to be able to perform the action: - To have the permission **LIBRARY_UPDATE** granted. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put_with_http_info(api_token, library_ref, risk_pattern_ref, use_case_ref, threat_ref, weakness_ref, associate_countermeasure_weakness_library_request_body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_token: Authentication token (required)
:param str library_ref: Reference for library (required)
:param str risk_pattern_ref: Reference for Risk Pattern (required)
:param str use_case_ref: Reference for Use Case (required)
:param str threat_ref: Reference for Threat (required)
:param str weakness_ref: Reference for Weakness (required)
:param AssociateCountermeasureWeaknessLibraryRequestBody associate_countermeasure_weakness_library_request_body: JSON data that contains information of the fields (required)
:return: LibraryControl
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_token', 'library_ref', 'risk_pattern_ref', 'use_case_ref', 'threat_ref', 'weakness_ref', 'associate_countermeasure_weakness_library_request_body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_token' is set
if ('api_token' not in params or
params['api_token'] is None):
raise ValueError("Missing the required parameter `api_token` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
# verify the required parameter 'library_ref' is set
if ('library_ref' not in params or
params['library_ref'] is None):
raise ValueError("Missing the required parameter `library_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
# verify the required parameter 'risk_pattern_ref' is set
if ('risk_pattern_ref' not in params or
params['risk_pattern_ref'] is None):
raise ValueError("Missing the required parameter `risk_pattern_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
# verify the required parameter 'use_case_ref' is set
if ('use_case_ref' not in params or
params['use_case_ref'] is None):
raise ValueError("Missing the required parameter `use_case_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
# verify the required parameter 'threat_ref' is set
if ('threat_ref' not in params or
params['threat_ref'] is None):
raise ValueError("Missing the required parameter `threat_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
# verify the required parameter 'weakness_ref' is set
if ('weakness_ref' not in params or
params['weakness_ref'] is None):
raise ValueError("Missing the required parameter `weakness_ref` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
# verify the required parameter 'associate_countermeasure_weakness_library_request_body' is set
if ('associate_countermeasure_weakness_library_request_body' not in params or
params['associate_countermeasure_weakness_library_request_body'] is None):
raise ValueError("Missing the required parameter `associate_countermeasure_weakness_library_request_body` when calling `libraries_library_ref_riskpatterns_risk_pattern_ref_usecases_use_case_ref_threats_threat_ref_weaknesses_weakness_ref_countermeasures_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'library_ref' in params:
path_params['libraryRef'] = params['library_ref'] # noqa: E501
if 'risk_pattern_ref' in params:
path_params['riskPatternRef'] = params['risk_pattern_ref'] # noqa: E501
if 'use_case_ref' in params:
path_params['useCaseRef'] = params['use_case_ref'] # noqa: E501
if 'threat_ref' in params:
path_params['threatRef'] = params['threat_ref'] # noqa: E501
if 'weakness_ref' in params:
path_params['weaknessRef'] = params['weakness_ref'] # noqa: E501
query_params = []
header_params = {}
if 'api_token' in params:
header_params['api-token'] = params['api_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'associate_countermeasure_weakness_library_request_body' in params:
body_params = params['associate_countermeasure_weakness_library_request_body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/libraries/{libraryRef}/riskpatterns/{riskPatternRef}/usecases/{useCaseRef}/threats/{threatRef}/weaknesses/{weaknessRef}/countermeasures', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LibraryControl', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
py | 7dfc88957cf1b76cd7d6020a4a046bf774e80371 | # encoding: utf-8
"""
credit:
https://github.com/facebookresearch/detectron2/blob/master/detectron2/engine/train_loop.py
"""
import logging
import time
import weakref
import numpy as np
import torch
import fastreid.utils.comm as comm
from fastreid.utils.events import EventStorage
from torch.cuda.amp import GradScaler
from torch.cuda.amp import autocast
__all__ = ["HookBase", "TrainerBase", "SimpleTrainer",
"HazySimpleTrainer", "HazyTrainerBase", "LoadSimpleTrainer", "LoadTrainerBase"]
class HookBase:
"""
Base class for hooks that can be registered with :class:`TrainerBase`.
Each hook can implement 4 methods. The way they are called is demonstrated
in the following snippet:
.. code-block:: python
hook.before_train()
for iter in range(start_iter, max_iter):
hook.before_step()
trainer.run_step()
hook.after_step()
hook.after_train()
Notes:
1. In the hook method, users can access `self.trainer` to access more
properties about the context (e.g., current iteration).
2. A hook that does something in :meth:`before_step` can often be
implemented equivalently in :meth:`after_step`.
If the hook takes non-trivial time, it is strongly recommended to
implement the hook in :meth:`after_step` instead of :meth:`before_step`.
The convention is that :meth:`before_step` should only take negligible time.
Following this convention will allow hooks that do care about the difference
between :meth:`before_step` and :meth:`after_step` (e.g., timer) to
function properly.
Attributes:
trainer: A weak reference to the trainer object. Set by the trainer when the hook is
registered.
"""
def before_train(self):
"""
Called before the first iteration.
"""
pass
def after_train(self):
"""
Called after the last iteration.
"""
pass
def before_step(self):
"""
Called before each iteration.
"""
pass
def after_step(self):
"""
Called after each iteration.
"""
pass
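# A minimal illustrative sketch (hypothetical, not part of the original module) of a concrete
# hook following the lifecycle documented in HookBase: it times each iteration and writes the
# result into the trainer's EventStorage. It relies only on HookBase and `time` imported above.
class IterationTimerHook(HookBase):
    """Example hook: records per-iteration wall-clock time as the scalar "iter_time"."""
    def before_step(self):
        # keep before_step negligible, as the HookBase docstring recommends
        self._step_start = time.perf_counter()
    def after_step(self):
        elapsed = time.perf_counter() - self._step_start
        self.trainer.storage.put_scalar("iter_time", elapsed)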
class TrainerBase:
"""
Base class for iterative trainer with hooks.
The only assumption we made here is: the training runs in a loop.
A subclass can implement what the loop is.
We made no assumptions about the existence of dataloader, optimizer, model, etc.
Attributes:
iter(int): the current iteration.
start_iter(int): The iteration to start with.
By convention the minimum possible value is 0.
max_iter(int): The iteration to end training.
storage(EventStorage): An EventStorage that's opened during the course of training.
"""
def __init__(self):
self._hooks = []
def register_hooks(self, hooks):
"""
Register hooks to the trainer. The hooks are executed in the order
they are registered.
Args:
hooks (list[Optional[HookBase]]): list of hooks
"""
hooks = [h for h in hooks if h is not None]
for h in hooks:
assert isinstance(h, HookBase)
# To avoid circular reference, hooks and trainer cannot own each other.
# This normally does not matter, but will cause memory leak if the
# involved objects contain __del__:
# See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
h.trainer = weakref.proxy(self)
self._hooks.extend(hooks)
def train(self, start_iter: int, max_iter: int):
"""
Args:
start_iter, max_iter (int): See docs above
"""
logger = logging.getLogger(__name__)
logger.info("Starting training from iteration {}".format(start_iter))
self.iter = self.start_iter = start_iter
self.max_iter = max_iter
with EventStorage(start_iter) as self.storage:
try:
self.before_train()
for self.iter in range(start_iter, max_iter):
self.before_step()
self.run_step()
self.after_step()
except Exception:
logger.exception("Exception during training:")
finally:
self.after_train()
def before_train(self):
for h in self._hooks:
h.before_train()
def after_train(self):
for h in self._hooks:
h.after_train()
def before_step(self):
for h in self._hooks:
h.before_step()
def after_step(self):
for h in self._hooks:
h.after_step()
# this guarantees that in each hook's after_step, storage.iter == trainer.iter
self.storage.step()
def run_step(self):
raise NotImplementedError
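# A minimal illustrative sketch (hypothetical names, not part of the original module) of how
# TrainerBase is meant to be driven: implement run_step in a subclass, register hooks, then
# call train(start_iter, max_iter).
class _CountingTrainer(TrainerBase):
    """Toy trainer whose run_step merely counts iterations (no model or optimizer)."""
    def __init__(self):
        super().__init__()
        self.steps_run = 0
    def run_step(self):
        self.steps_run += 1
def _demo_training_loop():
    trainer = _CountingTrainer()
    trainer.register_hooks([IterationTimerHook()])  # hooks fire around every run_step call
    trainer.train(start_iter=0, max_iter=5)
    return trainer.steps_run  # expected: 5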
class HazyTrainerBase:
"""
Base class for iterative trainer with hooks.
The only assumption we made here is: the training runs in a loop.
A subclass can implement what the loop is.
We made no assumptions about the existence of dataloader, optimizer, model, etc.
Attributes:
iter(int): the current iteration.
start_iter(int): The iteration to start with.
By convention the minimum possible value is 0.
max_iter(int): The iteration to end training.
storage(EventStorage): An EventStorage that's opened during the course of training.
"""
def __init__(self):
self._hooks = []
def register_hooks(self, hooks):
"""
Register hooks to the trainer. The hooks are executed in the order
they are registered.
Args:
hooks (list[Optional[HookBase]]): list of hooks
"""
hooks = [h for h in hooks if h is not None]
for h in hooks:
assert isinstance(h, HookBase)
# To avoid circular reference, hooks and trainer cannot own each other.
# This normally does not matter, but will cause memory leak if the
# involved objects contain __del__:
# See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
h.trainer = weakref.proxy(self)
self._hooks.extend(hooks)
def train(self, start_iter: int, max_iter: int):
"""
Args:
start_iter, max_iter (int): See docs above
"""
logger = logging.getLogger(__name__)
logger.info("Starting training from iteration {}".format(start_iter))
self.iter = self.start_iter = start_iter
self.max_iter = max_iter
with EventStorage(start_iter) as self.storage:
try:
self.before_train()
for self.iter in range(start_iter, max_iter):
self.before_step()
self.run_step()
self.after_step()
except Exception:
logger.exception("Exception during training:")
finally:
self.after_train()
def before_train(self):
for h in self._hooks:
h.before_train()
def after_train(self):
for h in self._hooks:
h.after_train()
def before_step(self):
for h in self._hooks:
h.before_step()
def after_step(self):
for h in self._hooks:
h.after_step()
# this guarantees that in each hook's after_step, storage.iter == trainer.iter
self.storage.step()
def run_step(self):
raise NotImplementedError
class LoadTrainerBase:
"""
Base class for iterative trainer with hooks.
The only assumption we made here is: the training runs in a loop.
A subclass can implement what the loop is.
We made no assumptions about the existence of dataloader, optimizer, model, etc.
Attributes:
iter(int): the current iteration.
start_iter(int): The iteration to start with.
By convention the minimum possible value is 0.
max_iter(int): The iteration to end training.
storage(EventStorage): An EventStorage that's opened during the course of training.
"""
def __init__(self):
self._hooks = []
def register_hooks(self, hooks):
"""
Register hooks to the trainer. The hooks are executed in the order
they are registered.
Args:
hooks (list[Optional[HookBase]]): list of hooks
"""
hooks = [h for h in hooks if h is not None]
for h in hooks:
assert isinstance(h, HookBase)
# To avoid circular reference, hooks and trainer cannot own each other.
# This normally does not matter, but will cause memory leak if the
# involved objects contain __del__:
# See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
h.trainer = weakref.proxy(self)
self._hooks.extend(hooks)
def train(self, start_iter: int, max_iter: int):
"""
Args:
start_iter, max_iter (int): See docs above
"""
logger = logging.getLogger(__name__)
logger.info("Starting training from iteration {}".format(start_iter))
self.iter = self.start_iter = start_iter
self.max_iter = max_iter
with EventStorage(start_iter) as self.storage:
try:
self.before_train()
for self.iter in range(start_iter, max_iter):
self.before_step()
self.run_step()
self.after_step()
except Exception:
logger.exception("Exception during training:")
finally:
self.after_train()
def before_train(self):
for h in self._hooks:
h.before_train()
def after_train(self):
for h in self._hooks:
h.after_train()
def before_step(self):
for h in self._hooks:
h.before_step()
def after_step(self):
for h in self._hooks:
h.after_step()
# this guarantees that in each hook's after_step, storage.iter == trainer.iter
self.storage.step()
def run_step(self):
raise NotImplementedError
class SimpleTrainer(TrainerBase):
"""
A simple trainer for the most common type of task:
single-cost single-optimizer single-data-source iterative optimization.
It assumes that every step, you:
1. Compute the loss with a data from the data_loader.
2. Compute the gradients with the above loss.
3. Update the model with the optimizer.
If you want to do anything fancier than this,
either subclass TrainerBase and implement your own `run_step`,
or write your own training loop.
"""
def __init__(self, model, data_loader, optimizer):
"""
Args:
model: a torch Module. Takes a batch of data from data_loader and returns a
dict of heads.
data_loader: an iterable. Contains data to be used to call model.
optimizer: a torch optimizer.
"""
super().__init__()
"""
We set the model to training mode in the trainer.
However it's valid to train a model that's in eval mode.
If you want your model (or a submodule of it) to behave
like evaluation during training, you can overwrite its train() method.
"""
model.train()
self.model = model
self.data_loader = data_loader
self._data_loader_iter = iter(data_loader)
self.optimizer = optimizer
def run_step(self):
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
"""
If you want to do something with the data, you can wrap the dataloader.
"""
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
"""
If you want to do something with the heads, you can wrap the model.
"""
loss_dict = self.model(data)
losses = sum(loss_dict.values())
self._detect_anomaly(losses, loss_dict)
metrics_dict = loss_dict
metrics_dict["data_time"] = data_time
self._write_metrics(metrics_dict)
"""
If you need to accumulate gradients or something similar, you can
wrap the optimizer with your custom `zero_grad()` method.
"""
self.optimizer.zero_grad()
losses.backward()
"""
If you need gradient clipping/scaling or other processing, you can
wrap the optimizer with your custom `step()` method.
"""
self.optimizer.step()
def _detect_anomaly(self, losses, loss_dict):
if not torch.isfinite(losses).all():
raise FloatingPointError(
"Loss became infinite or NaN at iteration={}!\nloss_dict = {}".format(
self.iter, loss_dict
)
)
def _write_metrics(self, metrics_dict: dict):
"""
Args:
metrics_dict (dict): dict of scalar metrics
"""
metrics_dict = {
k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
for k, v in metrics_dict.items()
}
# gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in fastreid.
all_metrics_dict = comm.gather(metrics_dict)
if comm.is_main_process():
if "data_time" in all_metrics_dict[0]:
# data_time among workers can have high variance. The actual latency
# caused by data_time is the maximum among workers.
data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
self.storage.put_scalar("data_time", data_time)
# average the rest metrics
metrics_dict = {
k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
}
total_losses_reduced = sum(loss for loss in metrics_dict.values())
self.storage.put_scalar("total_loss", total_losses_reduced)
if len(metrics_dict) > 1:
self.storage.put_scalars(**metrics_dict)
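# A minimal illustrative sketch (hypothetical names, not part of the original module) of wiring
# SimpleTrainer to a toy model, an infinite data iterable and an optimizer. The model must
# return a dict of losses from forward(), which is what run_step above sums and backpropagates.
class _ToyLossModel(torch.nn.Module):
    """Example model whose forward returns a dict of losses, as SimpleTrainer expects."""
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)
    def forward(self, batch):
        x, y = batch
        return {"cls_loss": torch.nn.functional.cross_entropy(self.linear(x), y)}
def _toy_data_loader(batch_size=8):
    # infinite iterable, matching the trainer's use of next(self._data_loader_iter)
    while True:
        yield torch.randn(batch_size, 4), torch.randint(0, 2, (batch_size,))
def _build_toy_simple_trainer():
    model = _ToyLossModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    # trainer.train(0, N) then runs N steps; metric gathering via comm works single-process
    return SimpleTrainer(model, _toy_data_loader(), optimizer)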
class HazySimpleTrainer(HazyTrainerBase):
"""
A simple trainer for the most common type of task:
single-cost single-optimizer single-data-source iterative optimization.
It assumes that every step, you:
1. Compute the loss with a data from the data_loader.
2. Compute the gradients with the above loss.
3. Update the model with the optimizer.
If you want to do anything fancier than this,
either subclass TrainerBase and implement your own `run_step`,
or write your own training loop.
"""
def __init__(self, model, data_loader, t_data_loader, tea_optimizer, stu_optimizer, dis_optimizer):
"""
Args:
model: a torch Module. Takes a batch of data from data_loader and returns a
dict of heads.
data_loader: an iterable. Contains data to be used to call model.
t_data_loader: a second iterable whose batches are passed to the model alongside data_loader's.
tea_optimizer, stu_optimizer, dis_optimizer: torch optimizers for the teacher, student and discriminator.
"""
super().__init__()
"""
We set the model to training mode in the trainer.
However it's valid to train a model that's in eval mode.
If you want your model (or a submodule of it) to behave
like evaluation during training, you can overwrite its train() method.
"""
model.train()
self.model = model
self.data_loader = data_loader
self._data_loader_iter = iter(data_loader)
self.t_data_loader = t_data_loader
self._t_data_loader_iter = iter(t_data_loader)
self.tea_optimizer = tea_optimizer
self.stu_optimizer = stu_optimizer
self.dis_optimizer = dis_optimizer
self.scaler = GradScaler()
def fix_bn(self):
for m in self.model.student_net.modules():
if isinstance(m, (torch.nn.BatchNorm2d, torch.nn.BatchNorm1d)):
m.track_running_stats = False
def run_step(self):
if self.cfg.MODEL.PARAM.BASEMODE:
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
"""
If you want to do something with the data, you can wrap the dataloader.
"""
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
"""
AMP (automatic mixed precision): run the forward pass under autocast and scale the loss with GradScaler.
"""
self.stu_optimizer.zero_grad()
self.tea_optimizer.zero_grad()
with autocast():
"""
If you want to do something with the heads, you can wrap the model.
"""
loss_dict = self.model(data)
losses = sum(loss_dict.values())
self._detect_anomaly(losses, loss_dict)
metrics_dict = loss_dict
metrics_dict["data_time"] = data_time
self._write_metrics(metrics_dict)
self.scaler.scale(losses).backward()
self.scaler.step(self.stu_optimizer)
self.scaler.step(self.tea_optimizer)
self.scaler.update()
else:
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
self.model.teacher_net.eval()
# self.fix_bn()
start = time.perf_counter()
"""
If you want to do something with the data, you can wrap the dataloader.
"""
data = next(self._data_loader_iter)
t_data = next(self._t_data_loader_iter)
data_time = time.perf_counter() - start
"""
AMP (automatic mixed precision): run the forward pass under autocast and scale the loss with GradScaler.
"""
self.stu_optimizer.zero_grad()
self.dis_optimizer.zero_grad()
# self.tea_optimizer.zero_grad()
with autocast():
"""
If you want to do something with the heads, you can wrap the model.
"""
loss_dict = self.model(data, t_data, iters=self.iter)
losses = sum(loss_dict.values())
self._detect_anomaly(losses, loss_dict)
metrics_dict = loss_dict
metrics_dict["data_time"] = data_time
self._write_metrics(metrics_dict)
if (self.iter <= self.cfg.MODEL.PARAM.Dis_iter) and (("Dis_loss" in self.cfg.MODEL.LOSSES.NAME) or ("Dis_loss_cam" in self.cfg.MODEL.LOSSES.NAME)):
self.scaler.scale(losses).backward()
self.scaler.step(self.stu_optimizer)
self.scaler.step(self.dis_optimizer)
self.scaler.update()
else:
self.scaler.scale(losses).backward()
self.scaler.step(self.stu_optimizer)
self.scaler.update()
def _detect_anomaly(self, losses, loss_dict):
if not torch.isfinite(losses).all():
raise FloatingPointError(
"Loss became infinite or NaN at iteration={}!\nloss_dict = {}".format(
self.iter, loss_dict
)
)
def _write_metrics(self, metrics_dict: dict):
"""
Args:
metrics_dict (dict): dict of scalar metrics
"""
metrics_dict = {
k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
for k, v in metrics_dict.items()
}
# gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in fastreid.
all_metrics_dict = comm.gather(metrics_dict)
if comm.is_main_process():
if "data_time" in all_metrics_dict[0]:
# data_time among workers can have high variance. The actual latency
# caused by data_time is the maximum among workers.
data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
self.storage.put_scalar("data_time", data_time)
# average the rest metrics
metrics_dict = {
k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
}
total_losses_reduced = sum(loss for loss in metrics_dict.values())
self.storage.put_scalar("total_loss", total_losses_reduced)
if len(metrics_dict) > 1:
self.storage.put_scalars(**metrics_dict)
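# A standalone illustrative sketch (hypothetical, not part of the original module) of the AMP
# pattern used by HazySimpleTrainer.run_step above: autocast the forward pass, scale the loss,
# and step one or more optimizers through a shared GradScaler before update().
def _amp_step_sketch(model, batch, optimizers, scaler):
    for opt in optimizers:
        opt.zero_grad()
    with autocast():
        losses = sum(model(batch).values())  # model returns a dict of losses, as above
    scaler.scale(losses).backward()
    for opt in optimizers:
        scaler.step(opt)
    scaler.update()
    return losses.detach()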
class LoadSimpleTrainer(LoadTrainerBase):
"""
A simple trainer for the most common type of task:
single-cost single-optimizer single-data-source iterative optimization.
It assumes that every step, you:
1. Compute the loss with a data from the data_loader.
2. Compute the gradients with the above loss.
3. Update the model with the optimizer.
If you want to do anything fancier than this,
either subclass TrainerBase and implement your own `run_step`,
or write your own training loop.
"""
def __init__(self, model, data_loader, t_data_loader, dis_optimizer, stu_optimizer):
"""
Args:
model: a torch Module. Takes a batch of data from data_loader and returns a
dict of heads.
data_loader: an iterable. Contains data to be used to call model.
t_data_loader: a second iterable whose batches are passed to the model alongside data_loader's.
dis_optimizer, stu_optimizer: torch optimizers for the discriminator and the student.
"""
super().__init__()
"""
We set the model to training mode in the trainer.
However it's valid to train a model that's in eval mode.
If you want your model (or a submodule of it) to behave
like evaluation during training, you can overwrite its train() method.
"""
model.train()
self.model = model
self.data_loader = data_loader
self._data_loader_iter = iter(data_loader)
self.t_data_loader = t_data_loader
self._t_data_loader_iter = iter(t_data_loader)
# self.optimizer = optimizer
self.dis_optimizer = dis_optimizer
self.stu_optimizer = stu_optimizer
self.scaler = GradScaler()
def run_step(self):
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
"""
If you want to do something with the data, you can wrap the dataloader.
"""
data = next(self._data_loader_iter)
t_data = next(self._t_data_loader_iter)
data_time = time.perf_counter() - start
"""
AMP (automatic mixed precision): run the forward pass under autocast and scale the loss with GradScaler.
"""
with autocast():
"""
If you want to do something with the heads, you can wrap the model.
"""
loss_dict = self.model(data, t_data)
# loss_dict = self.model(data, t_data)
losses = sum(loss_dict.values())
self._detect_anomaly(losses, loss_dict)
metrics_dict = loss_dict
metrics_dict["data_time"] = data_time
self._write_metrics(metrics_dict)
"""
If you need to accumulate gradients or something similar, you can
wrap the optimizer with your custom `zero_grad()` method.
"""
self.stu_optimizer.zero_grad()
self.dis_optimizer.zero_grad()
self.scaler.scale(losses).backward()
"""
If you need gradient clipping/scaling or other processing, you can
wrap the optimizer with your custom `step()` method.
"""
self.scaler.step(self.stu_optimizer)
# self.scaler.step(self.dis_optimizer)
# self.scaler.step(self.dis_optimizer)
self.scaler.update()
def KLDiv(self):
a = 1
def _detect_anomaly(self, losses, loss_dict):
if not torch.isfinite(losses).all():
raise FloatingPointError(
"Loss became infinite or NaN at iteration={}!\nloss_dict = {}".format(
self.iter, loss_dict
)
)
def _write_metrics(self, metrics_dict: dict):
"""
Args:
metrics_dict (dict): dict of scalar metrics
"""
metrics_dict = {
k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
for k, v in metrics_dict.items()
}
# gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in fastreid.
all_metrics_dict = comm.gather(metrics_dict)
if comm.is_main_process():
if "data_time" in all_metrics_dict[0]:
# data_time among workers can have high variance. The actual latency
# caused by data_time is the maximum among workers.
data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
self.storage.put_scalar("data_time", data_time)
# average the rest metrics
metrics_dict = {
k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
}
total_losses_reduced = sum(loss for loss in metrics_dict.values())
self.storage.put_scalar("total_loss", total_losses_reduced)
if len(metrics_dict) > 1:
self.storage.put_scalars(**metrics_dict)
|
py | 7dfc8993b7c2d6138ddce2800e0d42debf046250 | #!/bin/python
# -*- coding: utf-8 -*-
"""
Unit tests for gluon.html
"""
import unittest
from gluon.html import A, ASSIGNJS, B, BEAUTIFY, P, BODY, BR, BUTTON, CAT, CENTER, CODE, COL, COLGROUP, DIV, SPAN, URL, verifyURL
from gluon.html import truncate_string, EM, FIELDSET, FORM, H1, H2, H3, H4, H5, H6, HEAD, HR, HTML, I, IFRAME, IMG, INPUT, EMBED
from gluon.html import LABEL, LEGEND, LI, LINK, MARKMIN, MENU, META, OBJECT, OL, OPTGROUP, OPTION, PRE, SCRIPT, SELECT, STRONG
from gluon.html import STYLE, TABLE, TR, TD, TAG, TBODY, THEAD, TEXTAREA, TFOOT, TH, TITLE, TT, UL, XHTML, XML, web2pyHTMLParser
from gluon.storage import Storage
from gluon.html import XML_pickle, XML_unpickle
from gluon.html import TAG_pickler, TAG_unpickler
from gluon._compat import xrange, PY2, to_native
from gluon.decoder import decoder
import re
class TestBareHelpers(unittest.TestCase):
# xmlescape() = covered by other tests
# TODO: def test_call_as_list(self):
def test_truncate_string(self):
# Ascii text
self.assertEqual(truncate_string('Lorem ipsum dolor sit amet, consectetur adipiscing elit, '
'sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.',
length=30), 'Lorem ipsum dolor sit amet,...')
self.assertEqual(truncate_string('Short text shorter than the length parameter.', length=100),
'Short text shorter than the length parameter.')
# French text
self.assertEqual(truncate_string('Un texte en français avec des accents et des caractères bizarre.', length=30),
'Un texte en français avec d...')
def test_StaticURL(self):
# test response.static_version coupled with response.static_version_urls
self.assertEqual(URL('a', 'c', 'f'), '/a/c/f')
self.assertEqual(URL('a', 'static', 'design.css'), '/a/static/design.css')
response = Storage()
response.static_version = '1.2.3'
from gluon.globals import current
current.response = response
self.assertEqual(URL('a', 'static', 'design.css'), '/a/static/design.css')
response.static_version_urls = True
self.assertEqual(URL('a', 'static', 'design.css'), '/a/static/_1.2.3/design.css')
def test_URL(self):
self.assertEqual(URL('a', 'c', 'f', args='1'), '/a/c/f/1')
self.assertEqual(URL('a', 'c', 'f', args=('1', '2')), '/a/c/f/1/2')
self.assertEqual(URL('a', 'c', 'f', args=['1', '2']), '/a/c/f/1/2')
self.assertEqual(URL('a', 'c', '/f'), '/a/c/f')
self.assertEqual(URL('a', 'c', 'f.json'), '/a/c/f.json')
from gluon.globals import current
current.request = None
self.assertRaises(SyntaxError, URL, *['a'])
request = Storage()
request.application = 'a'
request.controller = 'c'
request.function = 'f'
request.env = {}
from gluon.globals import current # Can't be moved with other import
current.request = request
must_return = '/a/c/f'
self.assertEqual(URL(), must_return)
self.assertEqual(URL('f'), must_return)
self.assertEqual(URL('c', 'f'), must_return)
self.assertEqual(URL('a', 'c', 'f'), must_return)
self.assertEqual(URL('a', 'c', 'f', extension='json'), '/a/c/f.json')
def weird():
pass
self.assertEqual(URL('a', 'c', weird), '/a/c/weird')
self.assertRaises(SyntaxError, URL, *['a', 'c', 1])
# test signature
rtn = URL(a='a', c='c', f='f', args=['x', 'y', 'z'],
vars={'p': (1, 3), 'q': 2}, anchor='1', hmac_key='key')
self.assertEqual(rtn, '/a/c/f/x/y/z?p=1&p=3&q=2&_signature=a32530f0d0caa80964bb92aad2bedf8a4486a31f#1')
# test _signature exclusion
rtn = URL(a='a', c='c', f='f', args=['x', 'y', 'z'],
vars={'p': (1, 3), 'q': 2, '_signature': 'abc'},
anchor='1', hmac_key='key')
self.assertEqual(rtn, '/a/c/f/x/y/z?p=1&p=3&q=2&_signature=a32530f0d0caa80964bb92aad2bedf8a4486a31f#1')
# emulate user_signature
current.session = Storage(auth=Storage(hmac_key='key'))
self.assertEqual(URL(user_signature=True), '/a/c/f?_signature=c4aed53c08cff08f369dbf8b5ba51889430cf2c2')
# hash_vars combination
rtn = URL('a', 'c', 'f', args=['x', 'y', 'z'], vars={'p': (1, 3), 'q': 2}, hmac_key='key')
self.assertEqual(rtn, '/a/c/f/x/y/z?p=1&p=3&q=2&_signature=a32530f0d0caa80964bb92aad2bedf8a4486a31f')
rtn = URL('a', 'c', 'f', args=['x', 'y', 'z'], vars={'p': (1, 3), 'q': 2}, hmac_key='key', hash_vars=True)
self.assertEqual(rtn, '/a/c/f/x/y/z?p=1&p=3&q=2&_signature=a32530f0d0caa80964bb92aad2bedf8a4486a31f')
rtn = URL('a', 'c', 'f', args=['x', 'y', 'z'], vars={'p': (1, 3), 'q': 2}, hmac_key='key', hash_vars=False)
self.assertEqual(rtn, '/a/c/f/x/y/z?p=1&p=3&q=2&_signature=0b5a0702039992aad23c82794b8496e5dcd59a5b')
rtn = URL('a', 'c', 'f', args=['x', 'y', 'z'], vars={'p': (1, 3), 'q': 2}, hmac_key='key', hash_vars=['p'])
self.assertEqual(rtn, '/a/c/f/x/y/z?p=1&p=3&q=2&_signature=5d01b982fd72b39674b012e0288071034e156d7a')
rtn = URL('a', 'c', 'f', args=['x', 'y', 'z'], vars={'p': (1, 3), 'q': 2}, hmac_key='key', hash_vars='p')
self.assertEqual(rtn, '/a/c/f/x/y/z?p=1&p=3&q=2&_signature=5d01b982fd72b39674b012e0288071034e156d7a')
# test CRLF detection
self.assertRaises(SyntaxError, URL, *['a\n', 'c', 'f'])
self.assertRaises(SyntaxError, URL, *['a\r', 'c', 'f'])
# test url_encode
rtn = URL('a', 'c', 'f', args=['x', 'y', 'z'], vars={'maï': (1, 3), 'lié': 2}, url_encode=True)
self.assertEqual(rtn, '/a/c/f/x/y/z?li%C3%A9=2&ma%C3%AF=1&ma%C3%AF=3')
@unittest.skipIf(not PY2, "Skipping Python 3.x tests for test_URL_encode")
def test_URL_encode(self):
rtn = URL('a', 'c', 'f', args=['x', 'y', 'z'], vars={'maï': (1, 3), 'lié': 2}, url_encode=False)
self.assertEqual(rtn, '/a/c/f/x/y/z?li\xc3\xa9=2&ma\xc3\xaf=1&ma\xc3\xaf=3')
def test_verifyURL(self):
r = Storage()
r.application = 'a'
r.controller = 'c'
r.function = 'f'
r.extension = 'html'
r.env = {}
r.get_vars = Storage()
# missing signature as request.get_vars returns False
rtn = verifyURL(r, 'key')
self.assertEqual(rtn, False)
# reverse tests from previous testcase with hash_vars combinations
r.args = ['x', 'y', 'z']
r.get_vars = Storage(p=(1, 3), q=2)
# add signature
r.get_vars['_signature'] = 'a32530f0d0caa80964bb92aad2bedf8a4486a31f'
rtn = verifyURL(r, 'key')
self.assertEqual(rtn, True)
r.get_vars['_signature'] = 'a32530f0d0caa80964bb92aad2bedf8a4486a31f'
rtn = verifyURL(r, 'key', hash_vars=True)
self.assertEqual(rtn, True)
r.get_vars['_signature'] = '0b5a0702039992aad23c82794b8496e5dcd59a5b'
rtn = verifyURL(r, 'key', hash_vars=False)
self.assertEqual(rtn, True)
r.get_vars['_signature'] = '5d01b982fd72b39674b012e0288071034e156d7a'
rtn = verifyURL(r, 'key', hash_vars=['p'])
self.assertEqual(rtn, True)
r.get_vars['_signature'] = '5d01b982fd72b39674b012e0288071034e156d7a'
rtn = verifyURL(r, 'key', hash_vars='p')
self.assertEqual(rtn, True)
# without a session, user_signature always returns False
rtn = verifyURL(r, user_signature=True)
self.assertEqual(rtn, False)
# same goes if you don't use an hmac_key
rtn = verifyURL(r)
self.assertEqual(rtn, False)
# emulate user signature
from gluon.globals import current
current.session = Storage(auth=Storage(hmac_key='key'))
r.get_vars['_signature'] = 'a32530f0d0caa80964bb92aad2bedf8a4486a31f'
rtn = verifyURL(r, user_signature=True)
self.assertEqual(rtn, True)
# TODO: def test_XmlComponent(self):
def test_XML(self):
# sanitization process
self.assertEqual(XML('<h1>Hello<a data-hello="world">World</a></h1>').xml(),
b'<h1>Hello<a data-hello="world">World</a></h1>')
# with sanitize, data-attributes are not permitted
self.assertEqual(XML('<h1>Hello<a data-hello="world">World</a></h1>', sanitize=True).xml(),
b'<h1>HelloWorld</h1>')
# stringify by default
# FIXME PY3
# seems that __repr__ is no longer enough
##self.assertEqual(XML('1.3'), '1.3')
self.assertEqual(XML(u'<div>è</div>').xml(), b'<div>\xc3\xa8</div>')
# make sure unicode works with sanitize
self.assertEqual(XML(u'<div>è</div>', sanitize=True).xml(), b'<div>\xc3\xa8</div>')
# you can call len() on the instance; it equals the len of the xml() and of the str()
##self.assertEqual(len(XML('1.3')), len('1.3'))
self.assertEqual(len(XML('1.3').xml()), len('1.3'))
##self.assertEqual(len(str(XML('1.3'))), len('1.3'))
# you can concatenate them to strings (check for __add__ and __radd__ methods)
##self.assertEqual(XML('a') + 'b', 'ab')
##self.assertEqual(XML('a') + XML('b'), 'ab')
##self.assertEqual('a' + XML('b'), 'ab')
# you can compare them
##self.assertEqual(XML('a') == XML('a'), True)
# beware that the comparison is made on the XML repr
self.assertEqual(XML('<h1>Hello<a data-hello="world">World</a></h1>', sanitize=True).__repr__(),
XML('<h1>HelloWorld</h1>').__repr__())
# bug check for the sanitizer for closing no-close tags
self.assertEqual(XML('<p>Test</p><br/><p>Test</p><br/>', sanitize=True).xml(),
XML('<p>Test</p><br /><p>Test</p><br />').xml())
# basic flatten test
self.assertEqual(XML('<p>Test</p>').flatten(), '<p>Test</p>')
self.assertEqual(XML('<p>Test</p>').flatten(render=lambda text, tag, attr: text), '<p>Test</p>')
def test_XML_pickle_unpickle(self):
self.assertEqual(str(XML_unpickle(XML_pickle('data to be pickle')[1][0])), 'data to be pickle')
def test_DIV(self):
# Empty DIV()
self.assertEqual(DIV().xml(), b'<div></div>')
self.assertEqual(DIV('<>', _a='1', _b='2').xml(),
b'<div a="1" b="2"><></div>')
# attributes can be updated like in a dict
div = DIV('<>', _a='1')
div['_b'] = '2'
self.assertEqual(div.xml(),
b'<div a="1" b="2"><></div>')
# also with a mapping
div.update(_b=2, _c=3)
self.assertEqual(div.xml(),
b'<div a="1" b="2" c="3"><></div>')
# length of the DIV is the number of components
self.assertEqual(len(DIV('a', 'bc')), 2)
# also if empty, DIV is True in a boolean evaluation
self.assertTrue(True if DIV() else False)
# parent and siblings
a = DIV(SPAN('a'), DIV('b'))
s = a.element('span')
d = s.parent
d['_class'] = 'abc'
self.assertEqual(a.xml(), b'<div class="abc"><span>a</span><div>b</div></div>')
self.assertEqual([el.xml() for el in s.siblings()], [b'<div>b</div>'])
self.assertEqual(s.sibling().xml(), b'<div>b</div>')
# siblings with wrong args
self.assertEqual(s.siblings('a'), [])
# siblings with good args
self.assertEqual(s.siblings('div')[0].xml(), b'<div>b</div>')
# Check for siblings with wrong kwargs and value
self.assertEqual(s.siblings(a='d'), [])
# Check for siblings with good kwargs and value
# Can't figure out what the right value would be here
# Commented for now...
# self.assertEqual(s.siblings(div='<div>b</div>'), ???)
# With no other sibling, sibling() should return None
self.assertEqual(DIV(P('First element')).element('p').sibling(), None)
# --------------------------------------------------------------------------------------------------------------
# This use unicode to hit xmlescape() line :
# """
# elif isinstance(data, unicode):
# data = data.encode('utf8', 'xmlcharrefreplace')
# """
self.assertEqual(DIV(u'Texte en français avec des caractères accentués...').xml(),
b'<div>Texte en fran\xc3\xa7ais avec des caract\xc3\xa8res accentu\xc3\xa9s...</div>')
# --------------------------------------------------------------------------------------------------------------
self.assertEqual(DIV('Test with an ID', _id='id-of-the-element').xml(),
b'<div id="id-of-the-element">Test with an ID</div>')
self.assertEqual(DIV().element('p'), None)
# Corner case for raise coverage of one line
# I think this assert fails because of a Python 2.6 limitation
# Works under Python 2.7
# with self.assertRaises(SyntaxError) as cm:
# DIV(BR('<>')).xml()
# self.assertEqual(cm.exception[0], '<br/> tags cannot have components')
# test .get('attrib')
self.assertEqual(DIV('<p>Test</p>', _class="class_test").get('_class'), 'class_test')
self.assertEqual(DIV(b'a').xml(), b'<div>a</div>')
def test_decoder(self):
tag_html = '<div><span><a id="1-1" u:v="$">hello</a></span><p class="this is a test">world</p></div>'
a = decoder(tag_html)
self.assertEqual(a, tag_html)
def test_CAT(self):
# Empty CAT()
self.assertEqual(CAT().xml(), b'')
# CAT('')
self.assertEqual(CAT('').xml(), b'')
# CAT(' ')
self.assertEqual(CAT(' ').xml(), b' ')
def test_TAG_pickler_unpickler(self):
# weird test
self.assertEqual(TAG_unpickler(TAG_pickler(TAG.div('data to be pickle'))[1][0]).xml(),
b'<div>data to be pickle</div>')
def test_TAG(self):
self.assertEqual(TAG.first(TAG.second('test'), _key=3).xml(),
b'<first key="3"><second>test</second></first>')
# ending in underscore "triggers" <input /> style
self.assertEqual(TAG.first_(TAG.second('test'), _key=3).xml(),
b'<first key="3" />')
# unicode test for TAG
self.assertEqual(TAG.div(u'Texte en français avec des caractères accentués...').xml(),
b'<div>Texte en fran\xc3\xa7ais avec des caract\xc3\xa8res accentu\xc3\xa9s...</div>')
def test_HTML(self):
self.assertEqual(HTML('<>', _a='1', _b='2').xml(),
b'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n<html a="1" b="2" lang="en"><></html>')
self.assertEqual(HTML('<>', _a='1', _b='2', doctype='strict').xml(),
b'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n<html a="1" b="2" lang="en"><></html>')
self.assertEqual(HTML('<>', _a='1', _b='2', doctype='transitional').xml(),
b'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n<html a="1" b="2" lang="en"><></html>')
self.assertEqual(HTML('<>', _a='1', _b='2', doctype='frameset').xml(),
b'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">\n<html a="1" b="2" lang="en"><></html>')
self.assertEqual(HTML('<>', _a='1', _b='2', doctype='html5').xml(),
b'<!DOCTYPE HTML>\n<html a="1" b="2" lang="en"><></html>')
self.assertEqual(HTML('<>', _a='1', _b='2', doctype='').xml(),
b'<html a="1" b="2" lang="en"><></html>')
self.assertEqual(HTML('<>', _a='1', _b='2', doctype='CustomDocType').xml(),
b'CustomDocType\n<html a="1" b="2" lang="en"><></html>')
def test_XHTML(self):
# Empty XHTML test
self.assertEqual(XHTML().xml(),
b'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n<html lang="en" xml:lang="en" xmlns="http://www.w3.org/1999/xhtml"></html>')
# Not Empty XHTML test
self.assertEqual(XHTML('<>', _a='1', _b='2').xml(),
b'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n<html a="1" b="2" lang="en" xml:lang="en" xmlns="http://www.w3.org/1999/xhtml"><></html>')
self.assertEqual(XHTML('<>', _a='1', _b='2', doctype='').xml(),
b'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n<html a="1" b="2" lang="en" xml:lang="en" xmlns="http://www.w3.org/1999/xhtml"><></html>')
self.assertEqual(XHTML('<>', _a='1', _b='2', doctype='strict').xml(),
b'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n<html a="1" b="2" lang="en" xml:lang="en" xmlns="http://www.w3.org/1999/xhtml"><></html>')
self.assertEqual(XHTML('<>', _a='1', _b='2', doctype='transitional').xml(),
b'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n<html a="1" b="2" lang="en" xml:lang="en" xmlns="http://www.w3.org/1999/xhtml"><></html>')
self.assertEqual(XHTML('<>', _a='1', _b='2', doctype='frameset').xml(),
b'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">\n<html a="1" b="2" lang="en" xml:lang="en" xmlns="http://www.w3.org/1999/xhtml"><></html>')
self.assertEqual(XHTML('<>', _a='1', _b='2', doctype='xmlns').xml(),
b'xmlns\n<html a="1" b="2" lang="en" xml:lang="en" xmlns="http://www.w3.org/1999/xhtml"><></html>')
self.assertEqual(XHTML('<>', _a='1', _b='2', _xmlns='xmlns').xml(),
b'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n<html a="1" b="2" lang="en" xml:lang="en" xmlns="http://www.w3.org/1999/xhtml"><></html>')
def test_HEAD(self):
self.assertEqual(HEAD('<>', _a='1', _b='2').xml(),
b'<head a="1" b="2"><></head>')
def test_TITLE(self):
self.assertEqual(TITLE('<>', _a='1', _b='2').xml(),
b'<title a="1" b="2"><></title>')
def test_META(self):
self.assertEqual(META(_a='1', _b='2').xml(),
b'<meta a="1" b="2" />')
def test_LINK(self):
self.assertEqual(LINK(_a='1', _b='2').xml(),
b'<link a="1" b="2" />')
def test_SCRIPT(self):
self.assertEqual(SCRIPT('<>', _a='1', _b='2').xml(),
b'''<script a="1" b="2"><!--
<>
//--></script>''')
self.assertEqual(SCRIPT('<>').xml(),
b'''<script><!--
<>
//--></script>''')
self.assertEqual(SCRIPT().xml(), b'<script></script>')
self.assertEqual(SCRIPT(';').xml() + DIV().xml(),
b'<script><!--\n;\n//--></script><div></div>')
def test_STYLE(self):
self.assertEqual(STYLE('<>', _a='1', _b='2').xml(),
b'<style a="1" b="2"><!--/*--><![CDATA[/*><!--*/\n<>\n/*]]>*/--></style>')
# Try to hit : return DIV.xml(self)
self.assertEqual(STYLE().xml(), b'<style></style>')
def test_IMG(self):
self.assertEqual(IMG(_a='1', _b='2').xml(),
b'<img a="1" b="2" />')
def test_SPAN(self):
self.assertEqual(SPAN('<>', _a='1', _b='2').xml(),
b'<span a="1" b="2"><></span>')
def test_BODY(self):
self.assertEqual(BODY('<>', _a='1', _b='2').xml(),
b'<body a="1" b="2"><></body>')
def test_H1(self):
self.assertEqual(H1('<>', _a='1', _b='2').xml(),
b'<h1 a="1" b="2"><></h1>')
def test_H2(self):
self.assertEqual(H2('<>', _a='1', _b='2').xml(),
b'<h2 a="1" b="2"><></h2>')
def test_H3(self):
self.assertEqual(H3('<>', _a='1', _b='2').xml(),
b'<h3 a="1" b="2"><></h3>')
def test_H4(self):
self.assertEqual(H4('<>', _a='1', _b='2').xml(),
b'<h4 a="1" b="2"><></h4>')
def test_H5(self):
self.assertEqual(H5('<>', _a='1', _b='2').xml(),
b'<h5 a="1" b="2"><></h5>')
def test_H6(self):
self.assertEqual(H6('<>', _a='1', _b='2').xml(),
b'<h6 a="1" b="2"><></h6>')
def test_P(self):
self.assertEqual(P('<>', _a='1', _b='2').xml(),
b'<p a="1" b="2"><></p>')
# test cr2br
self.assertEqual(P('a\nb').xml(), b'<p>a\nb</p>')
self.assertEqual(P('a\nb', cr2br=True).xml(), b'<p>a<br />b</p>')
def test_STRONG(self):
self.assertEqual(STRONG('<>', _a='1', _b='2').xml(),
b'<strong a="1" b="2"><></strong>')
def test_B(self):
self.assertEqual(B('<>', _a='1', _b='2').xml(),
b'<b a="1" b="2"><></b>')
def test_BR(self):
# empty BR()
self.assertEqual(BR().xml(), b'<br />')
self.assertEqual(BR(_a='1', _b='2').xml(), b'<br a="1" b="2" />')
def test_HR(self):
self.assertEqual(HR(_a='1', _b='2').xml(), b'<hr a="1" b="2" />')
def test_A(self):
self.assertEqual(A('<>', _a='1', _b='2').xml(),
b'<a a="1" b="2"><></a>')
self.assertEqual(A('a', cid='b').xml(),
b'<a data-w2p_disable_with="default" data-w2p_method="GET" data-w2p_target="b">a</a>')
self.assertEqual(A('a', callback='b', _id='c').xml(),
b'<a data-w2p_disable_with="default" data-w2p_method="POST" href="b" id="c">a</a>')
# Callback with no id triggers a web2py_uuid() call
from gluon.html import web2pyHTMLParser
#a = A('a', callback='b').xml()
#for tag in web2pyHTMLParser(a).tree.elements('a'):
# uuid_generated = tag.attributes['_id']
#self.assertEqual(a,
# b'<a data-w2p_disable_with="default" data-w2p_method="POST" href="b" id="{id}">a</a>'.format(id=uuid_generated))
self.assertEqual(A('a', delete='tr').xml(),
b'<a data-w2p_disable_with="default" data-w2p_remove="tr">a</a>')
self.assertEqual(A('a', _id='b', target='<self>').xml(),
b'<a data-w2p_disable_with="default" data-w2p_target="b" id="b">a</a>')
self.assertEqual(A('a', component='b').xml(),
b'<a data-w2p_disable_with="default" data-w2p_method="GET" href="b">a</a>')
self.assertEqual(A('a', _id='b', callback='c', noconfirm=True).xml(),
b'<a data-w2p_disable_with="default" data-w2p_method="POST" href="c" id="b">a</a>')
self.assertEqual(A('a', cid='b').xml(),
b'<a data-w2p_disable_with="default" data-w2p_method="GET" data-w2p_target="b">a</a>')
self.assertEqual(A('a', cid='b', _disable_with='processing...').xml(),
b'<a data-w2p_disable_with="processing..." data-w2p_method="GET" data-w2p_target="b">a</a>')
self.assertEqual(A('a', callback='b', delete='tr', noconfirm=True, _id='c').xml(),
b'<a data-w2p_disable_with="default" data-w2p_method="POST" data-w2p_remove="tr" href="b" id="c">a</a>')
self.assertEqual(A('a', callback='b', delete='tr', confirm='Are you sure?', _id='c').xml(),
b'<a data-w2p_confirm="Are you sure?" data-w2p_disable_with="default" data-w2p_method="POST" data-w2p_remove="tr" href="b" id="c">a</a>')
def test_BUTTON(self):
self.assertEqual(BUTTON('test', _type='button').xml(),
b'<button type="button">test</button>')
def test_EM(self):
self.assertEqual(EM('<>', _a='1', _b='2').xml(),
b'<em a="1" b="2"><></em>')
def test_EMBED(self):
self.assertEqual(EMBED(_a='1', _b='2').xml(),
b'<embed a="1" b="2" />')
def test_TT(self):
self.assertEqual(TT('<>', _a='1', _b='2').xml(),
b'<tt a="1" b="2"><></tt>')
def test_PRE(self):
self.assertEqual(PRE('<>', _a='1', _b='2').xml(),
b'<pre a="1" b="2"><></pre>')
def test_CENTER(self):
self.assertEqual(CENTER('<>', _a='1', _b='2').xml(),
b'<center a="1" b="2"><></center>')
def test_CODE(self):
self.assertEqual(CODE("print 'hello world'",
language='python',
link=None,
counter=1,
styles={},
highlight_line=None).xml(),
'<table><tr style="vertical-align:top;"><td style="min-width:40px; text-align: right;"><pre style="\nfont-size: 11px;\nfont-family: Bitstream Vera Sans Mono,monospace;\nbackground-color: transparent;\nmargin: 0;\npadding: 5px;\nborder: none;\ncolor: #A0A0A0;\n">1.</pre></td><td><pre style="\nfont-size: 11px;\nfont-family: Bitstream Vera Sans Mono,monospace;\nbackground-color: transparent;\nmargin: 0;\npadding: 5px;\nborder: none;\noverflow: auto;\nwhite-space: pre !important;\n"><span style="color:#185369; font-weight: bold">print </span><span style="color: #FF9966">\'hello world\'</span></pre></td></tr></table>')
def test_LABEL(self):
self.assertEqual(LABEL('<>', _a='1', _b='2').xml(),
b'<label a="1" b="2"><></label>')
def test_LI(self):
self.assertEqual(LI('<>', _a='1', _b='2').xml(),
b'<li a="1" b="2"><></li>')
def test_UL(self):
self.assertEqual(UL('<>', _a='1', _b='2').xml(),
b'<ul a="1" b="2"><li><></li></ul>')
def test_OL(self):
self.assertEqual(OL('<>', _a='1', _b='2').xml(),
b'<ol a="1" b="2"><li><></li></ol>')
def test_TD(self):
self.assertEqual(TD('<>', _a='1', _b='2').xml(),
b'<td a="1" b="2"><></td>')
def test_TH(self):
self.assertEqual(TH('<>', _a='1', _b='2').xml(),
b'<th a="1" b="2"><></th>')
def test_TR(self):
self.assertEqual(TR('<>', _a='1', _b='2').xml(),
b'<tr a="1" b="2"><td><></td></tr>')
def test_THEAD(self):
self.assertEqual(THEAD('<>', _a='1', _b='2').xml(),
b'<thead a="1" b="2"><tr><th><></th></tr></thead>')
# self.assertEqual(THEAD(TRHEAD('<>'), _a='1', _b='2').xml(),
# '<thead a="1" b="2"><tr><th><></th></tr></thead>')
self.assertEqual(THEAD(TR('<>'), _a='1', _b='2').xml(),
b'<thead a="1" b="2"><tr><td><></td></tr></thead>')
def test_TBODY(self):
self.assertEqual(TBODY('<>', _a='1', _b='2').xml(),
b'<tbody a="1" b="2"><tr><td><></td></tr></tbody>')
def test_TFOOT(self):
self.assertEqual(TFOOT('<>', _a='1', _b='2').xml(),
b'<tfoot a="1" b="2"><tr><td><></td></tr></tfoot>')
def test_COL(self):
# Empty COL test
self.assertEqual(COL().xml(), b'<col />')
# Not Empty COL test
self.assertEqual(COL(_span='2').xml(), b'<col span="2" />')
# Commented out for now; not sure how to make it pass properly (it was passing locally)
# I think this test is interesting and adds value
# I think this failure relates to a Python 2.6 limitation
# Failing COL test
# with self.assertRaises(SyntaxError) as cm:
# COL('<>').xml()
# self.assertEqual(cm.exception[0], '<col/> tags cannot have components')
# For now
self.assertRaises(SyntaxError, COL, b'<>')
def test_COLGROUP(self):
# Empty COLGROUP test
self.assertEqual(COLGROUP().xml(), b'<colgroup></colgroup>')
# Not Empty COLGROUP test
self.assertEqual(COLGROUP('<>', _a='1', _b='2').xml(), b'<colgroup a="1" b="2"><></colgroup>')
def test_TABLE(self):
self.assertEqual(TABLE('<>', _a='1', _b='2').xml(),
b'<table a="1" b="2"><tr><td><></td></tr>' +
b'</table>')
def test_I(self):
self.assertEqual(I('<>', _a='1', _b='2').xml(),
b'<i a="1" b="2"><></i>')
def test_IFRAME(self):
self.assertEqual(IFRAME('<>', _a='1', _b='2').xml(),
b'<iframe a="1" b="2"><></iframe>')
def test_INPUT(self):
self.assertEqual(INPUT(_a='1', _b='2').xml(), b'<input a="1" b="2" type="text" />')
# list value
self.assertEqual(INPUT(_value=[1, 2, 3]).xml(), b'<input type="text" value="[1, 2, 3]" />')
def test_TEXTAREA(self):
self.assertEqual(TEXTAREA('<>', _a='1', _b='2').xml(),
b'<textarea a="1" b="2" cols="40" rows="10"><>' +
b'</textarea>')
# override _rows and _cols
self.assertEqual(TEXTAREA('<>', _a='1', _b='2', _rows=5, _cols=20).xml(),
b'<textarea a="1" b="2" cols="20" rows="5"><>' +
b'</textarea>')
self.assertEqual(TEXTAREA('<>', value='bla bla bla...', _rows=10, _cols=40).xml(),
b'<textarea cols="40" rows="10">bla bla bla...</textarea>')
def test_OPTION(self):
self.assertEqual(OPTION('<>', _a='1', _b='2').xml(),
b'<option a="1" b="2" value="<>"><>' +
b'</option>')
def test_OBJECT(self):
self.assertEqual(OBJECT('<>', _a='1', _b='2').xml(),
b'<object a="1" b="2"><></object>')
def test_OPTGROUP(self):
# Empty OPTGROUP test
self.assertEqual(OPTGROUP().xml(),
b'<optgroup></optgroup>')
# Not Empty OPTGROUP test
self.assertEqual(OPTGROUP('<>', _a='1', _b='2').xml(),
b'<optgroup a="1" b="2"><option value="<>"><></option></optgroup>')
# With an OPTION
self.assertEqual(OPTGROUP(OPTION('Option 1', _value='1'), _label='Group 1').xml(),
b'<optgroup label="Group 1"><option value="1">Option 1</option></optgroup>')
def test_SELECT(self):
self.assertEqual(SELECT('<>', _a='1', _b='2').xml(),
b'<select a="1" b="2">' +
b'<option value="<>"><></option></select>')
self.assertEqual(SELECT(OPTION('option 1', _value='1'),
OPTION('option 2', _value='2')).xml(),
b'<select><option value="1">option 1</option><option value="2">option 2</option></select>')
self.assertEqual(SELECT(OPTION('option 1', _value='1', _selected='selected'),
OPTION('option 2', _value='2'),
_multiple='multiple').xml(),
b'<select multiple="multiple"><option selected="selected" value="1">option 1</option><option value="2">option 2</option></select>')
# More than one selected option with multiple
self.assertEqual(SELECT(OPTION('option 1', _value='1', _selected='selected'),
OPTION('option 2', _value='2', _selected='selected'),
_multiple='multiple').xml(),
b'<select multiple="multiple"><option selected="selected" value="1">option 1</option><option selected="selected" value="2">option 2</option></select>'
)
# OPTGROUP
self.assertEqual(SELECT(OPTGROUP(OPTION('option 1', _value='1'),
OPTION('option 2', _value='2'),
_label='Group 1',)).xml(),
b'<select><optgroup label="Group 1"><option value="1">option 1</option><option value="2">option 2</option></optgroup></select>')
# List
self.assertEqual(SELECT([1, 2, 3, 4, 5]).xml(),
b'<select><option value="1">1</option><option value="2">2</option><option value="3">3</option><option value="4">4</option><option value="5">5</option></select>')
# Tuple
self.assertEqual(SELECT((1, 2, 3, 4, 5)).xml(),
b'<select><option value="1">1</option><option value="2">2</option><option value="3">3</option><option value="4">4</option><option value="5">5</option></select>')
# String value
self.assertEqual(SELECT('Option 1', 'Option 2').xml(),
b'<select><option value="Option 1">Option 1</option><option value="Option 2">Option 2</option></select>')
# list as a value
self.assertEqual(SELECT(OPTION('option 1', _value=[1, 2, 3]),
OPTION('option 2', _value=[4, 5, 6], _selected='selected'),
_multiple='multiple').xml(),
b'<select multiple="multiple"><option value="[1, 2, 3]">option 1</option><option selected="selected" value="[4, 5, 6]">option 2</option></select>')
def test_FIELDSET(self):
self.assertEqual(FIELDSET('<>', _a='1', _b='2').xml(),
b'<fieldset a="1" b="2"><></fieldset>')
def test_LEGEND(self):
self.assertEqual(LEGEND('<>', _a='1', _b='2').xml(),
b'<legend a="1" b="2"><></legend>')
def test_FORM(self):
self.assertEqual(FORM('<>', _a='1', _b='2').xml(),
b'<form a="1" action="#" b="2" enctype="multipart/form-data" method="post"><></form>')
# These 2 crash AppVeyor and Travis with: "ImportError: No YAML serializer available"
# self.assertEqual(FORM('<>', _a='1', _b='2').as_yaml(),
# "accepted: null\nattributes: {_a: '1', _action: '#', _b: '2', _enctype: multipart/form-data, _method: post}\ncomponents: [<>]\nerrors: {}\nlatest: {}\nparent: null\nvars: {}\n")
# TODO check tags content
self.assertEqual(len(FORM('<>', _a='1', _b='2').as_xml()), 334)
def test_BEAUTIFY(self):
#self.assertEqual(BEAUTIFY(['a', 'b', {'hello': 'world'}]).xml(),
# '<div><table><tr><td><div>a</div></td></tr><tr><td><div>b</div></td></tr><tr><td><div><table><tr><td style="font-weight:bold;vertical-align:top;">hello</td><td style="vertical-align:top;">:</td><td><div>world</div></td></tr></table></div></td></tr></table></div>')
# unicode
self.assertEqual(BEAUTIFY([P(u'àéèûôç'), 'a', 'b', {'hello': 'world'}]).xml(),
b'<div><table><tr><td><div><p>\xc3\xa0\xc3\xa9\xc3\xa8\xc3\xbb\xc3\xb4\xc3\xa7</p></div></td></tr><tr><td><div>a</div></td></tr><tr><td><div>b</div></td></tr><tr><td><div><table><tr><td style="font-weight:bold;vertical-align:top;">hello</td><td style="vertical-align:top;">:</td><td><div>world</div></td></tr></table></div></td></tr></table></div>')
def test_MENU(self):
self.assertEqual(MENU([('Home', False, '/welcome/default/index', [])]).xml(),
b'<ul class="web2py-menu web2py-menu-vertical"><li class="web2py-menu-first"><a href="/welcome/default/index">Home</a></li></ul>')
# Multiples entries menu
self.assertEqual(MENU([('Home', False, '/welcome/default/index', []),
('Item 1', False, '/welcome/default/func_one', []),
('Item 2', False, '/welcome/default/func_two', []),
('Item 3', False, '/welcome/default/func_three', []),
('Item 4', False, '/welcome/default/func_four', [])]).xml(),
b'<ul class="web2py-menu web2py-menu-vertical"><li class="web2py-menu-first"><a href="/welcome/default/index">Home</a></li><li><a href="/welcome/default/func_one">Item 1</a></li><li><a href="/welcome/default/func_two">Item 2</a></li><li><a href="/welcome/default/func_three">Item 3</a></li><li class="web2py-menu-last"><a href="/welcome/default/func_four">Item 4</a></li></ul>'
)
# mobile=True
self.assertEqual(MENU([('Home', False, '/welcome/default/index', [])], mobile=True).xml(),
b'<select class="web2py-menu web2py-menu-vertical" onchange="window.location=this.value"><option value="/welcome/default/index">Home</option></select>')
# Multiples entries menu for mobile
self.assertEqual(MENU([('Home', False, '/welcome/default/index', []),
('Item 1', False, '/welcome/default/func_one', []),
('Item 2', False, '/welcome/default/func_two', []),
('Item 3', False, '/welcome/default/func_three', []),
('Item 4', False, '/welcome/default/func_four', [])], mobile=True).xml(),
b'<select class="web2py-menu web2py-menu-vertical" onchange="window.location=this.value"><option value="/welcome/default/index">Home</option><option value="/welcome/default/func_one">Item 1</option><option value="/welcome/default/func_two">Item 2</option><option value="/welcome/default/func_three">Item 3</option><option value="/welcome/default/func_four">Item 4</option></select>')
# TODO: def test_embed64(self):
def test_web2pyHTMLParser(self):
#tag should not be a byte
self.assertEqual(web2pyHTMLParser("<div></div>").tree.components[0].tag, 'div')
a = str(web2pyHTMLParser('<div>a<span>b</div>c').tree)
self.assertEqual(a, "<div>a<span>b</span></div>c")
tree = web2pyHTMLParser('hello<div a="b">world</div>').tree
tree.element(_a='b')['_c']=5
self.assertEqual(str(tree), 'hello<div a="b" c="5">world</div>')
a = str(web2pyHTMLParser('<div><img class="img"/></div>', closed=['img']).tree)
self.assertEqual(a, '<div><img class="img" /></div>')
#greater-than sign ( > ) --> decimal > --> hexadecimal >
#Less-than sign ( < ) --> decimal < --> hexadecimal <
# test decimal
a = str(web2pyHTMLParser('<div>< ></div>').tree)
self.assertEqual(a, '<div>< ></div>')
# test hexadecimal
a = str(web2pyHTMLParser('<div>< ></div>').tree)
self.assertEqual(a, '<div>< ></div>')
def test_markdown(self):
        def markdown(text, tag=None, attributes={}):
            r = {None: re.sub(r'\s+', ' ', text),
                 'h1': '#' + text + '\\n\\n',
                 'p': text + '\\n'}.get(tag, text)
            return r
a=TAG('<h1>Header</h1><p>this is a test</p>')
ret = a.flatten(markdown)
self.assertEqual(ret, '#Header\\n\\nthis is a test\\n')
# TODO: def test_markdown_serializer(self):
# TODO: def test_markmin_serializer(self):
def test_MARKMIN(self):
        # This test passes with Python 2.7 but is expected to fail under 2.6
# with self.assertRaises(TypeError) as cm:
# MARKMIN().xml()
# self.assertEqual(cm.exception[0], '__init__() takes at least 2 arguments (1 given)')
# For now
self.assertRaises(TypeError, MARKMIN)
self.assertEqual(MARKMIN('').xml(), b'')
self.assertEqual(MARKMIN('<>').xml(),
b'<p><></p>')
self.assertEqual(MARKMIN("``hello_world = 'Hello World!'``:python").xml(),
b'<code class="python">hello_world = \'Hello World!\'</code>')
self.assertEqual(MARKMIN('<>').flatten(), b'<>')
def test_ASSIGNJS(self):
        # empty assignment
self.assertEqual(ASSIGNJS().xml(), b'')
        # text assignment
self.assertEqual(ASSIGNJS(var1='1').xml(), b'var var1 = "1";\n')
        # int assignment
self.assertEqual(ASSIGNJS(var2=2).xml(), b'var var2 = 2;\n')
class TestData(unittest.TestCase):
def test_Adata(self):
self.assertEqual(A('<>', data=dict(abc='<def?asd>', cde='standard'), _a='1', _b='2').xml(),
b'<a a="1" b="2" data-abc="<def?asd>" data-cde="standard"><></a>')
|
py | 7dfc899d37607fd6307ed0aa87e9b6a90d67a75a | # Definition for a binary tree node.
from typing import List
from leetcode.binaryTrees.TreeNode import *
class Solution:
def preorderTraversal(self, root: TreeNode) -> List[int]:
# to return an empty array if root is empty
if not root:
return []
# appending the root value in array and recursively going into left and right subtrees
return [root.val] + self.preorderTraversal(root.left) + self.preorderTraversal(root.right)
print(Solution().preorderTraversal(initializeBinaryTree()))
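# Example (illustrative): for the classic LeetCode tree [1, null, 2, 3] (root 1 with a right child 2,
# whose left child is 3), the preorder traversal above returns [1, 2, 3].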
|
py | 7dfc8ab078af4f42e4159653dc2e09403911150c | import rospy
from lowpass import LowPassFilter
from pid import PID
from yaw_controller import YawController
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
MIN_SPEED = 0.1 # m/s
# Actuator bounds
MIN_THROTTLE = 0.0
MAX_THROTTLE = 0.2
# Filter tuning constants
TAU = 0.5
TS = 0.02
# PID-Controller tuning parameters
KP = 0.18
KI = 0.0002
KD = 3.2
# As explained in the walk-through, this is the brake torque needed to keep the vehicle in place.
TORQUE_TO_KEEP_VEHICLE_STATIONARY = 700 # Nm
class Controller(object):
def __init__(self,
vehicle_mass,
fuel_capacity,
brake_deadband,
decel_limit,
accel_limit,
wheel_radius,
wheel_base,
steer_ratio,
max_lat_accel,
max_steer_angle):
self.vehicle_mass = vehicle_mass
self.fuel_capacity = fuel_capacity
self.brake_deadband = brake_deadband
self.decel_limit = decel_limit
self.accel_limit = accel_limit
self.wheel_radius = wheel_radius
self.yaw_controller = YawController(
wheel_base, steer_ratio, MIN_SPEED, max_lat_accel, max_steer_angle)
self.throttle_controller = PID(
KP, KI, KD, MIN_THROTTLE, MAX_THROTTLE)
self.v_low_pass_filter = LowPassFilter(TAU, TS)
self.last_time = rospy.get_time()
def control(self, current_vel, dbw_enabled, linear_vel, angular_vel):
if not dbw_enabled:
self.throttle_controller.reset()
return 0, 0, 0
current_time = rospy.get_time()
current_vel = self.v_low_pass_filter.filt(current_vel)
error_vel = linear_vel - current_vel
steering = self.yaw_controller.get_steering(
linear_vel, angular_vel, current_vel)
throttle = self.throttle_controller.step(
error=error_vel,
sample_time=current_time - self.last_time)
brake = 0
if linear_vel == 0 and current_vel < 0.1:
# The vehicle is stopped.
throttle = 0
brake = TORQUE_TO_KEEP_VEHICLE_STATIONARY
elif throttle < 0.1 and error_vel < 0:
# Velocity error is negative, so we need to slow down.
throttle = 0
decel = max(error_vel, self.decel_limit)
brake = abs(decel) * self.vehicle_mass * self.wheel_radius
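            # Torque [N*m] = |deceleration| [m/s^2] * vehicle mass [kg] * wheel radius [m].
            # Illustrative numbers (assumed, not taken from this project): 1.0 * 1736.35 * 0.2413 ~ 419 N*m.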
self.last_vel = current_vel
self.last_time = rospy.get_time()
return throttle, brake, steering
|
py | 7dfc8d346cd8ba514fc66024d62a3ef2b56d52bd | import pymysql
MYSQL_CONFIG = {
'host': 'mysql', # mysql or '127.0.0.1'
'port': 3306,
'user': 'root',
'password': 'mysql520',
'charset': 'utf8',
'use_unicode': True,
'cursorclass': pymysql.cursors.DictCursor,
'connect_timeout': 60,
'maxconnections': 50
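    # NOTE: 'maxconnections' is not a pymysql.connect() keyword argument; it is presumably consumed
    # by a connection-pooling layer (e.g. DBUtils' PooledDB) that this config dict is passed to.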
}
|
py | 7dfc8dbf1b00e8bee4d93abb28cacba87e1b7739 | class ArgumentError(Exception):
pass
class UploadError(Exception):
pass
class ReleaseError(Exception):
pass
|
py | 7dfc8dc5597029fc2601f9ef7317673d75ec3f55 |
import argparse
from py_template.__main__ import main
if __name__ == '__main__':
main() |
py | 7dfc8f710fa00cbd0acb83b7eb46260de8092959 | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_autonomous_database_regional_wallet_facts
short_description: Fetches details about a AutonomousDatabaseRegionalWallet resource in Oracle Cloud Infrastructure
description:
- Fetches details about a AutonomousDatabaseRegionalWallet resource in Oracle Cloud Infrastructure
- Gets the Autonomous Database regional wallet details.
version_added: "2.9.0"
author: Oracle (@oracle)
options: {}
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific autonomous_database_regional_wallet
oci_database_autonomous_database_regional_wallet_facts:
"""
RETURN = """
autonomous_database_regional_wallet:
description:
- AutonomousDatabaseRegionalWallet resource
returned: on success
type: complex
contains:
lifecycle_state:
description:
- The current lifecycle state of the Autonomous Database wallet.
returned: on success
type: str
sample: ACTIVE
time_rotated:
description:
- The date and time the wallet was last rotated.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
sample: {
"lifecycle_state": "ACTIVE",
"time_rotated": "2013-10-20T19:20:30+01:00"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.database import DatabaseClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class AutonomousDatabaseRegionalWalletFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get"""
def get_required_params_for_get(self):
return []
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_autonomous_database_regional_wallet,
)
AutonomousDatabaseRegionalWalletFactsHelperCustom = get_custom_class(
"AutonomousDatabaseRegionalWalletFactsHelperCustom"
)
class ResourceFactsHelper(
AutonomousDatabaseRegionalWalletFactsHelperCustom,
AutonomousDatabaseRegionalWalletFactsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(dict())
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="autonomous_database_regional_wallet",
service_client_class=DatabaseClient,
namespace="database",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
else:
resource_facts_helper.fail()
module.exit_json(autonomous_database_regional_wallet=result)
if __name__ == "__main__":
main()
|
py | 7dfc91f7d80cfdbff08d6194352b8aa9350fda70 | from OpenGLCffi.GL import params
@params(api='gl', prms=['pname', 'param'])
def glTexBumpParameterivATI(pname, param):
pass
@params(api='gl', prms=['pname', 'param'])
def glTexBumpParameterfvATI(pname, param):
pass
@params(api='gl', prms=['pname', 'param'])
def glGetTexBumpParameterivATI(pname, param):
pass
@params(api='gl', prms=['pname', 'param'])
def glGetTexBumpParameterfvATI(pname, param):
pass
|
py | 7dfc931a5ef6f721662af7362c81b322106278c6 | # -*- coding: utf-8 -*-
"""Bytecode Interpreter operations for Python 3.4
"""
from __future__ import print_function, division
import inspect
import types
from xdis import PYTHON_VERSION, IS_PYPY
from xpython.byteop.byteop24 import Version_info
from xpython.byteop.byteop32 import ByteOp32
from xpython.byteop.byteop33 import ByteOp33
from xpython.pyobj import Function
# Gone since 3.3
del ByteOp32.STORE_LOCALS
class ByteOp34(ByteOp33):
def __init__(self, vm):
super(ByteOp34, self).__init__(vm)
self.version = "3.4.6 (default, Oct 27 1955, 00:00:00)\n[x-python]"
# FIXME: should be a class
self.version_info = Version_info(3, 4, 6, "final", 0)
# New in 3.4
def LOAD_CLASSDEREF(self, count):
"""
Much like LOAD_DEREF but first checks the locals dictionary before
consulting the cell. This is used for loading free variables in class
bodies.
"""
self.vm.push(self.vm.frame.cells[count].get())
##############################################################################
    # The order of functions here is the same as in:
    # https://docs.python.org/3.4/library/dis.html#python-bytecode-instructions
#
##############################################################################
# Changed in 3.4
# Python 3.4 __build_class__ is more strict about what can be a
# function type whereas in earlier version we could get away with
# our own kind of xpython.pyobj.Function object.
#
# Python 3.3 docs describe this but seem to follow pre-3.3
# conventions (which go back to Python 2.x days).
def MAKE_FUNCTION(self, argc):
"""
Pushes a new function object on the stack. From bottom to top, the consumed stack must consist of:
* argc & 0xFF default argument objects in positional order
* (argc >> 8) & 0xFF pairs of name and default argument, with the name just below the object on the stack, for keyword-only parameters
* (argc >> 16) & 0x7FFF parameter annotation objects
        * a tuple listing the parameter names for the annotations (only if there are any annotation objects)
* the code associated with the function (at TOS1)
* the qualified name of the function (at TOS)
"""
rest, default_count = divmod(argc, 256)
annotate_count, kw_default_count = divmod(rest, 256)
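        # Worked example (illustrative): argc = 0x010203 yields default_count = 3,
        # kw_default_count = 2 and annotate_count = 1, matching the layout described above.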
name = self.vm.pop()
code = self.vm.pop()
if annotate_count:
annotate_names = self.vm.pop()
# annotate count includes +1 for the above names
annotate_objects = self.vm.popn(annotate_count - 1)
n = len(annotate_names)
assert n == len(annotate_objects)
annotations = {annotate_names[i]: annotate_objects[i] for i in range(n)}
else:
annotations = {}
if kw_default_count:
kw_default_pairs = self.vm.popn(2 * kw_default_count)
kwdefaults = dict(
kw_default_pairs[i : i + 2] for i in range(0, len(kw_default_pairs), 2)
)
else:
kwdefaults = {}
if default_count:
defaults = self.vm.popn(default_count)
else:
defaults = tuple()
# FIXME: DRY with code in byteop3{2,6}.py
globs = self.vm.frame.f_globals
fn = Function(
name=name,
code=code,
globs=globs,
argdefs=tuple(defaults),
closure=None,
vm=self.vm,
kwdefaults=kwdefaults,
annotations=annotations,
# FIXME: figure out qualname
)
if (
inspect.iscode(code)
and self.version == PYTHON_VERSION
and self.is_pypy == IS_PYPY
):
# Python 3.4 __build_class__ is more strict about what can be a
# function type whereas in earlier version we could get away with
# our own kind of xpython.pyobj.Function object.
native_fn = types.FunctionType(code, globs, name, tuple(defaults))
native_fn.__kwdefaults__ = kwdefaults
            native_fn.__annotations__ = annotations
self.vm.fn2native[fn] = native_fn
self.vm.push(fn)
|
py | 7dfc938df3b58ca252cdeaba088e1d9722cd11da |
EXTENSIONS_GROUP_SUFFIX = '.k8s.io'
CORE_GROUP = 'core'
class ApiVersionParser:
def parse(self, api_version):
# apiVersion: v1 -> group=core, version=v1
# apiVersion: apiextensions.k8s.io/v1beta1 -> group = apiextensions.k8s.io, version = v1beta1
group, slash, version = api_version.partition('/')
if len(version) == 0:
version = group
group = CORE_GROUP
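        # e.g. parse('apps/v1') -> ('apps', 'v1'); parse('v1') -> ('core', 'v1')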
return group, version |
py | 7dfc93a904983ed51b61641dea1e7c8edc15649b | """
Django settings for simple_api_server_test project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7=^$pnw@5tc&0uyo7qw%h@9q-s3)v(9rnx@lw%t932r=1a@#4_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    # Register the new application
'get_by_douban'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'simple_api_server_test.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'simple_api_server_test.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
py | 7dfc943338eab0790dfeefe7ec0491edce73db39 | from .model_zoo import get_model
from .model_store import get_model_file
from .base import *
from .fcn import *
from .psp import *
from .encnet import *
from .danet import *
def get_segmentation_model(name, **kwargs):
from .fcn import get_fcn
models = {
'fcn': get_fcn,
'psp': get_psp,
'encnet': get_encnet,
'danet': get_danet,
}
return models[name.lower()](**kwargs)
|
py | 7dfc9488b9116d9b149b0715a51d160c17a139f2 | import ta
import pandas as pd
from util.errors import IndicatorsError
from util.logger import get_logger
LOGGER = get_logger(__name__)
def add_indicators(data: pd.DataFrame) -> pd.DataFrame:
"""
This method creates technical indicators, based on the OHLC and volume bars
:param data: pandas DataFrame, containing open, high, low and close and
optional volume columns
:return: DataFrame with added technical indicators
"""
assert 'open' in data.columns, "open column not present or with different name"
assert 'high' in data.columns, "high column not present or with different name"
assert 'low' in data.columns, "low column not present or with different name"
assert 'close' in data.columns, "close column not present or with different name"
try:
data['RSI'] = ta.rsi(data["close"])
data['TSI'] = ta.tsi(data["close"])
data['UO'] = ta.uo(data["high"], data["low"], data["close"])
data['AO'] = ta.ao(data["high"], data["low"])
data['MACD_diff'] = ta.macd_diff(data["close"])
data['Vortex_pos'] = ta.vortex_indicator_pos(data["high"], data["low"], data["close"])
data['Vortex_neg'] = ta.vortex_indicator_neg(data["high"], data["low"], data["close"])
data['Vortex_diff'] = abs(data['Vortex_pos'] - data['Vortex_neg'])
data['Trix'] = ta.trix(data["close"])
data['Mass_index'] = ta.mass_index(data["high"], data["low"])
data['CCI'] = ta.cci(data["high"], data["low"], data["close"])
data['DPO'] = ta.dpo(data["close"])
data['KST'] = ta.kst(data["close"])
data['KST_sig'] = ta.kst_sig(data["close"])
data['KST_diff'] = (data['KST'] - data['KST_sig'])
data['Aroon_up'] = ta.aroon_up(data["close"])
data['Aroon_down'] = ta.aroon_down(data["close"])
data['Aroon_ind'] = (data['Aroon_up'] - data['Aroon_down'])
data['BBH'] = ta.bollinger_hband(data["close"])
data['BBL'] = ta.bollinger_lband(data["close"])
data['BBM'] = ta.bollinger_mavg(data["close"])
data['BBHI'] = ta.bollinger_hband_indicator(data["close"])
data['BBLI'] = ta.bollinger_lband_indicator(data["close"])
data['KCHI'] = ta.keltner_channel_hband_indicator(data["high"], data["low"], data["close"])
data['KCLI'] = ta.keltner_channel_lband_indicator(data["high"], data["low"], data["close"])
data['DCHI'] = ta.donchian_channel_hband_indicator(data["close"])
data['DCLI'] = ta.donchian_channel_lband_indicator(data["close"])
data['DR'] = ta.daily_return(data["close"])
data['DLR'] = ta.daily_log_return(data["close"])
if 'volume' in data.columns:
data['MFI'] = ta.money_flow_index(data["high"], data["low"], data["close"], data["volume"])
data['ADI'] = ta.acc_dist_index(data["high"], data["low"], data["close"], data["volume"])
data['OBV'] = ta.on_balance_volume(data["close"], data["volume"])
data['CMF'] = ta.chaikin_money_flow(data["high"], data["low"], data["close"], data["volume"])
data['FI'] = ta.force_index(data["close"], data["volume"])
data['EM'] = ta.ease_of_movement(data["high"], data["low"], data["close"], data["volume"])
data['VPT'] = ta.volume_price_trend(data["close"], data["volume"])
data['NVI'] = ta.negative_volume_index(data["close"], data["volume"])
data.fillna(method='bfill', inplace=True)
return data
    except Exception as error:
        # Log the failure before wrapping it; AssertionError is already a subclass of Exception.
        LOGGER.error(error)
        raise IndicatorsError(error)
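# Minimal usage sketch (illustrative; the file name is assumed, column names follow the asserts above):
#   df = pd.read_csv("ohlcv.csv")   # must provide open/high/low/close (+ optional volume)
#   df = add_indicators(df)
#   print(df[["close", "RSI", "MACD_diff"]].tail())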
|
py | 7dfc94ddc2158e24baa6567341797d051072ff34 | #!/usr/bin/python3
"""
Corner to Corner 2 version 2- Choose Your Own Color
Moves a square from the upper right corner to the lower left corner.
Instead of cycling through all the colors, a specific color must be sent
to the function as an argument.
....................
Author: Paul Ryan
This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
# Import modules #
########################################################################
from time import sleep
import unicornhat
from bfp_unicornhat import print_header
from bfp_unicornhat import stop
########################################################################
# Import Variables #
########################################################################
from bfp_unicornhat import C1
from bfp_unicornhat import C2
from bfp_unicornhat import C3
from bfp_unicornhat import C4
from bfp_unicornhat import C5
from bfp_unicornhat import C6
from bfp_unicornhat import C7
from bfp_unicornhat import C8
########################################################################
# Functions #
########################################################################
def corner_to_corner_2_v2(color):
"""
Moves a square from the upper right corner to the lower left corner.
Arguments:
    This function takes an RGB tuple as its single argument.
"""
sleep_speed = 0.1
off = (0, 0, 0)
unicornhat.set_pixel(7, 0, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(6, 0, color)
unicornhat.set_pixel(6, 1, color)
unicornhat.set_pixel(7, 1, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(5, 0, color)
unicornhat.set_pixel(5, 1, color)
unicornhat.set_pixel(5, 2, color)
unicornhat.set_pixel(6, 2, color)
unicornhat.set_pixel(7, 2, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(4, 0, color)
unicornhat.set_pixel(4, 1, color)
unicornhat.set_pixel(4, 2, color)
unicornhat.set_pixel(4, 3, color)
unicornhat.set_pixel(5, 3, color)
unicornhat.set_pixel(6, 3, color)
unicornhat.set_pixel(7, 3, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(3, 0, color)
unicornhat.set_pixel(3, 1, color)
unicornhat.set_pixel(3, 2, color)
unicornhat.set_pixel(3, 3, color)
unicornhat.set_pixel(3, 4, color)
unicornhat.set_pixel(4, 4, color)
unicornhat.set_pixel(5, 4, color)
unicornhat.set_pixel(6, 4, color)
unicornhat.set_pixel(7, 4, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(2, 0, color)
unicornhat.set_pixel(2, 1, color)
unicornhat.set_pixel(2, 2, color)
unicornhat.set_pixel(2, 3, color)
unicornhat.set_pixel(2, 4, color)
unicornhat.set_pixel(2, 5, color)
unicornhat.set_pixel(3, 5, color)
unicornhat.set_pixel(4, 5, color)
unicornhat.set_pixel(5, 5, color)
unicornhat.set_pixel(6, 5, color)
unicornhat.set_pixel(7, 5, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(1, 0, color)
unicornhat.set_pixel(1, 1, color)
unicornhat.set_pixel(1, 2, color)
unicornhat.set_pixel(1, 3, color)
unicornhat.set_pixel(1, 4, color)
unicornhat.set_pixel(1, 5, color)
unicornhat.set_pixel(1, 6, color)
unicornhat.set_pixel(2, 6, color)
unicornhat.set_pixel(3, 6, color)
unicornhat.set_pixel(4, 6, color)
unicornhat.set_pixel(5, 6, color)
unicornhat.set_pixel(6, 6, color)
unicornhat.set_pixel(7, 6, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(0, 0, color)
unicornhat.set_pixel(0, 1, color)
unicornhat.set_pixel(0, 2, color)
unicornhat.set_pixel(0, 3, color)
unicornhat.set_pixel(0, 4, color)
unicornhat.set_pixel(0, 5, color)
unicornhat.set_pixel(0, 6, color)
unicornhat.set_pixel(0, 7, color)
unicornhat.set_pixel(1, 7, color)
unicornhat.set_pixel(2, 7, color)
unicornhat.set_pixel(3, 7, color)
unicornhat.set_pixel(4, 7, color)
unicornhat.set_pixel(5, 7, color)
unicornhat.set_pixel(6, 7, color)
unicornhat.set_pixel(7, 7, color)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(0, 0, off)
unicornhat.set_pixel(1, 0, off)
unicornhat.set_pixel(2, 0, off)
unicornhat.set_pixel(3, 0, off)
unicornhat.set_pixel(4, 0, off)
unicornhat.set_pixel(5, 0, off)
unicornhat.set_pixel(6, 0, off)
unicornhat.set_pixel(7, 0, off)
unicornhat.set_pixel(7, 1, off)
unicornhat.set_pixel(7, 2, off)
unicornhat.set_pixel(7, 3, off)
unicornhat.set_pixel(7, 4, off)
unicornhat.set_pixel(7, 5, off)
unicornhat.set_pixel(7, 6, off)
unicornhat.set_pixel(7, 7, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(0, 1, off)
unicornhat.set_pixel(1, 1, off)
unicornhat.set_pixel(2, 1, off)
unicornhat.set_pixel(3, 1, off)
unicornhat.set_pixel(4, 1, off)
unicornhat.set_pixel(5, 1, off)
unicornhat.set_pixel(6, 1, off)
unicornhat.set_pixel(6, 2, off)
unicornhat.set_pixel(6, 3, off)
unicornhat.set_pixel(6, 4, off)
unicornhat.set_pixel(6, 5, off)
unicornhat.set_pixel(6, 6, off)
unicornhat.set_pixel(6, 7, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(0, 2, off)
unicornhat.set_pixel(1, 2, off)
unicornhat.set_pixel(2, 2, off)
unicornhat.set_pixel(3, 2, off)
unicornhat.set_pixel(4, 2, off)
unicornhat.set_pixel(5, 2, off)
unicornhat.set_pixel(5, 3, off)
unicornhat.set_pixel(5, 4, off)
unicornhat.set_pixel(5, 5, off)
unicornhat.set_pixel(5, 6, off)
unicornhat.set_pixel(5, 7, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(0, 3, off)
unicornhat.set_pixel(1, 3, off)
unicornhat.set_pixel(2, 3, off)
unicornhat.set_pixel(3, 3, off)
unicornhat.set_pixel(4, 3, off)
unicornhat.set_pixel(4, 4, off)
unicornhat.set_pixel(4, 5, off)
unicornhat.set_pixel(4, 6, off)
unicornhat.set_pixel(4, 7, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(0, 4, off)
unicornhat.set_pixel(1, 4, off)
unicornhat.set_pixel(2, 4, off)
unicornhat.set_pixel(3, 4, off)
unicornhat.set_pixel(3, 5, off)
unicornhat.set_pixel(3, 6, off)
unicornhat.set_pixel(3, 7, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(0, 5, off)
unicornhat.set_pixel(1, 5, off)
unicornhat.set_pixel(2, 5, off)
unicornhat.set_pixel(2, 6, off)
unicornhat.set_pixel(2, 7, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(0, 6, off)
unicornhat.set_pixel(1, 6, off)
unicornhat.set_pixel(1, 7, off)
unicornhat.show()
sleep(sleep_speed)
unicornhat.set_pixel(0, 7, off)
unicornhat.show()
sleep(sleep_speed)
if __name__ == '__main__':
COLORS = [C1, C2, C3, C4, C5, C6, C7, C8]
try:
# STEP01: Print header
print_header()
# STEP02: Print instructions in white text
print("\033[1;37;40mPress Ctrl-C to stop the program.")
        # STEP03: Run the animation once for each color
for COLOR in COLORS:
corner_to_corner_2_v2(COLOR)
# STEP04: Exit the program.
stop()
except KeyboardInterrupt:
stop()
|
py | 7dfc953d2e19e63d8c60b281510b349e9bf4e581 | from django.test import TestCase
from weather.models import Subscription, City
from django.contrib.auth import get_user_model
class TestCityModel(TestCase):
def test_city_openweather_id_creation(self):
city = City(name='kyiv')
self.assertEqual(city.openweather_id, 703448)
def test_city_name_that_doesnt_exist(self):
City(name='doest_exist')
self.assertFalse(City.objects.filter(name='doest_exist').exists())
class TestSubscriptionModel(TestCase):
@classmethod
def setUpTestData(cls):
user_model = get_user_model()
test_user = user_model(username='test_user',
email='[email protected]',
password='test_user_password')
test_user.save()
city = City(name='kyiv')
city.save()
test_subscription = Subscription(user=test_user,
city=city)
test_subscription.save()
def test_subscription_default_period(self):
subscription = Subscription.objects.last()
self.assertEqual(subscription.reminding_time_period, Subscription.Period.NO_REMINDING)
|
py | 7dfc9694327f2348ea363d33afb0837904cf0c12 | from flask import Blueprint, render_template
ui_module = Blueprint("site", __name__, template_folder="../../ui/templates")
@ui_module.route("/")
@ui_module.route("/homepage")
def homepage():
return render_template("index.html")
# default route - error page
@ui_module.route("/", defaults={"path": ""})
@ui_module.route("/<path:path>")
def catch_all(path):
return render_template("error.html")
|
py | 7dfc96bfc0d952be2643810e7dd245126cadc151 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('reachapp', '0003_reachtrackerlog_code'),
]
operations = [
migrations.AlterField(
model_name='reachtracker',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AlterField(
model_name='reachtrackerlog',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
py | 7dfc9828b518844b4f361e2c8b6b3d88d5790846 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import astropy.units as u
from astropy.coordinates.angle_utilities import angular_separation
from astropy.coordinates import Angle, Longitude, Latitude
from ...utils.fitting import Parameter, Parameters, Model
from ...maps import Map
__all__ = [
"SkySpatialModel",
"SkyPointSource",
"SkyGaussian",
"SkyDisk",
"SkyShell",
"SkyDiffuseConstant",
"SkyDiffuseMap",
]
log = logging.getLogger(__name__)
class SkySpatialModel(Model):
"""Sky spatial model base class."""
def __call__(self, lon, lat):
"""Call evaluate method"""
kwargs = dict()
for par in self.parameters.parameters:
kwargs[par.name] = par.quantity
return self.evaluate(lon, lat, **kwargs)
class SkyPointSource(SkySpatialModel):
r"""Point Source.
.. math::
\phi(lon, lat) = \delta{(lon - lon_0, lat - lat_0)}
A tolerance of 1 arcsecond is accepted for numerical stability
Parameters
----------
lon_0 : `~astropy.coordinates.Longitude`
:math:`lon_0`
lat_0 : `~astropy.coordinates.Latitude`
:math:`lat_0`
"""
def __init__(self, lon_0, lat_0):
self.parameters = Parameters(
[Parameter("lon_0", Longitude(lon_0)), Parameter("lat_0", Latitude(lat_0))]
)
@staticmethod
def evaluate(lon, lat, lon_0, lat_0):
"""Evaluate the model (static function)."""
wrapval = lon_0 + 180 * u.deg
lon = Angle(lon).wrap_at(wrapval)
_, grad_lon = np.gradient(lon)
grad_lat, _ = np.gradient(lat)
lon_diff = np.abs((lon - lon_0) / grad_lon)
lat_diff = np.abs((lat - lat_0) / grad_lat)
lon_val = np.select([lon_diff < 1], [1 - lon_diff], 0) / np.abs(grad_lon)
lat_val = np.select([lat_diff < 1], [1 - lat_diff], 0) / np.abs(grad_lat)
return lon_val * lat_val
class SkyGaussian(SkySpatialModel):
r"""Two-dimensional symmetric Gaussian model.
.. math::
\phi(lon, lat) = \frac{1}{2\pi\sigma^2} \exp{\left(-\frac{1}{2}
\frac{\theta^2}{\sigma^2}\right)}
where :math:`\theta` is the sky separation
Parameters
----------
lon_0 : `~astropy.coordinates.Longitude`
:math:`lon_0`
lat_0 : `~astropy.coordinates.Latitude`
:math:`lat_0`
sigma : `~astropy.coordinates.Angle`
:math:`\sigma`
"""
def __init__(self, lon_0, lat_0, sigma):
self.parameters = Parameters(
[
Parameter("lon_0", Longitude(lon_0)),
Parameter("lat_0", Latitude(lat_0)),
Parameter("sigma", Angle(sigma)),
]
)
@staticmethod
def evaluate(lon, lat, lon_0, lat_0, sigma):
"""Evaluate the model (static function)."""
sep = angular_separation(lon, lat, lon_0, lat_0)
norm = 1 / (2 * np.pi * sigma ** 2)
exponent = -0.5 * (sep / sigma) ** 2
return norm * np.exp(exponent)
class SkyDisk(SkySpatialModel):
r"""Constant radial disk model.
.. math::
        \phi(lon, lat) = \frac{1}{2 \pi (1 - \cos{r_0})} \cdot
                \begin{cases}
                    1 & \text{for } \theta \leq r_0 \\
                    0 & \text{for } \theta > r_0
                \end{cases}
where :math:`\theta` is the sky separation
Parameters
----------
lon_0 : `~astropy.coordinates.Longitude`
:math:`lon_0`
lat_0 : `~astropy.coordinates.Latitude`
:math:`lat_0`
r_0 : `~astropy.coordinates.Angle`
:math:`r_0`
"""
def __init__(self, lon_0, lat_0, r_0):
self.parameters = Parameters(
[
Parameter("lon_0", Longitude(lon_0)),
Parameter("lat_0", Latitude(lat_0)),
Parameter("r_0", Angle(r_0)),
]
)
@staticmethod
def evaluate(lon, lat, lon_0, lat_0, r_0):
"""Evaluate the model (static function)."""
sep = angular_separation(lon, lat, lon_0, lat_0)
# Surface area of a spherical cap, see https://en.wikipedia.org/wiki/Spherical_cap
norm = 1.0 / (2 * np.pi * (1 - np.cos(r_0)))
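        # A cap of opening angle r_0 subtends a solid angle of 2 * pi * (1 - cos(r_0)) sr,
        # so dividing by it makes the disk integrate to one over the sky.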
return u.Quantity(norm.value * (sep <= r_0), "sr-1", copy=False)
class SkyShell(SkySpatialModel):
r"""Shell model
.. math::
\phi(lon, lat) = \frac{3}{2 \pi (r_{out}^3 - r_{in}^3)} \cdot
\begin{cases}
\sqrt{r_{out}^2 - \theta^2} - \sqrt{r_{in}^2 - \theta^2} &
\text{for } \theta \lt r_{in} \\
\sqrt{r_{out}^2 - \theta^2} &
\text{for } r_{in} \leq \theta \lt r_{out} \\
0 & \text{for } \theta > r_{out}
\end{cases}
    where :math:`\theta` is the sky separation and :math:`r_{out} = r_{in} + \mathrm{width}`
Note that the normalization is a small angle approximation,
although that approximation is still very good even for 10 deg radius shells.
Parameters
----------
lon_0 : `~astropy.coordinates.Longitude`
:math:`lon_0`
lat_0 : `~astropy.coordinates.Latitude`
:math:`lat_0`
radius : `~astropy.coordinates.Angle`
Inner radius, :math:`r_{in}`
width : `~astropy.coordinates.Angle`
Shell width
"""
def __init__(self, lon_0, lat_0, radius, width):
self.parameters = Parameters(
[
Parameter("lon_0", Longitude(lon_0)),
Parameter("lat_0", Latitude(lat_0)),
Parameter("radius", Angle(radius)),
Parameter("width", Angle(width)),
]
)
@staticmethod
def evaluate(lon, lat, lon_0, lat_0, radius, width):
"""Evaluate the model (static function)."""
sep = angular_separation(lon, lat, lon_0, lat_0)
radius_out = radius + width
norm = 3 / (2 * np.pi * (radius_out ** 3 - radius ** 3))
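        # Small-angle normalization: integrating the bracketed profile over the plane gives
        # (2 * pi / 3) * (radius_out**3 - radius**3), hence this prefactor (cf. the docstring note).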
with np.errstate(invalid="ignore"):
# np.where and np.select do not work with quantities, so we use the
# workaround with indexing
value = np.sqrt(radius_out ** 2 - sep ** 2)
mask = [sep < radius]
value[mask] = (value - np.sqrt(radius ** 2 - sep ** 2))[mask]
value[sep > radius_out] = 0
return norm * value
class SkyDiffuseConstant(SkySpatialModel):
"""Spatially constant (isotropic) spatial model.
Parameters
----------
value : `~astropy.units.Quantity`
Value
"""
def __init__(self, value=1):
self.parameters = Parameters([Parameter("value", value)])
@staticmethod
def evaluate(lon, lat, value):
return value
class SkyDiffuseMap(SkySpatialModel):
"""Spatial sky map template model (2D).
This is for a 2D image. Use `~gammapy.cube.SkyDiffuseCube` for 3D cubes with
an energy axis.
Parameters
----------
map : `~gammapy.maps.Map`
Map template
norm : float
Norm parameter (multiplied with map values)
meta : dict, optional
Meta information, meta['filename'] will be used for serialization
normalize : bool
Normalize the input map so that it integrates to unity.
interp_kwargs : dict
Interpolation keyword arguments passed to `Map.interp_by_coord()`.
Default arguments are {'interp': 'linear', 'fill_value': 0}.
"""
def __init__(self, map, norm=1, meta=None, normalize=True, interp_kwargs=None):
if (map.data < 0).any():
            log.warning(
"Map template contains negative values, please check the"
" data and fix if needed."
)
self.map = map
if normalize:
self.normalize()
self.parameters = Parameters([Parameter("norm", norm)])
self.meta = dict() if meta is None else meta
interp_kwargs = {} if interp_kwargs is None else interp_kwargs
interp_kwargs.setdefault("interp", "linear")
interp_kwargs.setdefault("fill_value", 0)
self._interp_kwargs = interp_kwargs
def normalize(self):
"""Normalize the diffuse map model so that it integrates to unity."""
data = self.map.data / self.map.data.sum()
data /= self.map.geom.solid_angle().to_value("sr")
self.map = self.map.copy(data=data, unit="sr-1")
@classmethod
def read(cls, filename, normalize=True, **kwargs):
"""Read spatial template model from FITS image.
The default unit used if none is found in the file is ``sr-1``.
Parameters
----------
filename : str
FITS image filename.
normalize : bool
Normalize the input map so that it integrates to unity.
kwargs : dict
Keyword arguments passed to `Map.read()`.
"""
m = Map.read(filename, **kwargs)
if m.unit == "":
m.unit = "sr-1"
return cls(m, normalize=normalize)
def evaluate(self, lon, lat, norm):
"""Evaluate model."""
coord = {"lon": lon.to_value("deg"), "lat": lat.to_value("deg")}
val = self.map.interp_by_coord(coord, **self._interp_kwargs)
return u.Quantity(norm.value * val, self.map.unit, copy=False)
|
py | 7dfc992699009fd51015a8ffddd9e27ae7ab26c3 | import datetime
import pandas as pd
import pytest
import ibis
import ibis.expr.datatypes as dt
pytestmark = pytest.mark.bigquery
pytest.importorskip('google.cloud.bigquery')
def test_timestamp_accepts_date_literals(alltypes, project_id):
date_string = '2009-03-01'
param = ibis.param(dt.timestamp).name('param_0')
expr = alltypes.mutate(param=param)
params = {param: date_string}
result = expr.compile(params=params)
expected = """\
SELECT *, @param AS `param`
FROM `{}.testing.functional_alltypes`""".format(
project_id
)
assert result == expected
@pytest.mark.parametrize(
('distinct', 'expected_keyword'), [(True, 'DISTINCT'), (False, 'ALL')]
)
def test_union(alltypes, distinct, expected_keyword, project_id):
expr = alltypes.union(alltypes, distinct=distinct)
result = expr.compile()
expected = """\
SELECT *
FROM `{project}.testing.functional_alltypes`
UNION {}
SELECT *
FROM `{project}.testing.functional_alltypes`""".format(
expected_keyword, project=project_id
)
assert result == expected
def test_ieee_divide(alltypes, project_id):
expr = alltypes.double_col / 0
result = expr.compile()
expected = """\
SELECT IEEE_DIVIDE(`double_col`, 0) AS `tmp`
FROM `{}.testing.functional_alltypes`""".format(
project_id
)
assert result == expected
def test_identical_to(alltypes, project_id):
t = alltypes
pred = t.string_col.identical_to('a') & t.date_string_col.identical_to('b')
expr = t[pred]
result = expr.compile()
expected = """\
SELECT *
FROM `{}.testing.functional_alltypes`
WHERE (((`string_col` IS NULL) AND ('a' IS NULL)) OR (`string_col` = 'a')) AND
(((`date_string_col` IS NULL) AND ('b' IS NULL)) OR (`date_string_col` = 'b'))""".format( # noqa: E501
project_id
)
assert result == expected
@pytest.mark.parametrize('timezone', [None, 'America/New_York'])
def test_to_timestamp(alltypes, timezone, project_id):
expr = alltypes.date_string_col.to_timestamp('%F', timezone)
result = expr.compile()
if timezone:
expected = """\
SELECT PARSE_TIMESTAMP('%F', `date_string_col`, 'America/New_York') AS `tmp`
FROM `{}.testing.functional_alltypes`""".format(
project_id
)
else:
expected = """\
SELECT PARSE_TIMESTAMP('%F', `date_string_col`) AS `tmp`
FROM `{}.testing.functional_alltypes`""".format(
project_id
)
assert result == expected
@pytest.mark.parametrize(
('case', 'expected', 'dtype'),
[
(datetime.date(2017, 1, 1), "DATE '{}'".format('2017-01-01'), dt.date),
(
pd.Timestamp('2017-01-01'),
"DATE '{}'".format('2017-01-01'),
dt.date,
),
('2017-01-01', "DATE '{}'".format('2017-01-01'), dt.date),
(
datetime.datetime(2017, 1, 1, 4, 55, 59),
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
),
(
'2017-01-01 04:55:59',
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
),
(
pd.Timestamp('2017-01-01 04:55:59'),
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
),
],
)
def test_literal_date(case, expected, dtype):
expr = ibis.literal(case, type=dtype).year()
result = ibis.bigquery.compile(expr)
assert result == "SELECT EXTRACT(year from {}) AS `tmp`".format(expected)
@pytest.mark.parametrize(
('case', 'expected', 'dtype', 'strftime_func'),
[
(
datetime.date(2017, 1, 1),
"DATE '{}'".format('2017-01-01'),
dt.date,
'FORMAT_DATE',
),
(
pd.Timestamp('2017-01-01'),
"DATE '{}'".format('2017-01-01'),
dt.date,
'FORMAT_DATE',
),
(
'2017-01-01',
"DATE '{}'".format('2017-01-01'),
dt.date,
'FORMAT_DATE',
),
(
datetime.datetime(2017, 1, 1, 4, 55, 59),
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
'FORMAT_TIMESTAMP',
),
(
'2017-01-01 04:55:59',
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
'FORMAT_TIMESTAMP',
),
(
pd.Timestamp('2017-01-01 04:55:59'),
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
'FORMAT_TIMESTAMP',
),
],
)
def test_day_of_week(case, expected, dtype, strftime_func):
date_var = ibis.literal(case, type=dtype)
expr_index = date_var.day_of_week.index()
result = ibis.bigquery.compile(expr_index)
assert (
result
== "SELECT MOD(EXTRACT(DAYOFWEEK FROM {}) + 5, 7) AS `tmp`".format(
expected
)
) # noqa: E501
expr_name = date_var.day_of_week.full_name()
result = ibis.bigquery.compile(expr_name)
if strftime_func == 'FORMAT_TIMESTAMP':
assert result == "SELECT {}('%A', {}, 'UTC') AS `tmp`".format(
strftime_func, expected
)
else:
assert result == "SELECT {}('%A', {}) AS `tmp`".format(
strftime_func, expected
)
@pytest.mark.parametrize(
('case', 'expected', 'dtype'),
[
(
datetime.datetime(2017, 1, 1, 4, 55, 59),
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
),
(
'2017-01-01 04:55:59',
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
),
(
pd.Timestamp('2017-01-01 04:55:59'),
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
),
(datetime.time(4, 55, 59), "TIME '{}'".format('04:55:59'), dt.time),
('04:55:59', "TIME '{}'".format('04:55:59'), dt.time),
],
)
def test_literal_timestamp_or_time(case, expected, dtype):
expr = ibis.literal(case, type=dtype).hour()
result = ibis.bigquery.compile(expr)
assert result == "SELECT EXTRACT(hour from {}) AS `tmp`".format(expected)
def test_window_function(alltypes, project_id):
t = alltypes
w1 = ibis.window(
preceding=1, following=0, group_by='year', order_by='timestamp_col'
)
expr = t.mutate(win_avg=t.float_col.mean().over(w1))
result = expr.compile()
expected = """\
SELECT *,
avg(`float_col`) OVER (PARTITION BY `year` ORDER BY `timestamp_col` ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) AS `win_avg`
FROM `{}.testing.functional_alltypes`""".format( # noqa: E501
project_id
)
assert result == expected
w2 = ibis.window(
preceding=0, following=2, group_by='year', order_by='timestamp_col'
)
expr = t.mutate(win_avg=t.float_col.mean().over(w2))
result = expr.compile()
expected = """\
SELECT *,
avg(`float_col`) OVER (PARTITION BY `year` ORDER BY `timestamp_col` ROWS BETWEEN CURRENT ROW AND 2 FOLLOWING) AS `win_avg`
FROM `{}.testing.functional_alltypes`""".format( # noqa: E501
project_id
)
assert result == expected
w3 = ibis.window(
preceding=(4, 2), group_by='year', order_by='timestamp_col'
)
expr = t.mutate(win_avg=t.float_col.mean().over(w3))
result = expr.compile()
expected = """\
SELECT *,
avg(`float_col`) OVER (PARTITION BY `year` ORDER BY `timestamp_col` ROWS BETWEEN 4 PRECEDING AND 2 PRECEDING) AS `win_avg`
FROM `{}.testing.functional_alltypes`""".format( # noqa: E501
project_id
)
assert result == expected
def test_range_window_function(alltypes, project_id):
t = alltypes
w = ibis.range_window(
preceding=1, following=0, group_by='year', order_by='month'
)
expr = t.mutate(two_month_avg=t.float_col.mean().over(w))
result = expr.compile()
expected = """\
SELECT *,
avg(`float_col`) OVER (PARTITION BY `year` ORDER BY `month` RANGE BETWEEN 1 PRECEDING AND CURRENT ROW) AS `two_month_avg`
FROM `{}.testing.functional_alltypes`""".format( # noqa: E501
project_id
)
assert result == expected
w3 = ibis.range_window(
preceding=(4, 2), group_by='year', order_by='timestamp_col'
)
expr = t.mutate(win_avg=t.float_col.mean().over(w3))
result = expr.compile()
expected = """\
SELECT *,
avg(`float_col`) OVER (PARTITION BY `year` ORDER BY UNIX_MICROS(`timestamp_col`) RANGE BETWEEN 4 PRECEDING AND 2 PRECEDING) AS `win_avg`
FROM `{}.testing.functional_alltypes`""".format( # noqa: E501
project_id
)
assert result == expected
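# The compiled SQL orders the RANGE window by UNIX_MICROS(`timestamp_col`) (see the expected string
# below), so each interval preceding-bound is expressed in microseconds; a bare integer passes through.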
@pytest.mark.parametrize(
('preceding', 'value'),
[
(5, 5),
(ibis.interval(nanoseconds=1), 0.001),
(ibis.interval(microseconds=1), 1),
(ibis.interval(seconds=1), 1000000),
(ibis.interval(minutes=1), 1000000 * 60),
(ibis.interval(hours=1), 1000000 * 60 * 60),
(ibis.interval(days=1), 1000000 * 60 * 60 * 24),
(2 * ibis.interval(days=1), 1000000 * 60 * 60 * 24 * 2),
(ibis.interval(weeks=1), 1000000 * 60 * 60 * 24 * 7),
],
)
def test_trailing_range_window(alltypes, preceding, value, project_id):
t = alltypes
w = ibis.trailing_range_window(
preceding=preceding, order_by=t.timestamp_col
)
expr = t.mutate(win_avg=t.float_col.mean().over(w))
result = expr.compile()
expected = """\
SELECT *,
avg(`float_col`) OVER (ORDER BY UNIX_MICROS(`timestamp_col`) RANGE BETWEEN {} PRECEDING AND CURRENT ROW) AS `win_avg`
FROM `{}.testing.functional_alltypes`""".format( # noqa: E501
value, project_id
)
assert result == expected
@pytest.mark.parametrize(
('preceding', 'value'), [(ibis.interval(years=1), None)]
)
def test_trailing_range_window_unsupported(alltypes, preceding, value):
t = alltypes
w = ibis.trailing_range_window(
preceding=preceding, order_by=t.timestamp_col
)
expr = t.mutate(win_avg=t.float_col.mean().over(w))
with pytest.raises(ValueError):
expr.compile()
@pytest.mark.parametrize(
('distinct1', 'distinct2', 'expected1', 'expected2'),
[
(True, True, 'UNION DISTINCT', 'UNION DISTINCT'),
(True, False, 'UNION DISTINCT', 'UNION ALL'),
(False, True, 'UNION ALL', 'UNION DISTINCT'),
(False, False, 'UNION ALL', 'UNION ALL'),
],
)
def test_union_cte(
alltypes, distinct1, distinct2, expected1, expected2, project_id
):
t = alltypes
expr1 = t.group_by(t.string_col).aggregate(metric=t.double_col.sum())
expr2 = expr1.view()
expr3 = expr1.view()
expr = expr1.union(expr2, distinct=distinct1).union(
expr3, distinct=distinct2
)
result = expr.compile()
expected = """\
WITH t0 AS (
SELECT `string_col`, sum(`double_col`) AS `metric`
FROM `{project}.testing.functional_alltypes`
GROUP BY 1
)
SELECT *
FROM t0
{}
SELECT `string_col`, sum(`double_col`) AS `metric`
FROM `{project}.testing.functional_alltypes`
GROUP BY 1
{}
SELECT `string_col`, sum(`double_col`) AS `metric`
FROM `{project}.testing.functional_alltypes`
GROUP BY 1""".format(
expected1, expected2, project=project_id
)
assert result == expected
def test_projection_fusion_only_peeks_at_immediate_parent():
schema = [
('file_date', 'timestamp'),
('PARTITIONTIME', 'date'),
('val', 'int64'),
]
table = ibis.table(schema, name='unbound_table')
table = table[table.PARTITIONTIME < ibis.date('2017-01-01')]
table = table.mutate(file_date=table.file_date.cast('date'))
table = table[table.file_date < ibis.date('2017-01-01')]
table = table.mutate(XYZ=table.val * 2)
expr = table.join(table.view())[table]
result = ibis.bigquery.compile(expr)
expected = """\
WITH t0 AS (
SELECT *
FROM unbound_table
WHERE `PARTITIONTIME` < DATE '2017-01-01'
),
t1 AS (
SELECT CAST(`file_date` AS DATE) AS `file_date`, `PARTITIONTIME`, `val`
FROM t0
),
t2 AS (
SELECT t1.*
FROM t1
WHERE t1.`file_date` < DATE '2017-01-01'
),
t3 AS (
SELECT *, `val` * 2 AS `XYZ`
FROM t2
)
SELECT t3.*
FROM t3
CROSS JOIN t3 t4"""
assert result == expected
def test_bool_reducers(alltypes):
b = alltypes.bool_col
expr = b.mean()
result = expr.compile()
expected = """\
SELECT avg(CAST(`bool_col` AS INT64)) AS `mean`
FROM `ibis-gbq.testing.functional_alltypes`"""
assert result == expected
expr2 = b.sum()
result = expr2.compile()
expected = """\
SELECT sum(CAST(`bool_col` AS INT64)) AS `sum`
FROM `ibis-gbq.testing.functional_alltypes`"""
assert result == expected
def test_bool_reducers_where(alltypes):
b = alltypes.bool_col
m = alltypes.month
expr = b.mean(where=m > 6)
result = expr.compile()
expected = """\
SELECT avg(CASE WHEN `month` > 6 THEN CAST(`bool_col` AS INT64) ELSE NULL END) AS `mean`
FROM `ibis-gbq.testing.functional_alltypes`""" # noqa: E501
assert result == expected
expr2 = b.sum(where=((m > 6) & (m < 10)))
result = expr2.compile()
expected = """\
SELECT sum(CASE WHEN (`month` > 6) AND (`month` < 10) THEN CAST(`bool_col` AS INT64) ELSE NULL END) AS `sum`
FROM `ibis-gbq.testing.functional_alltypes`""" # noqa: E501
assert result == expected
def test_approx_nunique(alltypes):
d = alltypes.double_col
expr = d.approx_nunique()
result = expr.compile()
expected = """\
SELECT APPROX_COUNT_DISTINCT(`double_col`) AS `approx_nunique`
FROM `ibis-gbq.testing.functional_alltypes`"""
assert result == expected
b = alltypes.bool_col
m = alltypes.month
expr2 = b.approx_nunique(where=m > 6)
result = expr2.compile()
expected = """\
SELECT APPROX_COUNT_DISTINCT(CASE WHEN `month` > 6 THEN `bool_col` ELSE NULL END) AS `approx_nunique`
FROM `ibis-gbq.testing.functional_alltypes`""" # noqa: E501
assert result == expected
def test_approx_median(alltypes):
d = alltypes.double_col
expr = d.approx_median()
result = expr.compile()
expected = """\
SELECT APPROX_QUANTILES(`double_col`, 2)[OFFSET(1)] AS `approx_median`
FROM `ibis-gbq.testing.functional_alltypes`"""
assert result == expected
m = alltypes.month
expr2 = d.approx_median(where=m > 6)
result = expr2.compile()
expected = """\
SELECT APPROX_QUANTILES(CASE WHEN `month` > 6 THEN `double_col` ELSE NULL END, 2)[OFFSET(1)] AS `approx_median`
FROM `ibis-gbq.testing.functional_alltypes`""" # noqa: E501
assert result == expected
@pytest.mark.parametrize(
('unit', 'expected_unit', 'expected_func'),
[
('Y', 'YEAR', 'TIMESTAMP'),
('Q', 'QUARTER', 'TIMESTAMP'),
('M', 'MONTH', 'TIMESTAMP'),
('W', 'WEEK', 'TIMESTAMP'),
('D', 'DAY', 'TIMESTAMP'),
('h', 'HOUR', 'TIMESTAMP'),
('m', 'MINUTE', 'TIMESTAMP'),
('s', 'SECOND', 'TIMESTAMP'),
('ms', 'MILLISECOND', 'TIMESTAMP'),
('us', 'MICROSECOND', 'TIMESTAMP'),
('Y', 'YEAR', 'DATE'),
('Q', 'QUARTER', 'DATE'),
('M', 'MONTH', 'DATE'),
('W', 'WEEK', 'DATE'),
('D', 'DAY', 'DATE'),
('h', 'HOUR', 'TIME'),
('m', 'MINUTE', 'TIME'),
('s', 'SECOND', 'TIME'),
('ms', 'MILLISECOND', 'TIME'),
('us', 'MICROSECOND', 'TIME'),
],
)
def test_temporal_truncate(unit, expected_unit, expected_func):
t = ibis.table([('a', getattr(dt, expected_func.lower()))], name='t')
expr = t.a.truncate(unit)
result = ibis.bigquery.compile(expr)
expected = """\
SELECT {}_TRUNC(`a`, {}) AS `tmp`
FROM t""".format(
expected_func, expected_unit
)
assert result == expected
@pytest.mark.parametrize('kind', ['date', 'time'])
def test_extract_temporal_from_timestamp(kind):
t = ibis.table([('ts', dt.timestamp)], name='t')
expr = getattr(t.ts, kind)()
result = ibis.bigquery.compile(expr)
expected = """\
SELECT {}(`ts`) AS `tmp`
FROM t""".format(
kind.upper()
)
assert result == expected
def test_now():
expr = ibis.now()
result = ibis.bigquery.compile(expr)
expected = 'SELECT CURRENT_TIMESTAMP() AS `tmp`'
assert result == expected
def test_bucket():
t = ibis.table([('value', 'double')], name='t')
buckets = [0, 1, 3]
expr = t.value.bucket(buckets).name('foo')
result = ibis.bigquery.compile(expr)
expected = """\
SELECT
CASE
WHEN (`value` >= 0) AND (`value` < 1) THEN 0
WHEN (`value` >= 1) AND (`value` <= 3) THEN 1
ELSE CAST(NULL AS INT64)
END AS `tmp`
FROM t"""
assert result == expected
@pytest.mark.parametrize(
('kind', 'begin', 'end', 'expected'),
[
('preceding', None, 1, 'UNBOUNDED PRECEDING AND 1 PRECEDING'),
('following', 1, None, '1 FOLLOWING AND UNBOUNDED FOLLOWING'),
],
)
def test_window_unbounded(kind, begin, end, expected):
t = ibis.table([('a', 'int64')], name='t')
kwargs = {kind: (begin, end)}
expr = t.a.sum().over(ibis.window(**kwargs))
result = ibis.bigquery.compile(expr)
assert (
result
== """\
SELECT sum(`a`) OVER (ROWS BETWEEN {}) AS `tmp`
FROM t""".format(
expected
)
)
|
py | 7dfc99420210838bece86e180a45709d2b33b581 | class TfsWiqlResult:
@staticmethod
def from_json(tfs_client, json_response):
wiql = TfsWiqlResult()
wiql.__tfs_client = tfs_client
wiql.__is_empty = True
wiql.__item_ids = None
if 'workItems' in json_response:
wiql.__item_ids = [int(item['id']) for item in json_response['workItems']]
wiql.__is_empty = not (len(wiql.__item_ids) > 0)
return wiql
@property
def is_empty(self):
return self.__is_empty
def get_workitems(self):
if self.__is_empty:
return None
return self.__tfs_client.get_workitems(self.__item_ids)
|
py | 7dfc998f182b1cdcb64326f628641875d7a19cc9 | import FWCore.ParameterSet.Config as cms
process = cms.Process("DUMP")
process.load("Geometry.HGCalCommonData.testAHcalModuleAlgoXML_cfi")
process.load('FWCore.MessageService.MessageLogger_cfi')
if 'MessageLogger' in process.__dict__:
process.MessageLogger.categories.append('G4cerr')
process.MessageLogger.categories.append('G4cout')
process.MessageLogger.categories.append('HGCalGeom')
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.add_(cms.ESProducer("TGeoMgrFromDdd",
verbose = cms.untracked.bool(False),
level = cms.untracked.int32(14)
))
process.dump = cms.EDAnalyzer("DumpSimGeometry",
outputFileName = cms.untracked.string('ahcalModuleAlgoDDD.root'))
process.p = cms.Path(process.dump)
|
py | 7dfc99ca9c65f7593cf5bf6cc5be2774f8b2f4c9 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1_env_var import V1EnvVar # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1EnvVar(unittest.TestCase):
"""V1EnvVar unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1EnvVar(self):
"""Test V1EnvVar"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes_asyncio.client.models.v1_env_var.V1EnvVar() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 7dfc99d48f8c1b79dcaf5a44263efa3c289cd629 | from cc3d.core.PySteppables import *
class ElasticityLocalSteppable(SteppableBasePy):
def __init__(self, frequency=10):
SteppableBasePy.__init__(self, frequency)
self.links_initialized = False
def initialize_elasticity_local(self):
for cell in self.cellList:
elasticity_data_list = self.get_elasticity_data_list(cell)
for elasticity_data in elasticity_data_list: # visiting all elastic links of 'cell'
target_length = elasticity_data.targetLength
elasticity_data.targetLength = 6.0
elasticity_data.lambdaLength = 200.0
elasticity_neighbor = elasticity_data.neighborAddress
# now we set up elastic link data stored in neighboring cell
neighbor_elasticity_data = None
neighbor_elasticity_data_list = self.get_elasticity_data_list(elasticity_neighbor)
for neighbor_elasticity_data_tmp in neighbor_elasticity_data_list:
if not CompuCell.areCellsDifferent(neighbor_elasticity_data_tmp.neighborAddress, cell):
neighbor_elasticity_data = neighbor_elasticity_data_tmp
break
if neighbor_elasticity_data is None:
raise RuntimeError("None Type returned. Problems with FemDataNeighbors initialization or sets of "
"neighbor_elasticity_data are corrupted")
neighbor_elasticity_data.targetLength = 6.0
neighbor_elasticity_data.lambdaLength = 200.0
def step(self, mcs):
if not self.links_initialized:
self.initialize_elasticity_local()
# adding link between cell.id=1 and cell.id=3
cell1 = None
cell3 = None
for cell in self.cellList:
if cell.id == 1:
cell1 = cell
if cell.id == 3:
cell3 = cell
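        # The numeric arguments mirror the values used above: 200.0 is taken to be the new link's
        # lambdaLength (stiffness) and 6.0 its targetLength; the order is assumed from that parallel.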
self.elasticity_tracker_plugin.addNewElasticLink(cell1, cell3, 200.0, 6.0)
|
py | 7dfc9a3634dcff34964af1f285177b298058fee8 | # encoding: utf-8
"""Unit test suite for docx.opc.pkgreader module"""
from __future__ import absolute_import, print_function, unicode_literals
import pytest
from docx.opc.constants import CONTENT_TYPE as CT, RELATIONSHIP_TARGET_MODE as RTM
from docx.opc.packuri import PackURI
from docx.opc.phys_pkg import _ZipPkgReader
from docx.opc.pkgreader import (
_ContentTypeMap,
PackageReader,
_SerializedPart,
_SerializedRelationship,
_SerializedRelationships,
)
from .unitdata.types import a_Default, a_Types, an_Override
from ..unitutil.mock import (
ANY,
call,
class_mock,
function_mock,
initializer_mock,
instance_mock,
loose_mock,
method_mock,
Mock,
patch,
)
class DescribePackageReader(object):
def it_can_construct_from_pkg_file(
self, _init_, PhysPkgReader_, from_xml, _srels_for, _load_serialized_parts
):
phys_reader = PhysPkgReader_.return_value
content_types = from_xml.return_value
pkg_srels = _srels_for.return_value
sparts = _load_serialized_parts.return_value
pkg_file = Mock(name='pkg_file')
pkg_reader = PackageReader.from_file(pkg_file)
PhysPkgReader_.assert_called_once_with(pkg_file)
from_xml.assert_called_once_with(phys_reader.content_types_xml)
_srels_for.assert_called_once_with(phys_reader, '/')
_load_serialized_parts.assert_called_once_with(
phys_reader, pkg_srels, content_types
)
phys_reader.close.assert_called_once_with()
_init_.assert_called_once_with(ANY, content_types, pkg_srels, sparts)
assert isinstance(pkg_reader, PackageReader)
def it_can_iterate_over_the_serialized_parts(self, iter_sparts_fixture):
pkg_reader, expected_iter_spart_items = iter_sparts_fixture
iter_spart_items = list(pkg_reader.iter_sparts())
assert iter_spart_items == expected_iter_spart_items
def it_can_iterate_over_all_the_srels(self):
# mockery ----------------------
pkg_srels = ['srel1', 'srel2']
sparts = [
Mock(name='spart1', partname='pn1', srels=['srel3', 'srel4']),
Mock(name='spart2', partname='pn2', srels=['srel5', 'srel6']),
]
pkg_reader = PackageReader(None, pkg_srels, sparts)
# exercise ---------------------
generated_tuples = [t for t in pkg_reader.iter_srels()]
# verify -----------------------
expected_tuples = [
('/', 'srel1'),
('/', 'srel2'),
('pn1', 'srel3'),
('pn1', 'srel4'),
('pn2', 'srel5'),
('pn2', 'srel6'),
]
assert generated_tuples == expected_tuples
def it_can_load_serialized_parts(self, _SerializedPart_, _walk_phys_parts):
# test data --------------------
test_data = (
('/part/name1.xml', 'app/vnd.type_1', 'reltype1', '<Part_1/>',
'srels_1'),
('/part/name2.xml', 'app/vnd.type_2', 'reltype2', '<Part_2/>',
'srels_2'),
)
iter_vals = [(t[0], t[2], t[3], t[4]) for t in test_data]
content_types = dict((t[0], t[1]) for t in test_data)
# mockery ----------------------
phys_reader = Mock(name='phys_reader')
pkg_srels = Mock(name='pkg_srels')
_walk_phys_parts.return_value = iter_vals
_SerializedPart_.side_effect = expected_sparts = (
Mock(name='spart_1'), Mock(name='spart_2')
)
# exercise ---------------------
retval = PackageReader._load_serialized_parts(
phys_reader, pkg_srels, content_types
)
# verify -----------------------
expected_calls = [
call('/part/name1.xml', 'app/vnd.type_1', '<Part_1/>',
'reltype1', 'srels_1'),
call('/part/name2.xml', 'app/vnd.type_2', '<Part_2/>',
'reltype2', 'srels_2'),
]
assert _SerializedPart_.call_args_list == expected_calls
assert retval == expected_sparts
def it_can_walk_phys_pkg_parts(self, _srels_for):
# test data --------------------
# +----------+ +--------+
# | pkg_rels |-----> | part_1 |
# +----------+ +--------+
# | | ^
# v v |
# external +--------+ +--------+
# | part_2 |---> | part_3 |
# +--------+ +--------+
partname_1, partname_2, partname_3 = (
'/part/name1.xml', '/part/name2.xml', '/part/name3.xml'
)
part_1_blob, part_2_blob, part_3_blob = (
'<Part_1/>', '<Part_2/>', '<Part_3/>'
)
reltype1, reltype2, reltype3 = ('reltype1', 'reltype2', 'reltype3')
srels = [
Mock(name='rId1', is_external=True),
Mock(name='rId2', is_external=False, reltype=reltype1,
target_partname=partname_1),
Mock(name='rId3', is_external=False, reltype=reltype2,
target_partname=partname_2),
Mock(name='rId4', is_external=False, reltype=reltype1,
target_partname=partname_1),
Mock(name='rId5', is_external=False, reltype=reltype3,
target_partname=partname_3),
]
pkg_srels = srels[:2]
part_1_srels = srels[2:3]
part_2_srels = srels[3:5]
part_3_srels = []
# mockery ----------------------
phys_reader = Mock(name='phys_reader')
_srels_for.side_effect = [part_1_srels, part_2_srels, part_3_srels]
phys_reader.blob_for.side_effect = [
part_1_blob, part_2_blob, part_3_blob
]
# exercise ---------------------
generated_tuples = list(
PackageReader._walk_phys_parts(phys_reader, pkg_srels)
)
# verify -----------------------
expected_tuples = [
(partname_1, part_1_blob, reltype1, part_1_srels),
(partname_2, part_2_blob, reltype2, part_2_srels),
(partname_3, part_3_blob, reltype3, part_3_srels),
]
assert generated_tuples == expected_tuples
def it_can_retrieve_srels_for_a_source_uri(
self, _SerializedRelationships_):
# mockery ----------------------
phys_reader = Mock(name='phys_reader')
source_uri = Mock(name='source_uri')
rels_xml = phys_reader.rels_xml_for.return_value
load_from_xml = _SerializedRelationships_.load_from_xml
srels = load_from_xml.return_value
# exercise ---------------------
retval = PackageReader._srels_for(phys_reader, source_uri)
# verify -----------------------
phys_reader.rels_xml_for.assert_called_once_with(source_uri)
load_from_xml.assert_called_once_with(source_uri.baseURI, rels_xml)
assert retval == srels
# fixtures -------------------------------------------------------
@pytest.fixture
def blobs_(self, request):
blob_ = loose_mock(request, spec=str, name='blob_')
blob_2_ = loose_mock(request, spec=str, name='blob_2_')
return blob_, blob_2_
@pytest.fixture
def content_types_(self, request):
content_type_ = loose_mock(request, spec=str, name='content_type_')
content_type_2_ = loose_mock(request, spec=str, name='content_type_2_')
return content_type_, content_type_2_
@pytest.fixture
def from_xml(self, request):
return method_mock(request, _ContentTypeMap, 'from_xml', autospec=False)
@pytest.fixture
def _init_(self, request):
return initializer_mock(request, PackageReader)
@pytest.fixture
def iter_sparts_fixture(
self, sparts_, partnames_, content_types_, reltypes_, blobs_):
pkg_reader = PackageReader(None, None, sparts_)
expected_iter_spart_items = [
(partnames_[0], content_types_[0], reltypes_[0], blobs_[0]),
(partnames_[1], content_types_[1], reltypes_[1], blobs_[1]),
]
return pkg_reader, expected_iter_spart_items
@pytest.fixture
def _load_serialized_parts(self, request):
return method_mock(
request, PackageReader, '_load_serialized_parts', autospec=False
)
@pytest.fixture
def partnames_(self, request):
partname_ = loose_mock(request, spec=str, name='partname_')
partname_2_ = loose_mock(request, spec=str, name='partname_2_')
return partname_, partname_2_
@pytest.fixture
def PhysPkgReader_(self, request):
_patch = patch(
'docx.opc.pkgreader.PhysPkgReader', spec_set=_ZipPkgReader
)
request.addfinalizer(_patch.stop)
return _patch.start()
@pytest.fixture
def reltypes_(self, request):
reltype_ = instance_mock(request, str, name='reltype_')
reltype_2_ = instance_mock(request, str, name='reltype_2')
return reltype_, reltype_2_
@pytest.fixture
def _SerializedPart_(self, request):
return class_mock(request, 'docx.opc.pkgreader._SerializedPart')
@pytest.fixture
def _SerializedRelationships_(self, request):
return class_mock(
request, 'docx.opc.pkgreader._SerializedRelationships'
)
@pytest.fixture
def sparts_(
self, request, partnames_, content_types_, reltypes_, blobs_):
sparts_ = []
for idx in range(2):
name = 'spart_%s' % (('%d_' % (idx + 1)) if idx else '')
spart_ = instance_mock(
request, _SerializedPart, name=name,
partname=partnames_[idx], content_type=content_types_[idx],
reltype=reltypes_[idx], blob=blobs_[idx]
)
sparts_.append(spart_)
return sparts_
@pytest.fixture
def _srels_for(self, request):
return method_mock(request, PackageReader, '_srels_for', autospec=False)
@pytest.fixture
def _walk_phys_parts(self, request):
return method_mock(request, PackageReader, '_walk_phys_parts', autospec=False)
class Describe_ContentTypeMap(object):
def it_can_construct_from_ct_item_xml(self, from_xml_fixture):
content_types_xml, expected_defaults, expected_overrides = (
from_xml_fixture
)
ct_map = _ContentTypeMap.from_xml(content_types_xml)
assert ct_map._defaults == expected_defaults
assert ct_map._overrides == expected_overrides
def it_matches_an_override_on_case_insensitive_partname(
self, match_override_fixture):
ct_map, partname, content_type = match_override_fixture
assert ct_map[partname] == content_type
def it_falls_back_to_case_insensitive_extension_default_match(
self, match_default_fixture):
ct_map, partname, content_type = match_default_fixture
assert ct_map[partname] == content_type
def it_should_raise_on_partname_not_found(self):
ct_map = _ContentTypeMap()
with pytest.raises(KeyError):
ct_map[PackURI('/!blat/rhumba.1x&')]
def it_should_raise_on_key_not_instance_of_PackURI(self):
ct_map = _ContentTypeMap()
ct_map._overrides = {PackURI('/part/name1.xml'): 'app/vnd.type1'}
with pytest.raises(KeyError):
ct_map['/part/name1.xml']
# fixtures ---------------------------------------------
@pytest.fixture
def from_xml_fixture(self):
entries = (
('Default', 'xml', CT.XML),
('Default', 'PNG', CT.PNG),
('Override', '/ppt/presentation.xml', CT.PML_PRESENTATION_MAIN),
)
content_types_xml = self._xml_from(entries)
expected_defaults = {}
expected_overrides = {}
for entry in entries:
if entry[0] == 'Default':
ext = entry[1].lower()
content_type = entry[2]
expected_defaults[ext] = content_type
elif entry[0] == 'Override':
partname, content_type = entry[1:]
expected_overrides[partname] = content_type
return content_types_xml, expected_defaults, expected_overrides
@pytest.fixture(params=[
('/foo/bar.xml', 'xml', 'application/xml'),
('/foo/bar.PNG', 'png', 'image/png'),
('/foo/bar.jpg', 'JPG', 'image/jpeg'),
])
def match_default_fixture(self, request):
partname_str, ext, content_type = request.param
partname = PackURI(partname_str)
ct_map = _ContentTypeMap()
ct_map._add_override(PackURI('/bar/foo.xyz'), 'application/xyz')
ct_map._add_default(ext, content_type)
return ct_map, partname, content_type
@pytest.fixture(params=[
('/foo/bar.xml', '/foo/bar.xml'),
('/foo/bar.xml', '/FOO/Bar.XML'),
('/FoO/bAr.XmL', '/foo/bar.xml'),
])
def match_override_fixture(self, request):
partname_str, should_match_partname_str = request.param
partname = PackURI(partname_str)
should_match_partname = PackURI(should_match_partname_str)
content_type = 'appl/vnd-foobar'
ct_map = _ContentTypeMap()
ct_map._add_override(partname, content_type)
return ct_map, should_match_partname, content_type
def _xml_from(self, entries):
"""
Return XML for a [Content_Types].xml based on items in *entries*.
"""
types_bldr = a_Types().with_nsdecls()
for entry in entries:
if entry[0] == 'Default':
ext, content_type = entry[1:]
default_bldr = a_Default()
default_bldr.with_Extension(ext)
default_bldr.with_ContentType(content_type)
types_bldr.with_child(default_bldr)
elif entry[0] == 'Override':
partname, content_type = entry[1:]
override_bldr = an_Override()
override_bldr.with_PartName(partname)
override_bldr.with_ContentType(content_type)
types_bldr.with_child(override_bldr)
return types_bldr.xml()
class Describe_SerializedPart(object):
def it_remembers_construction_values(self):
# test data --------------------
partname = '/part/name.xml'
content_type = 'app/vnd.type'
reltype = 'http://rel/type'
blob = '<Part/>'
srels = 'srels proxy'
# exercise ---------------------
spart = _SerializedPart(partname, content_type, reltype, blob, srels)
# verify -----------------------
assert spart.partname == partname
assert spart.content_type == content_type
assert spart.reltype == reltype
assert spart.blob == blob
assert spart.srels == srels
class Describe_SerializedRelationship(object):
def it_remembers_construction_values(self):
# test data --------------------
rel_elm = Mock(
name='rel_elm', rId='rId9', reltype='ReLtYpE',
target_ref='docProps/core.xml', target_mode=RTM.INTERNAL
)
# exercise ---------------------
srel = _SerializedRelationship('/', rel_elm)
# verify -----------------------
assert srel.rId == 'rId9'
assert srel.reltype == 'ReLtYpE'
assert srel.target_ref == 'docProps/core.xml'
assert srel.target_mode == RTM.INTERNAL
def it_knows_when_it_is_external(self):
cases = (RTM.INTERNAL, RTM.EXTERNAL, 'FOOBAR')
expected_values = (False, True, False)
for target_mode, expected_value in zip(cases, expected_values):
rel_elm = Mock(name='rel_elm', rId=None, reltype=None,
target_ref=None, target_mode=target_mode)
srel = _SerializedRelationship(None, rel_elm)
assert srel.is_external is expected_value
def it_can_calculate_its_target_partname(self):
# test data --------------------
cases = (
('/', 'docProps/core.xml', '/docProps/core.xml'),
('/ppt', 'viewProps.xml', '/ppt/viewProps.xml'),
('/ppt/slides', '../slideLayouts/slideLayout1.xml',
'/ppt/slideLayouts/slideLayout1.xml'),
)
for baseURI, target_ref, expected_partname in cases:
# setup --------------------
rel_elm = Mock(name='rel_elm', rId=None, reltype=None,
target_ref=target_ref, target_mode=RTM.INTERNAL)
# exercise -----------------
srel = _SerializedRelationship(baseURI, rel_elm)
# verify -------------------
assert srel.target_partname == expected_partname
def it_raises_on_target_partname_when_external(self):
rel_elm = Mock(
name='rel_elm', rId='rId9', reltype='ReLtYpE',
target_ref='docProps/core.xml', target_mode=RTM.EXTERNAL
)
srel = _SerializedRelationship('/', rel_elm)
with pytest.raises(ValueError):
srel.target_partname
class Describe_SerializedRelationships(object):
def it_can_load_from_xml(self, parse_xml_, _SerializedRelationship_):
# mockery ----------------------
baseURI, rels_item_xml, rel_elm_1, rel_elm_2 = (
Mock(name='baseURI'), Mock(name='rels_item_xml'),
Mock(name='rel_elm_1'), Mock(name='rel_elm_2'),
)
rels_elm = Mock(
name='rels_elm', Relationship_lst=[rel_elm_1, rel_elm_2]
)
parse_xml_.return_value = rels_elm
# exercise ---------------------
srels = _SerializedRelationships.load_from_xml(
baseURI, rels_item_xml)
# verify -----------------------
expected_calls = [
call(baseURI, rel_elm_1),
call(baseURI, rel_elm_2),
]
parse_xml_.assert_called_once_with(rels_item_xml)
assert _SerializedRelationship_.call_args_list == expected_calls
assert isinstance(srels, _SerializedRelationships)
def it_should_be_iterable(self):
srels = _SerializedRelationships()
try:
for x in srels:
pass
except TypeError:
msg = "_SerializedRelationships object is not iterable"
pytest.fail(msg)
# fixtures ---------------------------------------------
@pytest.fixture
def parse_xml_(self, request):
return function_mock(request, 'docx.opc.pkgreader.parse_xml')
@pytest.fixture
def _SerializedRelationship_(self, request):
return class_mock(
request, 'docx.opc.pkgreader._SerializedRelationship'
)
|
py | 7dfc9b3b9b9c346f04a653343f1c11cd17a0b79a | '''
Created on Sep 6, 2014
@author: moloyc
'''
import unittest
import os
import shutil
import json
from webtest import TestApp, AppError
from jnpr.openclos.rest import RestServer, webServerRoot, junosImageRoot
from test_dao import InMemoryDao
configLocation = webServerRoot
imageLocation = junosImageRoot
class TestRest(unittest.TestCase):
def setUp(self):
if not os.path.exists(configLocation):
os.makedirs(configLocation)
self._dao = InMemoryDao.getInstance()
self._conf = {'httpServer': {'ipAddr': '1.2.3.4', 'port': 9090}}
self.restServer = RestServer(self._conf, InMemoryDao)
self.restServer.initRest()
self.restServerTestApp = TestApp(self.restServer.app)
def tearDown(self):
shutil.rmtree(os.path.join(configLocation, 'test1'), ignore_errors=True)
self.restServer._reset()
InMemoryDao._destroy()
def testInit(self):
self.assertEqual('1.2.3.4', self.restServer.host)
self.assertEqual(9090, self.restServer.port)
self.assertEqual('http://1.2.3.4:9090', self.restServer.baseUrl)
def testGetIndexNoPodNoDevice(self):
response = self.restServerTestApp.get('/openclos')
self.assertEqual(200, response.status_int)
self.assertEqual(2, len(response.json['links']))
def testGetPodsNoPod(self):
response = self.restServerTestApp.get('/openclos/pods')
self.assertEqual(200, response.status_int)
self.assertEqual(0, len(response.json['pods']['pod']))
def setupRestWithTwoDevices(self, session):
from test_model import createDevice
self.device1 = createDevice(session, "test1")
self.device2 = createDevice(session, "test2")
def setupRestWithTwoPods(self, session):
from test_model import createPod
self.pod1 = createPod("test1", session)
self.pod2 = createPod("test2", session)
def testGetPods(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoDevices(session)
device1PodId = self.device1.pod_id
device2PodId = self.device2.pod_id
response = self.restServerTestApp.get('/openclos/pods')
self.assertEqual(200, response.status_int)
self.assertEqual(2, len(response.json['pods']['pod']))
self.assertTrue("/openclos/pods/"+device1PodId in response.json['pods']['pod'][0]['uri'])
self.assertTrue("/openclos/pods/"+device2PodId in response.json['pods']['pod'][1]['uri'])
def testGetDevicesNonExistingPod(self):
with self.assertRaises(AppError) as e:
self.restServerTestApp.get('/openclos/pods/' + 'nonExisting'+'/devices')
self.assertTrue('404 Not Found' in e.exception.message)
def testGetDevices(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoDevices(session)
device1PodId = self.device1.pod_id
device1Id = self.device1.id
response = self.restServerTestApp.get('/openclos/pods/'+device1PodId+'/devices')
self.assertEqual(200, response.status_int)
self.assertEqual(1, len(response.json['devices']['device']))
self.assertTrue("/openclos/pods/"+device1PodId+"/devices/"+device1Id in response.json['devices']['device'][0]['uri'])
def testGetDeviceNonExistingDevice(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoPods(session)
pod1Id = self.pod1.id
with self.assertRaises(AppError) as e:
self.restServerTestApp.get('/openclos/pods/' +pod1Id+'/devices/'+'nonExisting')
self.assertTrue('404 Not Found' in e.exception.message)
def testGetDevice(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoDevices(session)
device1PodId = self.device1.pod_id
device1Id = self.device1.id
deviceName = self.device1.name
deviceFamily = self.device1.family
response = self.restServerTestApp.get('/openclos/pods/'+device1PodId+'/devices/'+device1Id)
self.assertEqual(200, response.status_int)
self.assertEqual(deviceName, response.json['device']['name'])
self.assertEqual(deviceFamily, response.json['device']['family'])
self.assertTrue('/openclos/pods/' + device1PodId in response.json['device']['pod']['uri'])
def testGetIndex(self):
response = self.restServerTestApp.get('/')
self.assertEqual(302, response.status_int)
def testGetConfigNoDevice(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoDevices(session)
device1PodId = self.device1.pod_id
with self.assertRaises(AppError) as e:
self.restServerTestApp.get('/openclos/pods/'+device1PodId+'/devices/'+'nonExisting'+'/config')
self.assertTrue('404 Not Found' in e.exception.message)
self.assertTrue('No device found' in e.exception.message)
def testGetConfigNoConfigFile(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoDevices(session)
podId = self.device1.pod_id
deviceId = self.device1.id
with self.assertRaises(AppError) as e:
self.restServerTestApp.get('/openclos/pods/'+podId+'/devices/'+deviceId+'/config')
self.assertTrue('404 Not Found' in e.exception.message)
self.assertTrue('Device exists but no config found' in e.exception.message)
def testGetConfig(self):
from jnpr.openclos.model import DeviceConfig
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoDevices(session)
self.device1.config = DeviceConfig(self.device1.id, "testconfig")
podId = self.device1.pod_id
deviceId = self.device1.id
response = self.restServerTestApp.get('/openclos/pods/'+podId+'/devices/'+deviceId+'/config')
self.assertEqual(200, response.status_int)
self.assertEqual("testconfig", response.body)
def testGetDeviceConfigsInZip(self):
from jnpr.openclos.model import DeviceConfig
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoDevices(session)
self.device1.config = DeviceConfig(self.device1.id, "testconfig")
podId = self.device1.pod_id
deviceId = self.device1.id
response = self.restServerTestApp.get('/openclos/pods/'+podId+'/device-configuration')
self.assertEqual(200, response.status_int)
self.assertEqual('application/zip', response.headers.get('Content-Type'))
import StringIO
import zipfile
buff = StringIO.StringIO(response.body)
archive = zipfile.ZipFile(buff, "r")
self.assertEqual(1, len(archive.namelist()))
def testGetDeviceConfigsInZipUnknownPod(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoDevices(session)
podDir = os.path.join(configLocation, self.device1.pod_id+'-test1')
if not os.path.exists(podDir):
os.makedirs(podDir)
open(os.path.join(podDir, self.device1.id+'-test1.conf'), "a")
with self.assertRaises(AppError) as e:
self.restServerTestApp.get('/openclos/pods/UNOKNOWN/device-configuration')
self.assertTrue('404 Not Found' in e.exception.message)
shutil.rmtree(podDir, ignore_errors=True)
def testGetJunosImage404(self):
with self.assertRaises(AppError) as e:
self.restServerTestApp.get('/openclos/images/abcd.tgz')
self.assertTrue('404 Not Found' in e.exception.message)
def testGetJunosImage(self):
open(os.path.join(imageLocation, 'efgh.tgz'), "a")
response = self.restServerTestApp.get('/openclos/images/efgh.tgz')
self.assertEqual(200, response.status_int)
os.remove(os.path.join(imageLocation, 'efgh.tgz'))
def testGetPod(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoPods(session)
pod1Id = self.pod1.id
pod1Name = self.pod1.name
pod1SpineDeviceType = self.pod1.spineDeviceType
response = self.restServerTestApp.get('/openclos/pods/' + pod1Id)
self.assertEqual(200, response.status_int)
self.assertEqual(pod1Id, response.json['pod']['id'])
self.assertEqual(pod1Name, response.json['pod']['name'])
self.assertEqual(pod1SpineDeviceType, response.json['pod']['spineDeviceType'])
self.assertTrue('/openclos/pods/' + pod1Id + '/cabling-plan' in response.json['pod']['cablingPlan']['uri'])
self.assertTrue('/openclos/pods/' + pod1Id + '/devices' in response.json['pod']['devices']['uri'])
def testGetgetNonExistingPod(self):
with self.assertRaises(AppError) as e:
self.restServerTestApp.get('/openclos/pods/' + 'nonExisting')
self.assertTrue('404 Not Found' in e.exception.message)
def testGetNonExistingCablingPlan(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoPods(session)
pod1Id = self.pod1.id
with self.assertRaises(AppError) as e:
self.restServerTestApp.get('/openclos/pods/'+pod1Id+'/cabling-plan',headers = {'Accept':'application/json'})
self.assertTrue('404 Not Found' in e.exception.message)
def testGetCablingPlanJson(self):
from jnpr.openclos.model import CablingPlan
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoPods(session)
cablingPlan = CablingPlan(self.pod1.id, 'cabling json')
self.pod1.cablingPlan = cablingPlan
pod1Id = self.pod1.id
response = self.restServerTestApp.get('/openclos/pods/'+pod1Id+'/cabling-plan',headers = {'Accept':'application/json'})
self.assertEqual(200, response.status_int)
self.assertEqual('cabling json', response.body)
def testGetCablingPlanDot(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoPods(session)
cablingPlanLocation = os.path.join(configLocation, self.pod1.id+'-'+self.pod1.name)
if not os.path.exists(os.path.join(cablingPlanLocation)):
os.makedirs((os.path.join(cablingPlanLocation)))
ls = open(os.path.join(cablingPlanLocation, 'cablingPlan.dot'), "a+")
pod1Id = self.pod1.id
response = self.restServerTestApp.get('/openclos/pods/'+pod1Id+'/cabling-plan',headers = {'Accept':'application/dot'})
self.assertEqual(200, response.status_int)
ls.close()
shutil.rmtree(cablingPlanLocation, ignore_errors=True)
def testGetZtpConfig(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoPods(session)
ztpConfigLocation = os.path.join(configLocation, self.pod1.id+'-'+self.pod1.name)
if not os.path.exists(os.path.join(ztpConfigLocation)):
os.makedirs((os.path.join(ztpConfigLocation)))
ls = open(os.path.join(ztpConfigLocation, 'dhcpd.conf'), "a+")
pod1Id = self.pod1.id
response = self.restServerTestApp.get('/openclos/pods/'+pod1Id+'/ztp-configuration')
self.assertEqual(200, response.status_int)
ls.close()
shutil.rmtree(ztpConfigLocation, ignore_errors=True)
def testGetNonExistingZtpConfig(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoPods(session)
pod1Id = self.pod1.id
with self.assertRaises(AppError) as e:
self.restServerTestApp.get('/openclos/pods/'+pod1Id+'/ztp-configuration')
self.assertTrue('404 Not Found' in e.exception.message)
def testgetOpenClosConfigParams(self):
self.tearDown()
restServer = RestServer({}, InMemoryDao)
restServer.initRest()
self.restServerTestApp = TestApp(restServer.app)
response = self.restServerTestApp.get('/openclos/conf')
self.assertEqual(200, response.status_int)
self.assertTrue(response.json['OpenClosConf']['httpServer'].has_key('port'))
self.assertTrue(response.json['OpenClosConf']['snmpTrap']['openclos_trap_group'].has_key('port'))
self.assertEquals(14, len(response.json['OpenClosConf']['supportedDevices']))
def testdeletePod(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoPods(session)
pod1Id = self.pod1.id
response = self.restServerTestApp.delete('/openclos/pods/'+pod1Id)
self.assertEqual(204, response.status_int)
response = self.restServerTestApp.get('/openclos/pods')
self.assertEqual(1, response.json['pods']['total'])
def testDeleteNonExistingPod(self):
with self.assertRaises(AppError) as e:
self.restServerTestApp.delete('/openclos/pods/' + 'nonExisting')
self.assertTrue('404 Not Found', e.exception.message)
def testCreatePodWithPostBodyEmpty(self):
response = self.restServerTestApp.post('/openclos/pods', headers = {'Content-Type':'application/json'}, expect_errors = True)
self.assertEqual(400, response.status_int)
self.assertTrue('No json in request object' in response.json['errorMessage'] )
def testCreatePodWithPost(self):
self.tearDown()
self._conf['deviceFamily'] = {
"qfx5100-24q-2p": {
"ports": 'et-0/0/[0-23]'
},
"qfx5100-48s-6q": {
"uplinkPorts": 'et-0/0/[48-53]',
"downlinkPorts": 'xe-0/0/[0-47]'
},
"ex4300-24p": {
"uplinkPorts": 'et-0/1/[0-3]',
"downlinkPorts": 'ge-0/0/[0-23]'
}
}
restServer = RestServer(self._conf, InMemoryDao)
restServer.initRest()
self.restServerTestApp = TestApp(restServer.app)
pod = {
"pod": {
"name": "test12321",
"spineDeviceType": "qfx5100-24q-2p",
"spineCount": 2,
"spineAS": 5,
"leafSettings": [{"deviceType": "ex4300-24p"},{"deviceType": "qfx5100-48s-6q"}],
"leafCount": 3,
"leafAS": 10,
"topologyType": "threeStage",
"loopbackPrefix": "12.1.1.1/21",
"vlanPrefix": "15.1.1.1/21",
"interConnectPrefix": "14.1.1.1/21",
"outOfBandAddressList": "10.204.244.95",
"managementPrefix": "192.168.2.1/24",
"description": "test12321",
"hostOrVmCountPerLeaf": 254,
"devicePassword": "viren123",
"outOfBandGateway": "192.168.2.1",
"devices": [
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test12321-spine-0", "username": "root", "password": "viren123", "serialNumber":"1234567", "deployStatus": "deploy"},
{"role": "spine", "family": "qfx5100-24q-2p", "name": "test12321-spine-1", "serialNumber":"JNPR-1234" },
{"role": "leaf", "family": "qfx5100-48s-6q", "name": "test12321-leaf-0", "serialNumber":"JNPR-3456", "deployStatus": "deploy"},
{"role": "leaf", "family": "qfx5100-48s-6q", "name": "test12321-leaf-1", "serialNumber":"JNPR-5678", "deployStatus": "deploy"},
{"role": "leaf", "name": "test12321-leaf-2"}
]
}
}
response = self.restServerTestApp.post('/openclos/pods', headers = {'Content-Type':'application/json'}, params=json.dumps(pod))
self.assertEqual(201, response.status_int)
response = self.restServerTestApp.get('/openclos/pods')
self.assertEqual(200, response.status_int)
self.assertEqual(1, len(response.json['pods']['pod']))
def testReconfigurePodWithPostBodyEmpty(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoPods(session)
pod1Id = self.pod1.id
response = self.restServerTestApp.put('/openclos/pods/'+pod1Id, headers = {'Content-Type':'application/json'}, expect_errors = True)
self.assertEqual(400, response.status_int)
self.assertTrue('No json in request object' in response.json['errorMessage'] )
def testUpdatePodWithInvalidRole(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoPods(session)
pod1Id = self.pod1.id
podDetails = {
"pod": {
"name": "moloy1",
"spineDeviceType": "qfx5100-24q-2p",
"spineCount": 2,
"spineAS": 100,
"deviceType": "qfx5100-48s-6q",
"leafCount": 1,
"leafAS": 200,
"topologyType": "threeStage",
"loopbackPrefix": "1.1.1.1",
"vlanPrefix": "3.3.3.3",
"interConnectPrefix": "2.2.2.2",
"outOfBandAddressList": "10.204.244.95",
"outOfBandGateway": "10.204.244.254",
"managementPrefix": "4.4.4.0/24",
"description": "moloy 11111111",
"hostOrVmCountPerLeaf": 254,
"devices": [
{
"role": "test",
"name": "pparam_Test1-spine-0",
"username": "root",
"password": "Test123!",
"serialNumber":"JNPR-1234567"
},
]
}
}
response = self.restServerTestApp.put('/openclos/pods/'+pod1Id, params=json.dumps(podDetails), headers = {'Content-Type':'application/json'}, expect_errors = True)
self.assertEqual(400, response.status_int)
self.assertTrue('Unexpected role value' in response.json['errorMessage'] )
def testGetLeafGenericConfiguration404(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithTwoPods(session)
pod1Id = self.pod1.id
with self.assertRaises(AppError) as e:
self.restServerTestApp.get('/openclos/pods/'+pod1Id+'/leaf-generic-configurations/qfx5100-48s-6q')
self.assertTrue('404 Not Found' in e.exception.message)
self.assertTrue('Pod exists but no leaf generic config' in e.exception.message)
self.assertTrue('qfx5100-48s-6q' in e.exception.message)
def setupRestWithPodAndGenericConfig(self, session):
from test_model import createPod
from jnpr.openclos.model import LeafSetting
self.pod1 = createPod("test1", session)
leafSetting = LeafSetting('qfx5100-48s-6q', self.pod1.id, config = "testConfig abcd")
self.pod1.leafSettings = [leafSetting]
session.merge(self.pod1)
def testGetLeafGenericConfiguration(self):
with self._dao.getReadWriteSession() as session:
self.setupRestWithPodAndGenericConfig(session)
pod1Id = self.pod1.id
response = self.restServerTestApp.get('/openclos/pods/'+pod1Id+'/leaf-generic-configurations/qfx5100-48s-6q')
self.assertEqual(200, response.status_int)
self.assertTrue('testConfig abcd' in response.body)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
py | 7dfc9bdf20cd22c40241417b939548671696635a | """
This module implements methods for generating random connections between nodes in a graph.
Method generate() will create all the necessary connections for the graph:
dataset <-> dataset collection
system <-> system collection
dataset collection <-> collection
system collection <-> collection
dataset read <-> system input
dataset write <-> system output
"""
from itertools import islice
import random
class ConnectionGenerator:
"""
A class to generate random connections between node ids, based on distribution maps.
...
Attributes:
dataset_count: Integer of how many datasets are in a graph.
dataset_count_map: Dictionary int:int that maps number of datasets in collection to count of its collections.
system_count: Integer of how many systems are in a graph.
system_count_map: Dictionary int:int that maps number of systems in collection to count of system collections.
dataset_read_count: Integer of how many dataset reads are in a graph.
dataset_write_count: Integer of how many dataset writes are in a graph.
system_input_count: Integer of how many system inputs are in a graph.
system_output_count: Integer of how many system outputs are in a graph.
dataset_read_count_map: Dictionary int:int that maps number of system inputs of dataset read to count of
dataset reads.
system_input_count_map: Dictionary int:int that maps number of dataset reads by system input to count of
system inputs.
dataset_write_count_map: Dictionary int:int that maps number of system outputs of dataset write to count of
dataset writes.
system_output_count_map: Dictionary int:int that maps number of dataset writes by system output to count of
system outputs.
dataset_collections_conn_collection: Dictionary int:[int] that maps collection id to dataset collection ids.
system_collections_conn_collection: Dictionary int:[int] that maps collection id to system collection ids.
datasets_conn_collection: Dictionary int:[int] that maps dataset collection id to dataset ids.
systems_conn_collection: Dictionary int:[int] that maps system collection id to system ids.
dataset_read_conn_systems: Dictionary int:[int] that maps dataset read id to system ids this dataset inputs to.
dataset_write_conn_systems: Dictionary int:[int] that maps dataset write id to system ids this dataset outputs from.
Methods:
get_one_to_many_connections()
Creates connections between an element and a group. Each element belongs to one group exactly.
get_many_to_many_connections()
Creates connections between two groups with many to many relationship.
_dataset_to_dataset_collection()
Generates dataset - dataset collection connections.
_system_to_system_collection()
Generates system - system collection connections.
_dataset_read_to_system_input()
Generates connections between dataset reads and system inputs.
_dataset_write_to_system_output()
Generates connections between dataset write and system outputs.
generate()
Generates all the needed connections for data dependency mapping graph.
"""
def __init__(self, dataset_params, system_params, dataset_to_system_params, collection_params):
"""
Args:
dataset_params: DatasetParams object.
system_params: SystemParams object.
dataset_to_system_params: DatasetToSystemParams object.
collection_params: CollectionParams object.
"""
self.dataset_count = dataset_params.dataset_count
self.dataset_count_map = collection_params.dataset_count_map
self.dataset_collection_count = collection_params.dataset_collection_count
self.dataset_collection_count_map = collection_params.dataset_collection_count_map
self.system_count = system_params.system_count
self.system_count_map = collection_params.system_count_map
self.system_collection_count = collection_params.system_collection_count
self.system_collection_count_map = collection_params.system_collection_count_map
self.dataset_read_count = dataset_to_system_params.dataset_read_count
self.dataset_write_count = dataset_to_system_params.dataset_write_count
self.system_input_count = dataset_to_system_params.system_input_count
self.system_output_count = dataset_to_system_params.system_output_count
self.dataset_read_count_map = dataset_to_system_params.dataset_read_count_map
self.system_input_count_map = dataset_to_system_params.system_input_count_map
self.dataset_write_count_map = dataset_to_system_params.dataset_write_count_map
self.system_output_count_map = dataset_to_system_params.system_output_count_map
self.dataset_collections_conn_collection = {}
self.system_collections_conn_collection = {}
self.datasets_conn_collection = {}
self.systems_conn_collection = {}
self.dataset_read_conn_systems = {}
self.dataset_write_conn_systems = {}
@staticmethod
def get_one_to_many_connections(element_count, element_count_map):
"""Generate group id for each element, based on number of element in group distribution.
Args:
element_count: Total number of elements.
element_count_map: Dictionary int:int that maps element count in a group to number of groups with that count.
Returns:
Dictionary int:[int] that maps group id to a list of element ids.
"""
# Create element ids.
element_values = list(range(1, element_count + 1))
# Get number of elements for each group id from their count.
elements_per_group = [i for i in element_count_map for _ in range(element_count_map[i])]
# Randomise element ids and group ids.
random.shuffle(element_values)
random.shuffle(elements_per_group)
# Split element ids into chunks to get connections for each group.
group_to_elements = {}
last_index = 0
for i in range(len(elements_per_group)):
group_to_elements[i + 1] = element_values[last_index:last_index + elements_per_group[i]]
last_index += elements_per_group[i]
        # In case we don't have a full config - assign the rest of the elements to the last group.
        if last_index != element_count:
            group_to_elements[len(elements_per_group)] += element_values[last_index:]
return group_to_elements
@staticmethod
def get_many_to_many_connections(element_1_count, element_2_count, element_1_count_map, element_2_count_map):
"""Generates random connections between elements of type 1 and type 2 that have many-to-many relationship.
Generation is based on element count maps. The output distribution is expected to be exact for most counts,
except for large element group outliers.
Args:
element_1_count: Total number of elements of type 1.
element_2_count: Total number of elements of type 2.
element_1_count_map: Dictionary int:int that maps element 1 count in element 2 group to number of elements 2.
element_2_count_map: Dictionary int:int that maps element 2 count in element 1 group to number of elements 1.
Returns:
Dictionary that maps group 1 id to a list of group 2 ids.
"""
# Count zeros for each group.
element_1_zeros = element_1_count_map[0] if 0 in element_1_count_map else 0
element_2_zeros = element_2_count_map[0] if 0 in element_2_count_map else 0
# Create element ids.
element_1_values = list(range(1, element_1_count - element_1_zeros + 1))
element_2_values = list(range(1, element_2_count - element_2_zeros + 1))
# Get number of elements in each group and remove groups with 0 elements.
elements_per_group_1 = [i for i in element_1_count_map for j in range(element_1_count_map[i]) if i != 0]
elements_per_group_2 = [i for i in element_2_count_map for j in range(element_2_count_map[i]) if i != 0]
element_1_group_counter = {i + 1: elements_per_group_1[i] for i in range(len(elements_per_group_1))}
element_2_group_counter = {i + 1: elements_per_group_2[i] for i in range(len(elements_per_group_2))}
# Create connection dictionary.
element_1_conn_element_2 = {i: set() for i in element_1_values}
# Loop until any group runs out of elements.
while element_1_values and element_2_values:
# Generate a random connection
element_1_gen = random.choice(element_1_values)
element_2_gen = random.choice(element_2_values)
# Check if connection doesn't already exist.
if not element_2_gen in element_1_conn_element_2[element_1_gen]:
# Add to existing connections and reduce count.
element_1_conn_element_2[element_1_gen].add(element_2_gen)
element_1_group_counter[element_1_gen] -= 1
element_2_group_counter[element_2_gen] -= 1
# If have all needed number of connections, remove id from possible options.
if element_1_group_counter[element_1_gen] == 0:
element_1_values.remove(element_1_gen)
if element_2_group_counter[element_2_gen] == 0:
element_2_values.remove(element_2_gen)
# Check if all leftover elements aren't already included in this group.
elif set(element_2_values).issubset(element_1_conn_element_2[element_1_gen]):
element_1_values.remove(element_1_gen)
return element_1_conn_element_2
def _system_collection_to_collection(self):
"""Generates collection - system collection one to many connections."""
self.system_collections_conn_collection = self.get_one_to_many_connections(self.system_collection_count,
self.system_collection_count_map)
def _dataset_collection_to_collection(self):
"""Generates collection - dataset collection one to many connections."""
self.dataset_collections_conn_collection = self.get_one_to_many_connections(self.dataset_collection_count,
self.dataset_collection_count_map)
def _dataset_to_dataset_collection(self):
"""Generates dataset collection - dataset one to many connections."""
self.datasets_conn_collection = self.get_one_to_many_connections(self.dataset_count, self.dataset_count_map)
def _system_to_system_collection(self):
"""Generates system collection - system one to many connections."""
self.systems_conn_collection = self.get_one_to_many_connections(self.system_count, self.system_count_map)
def _dataset_read_to_system_input(self):
"""Generates dataset reads and system inputs many to many connections."""
self.dataset_read_conn_systems = self.get_many_to_many_connections(self.dataset_read_count,
self.system_input_count,
self.dataset_read_count_map,
self.system_input_count_map)
def _dataset_write_to_system_output(self):
"""Generates dataset write and system outputs many to many connections."""
self.dataset_write_conn_systems = self.get_many_to_many_connections(self.dataset_write_count,
self.system_output_count,
self.dataset_write_count_map,
self.system_output_count_map)
def generate(self):
"""Generate all connections for a graph."""
self._dataset_collection_to_collection()
self._system_collection_to_collection()
self._dataset_to_dataset_collection()
self._system_to_system_collection()
self._dataset_read_to_system_input()
self._dataset_write_to_system_output()
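# A minimal standalone sketch of the two static helpers above (assumption: the
# *_params objects required by the constructor live elsewhere in this project
# and are not needed just to illustrate the distribution-driven helpers).
if __name__ == '__main__':
    random.seed(0)
    # Six elements split into four groups: two groups of one and two groups of two.
    print(ConnectionGenerator.get_one_to_many_connections(
        element_count=6, element_count_map={1: 2, 2: 2}))
    # Three elements of each type, every element taking part in exactly one connection.
    print(ConnectionGenerator.get_many_to_many_connections(
        element_1_count=3, element_2_count=3,
        element_1_count_map={1: 3}, element_2_count_map={1: 3}))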
|
py | 7dfc9c0b24889a5236d72e369ccbf331dab74e13 | #!/usr/bin/env python
"""Module to setup an RFC2136-capable DNS server"""
import os
import os.path
import shutil
import socket
import subprocess
import sys
import tempfile
import time
from types import TracebackType
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from pkg_resources import resource_filename
BIND_DOCKER_IMAGE = "internetsystemsconsortium/bind9:9.16"
BIND_BIND_ADDRESS = ("127.0.0.1", 45953)
# A TCP DNS message which is a query for '. CH A' transaction ID 0xcb37. This is used
# by _wait_until_ready to check that BIND is responding without depending on dnspython.
BIND_TEST_QUERY = bytearray.fromhex("0011cb37000000010000000000000000010003")
class DNSServer:
"""
DNSServer configures and handles the lifetime of an RFC2136-capable server.
DNServer provides access to the dns_xdist parameter, listing the address and port
to use for each pytest node.
At this time, DNSServer should only be used with a single node, but may be expanded in
future to support parallelization (https://github.com/certbot/certbot/issues/8455).
"""
def __init__(self, unused_nodes: List[str], show_output: bool = False) -> None:
"""
Create an DNSServer instance.
:param list nodes: list of node names that will be setup by pytest xdist
:param bool show_output: if True, print the output of the DNS server
"""
self.bind_root = tempfile.mkdtemp()
self.process: Optional[subprocess.Popen] = None
self.dns_xdist = {"address": BIND_BIND_ADDRESS[0], "port": BIND_BIND_ADDRESS[1]}
# Unfortunately the BIND9 image forces everything to stderr with -g and we can't
# modify the verbosity.
# pylint: disable=consider-using-with
self._output = sys.stderr if show_output else open(os.devnull, "w")
def start(self) -> None:
"""Start the DNS server"""
try:
self._configure_bind()
self._start_bind()
except:
self.stop()
raise
def stop(self) -> None:
"""Stop the DNS server, and clean its resources"""
if self.process:
try:
self.process.terminate()
self.process.wait()
except BaseException as e:
print("BIND9 did not stop cleanly: {}".format(e), file=sys.stderr)
shutil.rmtree(self.bind_root, ignore_errors=True)
if self._output != sys.stderr:
self._output.close()
def _configure_bind(self) -> None:
"""Configure the BIND9 server based on the prebaked configuration"""
bind_conf_src = resource_filename(
"certbot_integration_tests", "assets/bind-config"
)
for directory in ("conf", "zones"):
shutil.copytree(
os.path.join(bind_conf_src, directory), os.path.join(self.bind_root, directory)
)
def _start_bind(self) -> None:
"""Launch the BIND9 server as a Docker container"""
addr_str = "{}:{}".format(BIND_BIND_ADDRESS[0], BIND_BIND_ADDRESS[1])
# pylint: disable=consider-using-with
self.process = subprocess.Popen(
[
"docker",
"run",
"--rm",
"-p",
"{}:53/udp".format(addr_str),
"-p",
"{}:53/tcp".format(addr_str),
"-v",
"{}/conf:/etc/bind".format(self.bind_root),
"-v",
"{}/zones:/var/lib/bind".format(self.bind_root),
BIND_DOCKER_IMAGE,
],
stdout=self._output,
stderr=self._output,
)
if self.process.poll():
raise ValueError("BIND9 server stopped unexpectedly")
try:
self._wait_until_ready()
except:
# The container might be running even if we think it isn't
self.stop()
raise
def _wait_until_ready(self, attempts: int = 30) -> None:
"""
Polls the DNS server over TCP until it gets a response, or until
it runs out of attempts and raises a ValueError.
The DNS response message must match the txn_id of the DNS query message,
but otherwise the contents are ignored.
:param int attempts: The number of attempts to make.
"""
if not self.process:
raise ValueError("DNS server has not been started. Please run start() first.")
for _ in range(attempts):
if self.process.poll():
raise ValueError("BIND9 server stopped unexpectedly")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5.0)
try:
sock.connect(BIND_BIND_ADDRESS)
sock.sendall(BIND_TEST_QUERY)
buf = sock.recv(1024)
# We should receive a DNS message with the same tx_id
if buf and len(buf) > 4 and buf[2:4] == BIND_TEST_QUERY[2:4]:
return
# If we got a response but it wasn't the one we wanted, wait a little
time.sleep(1)
except: # pylint: disable=bare-except
# If there was a network error, wait a little
time.sleep(1)
finally:
sock.close()
raise ValueError(
"Gave up waiting for DNS server {} to respond".format(BIND_BIND_ADDRESS)
)
    def __enter__(self) -> Dict[str, Any]:
self.start()
return self.dns_xdist
def __exit__(self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException],
traceback: Optional[TracebackType]) -> None:
self.stop()
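# A minimal usage sketch (assumptions: Docker is installed locally and
# 127.0.0.1:45953 is free; the certbot integration test suite drives this class
# through pytest fixtures rather than like this).
if __name__ == "__main__":
    with DNSServer(unused_nodes=[], show_output=True) as dns_xdist:
        print("RFC2136-capable DNS server at {address}:{port}".format(**dns_xdist))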
|
py | 7dfc9c5b77c116d73445a39729b1ee68c0a53e08 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Create multiview data."""
import sys
from os import path
# Making sure you can run this, even if pulsar hasn't been installed yet.
sys.path.insert(0, path.join(path.dirname(__file__), "..", ".."))
def create_multiview():
"""Test multiview optimization."""
import imageio
# import cv2
# import skvideo.io
import numpy as np
import torch
from pytorch3d.renderer.points.pulsar import Renderer
from torch import nn
from torch.autograd import Variable
# Constructor.
n_points = 10
width = 1000
height = 1000
class Model(nn.Module):
"""A dummy model to test the integration into a stacked model."""
def __init__(self):
super(Model, self).__init__()
self.gamma = 0.1
self.renderer = Renderer(width, height, n_points)
def forward(self, vp, vc, vr, cam_params):
# self.gamma *= 0.995
# print("gamma: ", self.gamma)
return self.renderer.forward(vp, vc, vr, cam_params, self.gamma, 45.0)
# Generate sample data.
torch.manual_seed(1)
vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
vert_pos[:, 2] += 25.0
vert_pos[:, :2] -= 5.0
# print(vert_pos[0])
vert_col = torch.rand(n_points, 3, dtype=torch.float32)
vert_rad = torch.rand(n_points, dtype=torch.float32)
# Distortion.
# vert_pos[:, 1] += 0.5
vert_col *= 0.5
# vert_rad *= 0.7
for device in [torch.device("cuda")]:
model = Model().to(device)
vert_pos = vert_pos.to(device)
vert_col = vert_col.to(device)
vert_rad = vert_rad.to(device)
for angle_idx, angle in enumerate([-1.5, -0.8, -0.4, -0.1, 0.1, 0.4, 0.8, 1.5]):
vert_pos_v = Variable(vert_pos, requires_grad=False)
vert_col_v = Variable(vert_col, requires_grad=False)
vert_rad_v = Variable(vert_rad, requires_grad=False)
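            # Camera parameters in pulsar's 8-value layout (assumed here, matching
            # the renderer examples: position x/y/z, axis-angle rotation x/y/z,
            # focal length, sensor width).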
cam_params = torch.tensor(
[
np.sin(angle) * 35.0,
0.0,
30.0 - np.cos(angle) * 35.0,
0.0,
-angle,
0.0,
5.0,
2.0,
],
dtype=torch.float32,
).to(device)
cam_params_v = Variable(cam_params, requires_grad=False)
result = model.forward(vert_pos_v, vert_col_v, vert_rad_v, cam_params_v)
result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
imageio.imsave(
"reference/examples_TestRenderer_test_multiview_%d.png" % (angle_idx),
result_im,
)
if __name__ == "__main__":
create_multiview()
|
py | 7dfc9d7d8a0bd51d2393faddcc66610e1a13bc83 | def fatorial(numero, show = False):
fat = 1
for cont in range(numero, 0, -1):
if show:
print(f'{cont}', end = ' ')
if cont > 1:
print('X', end = ' ')
else:
print('=', end = ' ')
fat *= cont
return fat
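# Illustrative example: fatorial(5, show=True) prints "5 X 4 X 3 X 2 X 1 = "
# and returns 120.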
numero = int(input('Enter a number: '))
mostra = str(input('Type S to show the factorial steps or N to show only the result: ')).strip().upper()
if mostra == 'S':
    show = True
else:
    show = False
print(fatorial(numero, show=show))
|
py | 7dfc9da7a93d7425ef9d86e75bfcf0a6020b0941 |
def init():
pass
def run(batch):
print(batch) |
py | 7dfc9f1406a414c906e72e9127b8e9471c3884cd | """
ASGI config for AlohomoraCareershala project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AlohomoraCareershala.settings')
application = get_asgi_application()
|
py | 7dfc9f52a86cb06d3fd0a7efa872557a3995399a | import httpx
import settings
def getUsStats():
headers = {'x-rapidapi-host': settings.X_RAPIDAPI_HOST,
'x-rapidapi-key': settings.X_RAPIDAPI_KEY, 'accept': 'application/json'}
response = httpx.get(settings.COVID_STATS_ENDPOINT + '?format=json&name=usa', headers=headers)
return response.json()
def getUkStats():
headers = {'x-rapidapi-host': settings.X_RAPIDAPI_HOST,
'x-rapidapi-key': settings.X_RAPIDAPI_KEY, 'accept': 'application/json'}
response = httpx.get(settings.COVID_STATS_ENDPOINT + '?format=json&name=uk', headers=headers)
    return response.json()
|