trackSort = cms.string('sip2dSig'),
useCategories = cms.bool(True),
useTrackWeights = cms.bool(True),
vertexFlip = cms.bool(False)
),
tagInfos = cms.VInputTag(cms.InputTag("pfImpactParameterTagInfos"), cms.InputTag("pfInclusiveSecondaryVertexFinderCvsLTagInfos"), cms.InputTag("softPFMuonsTagInfos"), cms.InputTag("softPFElectronsTagInfos")),
useAdaBoost = cms.bool(False),
useCondDB = cms.bool(False),
useGBRForest = cms.bool(True),
variables = cms.VPSet(
cms.PSet(
default = cms.double(-1),
name = cms.string('vertexLeptonCategory'),
taggingVarName = cms.string('vertexLeptonCategory')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(0),
name = cms.string('trackSip2dSig_0'),
taggingVarName = cms.string('trackSip2dSig')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(1),
name = cms.string('trackSip2dSig_1'),
taggingVarName = cms.string('trackSip2dSig')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(0),
name = cms.string('trackSip3dSig_0'),
taggingVarName = cms.string('trackSip3dSig')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(1),
name = cms.string('trackSip3dSig_1'),
taggingVarName = cms.string('trackSip3dSig')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('trackPtRel_0'),
taggingVarName = cms.string('trackPtRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('trackPtRel_1'),
taggingVarName = cms.string('trackPtRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('trackPPar_0'),
taggingVarName = cms.string('trackPPar')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('trackPPar_1'),
taggingVarName = cms.string('trackPPar')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('trackEtaRel_0'),
taggingVarName = cms.string('trackEtaRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('trackEtaRel_1'),
taggingVarName = cms.string('trackEtaRel')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackDeltaR_0'),
taggingVarName = cms.string('trackDeltaR')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackDeltaR_1'),
taggingVarName = cms.string('trackDeltaR')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackPtRatio_0'),
taggingVarName = cms.string('trackPtRatio')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackPtRatio_1'),
taggingVarName = cms.string('trackPtRatio')
),
cms.PSet(
default = cms.double(1.1),
idx = cms.int32(0),
name = cms.string('trackPParRatio_0'),
taggingVarName = cms.string('trackPParRatio')
),
cms.PSet(
default = cms.double(1.1),
idx = cms.int32(1),
name = cms.string('trackPParRatio_1'),
taggingVarName = cms.string('trackPParRatio')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackJetDist_0'),
taggingVarName = cms.string('trackJetDist')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackJetDist_1'),
taggingVarName = cms.string('trackJetDist')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackDecayLenVal_0'),
taggingVarName = cms.string('trackDecayLenVal')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackDecayLenVal_1'),
taggingVarName = cms.string('trackDecayLenVal')
),
cms.PSet(
default = cms.double(0),
name = cms.string('jetNSecondaryVertices'),
taggingVarName = cms.string('jetNSecondaryVertices')
),
cms.PSet(
default = cms.double(-0.1),
name = cms.string('jetNTracks'),
taggingVarName = cms.string('jetNTracks')
),
cms.PSet(
default = cms.double(-0.1),
name = cms.string('trackSumJetEtRatio'),
taggingVarName = cms.string('trackSumJetEtRatio')
),
cms.PSet(
default = cms.double(-0.1),
name = cms.string('trackSumJetDeltaR'),
taggingVarName = cms.string('trackSumJetDeltaR')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('vertexMass_0'),
taggingVarName = cms.string('vertexMass')
),
cms.PSet(
default = cms.double(-10),
idx = cms.int32(0),
name = cms.string('vertexEnergyRatio_0'),
taggingVarName = cms.string('vertexEnergyRatio')
),
cms.PSet(
default = cms.double(-999),
idx = cms.int32(0),
name = cms.string('trackSip2dSigAboveCharm_0'),
taggingVarName = cms.string('trackSip2dSigAboveCharm')
),
cms.PSet(
default = cms.double(-999),
idx = cms.int32(0),
name = cms.string('trackSip3dSigAboveCharm_0'),
taggingVarName = cms.string('trackSip3dSigAboveCharm')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('flightDistance2dSig_0'),
taggingVarName = cms.string('flightDistance2dSig')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('flightDistance3dSig_0'),
taggingVarName = cms.string('flightDistance3dSig')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('vertexJetDeltaR_0'),
taggingVarName = cms.string('vertexJetDeltaR')
),
cms.PSet(
default = cms.double(0),
idx = cms.int32(0),
name = cms.string('vertexNTracks_0'),
taggingVarName = cms.string('vertexNTracks')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('massVertexEnergyFraction_0'),
taggingVarName = cms.string('massVertexEnergyFraction')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('vertexBoostOverSqrtJetPt_0'),
taggingVarName = cms.string('vertexBoostOverSqrtJetPt')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonPtRel_0'),
taggingVarName = cms.string('leptonPtRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonPtRel_1'),
taggingVarName = cms.string('leptonPtRel')
),
cms.PSet(
default = cms.double(-10000),
idx = cms.int32(0),
name = cms.string('leptonSip3d_0'),
taggingVarName = cms.string('leptonSip3d')
),
cms.PSet(
default = cms.double(-10000),
idx = cms.int32(1),
name = cms.string('leptonSip3d_1'),
taggingVarName = cms.string('leptonSip3d')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonDeltaR_0'),
taggingVarName = cms.string('leptonDeltaR')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonDeltaR_1'),
taggingVarName = cms.string('leptonDeltaR')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonRatioRel_0'),
taggingVarName = cms.string('leptonRatioRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonRatioRel_1'),
taggingVarName = cms.string('leptonRatioRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonEtaRel_0'),
taggingVarName = cms.string('leptonEtaRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonEtaRel_1'),
taggingVarName = cms.string('leptonEtaRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonRatio_0'),
taggingVarName = cms.string('leptonRatio')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonRatio_1'),
taggingVarName = cms.string('leptonRatio')
)
),
weightFile = cms.FileInPath('RecoBTag/CTagging/data/c_vs_udsg_sklearn.weight.xml')
)
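# Note: each PSet in the "variables" VPSet above maps one tagging variable
# (taggingVarName, optionally with an idx selecting the n-th ranked track,
# vertex or lepton) to a named MVA input, with "default" used when that
# quantity is undefined for a given jet (e.g. trackSip2dSig_1 defaults to
# -100 when no second selected track is available). The ESProducer below is
# the corresponding negative-tag ("flipped") CvsB computer: note the
# trackFlip/vertexFlip/SoftLeptonFlip settings and the sign-flipped
# sip2dSig cuts in its track selections.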
charmTagsNegativeComputerCvsB = cms.ESProducer("CharmTaggerESProducer",
computer = cms.ESInputTag("combinedSecondaryVertexSoftLeptonComputer:"), #cmssw_11_0
defaultValueNoTracks = cms.bool(False),
gbrForestLabel = cms.string(''),
mvaName = cms.string('BDT'),
slComputerCfg = cms.PSet(
SoftLeptonFlip = cms.bool(True),
calibrationRecords = cms.vstring(
'CombinedSVRecoVertexNoSoftLepton',
'CombinedSVPseudoVertexNoSoftLepton',
'CombinedSVNoVertexNoSoftLepton',
'CombinedSVRecoVertexSoftMuon',
'CombinedSVPseudoVertexSoftMuon',
'CombinedSVNoVertexSoftMuon',
'CombinedSVRecoVertexSoftElectron',
'CombinedSVPseudoVertexSoftElectron',
'CombinedSVNoVertexSoftElectron'
),
categoryVariableName = cms.string('vertexLeptonCategory'),
charmCut = cms.double(1.5),
correctVertexMass = cms.bool(False),
minimumTrackWeight = cms.double(0.5),
pseudoMultiplicityMin = cms.uint32(2),
pseudoVertexV0Filter = cms.PSet(
k0sMassWindow = cms.double(0.05)
),
recordLabel = cms.string(''),
trackFlip = cms.bool(True),
trackMultiplicityMin = cms.uint32(2),
trackPairV0Filter = cms.PSet(
k0sMassWindow = cms.double(0.03)
),
trackPseudoSelection = cms.PSet(
a_dR = cms.double(-0.001053),
a_pT = cms.double(0.005263),
b_dR = cms.double(0.6263),
b_pT = cms.double(0.3684),
jetDeltaRMax = cms.double(0.3),
maxDecayLen = cms.double(5),
maxDistToAxis = cms.double(0.07),
max_pT = cms.double(500),
max_pT_dRcut = cms.double(0.1),
max_pT_trackPTcut = cms.double(3),
min_pT = cms.double(120),
min_pT_dRcut = cms.double(0.5),
normChi2Max = cms.double(99999.9),
pixelHitsMin = cms.uint32(0),
ptMin = cms.double(0.0),
qualityClass = cms.string('any'),
sip2dSigMax = cms.double(-2.0),
sip2dSigMin = cms.double(-99999.9),
sip2dValMax = cms.double(99999.9),
sip2dValMin = cms.double(-99999.9),
sip3dSigMax = cms.double(0),
sip3dSigMin = cms.double(-99999.9),
sip3dValMax = cms.double(99999.9),
sip3dValMin = cms.double(-99999.9),
totalHitsMin = cms.uint32(0),
useVariableJTA = cms.bool(False)
),
trackSelection = cms.PSet(
a_dR = cms.double(-0.001053),
a_pT = cms.double(0.005263),
b_dR = cms.double(0.6263),
b_pT = cms.double(0.3684),
jetDeltaRMax = cms.double(0.3),
maxDecayLen = cms.double(5),
maxDistToAxis = cms.double(0.07),
max_pT = cms.double(500),
max_pT_dRcut = cms.double(0.1),
max_pT_trackPTcut = cms.double(3),
min_pT = cms.double(120),
min_pT_dRcut = cms.double(0.5),
normChi2Max = cms.double(99999.9),
pixelHitsMin = cms.uint32(0),
ptMin = cms.double(0.0),
qualityClass = cms.string('any'),
sip2dSigMax = cms.double(99999.9),
sip2dSigMin = cms.double(-99999.9),
sip2dValMax = cms.double(99999.9),
sip2dValMin = cms.double(-99999.9),
sip3dSigMax = cms.double(0),
sip3dSigMin = cms.double(-99999.9),
sip3dValMax = cms.double(99999.9),
sip3dValMin = cms.double(-99999.9),
totalHitsMin = cms.uint32(0),
useVariableJTA = cms.bool(False)
),
trackSort = cms.string('sip2dSig'),
useCategories = cms.bool(True),
useTrackWeights = cms.bool(True),
vertexFlip = cms.bool(True)
),
tagInfos = cms.VInputTag(cms.InputTag("pfImpactParameterTagInfos"), cms.InputTag("pfInclusiveSecondaryVertexFinderCvsLTagInfos"), cms.InputTag("softPFMuonsTagInfos"), cms.InputTag("softPFElectronsTagInfos")),
useAdaBoost = cms.bool(False),
useCondDB = cms.bool(False),
useGBRForest = cms.bool(True),
variables = cms.VPSet(
cms.PSet(
default = cms.double(-1),
name = cms.string('vertexLeptonCategory'),
taggingVarName = cms.string('vertexLeptonCategory')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(0),
name = cms.string('trackSip2dSig_0'),
taggingVarName = cms.string('trackSip2dSig')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(1),
name = cms.string('trackSip2dSig_1'),
taggingVarName = cms.string('trackSip2dSig')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(0),
name = cms.string('trackSip3dSig_0'),
taggingVarName = cms.string('trackSip3dSig')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(1),
name = cms.string('trackSip3dSig_1'),
taggingVarName = cms.string('trackSip3dSig')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('trackPtRel_0'),
taggingVarName = cms.string('trackPtRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('trackPtRel_1'),
taggingVarName = cms.string('trackPtRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('trackPPar_0'),
taggingVarName = cms.string('trackPPar')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('trackPPar_1'),
taggingVarName = cms.string('trackPPar')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('trackEtaRel_0'),
taggingVarName = cms.string('trackEtaRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('trackEtaRel_1'),
taggingVarName = cms.string('trackEtaRel')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackDeltaR_0'),
taggingVarName = cms.string('trackDeltaR')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackDeltaR_1'),
taggingVarName = cms.string('trackDeltaR')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackPtRatio_0'),
taggingVarName = cms.string('trackPtRatio')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackPtRatio_1'),
taggingVarName = cms.string('trackPtRatio')
),
cms.PSet(
default = cms.double(1.1),
idx = cms.int32(0),
name = cms.string('trackPParRatio_0'),
taggingVarName = cms.string('trackPParRatio')
),
cms.PSet(
default = cms.double(1.1),
idx = cms.int32(1),
name = cms.string('trackPParRatio_1'),
taggingVarName = cms.string('trackPParRatio')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackJetDist_0'),
taggingVarName = cms.string('trackJetDist')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackJetDist_1'),
taggingVarName = cms.string('trackJetDist')
    ),
import pydotplus as pdp
import networkx as nx
from copy import deepcopy
import itertools as it
from ..utilities.util import Util
from IPython.display import Image
from IPython import get_ipython
import logging
logger = logging.getLogger('cegpy.chain_event_graph')
class ChainEventGraph(nx.MultiDiGraph):
"""
Class: Chain Event Graph
Input: Staged tree object (StagedTree)
Output: Chain event graphs
"""
def __init__(self, staged_tree=None, **attr):
super().__init__(staged_tree, **attr)
self.sink_suffix = '∞'
self.node_prefix = 'w'
if staged_tree is not None:
try:
self.ahc_output = deepcopy(staged_tree.ahc_output)
except AttributeError:
self.ahc_output = {}
else:
logger.info("Class called with no incoming graph.")
self.evidence = Evidence(self)
@property
def node_prefix(self):
return self._node_prefix
@node_prefix.setter
def node_prefix(self, value):
self._node_prefix = str(value)
@property
def sink_suffix(self):
return self._sink_suffix
@sink_suffix.setter
def sink_suffix(self, value):
self._sink_suffix = str(value)
@property
def sink_node(self):
return "%s%s" % (self.node_prefix, self.sink_suffix)
@property
def root_node(self):
return ("%s0" % self.node_prefix)
@property
def certain_evidence(self):
return self._certain_evidence
@certain_evidence.setter
def certain_evidence(self, value):
self._certain_evidence = value
@property
def uncertain_evidence(self):
return self._uncertain_evidence
@uncertain_evidence.setter
def uncertain_evidence(self, value):
self._uncertain_evidence = value
@property
def path_list(self):
return self.__path_list
@path_list.setter
def path_list(self, value):
self.__path_list = value
@property
def reduced(self):
return self.evidence.reduced_graph
@property
def stages(self):
self.__stages = {}
node_stages = dict(self.nodes(data='stage', default=None))
for k, v in node_stages.items():
try:
self.__stages[v].append(k)
except KeyError:
self.__stages[v] = [k]
return self.__stages
def clear_evidence(self):
self.evidence = Evidence(self)
def generate(self):
        '''
        This function takes the output of the AHC algorithm and identifies
        the positions, i.e. the vertices of the CEG, and the edges of the CEG
        along with their edge labels and edge counts. Here we use the
        algorithm in our paper with the optimal stopping time.
        '''
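        # Outline of the steps implemented below:
        #   1. relabel the root (s0 -> w0), update edge probabilities and trim
        #      the event-tree leaves into a single sink node w∞;
        #   2. compute each node's maximum distance to the sink;
        #   3. working from the sink towards the root, merge any pair of nodes
        #      in the same stage with identical outgoing edges
        #      (check_vertices_can_be_merged / merge_nodes);
        #   4. relabel the remaining nodes w1, w2, ... outwards from the root
        #      and rebuild the root-to-sink path list.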
def check_vertices_can_be_merged(v1, v2) -> bool:
has_same_successor_nodes = \
set(self.adj[v1].keys()) == set(self.adj[v2].keys())
if has_same_successor_nodes:
has_same_outgoing_edges = True
v1_adj = self.succ[v1]
for succ_node in list(v1_adj.keys()):
v1_edges = self.succ[v1][succ_node]
v2_edges = self.succ[v2][succ_node]
if v1_edges is None or v2_edges is None:
has_same_outgoing_edges &= False
break
v2_edge_labels = \
[label for label in v2_edges.keys()]
for label in v1_edges.keys():
if label not in v2_edge_labels:
has_same_outgoing_edges &= False
break
else:
has_same_outgoing_edges &= True
else:
has_same_outgoing_edges = False
try:
in_same_stage = \
self.nodes[v1]['stage'] == self.nodes[v2]['stage']
except KeyError:
in_same_stage = False
return in_same_stage and \
has_same_successor_nodes and has_same_outgoing_edges
def merge_nodes(nodes_to_merge):
"""nodes to merge should be a set of 2 element tuples"""
temp_1 = 'temp_1'
temp_2 = 'temp_2'
while nodes_to_merge != set():
nodes = nodes_to_merge.pop()
new_node = nodes[0]
# Copy nodes to temp nodes
node_map = {
nodes[0]: temp_1,
nodes[1]: temp_2
}
nx.relabel_nodes(self, node_map, copy=False)
ebunch_to_remove = [] # List of edges to remove
self.add_node(new_node)
for succ, t1_edge_dict in self.succ[temp_1].items():
edge_labels = list(t1_edge_dict.keys())
while edge_labels != []:
new_edge_data = {}
label = edge_labels.pop(0)
t1_edge = t1_edge_dict[label]
t2_edge = self.succ[temp_2][succ][label]
new_edge_data['count'] = \
t1_edge['count'] + t2_edge['count']
new_edge_data['prior'] = \
t1_edge['prior'] + t2_edge['prior']
new_edge_data['posterior'] = \
t1_edge['posterior'] + t2_edge['posterior']
try:
new_edge_data['probability'] = \
t1_edge['probability']
self.add_edge(
u_for_edge=new_node,
v_for_edge=succ,
key=label,
count=new_edge_data['count'],
prior=new_edge_data['prior'],
posterior=new_edge_data['posterior'],
probability=new_edge_data['probability']
)
except KeyError:
self.add_edge(
u_for_edge=new_node,
v_for_edge=succ,
key=label,
count=new_edge_data['count'],
prior=new_edge_data['prior'],
posterior=new_edge_data['posterior']
)
ebunch_to_remove.append((temp_1, succ, label))
ebunch_to_remove.append((temp_2, succ, label))
self.remove_edges_from(ebunch_to_remove)
nx.relabel_nodes(
G=self,
mapping={temp_1: new_node, temp_2: new_node},
copy=False
)
# Some nodes have been removed, we need to update the
# mergeable list to point to new nodes if required
temp_list = list(nodes_to_merge)
for pair in temp_list:
if nodes[1] in pair:
new_pair = (
# the other node of the pair
pair[pair.index(nodes[1]) - 1],
# the new node it will be merged to
new_node
)
nodes_to_merge.remove(pair)
if new_pair[0] != new_pair[1]:
nodes_to_merge.add(new_pair)
pass
        def relabel_nodes(base_nodes, renamed_nodes=None):
            # Use None rather than a mutable default list, which would be
            # shared between successive calls to generate().
            if renamed_nodes is None:
                renamed_nodes = []
            next_level = []
# first, relabel the successors of this node
for node in base_nodes:
node_mapping = {}
for succ in self.succ[node].keys():
if succ != self.sink_node and succ not in renamed_nodes:
node_mapping[succ] = self.__get_next_node_name()
next_level.append(node_mapping[succ])
renamed_nodes.append(node_mapping[succ])
if node_mapping != {}:
nx.relabel_nodes(
self,
node_mapping,
copy=False
)
if next_level != []:
relabel_nodes(next_level, renamed_nodes)
if self.ahc_output == {}:
raise ValueError("Run staged tree AHC transitions first.")
# rename root node:
nx.relabel_nodes(self, {'s0': self.root_node}, copy=False)
self.__update_probabilities()
self.__trim_leaves_from_graph()
self.__update_distances_of_nodes_to_sink_node()
src_node_gen = self.__gen_nodes_with_increasing_distance(
start=1
)
next_set_of_nodes = next(src_node_gen)
while next_set_of_nodes != [self.root_node]:
nodes_to_merge = set()
while len(next_set_of_nodes) > 1:
node_1 = next_set_of_nodes.pop(0)
for node_2 in next_set_of_nodes:
mergeable = check_vertices_can_be_merged(node_1, node_2)
if mergeable:
nodes_to_merge.add((node_1, node_2))
merge_nodes(nodes_to_merge)
try:
next_set_of_nodes = next(src_node_gen)
except StopIteration:
next_set_of_nodes = []
relabel_nodes([self.root_node])
self.__update_path_list()
@property
def dot_graph(self):
return self._generate_dot_graph()
def _generate_dot_graph(self):
graph = pdp.Dot(graph_type='digraph', rankdir='LR')
edge_probabilities = list(
self.edges(data='probability', default=1, keys=True)
)
for (u, v, k, p) in edge_probabilities:
full_label = "{}\n{:.2f}".format(k, p)
graph.add_edge(
pdp.Edge(
src=u,
dst=v,
label=full_label,
labelfontcolor='#009933',
fontsize='10.0',
color='black'
)
)
nodes = list(nx.topological_sort(self))
for node in nodes:
try:
fill_colour = self.nodes[node]['colour']
except KeyError:
fill_colour = 'white'
label = "<" + node[0] + "<SUB>" + node[1:] + "</SUB>" + ">"
graph.add_node(
pdp.Node(
name=node,
label=label,
style='filled',
fillcolor=fill_colour
)
)
return graph
def create_figure(self, filename):
"""
Draws the chain event graph representation of the stage tree,
and saves it to "<filename>.filetype". Supports any filetype that
graphviz supports. e.g: "event_tree.png" or "event_tree.svg" etc.
"""
filename, filetype = Util.generate_filename_and_mkdir(filename)
graph = self.dot_graph
graph.write(str(filename), format=filetype)
if get_ipython() is None:
return None
else:
return Image(graph.create_png())
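    # Example (hypothetical path): ceg.create_figure("figures/ceg.png") writes
    # the rendered graph to disk and, when running inside IPython/Jupyter,
    # also returns an inline PNG image of it.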
def __update_probabilities(self):
count_total_lbl = 'count_total'
edge_counts = list(self.edges(data='count', keys=True, default=0))
for stage, stage_nodes in self.stages.items():
count_total = 0
stage_edges = {}
if stage is not None:
for (u, _, k, c) in edge_counts:
if u in stage_nodes:
count_total += c
try:
stage_edges[k] += c
except KeyError:
stage_edges[k] = c
for node in stage_nodes:
self.nodes[node][count_total_lbl] = count_total
for (u, v, k, _) in edge_counts:
if u in stage_nodes:
self.edges[u, v, k]['probability'] =\
stage_edges[k] / count_total
else:
for node in stage_nodes:
count_total = 0
stage_edges = {}
for (u, _, k, c) in edge_counts:
if u == node:
count_total += c
try:
stage_edges[k] += c
except KeyError:
stage_edges[k] = c
self.nodes[node][count_total_lbl] = count_total
for (u, v, k, _) in edge_counts:
if u == node:
self.edges[u, v, k]['probability'] =\
stage_edges[k] / count_total
def __update_path_list(self) -> None:
path_generator = nx.all_simple_edge_paths(
self,
self.root_node,
self.sink_node
)
path_list = []
while True:
try:
path_list.append(next(path_generator))
except StopIteration:
self.path_list = path_list
break
def __update_distances_of_nodes_to_sink_node(self) -> None:
"""
Iterates through the graph until it finds the root node.
For each node, it determines the maximum number of edges
from that node to the sink node.
"""
max_dist = 'max_dist_to_sink'
self.nodes[self.sink_node][max_dist] = 0
node_queue = [self.sink_node]
while node_queue != [self.root_node]:
node = node_queue.pop(0)
for pred in self.predecessors(node):
max_dist_to_sink = set()
for succ in self.successors(pred):
try:
max_dist_to_sink.add(
self.nodes[succ][max_dist]
)
self.nodes[pred][max_dist] = max(max_dist_to_sink) + 1
except KeyError:
break
if pred not in node_queue:
node_queue.append(pred)
def __gen_nodes_with_increasing_distance(self, start=0) -> list:
max_dists = nx.get_node_attributes(self, 'max_dist_to_sink')
distance_dict = {}
for key, value in max_dists.items():
distance_dict.setdefault(value, []).append(key)
for dist in range(len(distance_dict)):
if dist >= start:
yield distance_dict[dist]
def __get_next_node_name(self):
try:
num = str(next(self._num_iter))
except AttributeError:
self._num_iter = it.count(1, 1)
num = str(next(self._num_iter))
return str(self.node_prefix) + num
def __trim_leaves_from_graph(self):
# Create new CEG sink node
self.add_node(self.sink_node, colour='lightgrey')
outgoing_edges = deepcopy(self.succ).items()
# Check to see if any nodes have no outgoing edges.
        for node, node_edges in outgoing_edges:
            if node_edges == {} and node != self.sink_node:
incoming_edges = deepcopy(self.pred[node]).items()
                # When a node is identified as a leaf, check the
                # predecessor nodes that have edges entering this node.
for pred_node, edges in incoming_edges:
for edge_label, edge in edges.items():
# Create new edge that points to the sink node,
# with all the same data as the edge we will delete.
try:
prob = edge['probability']
except KeyError:
prob = 1
self.add_edge(
pred_node,
self.sink_node,
key=edge_label,
count=edge['count'],
prior=edge['prior'],
posterior=edge['posterior'],
probability=prob
)
self.remove_node(node)
class Evidence:
CERTAIN = True
UNCERTAIN = False
def __init__(self, graph):
self.__graph = graph
self.certain_edges = []
self.uncertain_edges = []
self.certain_vertices = set()
self.uncertain_vertices = set()
@property
def reduced_graph(self):
return self.__create_reduced_graph()
@property
def path_list(self):
return self._path_list
@path_list.setter
def path_list(self, value):
self._path_list = value
@property
def edges(self):
return list(self._edges)
@edges.setter
def edges(self, value):
self._edges = value
@property
def vertices(self):
return list(self._vertices)
@vertices.setter
def vertices(self, value):
self._vertices = value
def add_edge(self, u, v, label, certain):
edge = (u, v, label)
if certain:
self.certain_edges.append(edge)
else:
self.uncertain_edges.append(edge)
def add_edges_from(self, edges, certain):
for (u, v, k) in edges:
self.add_edge(u, v, k, certain)
def remove_edge(self, u, v, label, certain):
if certain:
self.certain_edges.remove((u, v, label))
else:
self.uncertain_edges.remove((u, v, label))
def remove_edges_from(self, edges, certain):
        for (u, v, k) in edges:
            self.remove_edge(u, v, k, certain)
"""
GP + transit example for Kepler 1627 / exoplanet case-studies docs.
"""
import exoplanet as xo
xo.utils.docs_setup()
print(f"exoplanet.__version__ = '{xo.__version__}'")
##########################################
import lightkurve as lk
import numpy as np, matplotlib.pyplot as plt
# Get long cadence light curves for all quarters. Median normalize all
# quarters, remove nans, and run a 5-sigma outlier clipping.
lcf = lk.search_lightcurve(
"6184894", mission="Kepler", author="Kepler", cadence="long"
).download_all()
lc = lcf.stitch().remove_nans().remove_outliers()
# Require a quality flag of zero (no flags set), since we have an abundance
# of data.
lc = lc[lc.quality == 0]
# Make sure that the data type is consistent
x = np.ascontiguousarray(lc.time.value, dtype=np.float64)
y = np.ascontiguousarray(lc.flux, dtype=np.float64)
yerr = np.ascontiguousarray(lc.flux_err, dtype=np.float64)
texp = np.median(np.diff(x))
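# texp approximates the exposure time by the median cadence spacing (about
# 30 minutes for Kepler long cadence); it is passed to the light-curve model
# below so the transit shape is integrated over each exposure.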
# Normalize around zero for GP fitting. Keep in units of relative flux, rather
# than say ppt.
mu = np.nanmedian(y)
y = (y / mu - 1)
yerr = yerr / mu
# Visualize the data.
# Plot #0: the full dataset
# Plot #1: a 100 day slice
# Plot #2: center it on the known Kepler ephemeris.
plt.plot(x, y, "k")
plt.xlim(x.min(), x.max())
plt.xlabel("time [days]")
plt.ylabel("relative flux [ppt]")
plt.title("Kepler 1627")
plt.savefig("temp0.png", bbox_inches='tight')
plt.plot(x, y, "k")
plt.xlabel("time [days]")
plt.ylabel("relative flux [ppt]")
plt.xlim([550,650])
plt.title("Kepler 1627")
plt.savefig("temp1.png", bbox_inches='tight')
plt.plot(x, y, "k")
plt.xlabel("time [days]")
plt.ylabel("relative flux")
plt.xlim([120.6,121]) # transit is here
plt.ylim([-30e-3,-5e-3])
plt.title("Kepler 1627 b")
plt.savefig("temp2.png", bbox_inches='tight')
##########################################
#
# Begin main block
#
import pymc3 as pm
import pymc3_ext as pmx
import aesara_theano_fallback.tensor as tt
from celerite2.theano import terms, GaussianProcess
from astropy import units as units, constants as const
def build_model(mask=None, start=None):
if mask is None:
mask = np.ones(len(x), dtype=bool)
with pm.Model() as model:
# Shared parameters
mean = pm.Normal("mean", mu=0, sd=1, testval=0)
# Stellar parameters. These are usually determined from spectroscopy
# and/or isochrone fits. We set a bound on the R_star prior simply to
# show how one might do this.
logg_star = pm.Normal("logg_star", mu=4.53, sd=0.05)
r_star = pm.Normal("r_star", mu=1.0, sd=0.018)
# Here "factor" is defined s.t. factor * 10**logg / r_star = rho
factor = 5.141596357654149e-05
rho_star = pm.Deterministic(
"rho_star", factor*10**logg_star / r_star
)
# Limb-darkening: adopt Kipping 2013.
u_star = xo.QuadLimbDark("u_star")
star = xo.LimbDarkLightCurve(u_star)
# To get Rp/R*, fit for log(depth). This requires an impact parameter
# prior from 0 to 1, because otherwise there's a sqrt(1-b^2) in the
# conversion that doesn't make sense. See
# https://github.com/exoplanet-dev/exoplanet/blob/e99d1bd68654f21efbbf8400a83889a470d2baf7/src/exoplanet/light_curves/limb_dark.py#L73
b = pm.Uniform("b", lower=0, upper=1)
log_depth = pm.Normal("log_depth", mu=np.log(1.8e-3), sigma=1)
depth = pm.Deterministic("depth", tt.exp(log_depth))
ror = pm.Deterministic(
"ror", star.get_ror_from_approx_transit_depth(depth, b),
)
r_pl = pm.Deterministic(
"r_pl", ror*r_star
)
# Orbital parameters for the planet. Use mean values from Holczer+16.
t0 = pm.Normal("t0", mu=120.790531, sd=0.02, testval=120.790531)
period = pm.Normal("period", mu=7.202806, sd=0.01, testval=7.202806)
# Let eccentricity float, for funsies.
nplanets = 1
ecs = pmx.UnitDisk(
"ecs", shape=(2, nplanets),
testval=0.01 * np.ones((2, nplanets))
)
ecc = pm.Deterministic(
"ecc",
tt.sum(ecs ** 2, axis=0)
)
omega = pm.Deterministic(
"omega", tt.arctan2(ecs[1], ecs[0])
)
xo.eccentricity.vaneylen19(
"ecc_prior",
multi=False, shape=nplanets, fixed=True, observed=ecc
)
# Define the orbit model.
orbit = xo.orbits.KeplerianOrbit(
period=period,
t0=t0,
b=b,
rho_star=rho_star,
r_star=r_star,
ecc=ecc,
omega=omega
)
transit_model = (
mean + tt.sum(
star.get_light_curve(
orbit=orbit, r=r_pl, t=x, texp=texp), axis=-1
)
)
# Convenience function for plotting.
pm.Deterministic(
'transit_pred', star.get_light_curve(
orbit=orbit, r=r_pl, t=x[mask], texp=texp
)
)
# Use the GP model from the stellar variability tutorial at
# https://gallery.exoplanet.codes/en/latest/tutorials/stellar-variability/
# A jitter term describing excess white noise
log_jitter = pm.Normal("log_jitter", mu=np.log(np.mean(yerr)), sd=2)
# The parameters of the RotationTerm kernel
sigma_rot = pm.InverseGamma(
"sigma_rot", **pmx.estimate_inverse_gamma_parameters(1, 5)
)
# Rotation period is 2.6 days, from Lomb Scargle
log_prot = pm.Normal(
"log_prot", mu=np.log(2.606418), sd=0.02
)
prot = pm.Deterministic("prot", tt.exp(log_prot))
log_Q0 = pm.Normal(
"log_Q0", mu=0, sd=2
)
log_dQ = pm.Normal(
"log_dQ", mu=0, sd=2
)
f = pm.Uniform(
"f", lower=0.01, upper=1
)
# Set up the Gaussian Process model. See
# https://celerite2.readthedocs.io/en/latest/tutorials/first/ for an
# introduction. Here, we have a quasiperiodic term:
kernel = terms.RotationTerm(
sigma=sigma_rot,
period=prot,
Q0=tt.exp(log_Q0),
dQ=tt.exp(log_dQ),
f=f,
)
#
# Note mean of the GP is defined here to be zero.
#
gp = GaussianProcess(
kernel,
t=x[mask],
diag=yerr[mask]**2 + tt.exp(2 * log_jitter),
quiet=True,
)
# Compute the Gaussian Process likelihood and add it into the
# the PyMC3 model as a "potential"
gp.marginal("transit_obs", observed=y[mask]-transit_model)
# Compute the GP model prediction for plotting purposes
pm.Deterministic(
"gp_pred", gp.predict(y[mask]-transit_model)
)
# Track planet radius in Jovian radii
r_planet = pm.Deterministic(
"r_planet", (ror*r_star)*( 1*units.Rsun/(1*units.Rjup) ).cgs.value
)
# Optimize the MAP solution.
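        # The optimization is staged: first the GP/rotation hyperparameters
        # (sigma_rot, f, prot, log_Q0, log_dQ) with everything else held at
        # the starting point, then the transit and stellar parameters, and
        # finally all parameters jointly.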
if start is None:
start = model.test_point
map_soln = start
map_soln = pmx.optimize(
start=map_soln,
vars=[sigma_rot, f, prot, log_Q0, log_dQ]
)
map_soln = pmx.optimize(
start=map_soln,
vars=[log_depth, b, ecc, omega, t0, period, r_star, logg_star,
u_star, mean]
)
map_soln = pmx.optimize(start=map_soln)
return model, map_soln
model, map_estimate = build_model()
##########################################
import matplotlib as mpl
from copy import deepcopy
# NOTE: maybe lightkurve has some analog of this function from astrobase? See
# https://github.com/waqasbhatti/astrobase; you might need to
# ! pip install astrobase
from astrobase.lcmath import phase_bin_magseries
##########################################
def plot_light_curve(x, y, soln, mask=None):
if mask is None:
mask = np.ones(len(x), dtype=bool)
plt.close("all")
fig, axes = plt.subplots(4, 1, figsize=(10, 10), sharex=True)
ax = axes[0]
if len(x[mask]) > int(2e4):
# see https://github.com/matplotlib/matplotlib/issues/5907
mpl.rcParams["agg.path.chunksize"] = 10000
ax.scatter(x[mask], y[mask], c="k", s=0.5, rasterized=True,
label="data", linewidths=0, zorder=42)
gp_mod = soln["gp_pred"] + soln["mean"]
ax.plot(x[mask], gp_mod, color="C2", label="MAP gp model",
zorder=41, lw=0.5)
ax.legend(fontsize=10)
ax.set_ylabel("$f$")
ax = axes[1]
ax.plot(x[mask], y[mask] - gp_mod, "k", label="data - MAPgp")
for i, l in enumerate("b"):
mod = soln["transit_pred"][:, i]
ax.plot(
x[mask], mod, label="planet {0} [model under]".format(l),
zorder=-10
)
ax.legend(fontsize=10, loc=3)
ax.set_ylabel("$f_\mathrm{dtr}$")
ax = axes[2]
ax.plot(x[mask], y[mask] - gp_mod, "k", label="data - MAPgp")
for i, l in enumerate("b"):
mod = soln["transit_pred"][:, i]
ax.plot(
x[mask], mod, label="planet {0} [model over]".format(l)
)
ax.legend(fontsize=10, loc=3)
ax.set_ylabel("$f_\mathrm{dtr}$ [zoom]")
ymin = np.min(mod)-0.05*abs(np.min(mod))
ymax = abs(ymin)
ax.set_ylim([ymin, ymax])
ax = axes[3]
mod = gp_mod + np.sum(soln["transit_pred"], axis=-1)
ax.plot(x[mask], y[mask] - mod, "k")
ax.axhline(0, color="#aaaaaa", lw=1)
ax.set_ylabel("residuals")
ax.set_xlim(x[mask].min(), x[mask].max())
ax.set_xlabel("time [days]")
fig.tight_layout()
def doublemedian(x):
return np.median(np.median(x, axis=0), axis=0)
def doublemean(x):
return np.nanmean(np.nanmean(x, axis=0), axis=0)
def doublepctile(x, SIGMA=[2.5,97.5]):
# [16, 84] for 1-sigma
# flatten/merge cores and chains. then percentile over both.
return np.percentile(
np.reshape(
np.array(x), (x.shape[0]*x.shape[1], x.shape[2])
),
SIGMA, axis=0
)
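# Shape note for doublepctile: a trace array of shape (nchains, ndraws, ntime)
# is flattened to (nchains*ndraws, ntime) before the percentiles are taken,
# so the result has shape (2, ntime) for the default 2.5/97.5 band.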
def get_ylimguess(y):
ylow = np.nanpercentile(y, 0.1)
yhigh = np.nanpercentile(y, 99.9)
ydiff = (yhigh-ylow)
ymin = ylow - 0.35*ydiff
ymax = yhigh + 0.35*ydiff
return [ymin,ymax]
def plot_phased_light_curve(
x, y, yerr, soln, mask=None, from_trace=False, ylimd=None,
binsize_minutes=20, map_estimate=None, fullxlim=False, BINMS=3,
show_rms_err=True, hlines=None
):
"""
Plot a phased light curve using either a MAP solution, or using the full
MCMC posterior. (Or both). Overkill for a minimum-working-example, but
this is what I had already cooked up.
Args:
soln (az.data.inference_data.InferenceData): Can be MAP solution from
PyMC3. Can also be the posterior's trace itself
(model.trace.posterior). If the posterior is passed, bands showing the
2-sigma uncertainty interval will be drawn.
from_trace (bool): set to be True if using model.trace.posterior as
your `soln`.
        ylimd (dict): dictionary that sets the ylimits of the plot, e.g.,
`{"A": [-2.2,0.5], "B": [-0.1,0.1]}`.
binsize_minutes (float): how many minutes per bin?
map_estimate: if passed, this is used as the "best fit" line (useful
when doing uncertainty bands from the full MCMC posterior). Otherwise,
the nanmean is used. This is most useful when drawing uncertainty
bands with `soln` being `model.trace.posterior`.
fullxlim (bool): if True, the xlimits of the plot will be the full
orbit. Otherwise, it'll be a ~20 hour window centered on the transit.
BINMS (float): marker size for binned data points.
show_rms_err (bool): if True, a representative error will be drawn
for the binned points using the scatter of the out of transit points.
hlines (list): if passed, horizontal lines will be drawn.
"""
if not fullxlim:
scale_x = lambda x: x*24
else:
scale_x = lambda x: x
if mask is None:
mask = np.ones(len(x), dtype=bool)
plt.close("all")
fig = plt.figure(figsize=(8,6))
axd = fig.subplot_mosaic(
"""
A
B
""",
gridspec_kw={
"height_ratios": [1,1]
}
)
if from_trace==True:
_t0 = np.nanmean(soln["t0"])
_per = np.nanmean(soln["period"])
if len(soln["gp_pred"].shape)==3:
# ncores X nchains X time
medfunc = doublemean
pctfunc = doublepctile
elif len(soln["gp_pred"].shape)==2:
medfunc = lambda x: np.mean(x, axis=0)
pctfunc = lambda x: np.percentile(x, [2.5,97.5], axis=0)
else:
raise NotImplementedError
gp_mod = (
medfunc(soln["gp_pred"]) +
medfunc(soln["mean"])
)
lc_mod = (
medfunc(np.sum(soln["transit_pred"], axis=-1))
)
lc_mod_band = (
pctfunc(np.sum(soln["transit_pred"], axis=-1))
)
_yerr = (
np.sqrt(yerr[mask] ** 2 +
np.exp(2 * medfunc(soln["log_jitter"])))
)
med_error = np.nanmedian(yerr[mask])
med_jitter = np.nanmedian(np.exp(medfunc(soln["log_jitter"])))
print(42*"-")
print(f"WRN! Median σ_f = {med_error:.2e}. "
f"Median jitter = {med_jitter:.2e}")
print(42*"-")
    if (from_trace == False) or (map_estimate is
isinstance(hsg_thing, np.ndarray):
self.parameters = parameter_dict.copy() # Probably shouldn't shoehorn this in this way
self.addenda = []
self.subtrahenda = []
self.ccd_data = np.array(hsg_thing)
self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
            # This data won't have an error column, so attach a column of ones
self.ccd_data = np.column_stack((self.ccd_data, np.ones_like(self.ccd_data[:,1])))
self.ccd_data = np.flipud(self.ccd_data) # Because turning into eV switches direction
self.fname = "Live Data"
else:
raise Exception("I don't know what this file type is {}, type: {}".format(
hsg_thing, type(hsg_thing)
))
self.proc_data = np.array(self.ccd_data)
# proc_data is now a 1600 long array with [frequency (eV), signal (counts / FEL pulse), S.E. of signal mean]
# self.parameters["nir_freq"] = 1239.84 / float(self.parameters["nir_lambda"])
self.parameters["nir_freq"] = 1239.84 / float(self.parameters.get("nir_lambda", -1))
# self.parameters["thz_freq"] = 0.000123984 * float(self.parameters["fel_lambda"])
self.parameters["thz_freq"] = 0.000123984 * float(self.parameters.get("fel_lambda", -1))
# self.parameters["nir_power"] = float(self.parameters["nir_power"])
self.parameters["nir_power"] = float(self.parameters.get("nir_power", -1))
try: # This is the new way of doing things. Also, now it's power
self.parameters["thz_energy"] = float(self.parameters["pulseEnergies"]["mean"])
self.parameters["thz_energy_std"] = float(self.parameters["pulseEnergies"]["std"])
        except Exception:  # This is the old way TODO: DEPRECATE THIS
self.parameters["thz_energy"] = float(self.parameters.get("fel_power", -1))
# things used in fitting/guessing
self.sb_list = np.array([])
self.sb_index = np.array([])
self.sb_dict = {}
self.sb_results = np.array([])
self.full_dict = {}
def __add__(self, other):
"""
Add together the image data from self.proc_data, or add a constant to
that np.array. It will then combine the addenda and subtrahenda lists,
as well as add the fel_pulses together. If type(other) is a CCD object,
then it will add the errors as well.
Input:
self = CCD-like object
other = int, float or CCD object
Internal:
ret.proc_data = the self.proc_data + other(.proc_data)
ret.addenda = combination of two input addenda lists
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be added, it's either a int/float or a HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Add a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other
ret.addenda[0] = ret.addenda[0] + other
# or add the data of two hsg_spectra together
else:
if np.isclose(ret.parameters['center_lambda'], other.parameters['center_lambda']):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other.proc_data[:, 1]
ret.proc_data[:, 2] = np.sqrt(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.addenda[0] = ret.addenda[0] + other.addenda[0]
ret.addenda.extend(other.addenda[1:])
ret.subtrahenda.extend(other.subtrahenda)
ret.parameters['fel_pulses'] += other.parameters['fel_pulses']
else:
raise Exception('Source: Spectrum.__add__:\nThese are not from the same grating settings')
return ret
def __sub__(self, other):
"""
This subtracts constants or other data sets between self.proc_data. I
think it even keeps track of what data sets are in the file and how
they got there.
See how __add__ works for more information.
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be subtracted, it's either a int/float or a HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Subtract a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] - other # Need to choose a name
ret.addenda[0] = ret.addenda[0] - other
# Subtract the data of two hsg_spectra from each other
else:
if np.isclose(ret.proc_data[0, 0], other.proc_data[0, 0]):
ret.proc_data[:, 1] = self.proc_data[:, 1] - other.proc_data[:, 1]
ret.proc_data[:, 2] = np.sqrt(self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)
ret.subtrahenda.extend(other.addenda[1:])
ret.addenda.extend(other.subtrahenda)
else:
raise Exception('Source: Spectrum.__sub__:\nThese are not from the same grating settings')
return ret
def __repr__(self):
base = """
fname: {},
Series: {series},
spec_step: {spec_step},
fel_lambda: {fel_lambda},
nir_lambda: {nir_lambda}""".format(os.path.basename(self.fname),**self.parameters)
return base
__str__ = __repr__
def calc_approx_sb_order(self, test_nir_freq):
"""
This simple method will simply return a float approximating the order
of the frequency input. We need this because the CCD wavelength
calibration is not even close to perfect. And it shifts by half a nm
sometimes.
:param test_nir_freq: the frequency guess of the nth sideband
:type test_nir_freq: float
:return: The approximate order of the sideband in question
:rtype: float
"""
nir_freq = self.parameters['nir_freq']
thz_freq = self.parameters['thz_freq']
# If thz = 0, prevent error
if not thz_freq: thz_freq = 1
approx_order = (test_nir_freq - nir_freq) / thz_freq
return approx_order
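    # Worked example (made-up numbers): with nir_freq = 1.55 eV and
    # thz_freq = 0.01 eV, a peak near 1.60 eV gives
    # (1.60 - 1.55) / 0.01 = 5.0, i.e. roughly the 5th-order sideband.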
def guess_sidebands(self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
Update 05/24/18:
Hunter had two different loops for negative order sidebands,
then positive order sidebands. They're done pretty much identically,
so I've finally merged them into one.
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum data
value in the array and guessing what sideband it is. It creates an array
that includes this information. It will then step down, initially by one
THz frequency, then by twos after it hasn't found any odd ones. It then
goes up from the max and finds everything above in much the same way.
There is currently no rhyme or reason to a cutoff of 8. I don't know what
it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately. Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = np.array(self.proc_data[:, 0])
y_axis = np.array(self.proc_data[:, 1])
try:
error = np.array(self.proc_data[:, 2])
except IndexError:
# Happens on old data where spectra weren't calculated in the live
# software.
error = np.ones_like(x_axis)
min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1
try:
max_sb = int(self.calc_approx_sb_order(x_axis[-1]))
except ValueError:
print(x_axis)
nir_freq = self.parameters["nir_freq"]
thz_freq = self.parameters["thz_freq"]
if verbose:
print("min_sb: {} | max_sb: {}".format(min_sb, max_sb))
# Find max strength sideband and it's order
global_max = np.argmax(y_axis)
order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))
# if verbose:
# print "The global max is at index", global_max
if global_max < 15:
check_y = y_axis[:global_max + 15]
check_y = np.concatenate((np.zeros(15 - global_max), check_y))
elif global_max > 1585:
check_y = y_axis[global_max - 15:]
check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))
else:
check_y = y_axis[global_max - 15:global_max + 15]
check_max_index = np.argmax(check_y)
check_max_area = np.sum(check_y[check_max_index - 2:check_max_index + 3])
check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if verbose:
print(("{:^16}" * 5).format(
"global_max idx", "check_max_area", "check_ave", "check_stdev",
"check_ratio"))
print(("{:^16.5g}" * 5).format(
global_max, check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
self.sb_list = [order_init]
self.sb_index = [global_max]
sb_freq_guess = [x_axis[global_max]]
sb_amp_guess = [y_axis[global_max]]
sb_error_est = [
np.sqrt(sum([i ** 2 for i in error[global_max - 2:global_max + 3]])) / (
check_max_area - 5 * check_ave)]
else:
print("There are no sidebands in", self.fname)
raise RuntimeError
if verbose:
print("\t Looking for sidebands with f < {:.6f}".format(sb_freq_guess[0]))
last_sb = sb_freq_guess[0]
index_guess = global_max
# keep track of how many consecutive sidebands we've skipped. Sometimes one's
# noisy or something, so we want to keep looking after skipping one
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init - 1, min_sb - 1, -1):
# Check to make sure we're not looking at an odd when
# we've decided to skip them.
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb - thz_freq
if verbose:
print("I skipped", order)
continue
# Window size to look for next sideband. Needs to be order dependent
# because higher orders get wider, so we need to look at more.
from django.test import TestCase, Client
from django.urls import reverse
from django.test.utils import setup_test_environment
from bs4 import BeautifulSoup
import re
import time
from projects.models import *
from projects.forms import *
client = Client()
# length of base template, used to test for empty pages
LEN_BASE = 2600
class BaseWebsiteTestCase(TestCase):
def setUp(self):
        super().setUp()
def test_homepage_load(self):
url = reverse("projects:home")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_homepage_not_empty(self):
url = reverse("projects:home")
response = self.client.get(url)
self.assertGreater(len(response.content), LEN_BASE)
def test_project_list_load(self):
url = reverse("projects:projects_list")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_project_list_not_empty(self):
url = reverse("projects:projects_list")
response = self.client.get(url)
self.assertGreater(len(response.content), LEN_BASE)
def test_project_students_load(self):
url = reverse("projects:students")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_project_students_not_empty(self):
url = reverse("projects:students")
response = self.client.get(url)
self.assertGreater(len(response.content), LEN_BASE)
def test_project_educators_load(self):
url = reverse("projects:educators")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_project_educators_not_empty(self):
url = reverse("projects:educators")
response = self.client.get(url)
self.assertGreater(len(response.content), LEN_BASE)
def test_project_leaders_load(self):
url = reverse("projects:leaders")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_project_leaders_not_empty(self):
url = reverse("projects:leaders")
response = self.client.get(url)
self.assertGreater(len(response.content), LEN_BASE)
VERBOSE = False
class TraverseLinksTest(TestCase):
def setUp(self):
# By default, login as superuser
um = OpenSUTDUserManager()
um.create_user("tom", display_name="<NAME>",
display_picture="https://via.placeholder.com/150",
graduation_year=2018, pillar="ISTD",
password="<PASSWORD>")
self.client = Client()
self.superuser = User.objects.get(username="tom")
self.client.login(username="tom", password="<PASSWORD>")
@classmethod
def setUpTestData(cls):
pm = OpenSUTDProjectManager()
um = OpenSUTDUserManager()
pm.create_project(project_uid="ACAD_00001",
title="OpenSUTD Web Platform",
caption="Sample project 1",
category="ACAD",
url="https://github.com/OpenSUTD/web-platform-prototype",
poster_url="https://via.placeholder.com/150",
featured_image="https://via.placeholder.com/150")
um.create_user("dick", display_name="<NAME>",
display_picture="https://via.placeholder.com/150",
graduation_year=2019, pillar="ISTD")
um.create_user("jane", display_name="<NAME>",
display_picture="https://via.placeholder.com/150",
graduation_year=2021, pillar="ESD")
pm.create_project(project_uid="ACAD_00002",
title="RandomZZZZZ",
caption="Sample project 2",
category="ACAD",
url="https://github.com/OpenSUTD/web-platform-prototype",
poster_url="https://via.placeholder.com/150",
featured_image="https://via.placeholder.com/150")
pm.set_project_status("ACAD_00001", "ACCEPT")
pm.add_user_to_project("ACAD_00001", "dick")
pm.add_user_to_project("ACAD_00001", "jane")
pm.add_tag_to_project(
"ACAD_00001", "rand1,rand2,education,student,policy")
pm.add_user_to_project("ACAD_00002", "jane")
pm.add_tag_to_project(
"ACAD_00002", "rand1,rand2,education,student,policy")
def test_traverse_urls(self):
# Fill these lists as needed with your site specific URLs to check and to avoid
to_traverse_list = ["/", "/projects/",
"/students/", "/educators/", "/leaders/"]
to_avoid_list = ["javascript:history\.back()", "https://*",
"javascript:history\.go\(-1\)", "^mailto:.*"]
done_list = []
error_list = []
source_of_link = dict()
for link in to_traverse_list:
source_of_link[link] = "initial"
(to_traverse_list, to_avoid_list, done_list, error_list, source_of_link) = \
self.recurse_into_path(
to_traverse_list, to_avoid_list, done_list, error_list, source_of_link)
print("END REACHED\nStats:")
if VERBOSE:
print("\nto_traverse_list = " + str(to_traverse_list))
if VERBOSE:
print("\nto_avoid_list = " + str(to_avoid_list))
if VERBOSE:
print("\nsource_of_link = " + str(source_of_link))
if VERBOSE:
print("\ndone_list = " + str(done_list))
print("Followed " + str(len(done_list)) + " links successfully")
print("Avoided " + str(len(to_avoid_list)) + " links")
if error_list:
print("!! " + str(len(error_list)) + " error(s) : ")
for error in error_list:
print(str(error) + " found in page " +
source_of_link[error[0]])
print("Errors found traversing links")
assert False
else:
print("No errors")
def recurse_into_path(self, to_traverse_list, to_avoid_list, done_list, error_list, source_of_link):
""" Dives into first item of to_traverse_list
Returns: (to_traverse_list, to_avoid_list, done_list, source_of_link)
"""
if to_traverse_list:
url = to_traverse_list.pop()
if not match_any(url, to_avoid_list):
print("\nSurfing to " + str(url) +
", discovered in " + str(source_of_link[url]))
response = self.client.get(url, follow=True)
if response.status_code == 200:
soup = BeautifulSoup(response.content, "html.parser")
text = soup.get_text()
for link in soup.find_all("a"):
new_link = link.get("href")
if VERBOSE:
print(" Found link: " + str(new_link))
if match_any(new_link, to_avoid_list):
if VERBOSE:
print(" Avoiding it")
elif new_link in done_list:
if VERBOSE:
print(" Already done, ignoring")
elif new_link in to_traverse_list:
if VERBOSE:
print(" Already in to traverse list, ignoring")
else:
if VERBOSE:
print(
" New, unknown link: Storing it to traverse later")
source_of_link[new_link] = url
to_traverse_list.append(new_link)
done_list.append(url)
if VERBOSE:
print("Done")
else:
error_list.append((url, response.status_code))
to_avoid_list.append(url)
if VERBOSE:
print("Diving into next level")
return self.recurse_into_path(to_traverse_list, to_avoid_list, done_list, error_list, source_of_link)
else:
# Nothing to traverse
if VERBOSE:
print("Returning to upper level")
return to_traverse_list, to_avoid_list, done_list, error_list, source_of_link
def match_any(my_string, regexp_list):
if my_string:
combined = "(" + ")|(".join(regexp_list) + ")"
return re.match(combined, my_string)
    else:
        # A missing/empty href counts as a match, so it gets avoided.
        return True
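# For example, match_any("mailto:someone@example.com", ["^mailto:.*"]) builds
# the combined pattern "(^mailto:.*)" and returns a truthy match object, so
# the crawler above skips mailto links.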
class SecuredPageTestCase(TestCase):
def setUp(self):
pm = OpenSUTDProjectManager()
pm.create_project(project_uid="ACAD_00001",
title="OpenSUTD Web Platform",
caption="Sample project 1",
category="ACAD",
url="https://github.com/OpenSUTD/web-platform-prototype",
poster_url="https://via.placeholder.com/150",
featured_image="https://via.placeholder.com/150")
um = OpenSUTDUserManager()
um.create_user("tom", display_name="<NAME>",
display_picture="https://via.placeholder.com/150",
graduation_year=2018, pillar="ISTD")
def test_auth_approval_view(self):
url = reverse("projects:approval")
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_auth_submit_view(self):
url = reverse("projects:submit_new")
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_auth_submit_reject(self):
url = reverse("projects:reject", args=("ACAD_00001",))
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_auth_submit_approve(self):
url = reverse("projects:approve", args=("ACAD_00001",))
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_auth_user_edit(self):
url = reverse("projects:user_edit", args=("tom",))
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_auth_project_edit(self):
url = reverse("projects:project_edit", args=("ACAD_00001",))
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_auth_project_bypass(self):
url = reverse("projects:project_page_bypass", args=("ACAD_00001",))
response = self.client.get(url)
# actually a custom 404 page
self.assertEqual(response.status_code, 200)
class SubmissionFormTest(TestCase):
def setUp(self):
self.client = Client()
um = OpenSUTDUserManager()
um.create_user("tom", display_name="<NAME>",
display_picture="https://via.placeholder.com/150",
graduation_year=2018, pillar="ISTD",
password="<PASSWORD>")
self.client.login(username="tom", password="<PASSWORD>")
def test_submission_form_entry(self):
response = self.client.get(reverse("projects:submit_new"))
self.assertEqual(response.status_code, 200)
# test submission mechanism
form = SubmissionForm({"project_name": "test",
"caption": "test caption",
"category": "ACAD",
"featured_image": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png",
"github_url": "https://github.com/OpenSUTD/web-platform-prototype",
"poster_url": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png"})
self.assertEqual(form.is_valid(), True)
def test_submission_form_entry_invalid(self):
response = self.client.get(reverse("projects:submit_new"))
self.assertEqual(response.status_code, 200)
# test submission mechanism
form = SubmissionForm({"project_name": "",
"caption": "",
"category": "",
"featured_image": "",
"github_url": "",
"poster_url": ""})
self.assertEqual(form.is_valid(), False)
def test_submission_form_entry_not_github(self):
response = self.client.get(reverse("projects:submit_new"))
self.assertEqual(response.status_code, 200)
# test submission mechanism
form = SubmissionForm({"project_name": "test",
"caption": "test caption",
"category": "ACAD",
"featured_image": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png",
"github_url": "https://lolcats.com/OpenSUTD/web-platform-prototype",
"poster_url": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png"})
self.assertEqual(form.is_valid(), False)
class UserProfileFormTest(TestCase):
def setUp(self):
self.client = Client()
um = OpenSUTDUserManager()
um.create_user("tom", display_name="<NAME>",
display_picture="https://via.placeholder.com/150",
graduation_year=2018, pillar="ISTD",
password="<PASSWORD>")
self.client.login(username="tom", password="<PASSWORD>")
def test_submission_form_entry(self):
# test user can actually get to the page
response = self.client.get(
reverse("projects:user_edit", args=("tom",)))
self.assertEqual(response.status_code, 200)
# test submission mechanism
form = UserProfileForm({"display_name": "tom2",
"display_picture": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png",
"graduation_year": 2019,
"pillar": "ISTD",
"bio": "Hi I am Tom",
"contact_email": "<EMAIL>",
"personal_links": "tlkh.design"})
self.assertEqual(form.is_valid(), True)
def test_submission_form_entry_invalid(self):
# test user can actually get to the page
response = self.client.get(
reverse("projects:user_edit", args=("tom",)))
self.assertEqual(response.status_code, 200)
# test submission mechanism
form = UserProfileForm({"display_name": "",
"display_picture": "",
"graduation_year": 2019,
"pillar": "",
"bio": "",
"contact_email": "",
"personal_links": ""})
self.assertEqual(form.is_valid(), False)
class ProjectEditFormTest(TestCase):
def setUp(self):
self.client = Client()
um = OpenSUTDUserManager()
um.create_user("tom", display_name="<NAME>",
display_picture="https://via.placeholder.com/150",
graduation_year=2018, pillar="ISTD",
password="<PASSWORD>")
pm = OpenSUTDProjectManager()
pm.create_project(project_uid="ACAD_00001",
title="OpenSUTD Web Platform",
caption="Sample project 1",
category="ACAD",
url="https://github.com/OpenSUTD/web-platform-prototype",
poster_url="https://via.placeholder.com/150",
featured_image="https://via.placeholder.com/150")
pm.set_project_status("ACAD_00001", "ACCEPT")
self.client.login(username="tom", password="<PASSWORD>")
def test_submission_form_entry_invalid(self):
# test user can actually get to the page
response = self.client.get(
reverse("projects:project_edit", args=("ACAD_00001",)))
self.assertEqual(response.status_code, 200)
# test submission mechanism
form = ProjectEditForm({"title": "",
"caption": "",
"featured_image": "",
"url": "",
"poster_url": ""})
self.assertEqual(form.is_valid(), False)
def test_submission_form_entry(self):
# test user can actually get to the page
response = self.client.get(
reverse("projects:project_edit", args=("ACAD_00001",)))
self.assertEqual(response.status_code, 200)
# test submission mechanism
form = ProjectEditForm({"title": "lalalal",
"caption": "lalalal",
"featured_image": "lalalal.com",
"url": "https://github.com/OpenSUTD/web-platform-prototype",
"poster_url": "lalalal.com"})
self.assertEqual(form.is_valid(), True)
class LogintoSecuredPageTestCase(TestCase):
def setUp(self):
self.client = Client()
um = OpenSUTDUserManager()
um.create_user("tom", display_name="<NAME>",
display_picture="https://via.placeholder.com/150",
graduation_year=2018, pillar="ISTD",
password="<PASSWORD>")
pm = OpenSUTDProjectManager()
pm.create_project(project_uid="ACAD_00001",
title="OpenSUTD Web Platform",
caption="Sample project 1",
category="ACAD",
url="https://github.com/OpenSUTD/web-platform-prototype",
poster_url="https://via.placeholder.com/150",
featured_image="https://via.placeholder.com/150")
self.client.login(username="tom", password="<PASSWORD>")
def test_login_approval_view(self):
response = self.client.get(reverse("projects:approval"))
self.assertEqual(response.status_code, 200)
def test_login_submission_view(self):
response = self.client.get(reverse("projects:submit_new"))
self.assertEqual(response.status_code, 200)
def test_login_user_edit(self):
url = reverse("projects:user_edit", args=("tom",))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_login_project_edit(self):
pm = OpenSUTDProjectManager()
pm.set_project_status("ACAD_00001", "ACCEPT")
url = reverse("projects:project_edit", args=("ACAD_00001",))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
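# Hedged note: the 302-vs-200 pattern exercised by the test_auth_* cases above
# and by this logged-in test case is the standard Django login-protection
# behaviour -- anonymous requests are redirected to the login page while
# authenticated ones reach the view. The sketch below is illustrative only and
# is not the app's actual view code:
#
#     from django.contrib.auth.decorators import login_required
#
#     @login_required
#     def project_edit(request, project_uid):
#         ...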
class UserTestCase(TestCase):
def setUp(self):
um = OpenSUTDUserManager()
um.create_user("tom", display_name="<NAME>",
display_picture="https://via.placeholder.com/150",
graduation_year=2018, pillar="ISTD")
um.create_user("jane", display_name="<NAME>",
display_picture="https://via.placeholder.com/150",
graduation_year=2021, pillar="ESD")
def test_user_get_name(self):
tom = User.objects.get(username="tom")
self.assertEqual(tom.display_name, "<NAME>")
jane = User.objects.get(username="jane")
self.assertEqual(jane.display_name, "<NAME>")
def test_user_get_year(self):
tom = User.objects.get(username="tom")
self.assertEqual(tom.graduation_year, 2018)
jane = User.objects.get(username="jane")
self.assertEqual(jane.graduation_year, 2021)
def test_user_get_pillar(self):
tom = User.objects.get(username="tom")
self.assertEqual(tom.pillar, "ISTD")
jane = User.objects.get(username="jane")
self.assertEqual(jane.pillar, "ESD")
# test user profile page contents
def test_user_page_load(self):
url = reverse("projects:user", args=("tom",))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
url = reverse("projects:user", args=("jane",))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_user_page_not_empty(self):
url = reverse("projects:user", args=("tom",))
response = self.client.get(url)
self.assertGreater(len(response.content), LEN_BASE)
url = reverse("projects:user", args=("jane",))
response = self.client.get(url)
self.assertGreater(len(response.content), LEN_BASE)
def test_user_page_name(self):
url = reverse("projects:user", args=("tom",))
response = str(self.client.get(url).content)
self.assertEqual("<NAME>" in response, True)
url = reverse("projects:user", args=("jane",))
response = str(self.client.get(url).content)
self.assertEqual("<NAME>" in response, True)
def test_user_page_year(self):
url = reverse("projects:user", args=("tom",))
response = str(self.client.get(url).content)
self.assertEqual("2018" in response, True)
url = reverse("projects:user", args=("jane",))
response = str(self.client.get(url).content)
self.assertEqual("2021" in response, True)
def test_user_page_pillar(self):
url = reverse("projects:user", args=("tom",))
response = str(self.client.get(url).content)
self.assertEqual("ISTD" in response, True)
url = reverse("projects:user", args=("jane",))
response = str(self.client.get(url).content)
self.assertEqual("ESD" in response, True)
def test_user_page_performance(self):
start = time.time()
for i in range(10):
url = reverse("projects:user", args=("tom",))
response = self.client.get(url)
url = reverse("projects:user", args=("jane",))
response = self.client.get(url)
duration = time.time() - start
self.assertLess(duration, 1.5)
class ProjectShowcaseTestCase(TestCase):
def setUp(self):
pm = OpenSUTDProjectManager()
pm.create_project(project_uid="ACAD_00001",
title="OpenSUTD Web Platform",
caption="Sample project 1",
category="ACAD",
url="https://github.com/OpenSUTD/web-platform-prototype",
poster_url="https://via.placeholder.com/150",
featured_image="https://via.placeholder.com/150")
um = OpenSUTDUserManager()
um.create_user("tom", display_name="<NAME>",
display_picture="https://via.placeholder.com/150",
graduation_year=2018, pillar="ISTD")
um.create_user("jane", display_name="<NAME>",
display_picture="https://via.placeholder.com/150",
graduation_year=2021, pillar="ESD")
def test_project_properties(self):
proj = Project.objects.get(project_uid="ACAD_00001")
self.assertEqual(proj.title, "OpenSUTD Web Platform")
def test_add_user_project(self):
pm = OpenSUTDProjectManager()
pm.add_user_to_project("ACAD_00001", "tom")
proj = Project.objects.get(project_uid="ACAD_00001")
self.assertEqual(len(proj.users.all()), 1)
pm.add_user_to_project("ACAD_00001", "jane")
self.assertEqual(len(proj.users.all()), 2)
def test_add_tag_project(self):
pm = | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'ApplicationCSVMappingParametersArgs',
'ApplicationCatalogConfigurationArgs',
'ApplicationCheckpointConfigurationArgs',
'ApplicationCloudWatchLoggingOptionCloudWatchLoggingOptionArgs',
'ApplicationCodeConfigurationArgs',
'ApplicationCodeContentArgs',
'ApplicationConfigurationArgs',
'ApplicationCustomArtifactConfigurationArgs',
'ApplicationDeployAsApplicationConfigurationArgs',
'ApplicationEnvironmentPropertiesArgs',
'ApplicationFlinkApplicationConfigurationArgs',
'ApplicationGlueDataCatalogConfigurationArgs',
'ApplicationInputLambdaProcessorArgs',
'ApplicationInputParallelismArgs',
'ApplicationInputProcessingConfigurationArgs',
'ApplicationInputSchemaArgs',
'ApplicationInputArgs',
'ApplicationJSONMappingParametersArgs',
'ApplicationKinesisFirehoseInputArgs',
'ApplicationKinesisStreamsInputArgs',
'ApplicationMappingParametersArgs',
'ApplicationMavenReferenceArgs',
'ApplicationMonitoringConfigurationArgs',
'ApplicationOutputResourceDestinationSchemaArgs',
'ApplicationOutputResourceKinesisFirehoseOutputArgs',
'ApplicationOutputResourceKinesisStreamsOutputArgs',
'ApplicationOutputResourceLambdaOutputArgs',
'ApplicationOutputResourceOutputArgs',
'ApplicationParallelismConfigurationArgs',
'ApplicationPropertyGroupArgs',
'ApplicationRecordColumnArgs',
'ApplicationRecordFormatArgs',
'ApplicationReferenceDataSourceCSVMappingParametersArgs',
'ApplicationReferenceDataSourceJSONMappingParametersArgs',
'ApplicationReferenceDataSourceMappingParametersArgs',
'ApplicationReferenceDataSourceRecordColumnArgs',
'ApplicationReferenceDataSourceRecordFormatArgs',
'ApplicationReferenceDataSourceReferenceDataSourceArgs',
'ApplicationReferenceDataSourceReferenceSchemaArgs',
'ApplicationReferenceDataSourceS3ReferenceDataSourceArgs',
'ApplicationS3ContentBaseLocationArgs',
'ApplicationS3ContentLocationArgs',
'ApplicationSnapshotConfigurationArgs',
'ApplicationSqlApplicationConfigurationArgs',
'ApplicationTagArgs',
'ApplicationZeppelinApplicationConfigurationArgs',
'ApplicationZeppelinMonitoringConfigurationArgs',
]
@pulumi.input_type
class ApplicationCSVMappingParametersArgs:
def __init__(__self__, *,
record_column_delimiter: pulumi.Input[str],
record_row_delimiter: pulumi.Input[str]):
pulumi.set(__self__, "record_column_delimiter", record_column_delimiter)
pulumi.set(__self__, "record_row_delimiter", record_row_delimiter)
@property
@pulumi.getter(name="recordColumnDelimiter")
def record_column_delimiter(self) -> pulumi.Input[str]:
return pulumi.get(self, "record_column_delimiter")
@record_column_delimiter.setter
def record_column_delimiter(self, value: pulumi.Input[str]):
pulumi.set(self, "record_column_delimiter", value)
@property
@pulumi.getter(name="recordRowDelimiter")
def record_row_delimiter(self) -> pulumi.Input[str]:
return pulumi.get(self, "record_row_delimiter")
@record_row_delimiter.setter
def record_row_delimiter(self, value: pulumi.Input[str]):
pulumi.set(self, "record_row_delimiter", value)
@pulumi.input_type
class ApplicationCatalogConfigurationArgs:
def __init__(__self__, *,
glue_data_catalog_configuration: Optional[pulumi.Input['ApplicationGlueDataCatalogConfigurationArgs']] = None):
if glue_data_catalog_configuration is not None:
pulumi.set(__self__, "glue_data_catalog_configuration", glue_data_catalog_configuration)
@property
@pulumi.getter(name="glueDataCatalogConfiguration")
def glue_data_catalog_configuration(self) -> Optional[pulumi.Input['ApplicationGlueDataCatalogConfigurationArgs']]:
return pulumi.get(self, "glue_data_catalog_configuration")
@glue_data_catalog_configuration.setter
def glue_data_catalog_configuration(self, value: Optional[pulumi.Input['ApplicationGlueDataCatalogConfigurationArgs']]):
pulumi.set(self, "glue_data_catalog_configuration", value)
@pulumi.input_type
class ApplicationCheckpointConfigurationArgs:
def __init__(__self__, *,
configuration_type: pulumi.Input[str],
checkpoint_interval: Optional[pulumi.Input[int]] = None,
checkpointing_enabled: Optional[pulumi.Input[bool]] = None,
min_pause_between_checkpoints: Optional[pulumi.Input[int]] = None):
pulumi.set(__self__, "configuration_type", configuration_type)
if checkpoint_interval is not None:
pulumi.set(__self__, "checkpoint_interval", checkpoint_interval)
if checkpointing_enabled is not None:
pulumi.set(__self__, "checkpointing_enabled", checkpointing_enabled)
if min_pause_between_checkpoints is not None:
pulumi.set(__self__, "min_pause_between_checkpoints", min_pause_between_checkpoints)
@property
@pulumi.getter(name="configurationType")
def configuration_type(self) -> pulumi.Input[str]:
return pulumi.get(self, "configuration_type")
@configuration_type.setter
def configuration_type(self, value: pulumi.Input[str]):
pulumi.set(self, "configuration_type", value)
@property
@pulumi.getter(name="checkpointInterval")
def checkpoint_interval(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "checkpoint_interval")
@checkpoint_interval.setter
def checkpoint_interval(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "checkpoint_interval", value)
@property
@pulumi.getter(name="checkpointingEnabled")
def checkpointing_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "checkpointing_enabled")
@checkpointing_enabled.setter
def checkpointing_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "checkpointing_enabled", value)
@property
@pulumi.getter(name="minPauseBetweenCheckpoints")
def min_pause_between_checkpoints(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "min_pause_between_checkpoints")
@min_pause_between_checkpoints.setter
def min_pause_between_checkpoints(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_pause_between_checkpoints", value)
@pulumi.input_type
class ApplicationCloudWatchLoggingOptionCloudWatchLoggingOptionArgs:
def __init__(__self__, *,
log_stream_arn: pulumi.Input[str]):
pulumi.set(__self__, "log_stream_arn", log_stream_arn)
@property
@pulumi.getter(name="logStreamARN")
def log_stream_arn(self) -> pulumi.Input[str]:
return pulumi.get(self, "log_stream_arn")
@log_stream_arn.setter
def log_stream_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "log_stream_arn", value)
@pulumi.input_type
class ApplicationCodeConfigurationArgs:
def __init__(__self__, *,
code_content: pulumi.Input['ApplicationCodeContentArgs'],
code_content_type: pulumi.Input[str]):
pulumi.set(__self__, "code_content", code_content)
pulumi.set(__self__, "code_content_type", code_content_type)
@property
@pulumi.getter(name="codeContent")
def code_content(self) -> pulumi.Input['ApplicationCodeContentArgs']:
return pulumi.get(self, "code_content")
@code_content.setter
def code_content(self, value: pulumi.Input['ApplicationCodeContentArgs']):
pulumi.set(self, "code_content", value)
@property
@pulumi.getter(name="codeContentType")
def code_content_type(self) -> pulumi.Input[str]:
return pulumi.get(self, "code_content_type")
@code_content_type.setter
def code_content_type(self, value: pulumi.Input[str]):
pulumi.set(self, "code_content_type", value)
@pulumi.input_type
class ApplicationCodeContentArgs:
def __init__(__self__, *,
s3_content_location: Optional[pulumi.Input['ApplicationS3ContentLocationArgs']] = None,
text_content: Optional[pulumi.Input[str]] = None,
zip_file_content: Optional[pulumi.Input[str]] = None):
if s3_content_location is not None:
pulumi.set(__self__, "s3_content_location", s3_content_location)
if text_content is not None:
pulumi.set(__self__, "text_content", text_content)
if zip_file_content is not None:
pulumi.set(__self__, "zip_file_content", zip_file_content)
@property
@pulumi.getter(name="s3ContentLocation")
def s3_content_location(self) -> Optional[pulumi.Input['ApplicationS3ContentLocationArgs']]:
return pulumi.get(self, "s3_content_location")
@s3_content_location.setter
def s3_content_location(self, value: Optional[pulumi.Input['ApplicationS3ContentLocationArgs']]):
pulumi.set(self, "s3_content_location", value)
@property
@pulumi.getter(name="textContent")
def text_content(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "text_content")
@text_content.setter
def text_content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "text_content", value)
@property
@pulumi.getter(name="zipFileContent")
def zip_file_content(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "zip_file_content")
@zip_file_content.setter
def zip_file_content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "zip_file_content", value)
@pulumi.input_type
class ApplicationConfigurationArgs:
def __init__(__self__, *,
application_code_configuration: Optional[pulumi.Input['ApplicationCodeConfigurationArgs']] = None,
application_snapshot_configuration: Optional[pulumi.Input['ApplicationSnapshotConfigurationArgs']] = None,
environment_properties: Optional[pulumi.Input['ApplicationEnvironmentPropertiesArgs']] = None,
flink_application_configuration: Optional[pulumi.Input['ApplicationFlinkApplicationConfigurationArgs']] = None,
sql_application_configuration: Optional[pulumi.Input['ApplicationSqlApplicationConfigurationArgs']] = None,
zeppelin_application_configuration: Optional[pulumi.Input['ApplicationZeppelinApplicationConfigurationArgs']] = None):
if application_code_configuration is not None:
pulumi.set(__self__, "application_code_configuration", application_code_configuration)
if application_snapshot_configuration is not None:
pulumi.set(__self__, "application_snapshot_configuration", application_snapshot_configuration)
if environment_properties is not None:
pulumi.set(__self__, "environment_properties", environment_properties)
if flink_application_configuration is not None:
pulumi.set(__self__, "flink_application_configuration", flink_application_configuration)
if sql_application_configuration is not None:
pulumi.set(__self__, "sql_application_configuration", sql_application_configuration)
if zeppelin_application_configuration is not None:
pulumi.set(__self__, "zeppelin_application_configuration", zeppelin_application_configuration)
@property
@pulumi.getter(name="applicationCodeConfiguration")
def application_code_configuration(self) -> Optional[pulumi.Input['ApplicationCodeConfigurationArgs']]:
return pulumi.get(self, "application_code_configuration")
@application_code_configuration.setter
def application_code_configuration(self, value: Optional[pulumi.Input['ApplicationCodeConfigurationArgs']]):
pulumi.set(self, "application_code_configuration", value)
@property
@pulumi.getter(name="applicationSnapshotConfiguration")
def application_snapshot_configuration(self) -> Optional[pulumi.Input['ApplicationSnapshotConfigurationArgs']]:
return pulumi.get(self, "application_snapshot_configuration")
@application_snapshot_configuration.setter
def application_snapshot_configuration(self, value: Optional[pulumi.Input['ApplicationSnapshotConfigurationArgs']]):
pulumi.set(self, "application_snapshot_configuration", value)
@property
@pulumi.getter(name="environmentProperties")
def environment_properties(self) -> Optional[pulumi.Input['ApplicationEnvironmentPropertiesArgs']]:
return pulumi.get(self, "environment_properties")
@environment_properties.setter
def environment_properties(self, value: Optional[pulumi.Input['ApplicationEnvironmentPropertiesArgs']]):
pulumi.set(self, "environment_properties", value)
@property
@pulumi.getter(name="flinkApplicationConfiguration")
def flink_application_configuration(self) -> Optional[pulumi.Input['ApplicationFlinkApplicationConfigurationArgs']]:
return pulumi.get(self, "flink_application_configuration")
@flink_application_configuration.setter
def flink_application_configuration(self, value: Optional[pulumi.Input['ApplicationFlinkApplicationConfigurationArgs']]):
pulumi.set(self, "flink_application_configuration", value)
@property
@pulumi.getter(name="sqlApplicationConfiguration")
def sql_application_configuration(self) -> Optional[pulumi.Input['ApplicationSqlApplicationConfigurationArgs']]:
return pulumi.get(self, "sql_application_configuration")
@sql_application_configuration.setter
def sql_application_configuration(self, value: Optional[pulumi.Input['ApplicationSqlApplicationConfigurationArgs']]):
pulumi.set(self, "sql_application_configuration", value)
@property
@pulumi.getter(name="zeppelinApplicationConfiguration")
def zeppelin_application_configuration(self) -> Optional[pulumi.Input['ApplicationZeppelinApplicationConfigurationArgs']]:
return pulumi.get(self, "zeppelin_application_configuration")
@zeppelin_application_configuration.setter
def zeppelin_application_configuration(self, value: Optional[pulumi.Input['ApplicationZeppelinApplicationConfigurationArgs']]):
pulumi.set(self, "zeppelin_application_configuration", value)
@pulumi.input_type
class ApplicationCustomArtifactConfigurationArgs:
def __init__(__self__, *,
artifact_type: pulumi.Input[str],
maven_reference: Optional[pulumi.Input['ApplicationMavenReferenceArgs']] = None,
s3_content_location: Optional[pulumi.Input['ApplicationS3ContentLocationArgs']] = None):
pulumi.set(__self__, "artifact_type", artifact_type)
if maven_reference is not None:
pulumi.set(__self__, "maven_reference", maven_reference)
if s3_content_location is not None:
pulumi.set(__self__, "s3_content_location", s3_content_location)
@property
@pulumi.getter(name="artifactType")
def artifact_type(self) -> pulumi.Input[str]:
return pulumi.get(self, "artifact_type")
@artifact_type.setter
def artifact_type(self, value: pulumi.Input[str]):
pulumi.set(self, "artifact_type", value)
@property
@pulumi.getter(name="mavenReference")
def maven_reference(self) -> Optional[pulumi.Input['ApplicationMavenReferenceArgs']]:
return pulumi.get(self, "maven_reference")
@maven_reference.setter
def maven_reference(self, value: Optional[pulumi.Input['ApplicationMavenReferenceArgs']]):
pulumi.set(self, "maven_reference", value)
@property
@pulumi.getter(name="s3ContentLocation")
def s3_content_location(self) -> Optional[pulumi.Input['ApplicationS3ContentLocationArgs']]:
return pulumi.get(self, "s3_content_location")
@s3_content_location.setter
def s3_content_location(self, value: Optional[pulumi.Input['ApplicationS3ContentLocationArgs']]):
pulumi.set(self, "s3_content_location", value)
@pulumi.input_type
class ApplicationDeployAsApplicationConfigurationArgs:
def __init__(__self__, *,
s3_content_location: pulumi.Input['ApplicationS3ContentBaseLocationArgs']):
pulumi.set(__self__, "s3_content_location", s3_content_location)
@property
@pulumi.getter(name="s3ContentLocation")
def s3_content_location(self) -> pulumi.Input['ApplicationS3ContentBaseLocationArgs']:
return pulumi.get(self, "s3_content_location")
@s3_content_location.setter
def s3_content_location(self, value: pulumi.Input['ApplicationS3ContentBaseLocationArgs']):
pulumi.set(self, "s3_content_location", value)
@pulumi.input_type
class ApplicationEnvironmentPropertiesArgs:
def __init__(__self__, *,
property_groups: Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationPropertyGroupArgs']]]] = None):
if property_groups is not None:
pulumi.set(__self__, "property_groups", property_groups)
@property
@pulumi.getter(name="propertyGroups")
def property_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationPropertyGroupArgs']]]]:
return pulumi.get(self, "property_groups")
@property_groups.setter
def property_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationPropertyGroupArgs']]]]):
pulumi.set(self, "property_groups", value)
@pulumi.input_type
class ApplicationFlinkApplicationConfigurationArgs:
def __init__(__self__, *,
checkpoint_configuration: Optional[pulumi.Input['ApplicationCheckpointConfigurationArgs']] = None,
monitoring_configuration: Optional[pulumi.Input['ApplicationMonitoringConfigurationArgs']] = None,
parallelism_configuration: Optional[pulumi.Input['ApplicationParallelismConfigurationArgs']] = None):
if checkpoint_configuration is not None:
pulumi.set(__self__, "checkpoint_configuration", checkpoint_configuration)
if monitoring_configuration is not None:
pulumi.set(__self__, "monitoring_configuration", monitoring_configuration)
if parallelism_configuration is not None:
pulumi.set(__self__, "parallelism_configuration", parallelism_configuration)
@property
@pulumi.getter(name="checkpointConfiguration")
def checkpoint_configuration(self) -> Optional[pulumi.Input['ApplicationCheckpointConfigurationArgs']]:
return pulumi.get(self, "checkpoint_configuration")
@checkpoint_configuration.setter
def checkpoint_configuration(self, value: Optional[pulumi.Input['ApplicationCheckpointConfigurationArgs']]):
pulumi.set(self, "checkpoint_configuration", value)
@property
@pulumi.getter(name="monitoringConfiguration")
def monitoring_configuration(self) -> Optional[pulumi.Input['ApplicationMonitoringConfigurationArgs']]:
return pulumi.get(self, "monitoring_configuration")
@monitoring_configuration.setter
def monitoring_configuration(self, value: Optional[pulumi.Input['ApplicationMonitoringConfigurationArgs']]):
pulumi.set(self, "monitoring_configuration", value)
@property
@pulumi.getter(name="parallelismConfiguration")
def parallelism_configuration(self) -> Optional[pulumi.Input['ApplicationParallelismConfigurationArgs']]:
return pulumi.get(self, "parallelism_configuration")
@parallelism_configuration.setter
def parallelism_configuration(self, value: Optional[pulumi.Input['ApplicationParallelismConfigurationArgs']]):
pulumi.set(self, "parallelism_configuration", value)
@pulumi.input_type
class ApplicationGlueDataCatalogConfigurationArgs:
def __init__(__self__, *,
database_arn: Optional[pulumi.Input[str]] = None):
if database_arn is not None:
pulumi.set(__self__, "database_arn", database_arn)
@property
@pulumi.getter(name="databaseARN")
def database_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "database_arn")
@database_arn.setter
def database_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "database_arn", value)
@pulumi.input_type
class ApplicationInputLambdaProcessorArgs:
def __init__(__self__, *,
resource_arn: pulumi.Input[str]):
pulumi.set(__self__, "resource_arn", resource_arn)
@property
@pulumi.getter(name="resourceARN")
def resource_arn(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
@pulumi.input_type
class ApplicationInputParallelismArgs:
def __init__(__self__, *,
count: Optional[pulumi.Input[int]] = None):
if count is not None:
pulumi.set(__self__, "count", count)
@property
@pulumi.getter
def count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "count")
@count.setter
def count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "count", value)
@pulumi.input_type
class ApplicationInputProcessingConfigurationArgs:
def __init__(__self__, *,
input_lambda_processor: Optional[pulumi.Input['ApplicationInputLambdaProcessorArgs']] = None):
if input_lambda_processor is not None:
pulumi.set(__self__, "input_lambda_processor", input_lambda_processor)
@property
@pulumi.getter(name="inputLambdaProcessor")
def input_lambda_processor(self) -> Optional[pulumi.Input['ApplicationInputLambdaProcessorArgs']]:
return pulumi.get(self, "input_lambda_processor")
@input_lambda_processor.setter
def input_lambda_processor(self, value: Optional[pulumi.Input['ApplicationInputLambdaProcessorArgs']]):
pulumi.set(self, "input_lambda_processor", value)
@pulumi.input_type
class ApplicationInputSchemaArgs:
def __init__(__self__, *,
record_columns: pulumi.Input[Sequence[pulumi.Input['ApplicationRecordColumnArgs']]],
record_format: pulumi.Input['ApplicationRecordFormatArgs'],
record_encoding: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "record_columns", record_columns)
pulumi.set(__self__, "record_format", record_format)
if record_encoding is not None:
pulumi.set(__self__, "record_encoding", record_encoding)
@property
@pulumi.getter(name="recordColumns")
def record_columns(self) -> pulumi.Input[Sequence[pulumi.Input['ApplicationRecordColumnArgs']]]:
return pulumi.get(self, "record_columns")
@record_columns.setter
def record_columns(self, value: pulumi.Input[Sequence[pulumi.Input['ApplicationRecordColumnArgs']]]):
pulumi.set(self, "record_columns", value)
@property
@pulumi.getter(name="recordFormat")
def record_format(self) -> pulumi.Input['ApplicationRecordFormatArgs']:
return pulumi.get(self, "record_format")
@record_format.setter
def record_format(self, value: pulumi.Input['ApplicationRecordFormatArgs']):
pulumi.set(self, "record_format", value)
@property
@pulumi.getter(name="recordEncoding")
def record_encoding(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "record_encoding")
@record_encoding.setter
def record_encoding(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "record_encoding", value)
@pulumi.input_type
class ApplicationInputArgs:
def __init__(__self__, *,
input_schema: pulumi.Input['ApplicationInputSchemaArgs'],
name_prefix: pulumi.Input[str],
input_parallelism: Optional[pulumi.Input['ApplicationInputParallelismArgs']] = None,
input_processing_configuration: Optional[pulumi.Input['ApplicationInputProcessingConfigurationArgs']] = None,
kinesis_firehose_input: Optional[pulumi.Input['ApplicationKinesisFirehoseInputArgs']] = None,
kinesis_streams_input: Optional[pulumi.Input['ApplicationKinesisStreamsInputArgs']] = None):
pulumi.set(__self__, "input_schema", input_schema)
pulumi.set(__self__, "name_prefix", name_prefix)
if input_parallelism is not None:
pulumi.set(__self__, "input_parallelism", input_parallelism)
if input_processing_configuration is not None:
pulumi.set(__self__, "input_processing_configuration", input_processing_configuration)
if kinesis_firehose_input is not None:
pulumi.set(__self__, "kinesis_firehose_input", kinesis_firehose_input)
if kinesis_streams_input is not None:
pulumi.set(__self__, "kinesis_streams_input", kinesis_streams_input)
@property
@pulumi.getter(name="inputSchema")
def input_schema(self) -> pulumi.Input['ApplicationInputSchemaArgs']:
return pulumi.get(self, "input_schema")
@input_schema.setter
def input_schema(self, value: pulumi.Input['ApplicationInputSchemaArgs']):
pulumi.set(self, "input_schema", value)
@property
@pulumi.getter(name="namePrefix")
def name_prefix(self) -> pulumi.Input[str]:
return pulumi.get(self, "name_prefix")
@name_prefix.setter
def name_prefix(self, value: pulumi.Input[str]):
pulumi.set(self, "name_prefix", value)
@property
@pulumi.getter(name="inputParallelism")
def input_parallelism(self) -> Optional[pulumi.Input['ApplicationInputParallelismArgs']]:
return pulumi.get(self, "input_parallelism")
@input_parallelism.setter
def input_parallelism(self, value: Optional[pulumi.Input['ApplicationInputParallelismArgs']]):
pulumi.set(self, "input_parallelism", value)
@property
@pulumi.getter(name="inputProcessingConfiguration")
def input_processing_configuration(self) -> Optional[pulumi.Input['ApplicationInputProcessingConfigurationArgs']]:
return pulumi.get(self, "input_processing_configuration")
@input_processing_configuration.setter
def input_processing_configuration(self, value: Optional[pulumi.Input['ApplicationInputProcessingConfigurationArgs']]):
pulumi.set(self, "input_processing_configuration", value)
@property
@pulumi.getter(name="kinesisFirehoseInput")
def kinesis_firehose_input(self) -> Optional[pulumi.Input['ApplicationKinesisFirehoseInputArgs']]:
return pulumi.get(self, "kinesis_firehose_input")
@kinesis_firehose_input.setter
def kinesis_firehose_input(self, value: Optional[pulumi.Input['ApplicationKinesisFirehoseInputArgs']]):
pulumi.set(self, "kinesis_firehose_input", value)
@property
@pulumi.getter(name="kinesisStreamsInput")
def kinesis_streams_input(self) -> Optional[pulumi.Input['ApplicationKinesisStreamsInputArgs']]:
return pulumi.get(self, "kinesis_streams_input")
@kinesis_streams_input.setter
def kinesis_streams_input(self, value: Optional[pulumi.Input['ApplicationKinesisStreamsInputArgs']]):
pulumi.set(self, "kinesis_streams_input", value)
@pulumi.input_type
class ApplicationJSONMappingParametersArgs:
def __init__(__self__, *,
record_row_path: pulumi.Input[str]):
pulumi.set(__self__, "record_row_path", record_row_path)
@property
@pulumi.getter(name="recordRowPath")
def record_row_path(self) -> pulumi.Input[str]:
return pulumi.get(self, "record_row_path")
@record_row_path.setter
def record_row_path(self, value: pulumi.Input[str]):
pulumi.set(self, "record_row_path", value)
@pulumi.input_type
class ApplicationKinesisFirehoseInputArgs:
def __init__(__self__, *,
resource_arn: pulumi.Input[str]):
pulumi.set(__self__, "resource_arn", resource_arn)
@property
@pulumi.getter(name="resourceARN")
def resource_arn(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
from __future__ import division
import logging
import sys
import os
import math
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import xml.etree.ElementTree as ET
from collections import OrderedDict
import numpy as NP
from scipy.constants import mu_0
from scipy.io import savemat
from scipy.interpolate import CubicSpline
from ..signal.spectrum import nextpow2
from ..mag.iaga2002 import parse
from ..util.nan import nan_interp
from ..util.date import toJ2000
def calc_e_3d(Bx,
By,
Zxx_function,
Zxy_function,
Zyx_function,
Zyy_function,
interval):
"""
Calculate the electric field induced by the magnetic field (*Bx*,
eastward oriented magnetic field intensity, and *By*, northward
oriented magnetic field intensity). *Zxx_function*,
*Zxy_function*, *Zyx_function*, and *Zyy_function* are the
surface impedance functions (input is angular frequency [rad] and
output is impedance [Ohms]). Return the tuple *Ex*, *Ey* (same
orientation as the magnetic field).
Units:
(input) Bx, By: [T]
(input) Zxx_function: [rad] -> [Ohm]
(input) Zxy_function: [rad] -> [Ohm]
(input) Zyx_function: [rad] -> [Ohm]
(input) Zyy_function: [rad] -> [Ohm]
(output) Ex, Ey: [V/m]
"""
# This function is the reimplementation of Prof. Zhu's matlab code
# which is based on the algorithm detailed in the NERC Application
# Guide, Dec. 2013. Note that the NERC Guide contains a sign error
# that has been corrected in the code below.
# function global variables
lp = 60 # length at beginning and end of time series to fit
# detrend parameters
p = 60 # length at beginning and end of time series for
# frequency domain window
# get the FFT size
assert len(Bx) == len(By)
N = len(Bx)
Nfft = nextpow2(N)
# zero-mean and remove linear trend from the magnetic time series
# data
ax = NP.mean(Bx[:lp])
bx = NP.mean(Bx[-lp-1:])
N_range = NP.arange(1, N+1)
cx = ax * (N - N_range) / N + bx * N_range / N
Bx0 = Bx - cx
ay = NP.mean(By[:lp])
by = NP.mean(By[-lp-1:])
cy = ay * (N - N_range) / N + by * N_range / N
By0 = By - cy
# window the magnetic data time series
wp = NP.hstack((0.5 * (1 - NP.cos(2 * math.pi * NP.arange(0, p/2) / p)),
NP.ones(N - p),
0.5 * (1 - NP.cos(2 * math.pi * NP.arange(p/2 - 1, -1, -1) / p))))
Bx1 = Bx0 * wp
By1 = By0 * wp
# compute FFT
Sbx = NP.fft.fft(Bx1, n=Nfft) / N
Sby = NP.fft.fft(By1, n=Nfft) / N
freq = NP.arange(0, Nfft) / Nfft / interval
omega = 2 * math.pi * freq
Zxx_positive = Zxx_function(omega[1:])
Zxx = NP.hstack(([0], Zxx_positive))
Zxx2 = NP.hstack((Zxx[:(int(Nfft/2)+1)],
NP.conj(Zxx[1:int(Nfft/2)])[::-1]))
Zxy_positive = Zxy_function(omega[1:])
Zxy = NP.hstack(([0], Zxy_positive))
Zxy2 = NP.hstack((Zxy[:(int(Nfft/2)+1)],
NP.conj(Zxy[1:int(Nfft/2)])[::-1]))
Zyx_positive = Zyx_function(omega[1:])
Zyx = NP.hstack(([0], Zyx_positive))
Zyx2 = NP.hstack((Zyx[:(int(Nfft/2)+1)],
NP.conj(Zyx[1:int(Nfft/2)])[::-1]))
Zyy_positive = Zyy_function(omega[1:])
Zyy = NP.hstack(([0], Zyy_positive))
Zyy2 = NP.hstack((Zyy[:(int(Nfft/2)+1)],
NP.conj(Zyy[1:int(Nfft/2)])[::-1]))
Se_x = []
Se_y = []
for Zxx_i, Zxy_i, Zyx_i, Zyy_i, Sbx_i, Sby_i in zip(Zxx2, Zxy2, Zyx2, Zyy2, Sbx, Sby):
Z = NP.array([[Zxx_i, Zxy_i], [Zyx_i, Zyy_i]])
Sb_i = NP.array([[Sbx_i / mu_0], [Sby_i / mu_0]])
Se_i = NP.dot(Z, Sb_i)
Se_x.append(Se_i[0, 0])
Se_y.append(Se_i[1, 0])
Ex = NP.real(NP.fft.ifft(Se_x, Nfft) * N)
Ey = NP.real(NP.fft.ifft(Se_y, Nfft) * N)
return (Ex[:N],
Ey[:N])
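# Hedged demo, not part of the original module: drive calc_e_3d with synthetic
# one-minute data and constant impedance functions. The 0.01 Ohm off-diagonal
# impedance and the sinusoidal inputs are arbitrary illustration values.
def _demo_calc_e_3d():
    t = NP.arange(0, 86400, 60, dtype=float)                         # one day at 60 s cadence
    Bx = 50e-9 * NP.sin(2 * math.pi * t / 3600.0)                    # [T]
    By = 20e-9 * NP.cos(2 * math.pi * t / 7200.0)                    # [T]
    zero = lambda omega: NP.zeros(len(omega), dtype=complex)         # [Ohm]
    const = lambda omega: NP.full(len(omega), 0.01, dtype=complex)   # [Ohm]
    return calc_e_3d(Bx, By, zero, const, const, zero, 60)           # -> (Ex, Ey) in [V/m]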
def parse_xml(xml_fname):
"""
Parse the E-M transfer function file *xml_fname* returning a
mapping between period keys (in [s]) and the 2x2 matrices
containing the associated Zxx, Zxy, Zyx, and Zyy parameters (in
[mV / km] / [nT]).
These files are available at http://ds.iris.edu/spud/emtf.
"""
tree = ET.parse(xml_fname)
root = tree.getroot()
data_list = root.findall('Data')
assert len(data_list) == 1
data = data_list[0]
Z_map = OrderedDict()
for period in data.findall('Period'):
Z_list = period.findall('Z')
assert len(Z_list) == 1
Z = Z_list[0]
values = []
for value, name in zip(Z, ['Zxx', 'Zxy', 'Zyx', 'Zyy']):
if value.attrib['name'] != name and value.attrib['name'] != name.upper():
raise ValueError('name mismatch ({} != {})'.format(value.attrib['name'], name))
values.append(complex(*map(float, value.text.split())))
Z_array = NP.array(values)
Z_array.shape = 2, 2
Z_map[float(period.attrib['value'])] = Z_array
return Z_map
def parse_xml_header(xml_fname):
"""
Parse the E-M transfer function file *xml_fname* returning a
mapping of site specific information, such as location and data
quality.
"""
header_map = {}
tree = ET.parse(xml_fname)
root = tree.getroot()
copyright_list = root.findall('Copyright')
assert len(copyright_list) == 1
copyright_ = copyright_list[0]
for copyright_node in copyright_:
if copyright_node.tag == 'Acknowledgement':
header_map['acknowledgement'] = copyright_node.text
site_list = root.findall('Site')
assert len(site_list) == 1
site = site_list[0]
for site_node in site:
if site_node.tag == 'Id':
header_map['id'] = site_node.text
elif site_node.tag == 'Location':
for child in site_node:
if child.tag == 'Latitude':
header_map['lat'] = float(child.text)
elif child.tag == 'Longitude':
header_map['lon'] = float(child.text)
elif child.tag == 'Elevation':
assert child.attrib['units'] == 'meters'
header_map['el'] = float(child.text)
elif site_node.tag == 'DataQualityNotes':
for child in site_node:
if child.tag == 'Rating':
header_map['rating'] = int(child.text)
elif child.tag == 'GoodFromPeriod':
header_map['good_from'] = float(child.text)
try:
header_map['good_to_mHz'] = 1/float(child.text) * 1e3
except ZeroDivisionError:
header_map['good_to_mHz'] = float('inf')
elif child.tag == 'GoodToPeriod':
header_map['good_to'] = float(child.text)
try:
header_map['good_from_mHz'] = 1/float(child.text) * 1e3
except ZeroDivisionError:
header_map['good_from_mHz'] = float('inf')
elif child.tag == 'Comments':
header_map['data_quality_comments'] = child.text
elif site_node.tag == 'DataQualityWarnings':
for child in site_node:
if child.tag == 'Flag':
header_map['data_quality_flag'] = int(child.text)
elif child.tag == 'Comments':
header_map['data_quality_warning_comments'] = child.text
elif site_node.tag == 'Acknowledgment':
header_map['acknowledgment'] = site_node.text
return header_map
class Zw_interpolator(object):
def __init__(self, Z_map, extrapolate0=False):
"""
Construct a cubic-spline 3-D E-M transfer function interpolator
using the information in *Z_map* returned from
:func:`parse_xml` as the function samples. If *extrapolate0*,
then 0s are inserted in the transfer function response where
extrapolation would occur (this happens when transfer function
response is requested at frequencies outside the range
provided in the .XML file record).
"""
self.Z_map = Z_map
periods = Z_map.keys()
self.f = NP.array([1/x for x in periods[::-1]])
self.omega = 2 * math.pi * self.f
self.Zxx_interp = CubicSpline(self.omega, [x[0, 0] for x in Z_map.values()[::-1]],
extrapolate=False)
self.Zxy_interp = CubicSpline(self.omega, [x[0, 1] for x in Z_map.values()[::-1]],
extrapolate=False)
self.Zyx_interp = CubicSpline(self.omega, [x[1, 0] for x in Z_map.values()[::-1]],
extrapolate=False)
self.Zyy_interp = CubicSpline(self.omega, [x[1, 1] for x in Z_map.values()[::-1]],
extrapolate=False)
self.key_map = {'xx': self.Zxx_interp,
'xy': self.Zxy_interp,
'yx': self.Zyx_interp,
'yy': self.Zyy_interp}
self.extrapolate0 = extrapolate0
def __call__(self, omega, key):
"""
Return the interpolated value of the transfer function (*key* is
xx, xy, yx, or yy) at angular frequency *omega*. The units of
*omega* and the output are the same as *Z_map* ([rad] and
[mV/km]/[nT] by default).
"""
if self.extrapolate0:
y = NP.zeros_like(omega, dtype=NP.complex128)
I = NP.where((omega >= self.omega[0]) & (omega <= self.omega[-1]))
y[I] = self.key_map[key](NP.asarray(omega)[I])
return y
return self.key_map[key](omega)
def apply_transfer_function(Bx, By, interval, xml_fname, extrapolate0=False):
"""
Filter *Bx* and *By* (in [T]) with 3-D transfer function where
*interval* is the sample period (in [s]). Uses the 3-D transfer
function model given in *xml_fname*. Return Ex and Ey (in
[V/m]). If *extrapolate0*, use 0s in the transfer function
response at frequencies outside the range provided in *xml_fname*.
"""
# setup surface impedance function
Z_map = parse_xml(xml_fname)
interp = Zw_interpolator(Z_map, extrapolate0=extrapolate0)
# mu_0 * 1e3 converts from [mv / km] / [nT] to [Ohm]
Zxx_function = lambda omega: interp(omega, 'xx') * mu_0 * 1e3
Zxy_function = lambda omega: interp(omega, 'xy') * mu_0 * 1e3
Zyx_function = lambda omega: interp(omega, 'yx') * mu_0 * 1e3
Zyy_function = lambda omega: interp(omega, 'yy') * mu_0 * 1e3
# calculate E field
Ex, Ey = calc_e_3d(nan_interp(Bx),
nan_interp(By),
Zxx_function,
Zxy_function,
Zyx_function,
Zyy_function,
interval)
return Ex, Ey
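# Hedged usage sketch: the file name and cadence below are placeholders, not
# values used by this module.
#
#     # Bx, By: 1-second magnetometer data in [T]
#     Ex, Ey = apply_transfer_function(Bx, By, 1.0, "site_emtf.xml", extrapolate0=True)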
def process(output_mat_fname,
input_iaga2002_fname,
xml_fname,
save_B=False):
"""
End-to-end processing of an IAGA2002 magnetometer data record
*input_iaga2002_fname* to the output file *output_mat_fname*
containing the calculated E-field (units are [V/m]). Use the 3-D
transfer function model given in *xml_fname.* Also save the
B-field information if *save_B*.
"""
# gather Bx and By magnetometer measurements
_, data_map = parse(input_iaga2002_fname)
interval = int((data_map.keys()[1] - data_map.keys()[0]).total_seconds())
Bx = nan_interp([record.x * 1e-9 for record in data_map.itervalues()])
By = nan_interp([record.y * 1e-9 for record in data_map.itervalues()])
# filter with transfer function
Ex, Ey = apply_transfer_function(Bx,
By,
interval,
xml_fname)
# save E field
stn_name = os.path.basename(input_iaga2002_fname)[:3]
j2000 = map(toJ2000, data_map.iterkeys())
mdict = {'Ex': Ex,
'Ey': Ey,
'j2000': j2000,
'input_fname': os.path.abspath(input_iaga2002_fname),
'xml_fname': os.path.abspath(xml_fname)}
if save_B:
mdict['Bx'] = Bx
mdict['By'] = By
mdict['Bx_raw'] = [x.x * 1e-9 for x in data_map.itervalues()]
mdict['By_raw'] = [x.y * 1e-9 for x in data_map.itervalues()]
savemat(output_mat_fname, mdict)
return output_mat_fname
def main(argv=None):
if argv is None:
argv = sys.argv
| |
"""
A ProblemSet contains one or more (related) Problems. Each Problem has one or more Instances corresponding
to a different input. A "simple" problem like (lambda x: x + "world" == "Hello world") that has no inputs has
just one instance.
It is important that different ProblemSets do not overlap: there should be no duplicates, and answers should
not be given away across sets. For example, if one set had a weighted shortest-path problem and another had
an unweighted shortest-path problem, they should be combined into a single ProblemSet. This is useful for
tests which involve giving away some solutions but not others.
"""
import inspect
import json
from typing import List, Dict, Callable, Set, Tuple
import random
import os
import sys
import traceback
import time
import utils
# The seed used for randomness is important because if a solver has access to this seed it can cheat and
# reverse-engineer the solutions to some puzzles. Don't share the seed with AI puzzle solvers :-)
_AI_SEED = 12389484322359235125123212243523534510980967133563
DEFAULT_TIMEOUT = 1.0 # seconds
PATH = os.path.join(utils.my_path, "problems/")
class InterpreterError(Exception): pass
def my_exec(cmd, globals=None, locals=None, description='source string'):
"""
https://stackoverflow.com/questions/28836078/how-to-get-the-line-number-of-an-error-from-exec-or-execfile-in-python
"""
try:
exec(cmd, globals, locals)
except SyntaxError as err:
error_class = err.__class__.__name__
detail = err.args[0] if err.args else ""
line_number = err.lineno
except Exception as err:
error_class = err.__class__.__name__
detail = err.args[0] if err.args else ""
cl, exc, tb = sys.exc_info()
line_number = traceback.extract_tb(tb)[-1][1]
else:
return
cmd_str = "\n".join([f"{i + 1}: {x}" for i, x in enumerate(cmd.split("\n"))])
raise InterpreterError("%s at line %d of %s: %s\n%s" % (error_class, line_number, description, detail, cmd_str))
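# Hedged demo, not part of the original module: InterpreterError carries the
# failing line number plus a line-numbered copy of the source, which makes
# puzzle execution failures easy to report.
def _demo_my_exec():
    try:
        my_exec("x = 1\nassert x == 2", {}, None, "demo snippet")
    except InterpreterError as err:
        return str(err)   # e.g. "AssertionError at line 2 of demo snippet: ..."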
def type_str(ty: type) -> str:
"""
Convert type ty to string.
:param ty: str, typing.List[int] , typing.List[typing.List[bool]], etc.
:return: string form of type, "str", "List[int]" , "List[List[bool]]", etc.
"""
type_str = str(ty).replace("typing.", "")
return type_str[8:-2] if type_str.startswith("<class '") else type_str
def gen_dump_code(var_name: str, ty: type) -> str:
"""
create code to output an object of type ty as a string
:param var_name: The variable name, like "x"
:param ty: str, typing.List[int] , typing.List[typing.List[bool]], etc.
:return: code that writes the variable to standard out as a json object
"""
tys = type_str(ty)
if tys.startswith("Set["):
return "print(json.dumps({k : 1 for k in " + var_name + "})) # write sets as dictionaries\n"
return f"print(json.dumps({var_name}))\n"
def gen_type_assertion(var_name: str, ty: type) -> str:
"""
create code to assert type of var_name is ty
:param var_name: The variable name, like "x"
:param ty: str, List[int] , List[List[bool]], etc.
:return: code that asserts that var_name is of type ty
"""
tys = type_str(ty)
vars = [c for c in 'abcdefghijklmnop' if c != var_name][::-1]
def helper(var_name, tys):
tys = tys.strip()
pre_bracket = tys.split("[")[0].lower()  # part before [ (or the entire string if there is no bracket)
ans = f"type({var_name}) is {pre_bracket}"
if "[" in tys:
inside = tys[tys.index("[") + 1:-1]
new_var = vars.pop()
if pre_bracket == "list" or pre_bracket == "set":
inside_check = helper(new_var, inside)
# if " and " in inside_check:
# inside_check = "(" + inside_check + ")"
ans += f" and all({inside_check} for {new_var} in {var_name})"
elif pre_bracket == "dict":
depth = 0
for i, c in enumerate(inside):
if c == "[":
depth += 1
elif c == "]":
depth -= 1
elif c == "," and depth == 0:
break
assert depth == 0 and c == ",", "Dict[(expecting comma inside)]"
key_var = vars.pop()
key_check = helper(key_var, tys[:i])
val_check = helper(new_var, tys[i + 1:])
ans += f" and all({key_check} and {val_check} for {key_var}, {new_var} in {var_name}.items())"
else:
assert False, f"Unknown type `{tys}`"
return ans
return f"assert {helper(var_name, tys)}, '{var_name} must be of type {tys}'"
def gen_load_code(var_name: str, ty: type) -> str:
"""
create code to load an object of type ty as a string
:param var_name: The variable name, like "x"
:param ty: str, typing.List[int] , typing.List[typing.List[bool]], etc.
:return: code that reads the variable from stdin as a json object
"""
tys = type_str(ty)
if tys.startswith("Set["):
assert tys.endswith("]")
inside = tys[4:-1]
ans = f"{var_name} = set(json.load(sys.stdin))) # convert set (stored as json dictionary)"
assertions = [f"all(isinstance(x, {inside}) for x in {var_name})"]
else:
ans = f"{var_name} = json.load(sys.stdin)"
num_lists = tys.count("List[")
assert tys.startswith("List[" * num_lists) and tys.endswith("]" * num_lists)
inside = tys[5 * num_lists: len(tys) - num_lists]
if num_lists == 0:
assertions = [f"isinstance({var_name}, {inside})"]
else:
assertions = [f"isinstance({var_name}, list)"]
if num_lists == 1:
assertions.append(f"all(isinstance(x, {inside}) for x in {var_name})")
else:
assertions.append(f"all(isinstance(x, list) for x in {var_name})")
if num_lists == 2:
assertions.append(f"all(isinstance(y, {inside}) for x in {var_name} for y in x)")
elif num_lists == 3:
assertions += [f"all(isinstance(y, list) for x in {var_name} for y in x)",
f"all(isinstance(z, {inside}) for x in {var_name} for y in x for z in y)"]
else:
assert False, f'Unknown type {tys}'
assert inside in ["int", "float", "bool", "str"], f'Unknown type {tys}'
return ans + "\n\n" + "\n".join(f"assert {a}, 'Type error: expecting `{tys}`'" for a in assertions)
def add_preamble(src):
preamble = []
types = []
if "List[" in src:
types.append("List")
if "Set[" in src:
types.append("Set")
if types:
preamble.append(f"from typing import {','.join(types)}")
if "json." in src:
preamble.append("import json")
if "sys." in src:
preamble.append("import sys")
return "\n".join(preamble) + "\n" * 3 + src if preamble else src
def gen_prob_code(var_name: str, var_type: type, prob_src: str, inputs: str):
s = f"""{prob_src}
{gen_load_code(var_name, var_type)}
inputs = {inputs}
assert problem({var_name}, **inputs)
print("Success!")
"""
# import inspect
# print(inspect.getsource(problem))
return add_preamble(s)
def gen_sol_code(var_name: str, var_type: type, sol_src: str, inputs: str):
s = f"""{sol_src}
inputs = {inputs}
{var_name} = solution(**inputs)
{gen_dump_code(var_name, var_type)}
"""
return add_preamble(s)
class BuilderRandom(random.Random):
"""Adds extra random functions useful for building instances."""
def __init__(self, seed=None):
self._init_seed = seed
super().__init__(seed)
def reseed(self):
self.seed(self._init_seed)
def pseudo_word(self, min_len=1, max_len=20):
w = "".join(self.choice(["text", "th", "ch", "qu", *"bcdfghjklmnprstvwxz"]) + self.choice("aeiyou")
for _ in range(1 + max_len // 2))
return w[:self.randrange(min_len, max_len + 1)]
def heavy_tail_float(self, lower=-1000.0, upper=1000.0, median_dev=1.0): # heavy tailed distribution
mean = (lower + upper) / 2.0
trunc = (upper - lower) / 2.0
while True:
r = (self.random() ** (-2) - 1) / 3
if self.randrange(2):
r = -r
x = mean - median_dev * r
if abs(x - mean) <= trunc:
return x
def char(self, chars="0123456789abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ!"):
return self.choice(chars)
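# Hedged usage note: BuilderRandom is deterministic for a fixed seed, and
# reseed() replays the exact same sequence, which is what makes instance
# generation reproducible.
#
#     rng = BuilderRandom(seed=2021)
#     first = (rng.pseudo_word(), rng.heavy_tail_float(), rng.char())
#     rng.reseed()
#     assert (rng.pseudo_word(), rng.heavy_tail_float(), rng.char()) == first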
class ProblemSet:
def __init__(self, name, summary=None, path=PATH):
self.problems = [] # Problem's
self.summary = summary
self.name = name
self.path = path
self._already_tested = None
# self.np_random = np.random.default_rng([ord(c) for c in seed])
def add(self, problem):
self.problems.append(problem)
def get_filename(self):
return self.path and (os.path.join(self.path, self.name.split(".")[-1]) + ".json")
def get_already_tested(self):
if self._already_tested is None:
try:
js = utils.load_json(self.get_filename())
except:
js = []
self._already_tested = {j["sat"]: {sol for sol in j["sols"]} for j in js}
return self._already_tested
def save(self):
obj = []
for p in self.problems:
for i in p.instances:
z = {"name": i.name, "sat": i.src, "sols": i.sol_srcs}
if p.timeout is not None and p.timeout != 1:
z["timeout"] = p.timeout
obj.append(z)
filename = self.get_filename()
if not filename:
json.dumps(obj) # for debugging, just to make sure that it can be converted to json
utils.warning(f"No path, not saving. {[len(p.instances) for p in self.problems]}")
else:
try:
os.makedirs(self.path, exist_ok=True)
with open(filename, "w") as f:
json.dump(obj, f, indent=2)
solved = sum((1 if i.sol_srcs else 0) for p in self.problems for i in p.instances)
dur = sum(p.build_time for p in self.problems)
utils.info(f"Solved {solved:5,}/{sum([len(p.instances) for p in self.problems]):5,} instances of "
f"{len(self.problems):3,} probs in {dur:.2f}s => {filename}")
except FileNotFoundError:
utils.error(f"***Could not save {filename}, perhaps a path problem?")
return
# for e in self.problems[0].instances[:10]:
# utils.debug(str(e)[:100])
def get_problems(globs: dict):
seen = {Problem} # don't add abstract class Problem
ans = []
for v in globs.values():
try:
if v in seen:
continue
else:
seen.add(v)
except TypeError:
continue
try:
is_prob = isinstance(v, Problem)
except TypeError:
is_prob = False
if is_prob:
ans.append(v)
else:
try:
is_prob_class = issubclass(v, Problem)
except TypeError:
is_prob_class = False
if is_prob_class:
ans.append(v())
return ans
def deep_copy(obj):
t = type(obj)
if t in {tuple, list, set}:
return t(deep_copy(x) for x in obj)
if t == dict:
return {k: deep_copy(v) for k, v in obj.items()}
return obj
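# Hedged sanity check: containers (tuple/list/set/dict) are rebuilt recursively,
# while any other object is returned as-is (shared, not copied).
#
#     original = {"a": [1, 2, {3}], "b": (4, 5)}
#     duplicate = deep_copy(original)
#     duplicate["a"].append(9)    # leaves original["a"] untouched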
def get_type(obj, ignore_errors=False): # better than type(x) because it can do things like List[int], etc.
try:
t = type(obj)
if t in {int, float, bool, complex, range, str}:
| |
row_number = row_count - (sheet_count * row_limit)
if sheet_count > 0:
row_number += 1
return sheets[sheet_count], sheets[sheet_count].row(row_number)
# Write the table contents
subheading = None
odd_style = styles["odd"]
even_style = styles["even"]
subheader_style = styles["subheader"]
for row in rows:
# Current row
row_index += 1
current_sheet, current_row = get_current_row(row_index, row_limit)
style = even_style if row_index % 2 == 0 else odd_style
# Group headers
if report_groupby:
represent = s3_strip_markup(s3_str(row[report_groupby]))
if subheading != represent:
# Start of new group - write group header
subheading = represent
current_sheet.write_merge(row_index, row_index, 0, total_cols,
subheading,
subheader_style,
)
# Move on to next row
row_index += 1
current_sheet, current_row = get_current_row(row_index, row_limit)
style = even_style if row_index % 2 == 0 else odd_style
col_index = 0
remaining_fields = lfields
# Custom row style?
row_style = None
if "_style" in row:
stylename = row["_style"]
if stylename in styles:
row_style = styles[stylename]
# Group header/footer row?
if "_group" in row:
group_info = row["_group"]
label = group_info.get("label")
totals = group_info.get("totals")
if label:
label = s3_strip_markup(s3_str(label))
style = row_style or subheader_style
span = group_info.get("span")
if span == 0:
current_sheet.write_merge(row_index,
row_index,
0,
total_cols - 1,
label,
style,
)
if totals:
# Write totals into the next row
row_index += 1
current_sheet, current_row = \
get_current_row(row_index, row_limit)
else:
current_sheet.write_merge(row_index,
row_index,
0,
span - 1,
label,
style,
)
col_index = span
remaining_fields = lfields[span:]
if not totals:
continue
for field in remaining_fields:
label = headers[field]
if label == groupby_label:
continue
if label == "Id":
# Skip the ID column from XLS exports
col_index += 1
continue
if field not in row:
represent = ""
else:
represent = s3_strip_markup(s3_str(row[field]))
coltype = types[col_index]
if coltype == "sort":
continue
if len(represent) > MAX_CELL_SIZE:
represent = represent[:MAX_CELL_SIZE]
value = represent
if coltype == "date":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.year,
cell_datetime.month,
cell_datetime.day)
value = xldate_from_date_tuple(date_tuple, 0)
style.num_format_str = date_format
except:
pass
elif coltype == "datetime":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.year,
cell_datetime.month,
cell_datetime.day,
cell_datetime.hour,
cell_datetime.minute,
cell_datetime.second)
value = xldate_from_datetime_tuple(date_tuple, 0)
style.num_format_str = datetime_format
except:
pass
elif coltype == "time":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.hour,
cell_datetime.minute,
cell_datetime.second)
value = xldate_from_time_tuple(date_tuple)
style.num_format_str = time_format
except:
pass
elif coltype == "integer":
try:
value = int(value)
style.num_format_str = "0"
except:
pass
elif coltype == "double":
try:
value = float(value)
style.num_format_str = "0.00"
except:
pass
if has_id:
# Adjust for the skipped column
write_col_index = col_index - 1
else:
write_col_index = col_index
current_row.write(write_col_index, value, style)
width = len(represent) * COL_WIDTH_MULTIPLIER
if width > column_widths[col_index]:
column_widths[col_index] = width
current_sheet.col(write_col_index).width = width
col_index += 1
# Additional sheet settings
for sheet in sheets:
sheet.panes_frozen = True
sheet.horz_split_pos = 1
# Write output
output = BytesIO()
book.save(output)
output.seek(0)
if attr_get("as_stream", False):
return output
# Response headers
filename = "%s_%s.xls" % (request.env.server_name, title)
disposition = "attachment; filename=\"%s\"" % filename
response = current.response
response.headers["Content-Type"] = contenttype(".xls")
response.headers["Content-disposition"] = disposition
return output.read()
# -------------------------------------------------------------------------
@staticmethod
def expand_hierarchy(rfield, num_levels, rows):
"""
Expand a hierarchical foreign key column into one column
per hierarchy level
@param rfield: the column (S3ResourceField)
@param num_levels: the number of levels (from root)
@param rows: the Rows from S3ResourceData
@returns: list of keys (column names) for the inserted columns
"""
field = rfield.field
if not field or rfield.ftype[:9] != "reference":
return []
# Get the look-up table
ktablename = s3_get_foreign_key(field, m2m=False)[0]
if not ktablename:
return []
colname = rfield.colname
represent = field.represent
# Get the hierarchy
from ..s3hierarchy import S3Hierarchy
h = S3Hierarchy(ktablename)
if not h.config:
return []
# Collect the values from rows
values = set()
for row in rows:
value = row["_row"][colname]
if type(value) is list:
value = value[0]
values.add(value)
# Generate the expanded values
expanded = h.repr_expand(values,
levels = num_levels,
represent = represent,
)
# ...and add them into the rows
colnames = ["%s__%s" % (colname, l) for l in range(num_levels)]
for row in rows:
value = row["_row"][colname]
if type(value) is list:
value = value[0]
hcols = expanded.get(value)
for level in range(num_levels):
row[colnames[level]] = hcols[level] if hcols else None
return colnames
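# Illustrative note (not in the original source): for a hypothetical hierarchical
# foreign key column with colname "org_organisation.parent_id" expanded over
# 3 levels, the returned keys would be "org_organisation.parent_id__0",
# "org_organisation.parent_id__1" and "org_organisation.parent_id__2"; each row
# gains one value per hierarchy level, or None where no expansion is available.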
# -------------------------------------------------------------------------
@staticmethod
def encode_pt(pt, title):
"""
Encode an S3PivotTable as an XLS sheet
@param pt: the S3PivotTable
@param title: the title for the report
@returns: the XLS file as stream
"""
output = BytesIO()
book = S3PivotTableXLS(pt).encode(title)
book.save(output)
output.seek(0)
return output
# -------------------------------------------------------------------------
@staticmethod
def dt_format_translate(pyfmt):
"""
Translate a Python datetime format string into an
Excel datetime format string
@param pyfmt: the Python format string
"""
translate = {"%a": "ddd",
"%A": "dddd",
"%b": "mmm",
"%B": "mmmm",
"%c": "",
"%d": "dd",
"%f": "",
"%H": "hh",
"%I": "hh",
"%j": "",
"%m": "mm",
"%M": "mm",
"%p": "AM/PM",
"%S": "ss",
"%U": "",
"%w": "",
"%W": "",
"%x": "",
"%X": "",
"%y": "yy",
"%Y": "yyyy",
"%z": "",
"%Z": "",
}
PERCENT = "__percent__"
xlfmt = str(pyfmt).replace("%%", PERCENT)
for tag, translation in translate.items():
xlfmt = xlfmt.replace(tag, translation)
return xlfmt.replace(PERCENT, "%")
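# Example (hedged sketch, not part of the original source): the translation maps
# Python strftime directives to Excel number-format codes, e.g.
#
#   S3XLS.dt_format_translate("%d-%b-%Y")   # -> "dd-mmm-yyyy"
#   S3XLS.dt_format_translate("%H:%M:%S")   # -> "hh:mm:ss"
#   S3XLS.dt_format_translate("%Y%%")       # -> "yyyy%"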
# -------------------------------------------------------------------------
@classmethod
def _styles(cls,
use_colour = False,
evenodd = True,
datetime_format = None,
):
"""
XLS encoder standard cell styles
@param use_colour: use background colour in cells
@param evenodd: render different background colours
for even/odd rows ("stripes")
@param datetime_format: the date/time format
"""
import xlwt
if datetime_format is None:
# Support easier usage from external functions
datetime_format = cls.dt_format_translate(current.deployment_settings.get_L10n_datetime_format())
# Styles
large_header = xlwt.XFStyle()
large_header.font.bold = True
large_header.font.height = 400
if use_colour:
SOLID_PATTERN = large_header.pattern.SOLID_PATTERN
large_header.alignment.horz = large_header.alignment.HORZ_CENTER
large_header.pattern.pattern = SOLID_PATTERN
large_header.pattern.pattern_fore_colour = cls.LARGE_HEADER_COLOUR
notes = xlwt.XFStyle()
notes.font.italic = True
notes.font.height = 160 # 160 Twips = 8 point
notes.num_format_str = datetime_format
header = xlwt.XFStyle()
header.font.bold = True
header.num_format_str = datetime_format
if use_colour:
header.pattern.pattern = SOLID_PATTERN
header.pattern.pattern_fore_colour = cls.HEADER_COLOUR
subheader = xlwt.XFStyle()
subheader.font.bold = True
if use_colour:
subheader.pattern.pattern = SOLID_PATTERN
subheader.pattern.pattern_fore_colour = cls.SUB_HEADER_COLOUR
subtotals = xlwt.XFStyle()
subtotals.font.bold = True
if use_colour:
subtotals.pattern.pattern = SOLID_PATTERN
subtotals.pattern.pattern_fore_colour = cls.SUB_TOTALS_COLOUR
totals = xlwt.XFStyle()
totals.font.bold = True
if use_colour:
totals.pattern.pattern = SOLID_PATTERN
totals.pattern.pattern_fore_colour = cls.TOTALS_COLOUR
odd = xlwt.XFStyle()
if use_colour and evenodd:
odd.pattern.pattern = SOLID_PATTERN
odd.pattern.pattern_fore_colour = cls.ROW_ALTERNATING_COLOURS[0]
even = xlwt.XFStyle()
if use_colour and evenodd:
even.pattern.pattern = SOLID_PATTERN
even.pattern.pattern_fore_colour = cls.ROW_ALTERNATING_COLOURS[1]
return {"large_header": large_header,
"notes": notes,
"header": header,
"subheader": subheader,
"subtotals": subtotals,
"totals": totals,
"odd": odd,
"even": even,
}
# =============================================================================
class S3PivotTableXLS(object):
"""
XLS encoder for S3PivotTables
@todo: merge+DRY with S3XLS?
@todo: support multiple layers (=write multiple sheets)
@todo: handle huge pivot tables (=exceeding XLS rows/cols limits)
"""
def __init__(self, pt):
"""
Constructor
@param pt: the S3PivotTable to encode
"""
self.pt = pt
# Initialize properties
self._styles = None
self._formats = None
self.lookup = {}
self.valuemap = {}
# -------------------------------------------------------------------------
def encode(self, title):
"""
Convert this pivot table into an XLS file
@param title: the title of the report
@returns: the XLS workbook
"""
try:
import xlwt
except ImportError:
error = S3XLS.ERROR.XLWT_ERROR
current.log.error(error)
raise HTTP(503, body=error)
T = current.T
TOTAL = s3_str(s3_str(T("Total")).upper())
pt = self.pt
# Get report options
report_options = pt.resource.get_config("report_options", {})
# Report dimensions
fact = pt.facts[0]
layer = fact.layer
rows_dim = pt.rows
cols_dim = pt.cols
numrows = pt.numrows
numcols = pt.numcols
# Resource fields for dimensions
rfields = pt.rfields
fact_rfield = rfields[fact.selector]
rows_rfield = rfields[rows_dim] if rows_dim else None
cols_rfield = rfields[cols_dim] if cols_dim else None
# Dimension labels
get_label = fact._get_field_label
if rows_dim:
# Get row axis label
rows_label = s3_str(get_label(rows_rfield,
report_options.get("rows"),
))
else:
rows_label = ""
if cols_dim:
cols_label = s3_str(get_label(cols_rfield,
report_options.get("cols"),
))
else:
cols_label = ""
fact_label = s3_str(fact.get_label(fact_rfield,
report_options.get("fact"),
))
# Index of the column for row totals
total_column = (numcols + 1) if cols_dim else 1
# Sort+represent rows and columns
rows, cols = self.sortrepr()
# Create workbook and sheet
book = xlwt.Workbook(encoding = "utf-8")
sheet = book.add_sheet(s3_str(title))
write = self.write
# Write header
title_row = current.deployment_settings.get_xls_title_row()
if callable(title_row):
# Custom header (returns number of header rows)
title_length = title_row(sheet)
elif title_row:
# Default header
title_length = 2
# Report title
write(sheet, 0, 0, s3_str(title),
colspan = numcols + 2,
style = "title",
)
# Current date/time (in | |
"""
aspen.configuration
+++++++++++++++++++
Define configuration objects.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import mimetypes
import os
import sys
import traceback
import pkg_resources
from collections import defaultdict
import aspen
import aspen.logging
from aspen.configuration import parse
from aspen.configuration.exceptions import ConfigurationError
from aspen.configuration.options import OptionParser, DEFAULT
from aspen.utils import ascii_dammit
from aspen.typecasting import defaults as default_typecasters
import aspen.body_parsers
# Defaults
# ========
# The from_unicode callable converts from unicode to whatever format is
# required by the variable, raising ValueError appropriately. Note that
# name is supposed to match the options in our optparser. I like it wet.
KNOBS = \
{ 'configuration_scripts': (lambda: [], parse.list_)
, 'project_root': (None, parse.identity)
, 'logging_threshold': (0, int)
, 'www_root': (None, parse.identity)
# Extended Options
# 'name': (default, from_unicode)
, 'changes_reload': (False, parse.yes_no)
, 'charset_dynamic': ('UTF-8', parse.charset)
, 'charset_static': (None, parse.charset)
, 'indices': ( lambda: ['index.html', 'index.json', 'index'] +
['index.html.spt', 'index.json.spt', 'index.spt']
, parse.list_
)
, 'list_directories': (False, parse.yes_no)
, 'media_type_default': ('text/plain', parse.media_type)
, 'media_type_json': ('application/json', parse.media_type)
, 'renderer_default': ('stdlib_percent', parse.renderer)
, 'show_tracebacks': (False, parse.yes_no)
}
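# Illustrative note (not in the original source): each knob above can also be set
# through an ASPEN_<NAME> environment variable or a --<name> command line option,
# converted with its from_unicode callable, e.g. (assumed values):
#
#   ASPEN_CHANGES_RELOAD=yes     ->  configurable.changes_reload == True
#   ASPEN_LOGGING_THRESHOLD=1    ->  configurable.logging_threshold == 1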
DEFAULT_CONFIG_FILE = 'configure-aspen.py'
# Configurable
# ============
# Designed as a singleton.
class Configurable(object):
"""Mixin object for aggregating configuration from several sources.
"""
protected = False # Set to True to require authentication for all
# requests.
@classmethod
def from_argv(cls, argv):
"""return a Configurable based on the passed-in arguments list
"""
configurable = cls()
configurable.configure(argv)
return configurable
def _set(self, name, hydrated, flat, context, name_in_context):
"""Set value at self.name, calling value if it's callable.
"""
if aspen.is_callable(hydrated):
hydrated = hydrated() # Call it if we can.
setattr(self, name, hydrated)
if name_in_context:
assert isinstance(flat, unicode) # sanity check
name_in_context = " %s=%s" % (name_in_context, flat)
out = " %-22s %-30s %-24s"
return out % (name, hydrated, context + name_in_context)
def set(self, name, raw, from_unicode, context, name_in_context):
error = None
try:
value = raw
if isinstance(value, str):
value = raw.decode('US-ASCII')
hydrated = from_unicode(value)
except UnicodeDecodeError, error:
value = ascii_dammit(value)
error_detail = "Configuration values must be US-ASCII."
except ValueError, error:
error_detail = error.args[0]
if error is not None:
msg = "Got a bad value '%s' for %s %s:"
msg %= (value, context, name_in_context)
if error_detail:
msg += " " + error_detail + "."
raise ConfigurationError(msg)
# special-case lists, so we can layer them
if from_unicode is parse.list_:
extend, new_value = hydrated
if extend:
old_value = getattr(self, name)
hydrated = old_value + new_value
else:
hydrated = new_value
args = (name, hydrated, value, context, name_in_context)
return self._set(*args)
def configure(self, argv):
"""Takes an argv list, and gives it straight to optparser.parse_args.
The argv list should not include the executable name.
"""
# Do some base-line configuration.
# ================================
# We want to do the following configuration of our Python environment
# regardless of the user's configuration preferences
# mimetypes
aspens_mimetypes = os.path.join(os.path.dirname(__file__), 'mime.types')
mimetypes.knownfiles += [aspens_mimetypes]
# mimetypes.init is called below after the user has a turn.
# XXX register codecs here
self.typecasters = default_typecasters
# Parse argv.
# ===========
opts, args = OptionParser().parse_args(argv)
# Configure from defaults, environment, and command line.
# =======================================================
msgs = ["Reading configuration from defaults, environment, and "
"command line."] # can't actually log until configured
for name, (default, func) in sorted(KNOBS.items()):
# set the default value for this variable
msgs.append(self._set(name, default, None, "default", ''))
# set from the environment
envvar = 'ASPEN_' + name.upper()
value = os.environ.get(envvar, '').strip()
if value:
msgs.append(self.set( name
, value
, func
, "environment variable"
, envvar
))
# set from the command line
value = getattr(opts, name)
if value is not DEFAULT:
msgs.append(self.set( name
, value
, func
, "command line option"
, "--"+name
))
# Set some attributes.
# ====================
def safe_getcwd(errorstr):
try:
# If the working directory no longer exists, then the following
# will raise OSError: [Errno 2] No such file or directory. I
# swear I've seen this under supervisor, though I don't have
# steps to reproduce. :-( To get around this you specify a
# www_root explicitly, or you can use supervisor's cwd
# facility.
return os.getcwd()
except OSError, err:
if err.errno != errno.ENOENT:
raise
raise ConfigurationError(errorstr)
# LOGGING_THRESHOLD
# -----------------
# This is initially set to -1 and not 0 so that we can tell if the user
# changed it programmatically directly before we got here. I do this in
# the testing module, that's really what this is about.
if aspen.logging.LOGGING_THRESHOLD == -1:
aspen.logging.LOGGING_THRESHOLD = self.logging_threshold
# Now that we know the user's desires, we can log appropriately.
aspen.log_dammit(os.linesep.join(msgs))
# project root
if self.project_root is None:
aspen.log_dammit("project_root not configured (no template bases, "
"etc.).")
else:
# canonicalize it
if not os.path.isabs(self.project_root):
aspen.log_dammit("project_root is relative to CWD: '%s'."
% self.project_root)
cwd = safe_getcwd("Could not get a current working "
"directory. You can specify "
"ASPEN_PROJECT_ROOT in the environment, "
"or --project_root on the command line.")
self.project_root = os.path.join(cwd, self.project_root)
self.project_root = os.path.realpath(self.project_root)
aspen.log_dammit("project_root set to %s." % self.project_root)
# mime.types
users_mimetypes = os.path.join(self.project_root, 'mime.types')
mimetypes.knownfiles += [users_mimetypes]
# configure-aspen.py
configure_aspen_py = os.path.join( self.project_root
, DEFAULT_CONFIG_FILE
)
self.configuration_scripts.append(configure_aspen_py) # last word
# PYTHONPATH
sys.path.insert(0, self.project_root)
# www_root
if self.www_root is None:
self.www_root = safe_getcwd("Could not get a current working "
"directory. You can specify "
"ASPEN_WWW_ROOT in the environment, "
"or --www_root on the command line.")
self.www_root = os.path.realpath(self.www_root)
# load bodyparsers
self.body_parsers = {
"application/x-www-form-urlencoded": aspen.body_parsers.formdata,
"multipart/form-data": aspen.body_parsers.formdata,
self.media_type_json: aspen.body_parsers.jsondata
}
# load renderers
self.renderer_factories = {}
for name in aspen.BUILTIN_RENDERERS:
# Pre-populate renderers so we can report on ImportErrors early
try:
capture = {}
python_syntax = 'from aspen.renderers.%s import Factory'
exec python_syntax % name in capture
make_renderer = capture['Factory'](self)
except ImportError, err:
make_renderer = err
err.info = sys.exc_info()
self.renderer_factories[name] = make_renderer
for entrypoint in pkg_resources.iter_entry_points(group='aspen.renderers'):
render_module = entrypoint.load()
self.renderer_factories[entrypoint.name] = render_module.Factory(self)
aspen.log_dammit("Found plugin for renderer '%s'" % entrypoint.name)
self.default_renderers_by_media_type = defaultdict(lambda: self.renderer_default)
self.default_renderers_by_media_type[self.media_type_json] = 'json_dump'
# mime.types
# ==========
# It turns out that init'ing mimetypes is somewhat expensive. This is
# significant in testing, though in dev/production you wouldn't notice.
# In any case this means that if a dev user inits mimetypes themselves
# then we won't do so again here, which is fine. Right?
if not mimetypes.inited:
mimetypes.init()
self.run_config_scripts()
self.show_renderers()
def show_renderers(self):
aspen.log_dammit("Renderers (*ed are unavailable, CAPS is default):")
width = max(map(len, self.renderer_factories))
for name, factory in self.renderer_factories.items():
star = " "
if isinstance(factory, ImportError):
star = "*"
error = "ImportError: " + factory.args[0]
else:
error = ""
if name == self.renderer_default:
name = name.upper()
name = name.ljust(width + 2)
aspen.log_dammit(" %s%s%s" % (star, name, error))
default_renderer = self.renderer_factories[self.renderer_default]
if isinstance(default_renderer, ImportError):
msg = "\033[1;31mImportError loading the default renderer, %s:\033[0m"
aspen.log_dammit(msg % self.renderer_default)
sys.excepthook(*default_renderer.info)
raise default_renderer
def run_config_scripts(self):
# Finally, exec any configuration scripts.
# ========================================
# The user gets self as 'website' inside their configuration scripts.
default_cfg_filename = None
if self.project_root is not None:
default_cfg_filename = os.path.join(self.project_root, DEFAULT_CONFIG_FILE)
for filepath in self.configuration_scripts:
if not filepath.startswith(os.sep):
if self.project_root is None:
raise ConfigurationError("You must set project_root in "
"order to specify a configuratio"
"n_script relatively.")
filepath = os.path.join(self.project_root, filepath)
filepath = os.path.realpath(filepath)
try:
execfile(filepath, {'website': self})
except IOError, err:
# Re-raise the error if it happened inside the script.
if err.filename != filepath:
raise
# I was checking os.path.isfile for these, but then we have a
# race condition that smells to me like a potential security
# vulnerability.
## determine if it's a default configscript or a specified one
cfgtype = "configuration"
if filepath == default_cfg_filename:
cfgtype = "default " + cfgtype
## pick the right error message
if err.errno == errno.ENOENT:
msg = ("The %s script %s doesn't seem to exist.")
elif err.errno == errno.EACCES:
msg = ("It appears that you don't have permission to read "
"the %s script %s.")
else:
msg = ("There was a problem reading the %s script %s:")
msg += os.sep + traceback.format_exc()
## do message-string substitutions
msg = msg % (cfgtype, filepath)
## output the message
if not "default" in cfgtype:
# if you specify a config file, it's an | |
"""Defines metrics used to evaluate uncertainty."""
import numpy as np
from scipy.stats import norm
from utils.util import to_one_hot
def gaussian_nll(y, mu, var):
"""Calculates the negative log likelihood of Gaussian distribution.
Args:
y: numpy array, shape [batch_size], the true labels.
mu: numpy array, shape [batch_size], the predicted means.
var: numpy array, shape [batch_size], the predicted variances.
Returns:
nll: float, the resulting negative log likelihood
"""
y, mu, var = y.squeeze(), mu.squeeze(), var.squeeze()
nll = -np.mean(norm.logpdf(y, loc=mu, scale=np.sqrt(var)))
return float(nll)
def rmse(y_pred, y):
"""Calculates the root mean squared error.
Args:
y_pred: numpy array, shape [batch_size], the predictions.
y: numpy array, shape [batch_size], the corresponding labels.
Returns:
rmse: float, the resulting root mean squared error.
"""
y_pred, y = y_pred.squeeze(), y.squeeze()
rmse = np.sqrt(np.mean((y - y_pred) ** 2))
return float(rmse)
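# Example (hedged usage sketch, not part of the original module):
#
#   y = np.array([0.0, 1.0])
#   mu = np.array([0.1, 0.9])
#   var = np.array([0.25, 0.25])
#   gaussian_nll(y, mu, var)   # mean negative log density of y under N(mu, var)
#   rmse(mu, y)                # sqrt(mean((y - mu)**2)) == 0.1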
def compute_regression_calibration(pred_mean, pred_var, target, num_bins=10):
"""Compute the regression calibration. Note that we assume that the
probabilistic forecase taking the form of Gaussian.
References:
[1] https://arxiv.org/abs/1807.00263
Args:
pred_mean: numpy array, shape [num_data, ], the predicted mean.
pred_var: numpy array, shape [num_data, ], the predicted variance.
target: numpy array, shape [num_data, ], the ground truths.
num_bins: number of bins.
Returns:
cal: a dictionary
{reliability_diag: reliability diagram
calibration_error: calibration error,
sharpness: sharpness
}
"""
# Make sure the inputs have valid shape
pred_mean = pred_mean.flatten()
pred_var = pred_var.flatten()
target = target.flatten()
# Compute the predicted CDF
predicted_cdf = norm.cdf(target, loc=pred_mean, scale=np.sqrt(pred_var))
# Compute the empirical CDF
# empirical_cdf = np.zeros(len(predicted_cdf))
# for i, p in enumerate(predicted_cdf):
# empirical_cdf[i] = np.mean(predicted_cdf <= p)
# Initialize the expected confidence levels according to the number of bins
expected_conf_levels = np.linspace(0, 1, num_bins+1)[1:]
# Compute the observed confidence levels, Eq (8) in [1].
observed_conf_levels = np.zeros_like(expected_conf_levels)
for i, p in enumerate(expected_conf_levels):
observed_conf_levels[i] = np.mean(predicted_cdf < p)
# Compute the calibration error, Eq (9) in [1].
calibration_error = float(np.sum((expected_conf_levels -
observed_conf_levels)**2))
# Compute the sharpness of the predictions, Eq (10) in [1].
sharpness = np.mean(pred_var)
# Reliability diagram
reliability_diag = {
"expected_conf_levels": expected_conf_levels,
"observed_conf_levels": observed_conf_levels
}
# Saving
cal = {
'reliability_diag': reliability_diag,
'calibration_error': calibration_error,
'sharpness': sharpness
}
return cal
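# Example (hedged usage sketch, not part of the original module): for Gaussian
# predictions over a test set,
#
#   cal = compute_regression_calibration(pred_mean, pred_var, target, num_bins=10)
#   cal["calibration_error"]                         # Eq (9) in [1]
#   cal["sharpness"]                                 # mean predicted variance, Eq (10)
#   cal["reliability_diag"]["observed_conf_levels"]  # one entry per bin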
def filter_top_k(probabilities, labels, top_k):
"""Extract top k predicted probabilities and corresponding ground truths"""
labels_one_hot = np.zeros(probabilities.shape)
labels_one_hot[np.arange(probabilities.shape[0]), labels] = 1
if top_k is None:
return probabilities, labels_one_hot
negative_prob = -1. * probabilities
ind = np.argpartition(negative_prob, top_k-1, axis=-1)
top_k_ind = ind[:, :top_k]
rows = np.expand_dims(np.arange(probabilities.shape[0]), axis=1)
lowest_k_negative_probs = negative_prob[rows, top_k_ind]
output_probs = -1. * lowest_k_negative_probs
labels_one_hot_k = labels_one_hot[rows, top_k_ind]
return output_probs, labels_one_hot_k
def get_multiclass_predictions_and_correctness(probabilities, labels, top_k=1):
"""Returns predicted class, correctness boolean vector."""
if top_k == 1:
class_predictions = np.argmax(probabilities, -1)
top_k_probs = probabilities[np.arange(len(labels)), class_predictions]
is_correct = np.equal(class_predictions, labels)
else:
top_k_probs, is_correct = filter_top_k(probabilities, labels, top_k)
return top_k_probs, is_correct
def nll(probabilities, labels):
"""Computes the negative log-likelihood for classification problem
(cross-entropy).
Args:
probabilities: Array of probabilities of shape
[num_samples, num_classes].
labels: Integer array labels of shape [num_samples].
Returns:
float: computed NLL.
"""
score = -np.log(probabilities)[range(labels.shape[0]), labels].mean()
return score
def brier_score(probabilities, labels):
"""Computes the Brier score.
Args:
probabilities: Array of probabilities of shape
[num_samples, num_classes].
labels: Integer array labels of shape [num_samples].
Returns:
float: computed Brier score.
"""
num_classes = probabilities.shape[1]
targets_one_hot = np.zeros_like(probabilities)
targets_one_hot[np.arange(labels.shape[0]), labels] = 1.
squared_diff = (targets_one_hot - probabilities) ** 2
score = np.mean(np.sum(squared_diff, axis=1) / num_classes)
return score
def accuracy(probabilities, labels):
"""Computes the top-1 accuracy of predictions.
Args:
probabilities: Array of probabilities of shape
[num_samples, num_classes].
labels: Integer array labels of shape [num_samples].
Returns:
float: Top-1 accuracy of predictions.
"""
return accuracy_top_k(probabilities, labels, 1)
def accuracy_top_k(probabilities, labels, top_k):
"""Computes the top-k accuracy of predictions.
Args:
probabilities: Array of probabilities of shape
[num_samples, num_classes].
labels: Integer array labels of shape [num_samples].
top_k: Integer. Number of highest-probability classes to consider.
Returns:
float: Top-k accuracy of predictions.
"""
_, ground_truth = filter_top_k(probabilities, labels, top_k)
return ground_truth.any(axis=-1).mean()
def bin_predictions_and_accuracies(probabilities, ground_truth, bins=10):
"""Computes histograms of probabilities into bins.
Args:
probabilities: A numpy vector of N probabilities assigned to
each prediction
ground_truth: A numpy vector of N ground truth labels in
{0,1, True, False}
bins: Number of equal width bins to bin predictions into in [0, 1],
or an array representing bin edges.
Returns:
bin_edges: Numpy vector of floats containing the edges of the bins
(including leftmost and rightmost).
accuracies: Numpy vector of floats for the average accuracy of the
predictions in each bin.
counts: Numpy vector of ints containing the number of examples per bin.
"""
if isinstance(bins, int):
num_bins = bins
else:
num_bins = bins.size - 1
probabilities = np.where(probabilities == 0, 1e-8, probabilities)
counts, bin_edges = np.histogram(probabilities, bins=bins, range=[0., 1.])
indices = np.digitize(probabilities, bin_edges, right=True)
accuracies = np.array([np.mean(ground_truth[indices == i])
for i in range(1, num_bins + 1)])
return bin_edges, accuracies, counts
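# Example (hedged usage sketch, not part of the original module):
#
#   probs = np.array([0.1, 0.4, 0.8, 0.95])
#   truth = np.array([0, 1, 1, 1])
#   edges, accs, counts = bin_predictions_and_accuracies(probs, truth, bins=10)
#   # edges has 11 entries; accs and counts have one entry per bin
#   # (bins with no predictions yield NaN accuracies)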
def bin_centers_of_mass(probabilities, bin_edges):
probabilities = np.where(probabilities == 0, 1e-8, probabilities)
indices = np.digitize(probabilities, bin_edges, right=True)
return np.array([np.mean(probabilities[indices == i])
for i in range(1, len(bin_edges))])
def ece(probabilities, ground_truth, bins=10):
"""Compute the expected calibration error of a set of preditions in [0, 1].
Args:
probabilities: A numpy vector of N probabilities assigned
to each prediction
ground_truth: A numpy vector of N ground truth labels in
{0,1, True, False}
bins: Number of equal width bins to bin predictions into in [0, 1], or
an array representing bin edges.
Returns:
float: the expected calibration error.
"""
bin_edges, accuracies, counts = bin_predictions_and_accuracies(
probabilities, ground_truth, bins)
bin_centers = bin_centers_of_mass(probabilities, bin_edges)
num_examples = np.sum(counts)
ece = np.sum([(counts[i] / float(num_examples)) * np.sum(
np.abs(bin_centers[i] - accuracies[i]))
for i in range(bin_centers.size) if counts[i] > 0])
return ece
def ece_multiclass(probabilities, labels, bins=10,
top_k=1):
"""Computes expected calibration error from Guo et al. 2017.
Args:
probabilities: Array of probabilities of shape
[num_samples, num_classes].
labels: Integer array labels of shape [num_samples].
bins: Number of equal width bins to bin predictions into in [0, 1], or
an array representing bin edges.
top_k: Integer or None. If integer, use the top k predicted
probabilities in ECE calculation (can be informative for problems
with many classes and lower top-1 accuracy).
If None, use all classes.
Returns:
float: Expected calibration error.
"""
top_k_probs, is_correct = get_multiclass_predictions_and_correctness(
probabilities, labels, top_k)
top_k_probs = top_k_probs.flatten()
is_correct = is_correct.flatten()
return ece(top_k_probs, is_correct, bins)
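# Example (hedged usage sketch, not part of the original module): for softmax
# outputs `probs` of shape [num_samples, num_classes] and integer `labels`,
#
#   ece_multiclass(probs, labels, bins=15)           # top-1 calibration error
#   ece_multiclass(probs, labels, bins=15, top_k=5)  # over the top-5 predictions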
def compute_accuracies_at_confidences(labels, probs, thresholds):
"""Compute accuracy of samples above each confidence threshold.
Args:
labels: Array of integer categorical labels.
probs: Array of categorical probabilities.
thresholds: Array of floating point probability thresholds in [0, 1).
Returns:
accuracies: Array of accuracies over examples with confidence > T for
each T in thresholds.
counts: Count of examples with confidence > T for each T in thresholds.
"""
assert probs.shape[:-1] == labels.shape
predict_class = probs.argmax(-1)
predict_confidence = probs.max(-1)
shape = (len(thresholds),) + probs.shape[:-2]
accuracies = np.zeros(shape)
counts = np.zeros(shape)
eq = np.equal(predict_class, labels)
for i, thresh in enumerate(thresholds):
mask = predict_confidence >= thresh
counts[i] = mask.sum(-1)
accuracies[i] = np.ma.masked_array(eq, mask=~mask).mean(-1)
return accuracies, counts
def compute_calibration(y, p_mean, num_bins=10):
"""Compute the calibration.
References:
https://arxiv.org/abs/1706.04599
https://arxiv.org/abs/1807.00263
Args:
y: numpy array, shape [num_classes], the true labels.
p_mean: numpy array, size (?, num_classes)
containing the mean output predicted probabilities
num_bins: number of bins
Returns:
cal: a dictionary
{reliability_diag: reliability diagram
ece: Expected Calibration Error
mce: Maximum Calibration Error
}
"""
# Compute for every test sample x, the predicted class.
class_pred = np.argmax(p_mean, axis=1)
# Convert labels to one-hot encoding
y = to_one_hot(y)
# Compute the confidence (probability) associated with it.
conf = np.max(p_mean, axis=1)
# Convert y from one-hot encoding to the number of the class
y = np.argmax(y, axis=1)
# Storage
acc_tab = np.zeros(num_bins) # empirical (true) confidence
mean_conf = np.zeros(num_bins) # predicted confidence
nb_items_bin = np.zeros(num_bins) # number of items in the bins
tau_tab = np.linspace(0, 1, num_bins+1) # confidence bins
for i in np.arange(num_bins): # iterate over the bins
# Select the items where the predicted max probability falls in the bin
sec = (tau_tab[i + 1] > conf) & (conf >= tau_tab[i])
nb_items_bin[i] = np.sum(sec) # Number of items in the bin
# Select the predicted classes, and the true classes
class_pred_sec, y_sec = class_pred[sec], | |
(Edges)"
"dim" value: 2
"single" value: -
Type: Group of Faces
Number: 1
Name: "AllSubShapes (Faces)"
"dim" value: 3
"single" value: -
Type: Group of Solids
Number: 1
Name: "AllSubShapes (Solids)"
Conditions of use:
-
"""
if dim not in [0, 1, 2, 3]: print "[X] There is no shape to return corresponding to the given dimension."; return
# Get the input shape(s)
shape = GetGUISelection(shape)
shape = GetObject(shape)
#-
# Make this function recursive
if isinstance(shape, list):
return_list = []
for sub_object in shape:
return_list.append(PutAllSubShapesInAGroup(dim, sub_object, add, infa))
return return_list
#-
# Check the input shape existence
if "error" in [shape] or None in [shape]: return
#-
# Set father object
father = None
if infa == True: father = shape
#-
if False: pass
else:# All checks done
# Get the group type
if dim == 0: group_type = geompy.ShapeType["VERTEX"]
if dim == 1: group_type = geompy.ShapeType["EDGE"]
if dim == 2: group_type = geompy.ShapeType["FACE"]
if dim == 3: group_type = geompy.ShapeType["SOLID"]
#-
# Create the group
group = geompy.CreateGroup(shape, group_type)
#-
# Get the sub - shape IDs
sub_shape_ids = geompy.SubShapeAllIDs(shape, group_type)
#-
# Add the sub-shapes in the group
for sub_shape_id in sub_shape_ids:
geompy.AddObject(group, sub_shape_id)
#-
# Publish the group
if add == True:
if dim == 0: geompy.addToStudyInFather(father, group, "AllSubShapes (Vertexes)")
if dim == 1: geompy.addToStudyInFather(father, group, "AllSubShapes (Edges)")
if dim == 2: geompy.addToStudyInFather(father, group, "AllSubShapes (Faces)")
if dim == 3: geompy.addToStudyInFather(father, group, "AllSubShapes (Solids)")
# Update the study tree
salome.sg.updateObjBrowser(1)
#-
return group
#-
passiag = PutAllSubShapesInAGroup
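# Example (hedged usage sketch, assuming an active Salome GEOM session and
# hypothetical shapes my_solid, shape_a, shape_b):
#
#   face_group = PutAllSubShapesInAGroup(2, my_solid)   # group of all faces
#   edge_groups = passiag(1, [shape_a, shape_b])        # one edge group per shape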
def SetRandomColors( ):
"""
Description:
Applies random colors on selected shapes in the Geometry module's 3D windows.
On mesh groups and sub-meshes, the coloration takes effect only if the input objects have not been displayed yet; otherwise, the mesh has to be cleared and computed again.
Arguments:
# -
Description: -
Type: -
GUI selection: -
Selection by name: -
Recursive: -
Default value: -
Returned Values:
"dim" value: -
"single" value: -
Type: -
Number: -
Name: -
Conditions of use:
This function works only when used from the GUI.
"""
gg = salome.ImportComponentGUI("GEOM")
# Get selected objects
selected_object_ids = salome.sg.getAllSelected()
nb_selected_objects = len(selected_object_ids)
selected_objects = []
for selected_object_id in selected_object_ids:
selected_objects.append(salome.myStudy.FindObjectID(selected_object_id).GetObject())
#-
# Define colors
colors = [\
[255, 0, 0], \
[0, 0, 255], \
[0, 255, 0], \
[0, 255, 255], \
[255, 0, 128], \
[255, 128, 0], \
[255, 255, 0], \
[235, 235, 235], \
[20, 20, 20], \
[255, 0, 255], \
[255, 128, 128], \
[128, 255, 128], \
[0, 128, 255], \
[255, 255, 128], \
[255, 128, 255], \
[128, 255, 255], \
[128, 0, 255], \
[0, 255, 128], \
[128, 128, 255], \
[128, 255, 0], \
[128, 128, 128], \
]
nb_colors = len(colors)
#-
# Define random colors if necessary
for i in range(nb_selected_objects - nb_colors):
color = []
for i in range(3):
color.append(int(random.random() * 255))
colors.append(color)
#-
colors.reverse()
#random.shuffle(colors)
# Set color of selected objects
for i in range(nb_selected_objects):
color = colors.pop()
selected_object = selected_objects[i]
if "<SMESH." in str(selected_object):
try:
selected_object.SetColor(SALOMEDS.Color(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0))
except:
pass
if "<GEOM." in str(selected_object):
gg.setColor(selected_object_ids[i], color[0], color[1], color[2])
#-
src = SetRandomColors
def ExportCSVFile( compound = None, file = None, head = True ):
"""
Description:
Exports a 3D vertex compound into a CSV file.
Arguments:
# compound
Description: The vertex compound to export.
Type: Compound of Vertexes
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: None
# file
Description: The name of the file to write.
Type: String
GUI selection: -
Selection by name: -
Recursive: -
Default value: None
# head
Description: Defines if the function has to write a header to the file.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
Returned Values:
"dim" value: -
"single" value: -
Type: -
Number: -
Name: -
Conditions of use:
-
"""
input_shape = compound
# Get the input shape(s)
input_shape = GetGUISelection(input_shape)
input_shape = GetObject(input_shape)
#-
# Check the input shape existence
if "error" in [input_shape] or None in [input_shape]: return
#-
compound = input_shape
if False: pass
else:# All checks done
# Get vertexes
vertexes = GetSubShapes(compound)[0]
#-
# Get the file name
if file == None:
file = compound.GetName()
#-
# Export them in the CSV file
with open(file, "wb") as csvfile:
writer = csv.writer(csvfile, quoting = csv.QUOTE_NONNUMERIC)
if head == True:
writer.writerow(["X","Y","Z"])
for vertex in vertexes:
writer.writerow(geompy.PointCoordinates(vertex))
#-
ecf = ExportCSVFile
def ImportCSVFile( file, single = True, add = True ):
"""
Description:
Imports a CSV file describing a 3D set of vertexes.
Arguments:
# file
Description: The name of the file to read.
Type: String
GUI selection: -
Selection by name: -
Recursive: yes
Default value: -
# single
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
Returned Values:
"dim" value: -
"single" value: False
Type: Vertex
Number: n
Name: "VertexFromCSVFile"
"dim" value: -
"single" value: True
Type: Compound of Vertexes
Number: 1
Name: "VertexesFromCSVFile"
Conditions of use:
-
"""
# Make this function recursive
if isinstance(file, list):
return_list = []
for sub_object in file:
return_list.append(ImportCSVFile(sub_object, single, add))
return return_list
#-
if False: pass
else:# All checks done
# Put the CSV file into a list of lines
file_line_list = []
with open(file, "r") as opened_file:
for line in opened_file:
if not line.isspace():
file_line_list.append(line)
#-
# Get the separator
separator_list = [",", ";", "\t", "|", "^"]
right_separator = ""
right_nb_columns = 0
# for separator in separator_list:
#
# separator_found = True
#
# nb_columns = 0
#
# this_was_first_line = True
# for line in file_line_list:
#
# split_line = line.split(separator)
# if not this_was_first_line:
# if len(split_line) != nb_columns or len(split_line) <= 1:
# separator_found = False
# break
#
# nb_columns = len(split_line)
#
# this_was_first_line = False
#
#
# if separator_found:
#
# right_separator = separator
# right_nb_columns = nb_columns
#
# if right_separator == "":
#
# print "[X] The CSV file separator could not be determined. Please, use one of these separator characters: , ; | ^ tab"
# return
#
# else:
#
# separator = right_separator
# nb_columns = right_nb_columns
#
#
# #-
#
# # Check the number of columns
#
# if nb_columns not in [2, 3]:
# print "[X] The CSV file should contain a number of columns between two and three."
# return
#
#
#-
# Import the vertexes from the CSV file
vertex_list = []
for line in file_line_list:
split_line = line.split(",")
try:
x = float(split_line[0])
except:
continue
try:
y = float(split_line[1])
except:
continue
if len(split_line) == 3:
try:
z = float(split_line[2])
except:
continue
else:
z = 0.0
vertex = geompy.MakeVertex(x, y, z)
vertex_list.append(vertex)
#-
to_return = vertex_list
to_return_name = "VertexFromCSVFile"
if single == True:
compound = geompy.MakeCompound(vertex_list)
to_return = compound
to_return_name = "VertexesFromCSVFile"
# Add and return the resulting shape(s)
if add == True:
slow_add = False
if not isinstance(to_return, list) or single == True: slow_add = True
AddToStudy(to_return, to_return_name, suffix = slow_add, refresh = slow_add)
if slow_add == False:
if salome.sg.hasDesktop():
salome.sg.updateObjBrowser(1)
return to_return
#-
icf = ImportCSVFile
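# Example (hedged usage sketch, assuming an active Salome GEOM session and a
# hypothetical file "points.csv"):
#
#   compound = ImportCSVFile("points.csv")         # one compound of vertexes
#   vertexes = icf("points.csv", single = False)   # a list of single vertexes
#   ecf(compound, file = "points_out.csv")         # write them back with a header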
def MakeVirtualOffsetEdgeSubmeshes( thick_and_size, group_and_mesh = [None], np = 40, curv = True, rev = False, add = True, infa = False, dim = -1 ):
"""
Description:
Creates submeshes on an edge group so as to prepare it for automatic viscous layer meshing.
Arguments:
# thick_and_size
Description: The desired viscous layer thickness and the desired cell size along the edge.
Type: List of 2 Floats
GUI selection: -
Selection by name: -
Recursive: -
Default value: -
# group_and_mesh
Description: The input group and the mesh in which to create sub-meshes.
Type: List of 1 Group of Edges + 1 Mesh
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: [None]
# np
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: 40
# curv
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# rev
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# add
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: True
# infa
Description: See here.
Type: Boolean
GUI selection: -
Selection by name: -
Recursive: -
Default value: False
# dim
Description: See here.
Type: Integer
GUI selection: -
Selection by name: -
Recursive: -
Default value: -1
Returned Values:
"dim" value: 1
"single" value: -
Type: Compound of Edges
Number: 1
Name: "VirtualOffset"
"dim" value: -1
"single" value: -
Type: -
Number: -
Name: -
Conditions of use:
-
"""
if dim not in [-1, 0, 1]: print "[X] There is no shape to return corresponding to the given dimension."; return
if not isinstance(thick_and_size, list):
print "[X] The first argument (thick_and_size) should be an array."; return
if not isinstance(group_and_mesh, list):
print "[X] The second argument (group_and_mesh) should be an array."; return
if len(thick_and_size) != 2:
print "[X] The first argument (thick_and_size) should have exactly two elements."; return
input_shapes = group_and_mesh
# Get the input shape(s)
input_shapes = GetGUISelection(input_shapes)
input_shapes = GetObject(input_shapes, "GEOM", silent = True) + GetObject(input_shapes, "SMESH", silent = True)
#-
# Distinguish input shapes
group = None
mesh = None
for object in input_shapes:
if "GEOM_Object instance" in str(object): group = object
if "SMESH_Mesh instance" in str(object) or "meshProxy instance" in str(object) or "Mesh object" in str(object): mesh = object
if None | |
"""
Copyright (c) 2019, 2020, Oracle Corporation and/or its affiliates.
Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
Utility CLA methods shared by multiple tools.
"""
import os
from java.io import File
from java.io import IOException
from java.lang import IllegalArgumentException
from java.lang import String
from oracle.weblogic.deploy.util import FileUtils
from oracle.weblogic.deploy.util import TranslateException
from oracle.weblogic.deploy.util import VariableException
from oracle.weblogic.deploy.validate import ValidateException
import oracle.weblogic.deploy.util.PyOrderedDict as OrderedDict
from wlsdeploy.aliases.wlst_modes import WlstModes
from wlsdeploy.exception import exception_helper
from wlsdeploy.logging.platform_logger import PlatformLogger
from wlsdeploy.tool.util import filter_helper
from wlsdeploy.tool.util.archive_helper import ArchiveHelper
from wlsdeploy.tool.validate.validator import Validator
from wlsdeploy.util import cla_utils
from wlsdeploy.util import getcreds
from wlsdeploy.util import model_helper
from wlsdeploy.util import model_translator
from wlsdeploy.util import path_utils
from wlsdeploy.util import tool_exit
from wlsdeploy.util import variables
from wlsdeploy.util.cla_utils import CommandLineArgUtil
from wlsdeploy.util.model_translator import FileToPython
__logger = PlatformLogger('wlsdeploy.util')
_class_name = 'cla_helper'
_store_environment_variable = '__WLSDEPLOY_STORE_MODEL__'
__tmp_model_dir = None
def validate_optional_archive(program_name, optional_arg_map):
"""
If the archive file was specified on the command line, verify that it exists.
:param program_name: the name of the calling program, for logging
:param optional_arg_map: the optional arguments from the command line
:raises CLAException: if the archive was specified and does not exist
"""
_method_name = 'validate_optional_archive'
if CommandLineArgUtil.ARCHIVE_FILE_SWITCH in optional_arg_map:
archive_file_name = optional_arg_map[CommandLineArgUtil.ARCHIVE_FILE_SWITCH]
archive_files = cla_utils.get_archive_files(archive_file_name)
for archive_file in archive_files:
try:
FileUtils.validateExistingFile(archive_file)
except IllegalArgumentException, iae:
ex = exception_helper.create_cla_exception('WLSDPLY-20014', program_name, archive_file_name,
iae.getLocalizedMessage(), error=iae)
ex.setExitCode(CommandLineArgUtil.ARG_VALIDATION_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
def validate_model_present(program_name, optional_arg_map):
"""
Determine if the model file was passed separately or requires extraction from the archive.
If the model is in the archive, extract it to the temporary model location, and set that file as the
MODEL_FILE_SWITCH argument.
The MODEL_FILE_SWITCH value may be specified as multiple comma-separated models.
The ARCHIVE_FILE_SWITCH value may be specified as multiple comma-separated archives.
:param program_name: the name of the calling program, for logging
:param optional_arg_map: the optional arguments from the command line
:raises CLAException: if the specified model is not an existing file, or the model is not found in the archive,
or the model is not found from either argument
"""
_method_name = 'validate_model_present'
global __tmp_model_dir
if CommandLineArgUtil.MODEL_FILE_SWITCH in optional_arg_map:
model_file_value = optional_arg_map[CommandLineArgUtil.MODEL_FILE_SWITCH]
model_files = cla_utils.get_model_files(model_file_value)
for model_file in model_files:
try:
FileUtils.validateExistingFile(model_file)
except IllegalArgumentException, iae:
ex = exception_helper.create_cla_exception('WLSDPLY-20006', program_name, model_file,
iae.getLocalizedMessage(), error=iae)
ex.setExitCode(CommandLineArgUtil.ARG_VALIDATION_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
elif CommandLineArgUtil.ARCHIVE_FILE_SWITCH in optional_arg_map:
archive_file = optional_arg_map[CommandLineArgUtil.ARCHIVE_FILE_SWITCH]
archive_helper = ArchiveHelper(archive_file, None, __logger, exception_helper.ExceptionType.CLA)
if archive_helper.contains_model():
tmp_model_dir, tmp_model_file = archive_helper.extract_model(program_name)
model_file_name = FileUtils.fixupFileSeparatorsForJython(tmp_model_file.getAbsolutePath())
optional_arg_map[CommandLineArgUtil.MODEL_FILE_SWITCH] = model_file_name
else:
ex = exception_helper.create_cla_exception('WLSDPLY-20026', program_name, archive_file,
CommandLineArgUtil.MODEL_FILE_SWITCH)
ex.setExitCode(CommandLineArgUtil.ARG_VALIDATION_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
else:
ex = exception_helper.create_cla_exception('WLSDPLY-20015', program_name,
CommandLineArgUtil.MODEL_FILE_SWITCH,
CommandLineArgUtil.ARCHIVE_FILE_SWITCH)
ex.setExitCode(CommandLineArgUtil.USAGE_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
return
def validate_variable_file_exists(program_name, argument_map):
"""
Validate that the variable file(s) exist.
Assume that the caller allows multiple variables files.
:param program_name: the name of the tool
:param argument_map: the program arguments
"""
method_name = 'validate_variable_file_exists'
if CommandLineArgUtil.VARIABLE_FILE_SWITCH in argument_map:
result_files = [] # type: list
value = argument_map[CommandLineArgUtil.VARIABLE_FILE_SWITCH]
files = value.split(CommandLineArgUtil.MODEL_FILES_SEPARATOR)
for file in files:
try:
variable_file = FileUtils.validateExistingFile(file)
result_files.append(variable_file.getAbsolutePath())
except IllegalArgumentException, iae:
ex = exception_helper.create_cla_exception('WLSDPLY-20031', program_name, file,
iae.getLocalizedMessage(), error=iae)
ex.setExitCode(CommandLineArgUtil.ARG_VALIDATION_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=method_name)
raise ex
argument_map[CommandLineArgUtil.VARIABLE_FILE_SWITCH] = ",".join(result_files)
return
def process_encryption_args(optional_arg_map):
"""
If the user is using model encryption, get the passphrase from stdin, and put it in the argument map.
If the passphrase switch was specified in the arg map, just use it directly.
:param optional_arg_map: the optional arguments map
:raises CLAException: if an error occurs reading the passphrase input from the user
"""
_method_name = '__process_encryption_args'
if CommandLineArgUtil.USE_ENCRYPTION_SWITCH in optional_arg_map and \
CommandLineArgUtil.PASSPHRASE_SWITCH not in optional_arg_map:
try:
passphrase = getcreds.getpass('<PASSWORD>')
except IOException, ioe:
ex = exception_helper.create_cla_exception('WLSDPLY-20003', ioe.getLocalizedMessage(), error=ioe)
ex.setExitCode(CommandLineArgUtil.ARG_VALIDATION_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
optional_arg_map[CommandLineArgUtil.PASSPHRASE_SWITCH] = String(passphrase)
return
def validate_model(program_name, model_dictionary, model_context, aliases, wlst_mode):
"""
Validate the model dictionary based on the specified model context and aliases.
The tool will exit if exceptions are encountered, or the validation returns a STOP code.
:param program_name: the program name, for logging
:param model_dictionary: the model dictionary
:param model_context: the model context
:param aliases: the aliases
:param wlst_mode: offline or online
:return:
"""
_method_name = 'validate_model'
try:
validator = Validator(model_context, aliases, wlst_mode=wlst_mode)
# no need to pass the variable file for processing, substitution has already been performed
return_code = validator.validate_in_tool_mode(model_dictionary, variables_file_name=None,
archive_file_name=model_context.get_archive_file_name())
except ValidateException, ex:
__logger.severe('WLSDPLY-20000', program_name, ex.getLocalizedMessage(), error=ex,
class_name=_class_name, method_name=_method_name)
clean_up_temp_files()
tool_exit.end(model_context, CommandLineArgUtil.PROG_ERROR_EXIT_CODE)
if return_code == Validator.ReturnCode.STOP:
__logger.severe('WLSDPLY-20001', program_name, class_name=_class_name, method_name=_method_name)
clean_up_temp_files()
tool_exit.end(model_context, CommandLineArgUtil.PROG_ERROR_EXIT_CODE)
def load_model(program_name, model_context, aliases, filter_type, wlst_mode):
"""
Load the model based on the arguments in the model context.
Apply the variable substitution, if specified, and validate the model.
Apply any model filters of the specified type that are configured, and re-validate if necessary
The tool will exit if exceptions are encountered.
:param program_name: the program name, for logging
:param model_context: the model context
:param aliases: the alias configuration
:param filter_type: the type of any filters to be applied
:param wlst_mode: offline or online
:return: the resulting model dictionary
"""
_method_name = 'load_model'
variable_map = {}
try:
if model_context.get_variable_file():
# callers of this method allow multiple variable files
variable_map = variables.load_variables(model_context.get_variable_file(), allow_multiple_files=True)
except VariableException, ex:
__logger.severe('WLSDPLY-20004', program_name, ex.getLocalizedMessage(), error=ex,
class_name=_class_name, method_name=_method_name)
clean_up_temp_files()
tool_exit.end(model_context, CommandLineArgUtil.PROG_ERROR_EXIT_CODE)
model_file_value = model_context.get_model_file()
try:
model_dictionary = merge_model_files(model_file_value, variable_map)
except TranslateException, te:
__logger.severe('WLSDPLY-09014', program_name, model_file_value, te.getLocalizedMessage(), error=te,
class_name=_class_name, method_name=_method_name)
clean_up_temp_files()
tool_exit.end(model_context, CommandLineArgUtil.PROG_ERROR_EXIT_CODE)
try:
variables.substitute(model_dictionary, variable_map, model_context)
except VariableException, ex:
__logger.severe('WLSDPLY-20004', program_name, ex.getLocalizedMessage(), error=ex,
class_name=_class_name, method_name=_method_name)
clean_up_temp_files()
tool_exit.end(model_context, CommandLineArgUtil.PROG_ERROR_EXIT_CODE)
persist_model(model_context, model_dictionary)
validate_model(program_name, model_dictionary, model_context, aliases, wlst_mode)
if filter_helper.apply_filters(model_dictionary, filter_type):
# if any filters were applied, re-validate the model
validate_model(program_name, model_dictionary, model_context, aliases, wlst_mode)
return model_dictionary
def process_online_args(optional_arg_map):
"""
Determine if we are executing in online mode and if so, validate/prompt for the necessary parameters.
:param optional_arg_map: the optional arguments map
:return: the WLST mode
:raises CLAException: if an error occurs reading input from the user
"""
_method_name = 'process_online_args'
mode = WlstModes.OFFLINE
if CommandLineArgUtil.ADMIN_URL_SWITCH in optional_arg_map:
if CommandLineArgUtil.ADMIN_USER_SWITCH not in optional_arg_map:
try:
username = getcreds.getuser('WLSDPLY-09001')
except IOException, ioe:
ex = exception_helper.create_cla_exception('WLSDPLY-09002', ioe.getLocalizedMessage(), error=ioe)
ex.setExitCode(CommandLineArgUtil.ARG_VALIDATION_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
optional_arg_map[CommandLineArgUtil.ADMIN_USER_SWITCH] = username
if CommandLineArgUtil.ADMIN_PASS_SWITCH not in optional_arg_map:
try:
password = getcreds.getpass('<PASSWORD>')
except IOException, ioe:
ex = exception_helper.create_cla_exception('WLSDPLY-09004', ioe.getLocalizedMessage(), error=ioe)
ex.setExitCode(CommandLineArgUtil.ARG_VALIDATION_ERROR_EXIT_CODE)
__logger.throwing(ex, class_name=_class_name, method_name=_method_name)
raise ex
optional_arg_map[CommandLineArgUtil.ADMIN_PASS_SWITCH] = String(password)
mode = WlstModes.ONLINE
optional_arg_map[CommandLineArgUtil.TARGET_MODE_SWITCH] = 'online'
return mode
def clean_up_temp_files():
"""
If a temporary directory was created to extract the model from the archive, delete the directory and its contents.
"""
global __tmp_model_dir
if __tmp_model_dir is not None:
FileUtils.deleteDirectory(__tmp_model_dir)
__tmp_model_dir = None
def merge_model_files(model_file_value, variable_map=None):
"""
Merge the model files specified by the model file value.
It may be a single file, or a comma-separated list of files.
:param variable_map: variables to be used for name resolution, or None
:param model_file_value: the value specified as a command argument
:return: the merged model dictionary
"""
merged_model = OrderedDict()
model_files = cla_utils.get_model_files(model_file_value)
for model_file in model_files:
model = FileToPython(model_file, True).parse()
merge_model_dictionaries(merged_model, model, variable_map)
return merged_model
def merge_model_dictionaries(dictionary, new_dictionary, variable_map):
"""
Merge the values from the new dictionary to the existing one.
Use variables to resolve keys.
:param dictionary: the existing dictionary
:param new_dictionary: the new dictionary to be merged
:param variable_map: variables to be used for name resolution, or None
"""
for new_key in new_dictionary:
new_value = new_dictionary[new_key]
dictionary_key, replace_key = _find_dictionary_merge_key(dictionary, new_key, variable_map)
# the key is not in the original dictionary, just add it
if dictionary_key is None:
dictionary[new_key] = new_value
# the new key should replace the existing one - delete the existing key and add the new one
elif replace_key:
del dictionary[dictionary_key]
if not model_helper.is_delete_name(new_key):
dictionary[new_key] = new_value
# the key is in both dictionaries - merge if the values are dictionaries, otherwise replace the value
else:
value = dictionary[dictionary_key]
if isinstance(value, dict) and isinstance(new_value, dict):
merge_model_dictionaries(value, new_value, variable_map)
else:
dictionary[new_key] = new_value
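# Illustrative sketch (not part of the original module): merging two model
# fragments combines nested folders and lets the later model replace scalar
# values, e.g. with plain dicts for brevity:
#
#   base = {'topology': {'Server': {'ms1': {'ListenPort': 7001}}}}
#   extra = {'topology': {'Server': {'ms2': {}}, 'Name': 'mydomain'}}
#   merge_model_dictionaries(base, extra, None)
#   # base now contains Server/ms1, Server/ms2 and the Name attribute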
def _find_dictionary_merge_key(dictionary, new_key, variable_map):
"""
Find the key corresponding to new_key in the specified dictionary.
Determine if the new_key should completely replace the value in the dictionary.
If no direct match is found, and a variable map is specified, perform check with variable substitution.
If keys have the same name, but one has delete notation (!server), that is a match, and replace is true.
:param dictionary: the dictionary to be searched
:param new_key: the | |
from typing import Tuple
import numpy as np
import pandas as pd
from pvrpm.core.enums import ConfigKeys as ck
from pvrpm.core.case import SamCase
from pvrpm.core.utils import summarize_dc_energy
from pvrpm.core.logger import logger
from pvrpm.core.modules import failure, monitor, repair
class Components:
"""
Data container for each component in the simulation, as well as component and simulation level data
"""
def __init__(self, case: SamCase):
self.case = case
self.comps = {}
self.fails = {c: [] for c in ck.component_keys if self.case.config.get(c, None)}
self.monitors = {c: [] for c in ck.component_keys if self.case.config.get(c, None)}
self.repairs = {c: [] for c in ck.component_keys if self.case.config.get(c, None)}
self.costs = {}
# keep track of total days spent on monitoring and repairs
self.total_repair_time = {}
self.total_monitor_time = {}
lifetime = self.case.config[ck.LIFETIME_YRS]
# additional aggregate data to track during simulation
self.module_degradation_factor = np.zeros(lifetime * 365)
self.dc_power_availability = np.zeros(lifetime * 365)
self.ac_power_availability = np.zeros(lifetime * 365)
# static monitoring setup
# if there's no static monitoring defined, an exception is raised
try:
self.indep_monitor = monitor.IndepMonitor(self.case, self.comps, self.costs, self.dc_power_availability)
except AttributeError:
self.indep_monitor = None
# every component level will contain a dataframe containing the data for all the components in that level
for c in ck.component_keys:
if self.case.config.get(c, None):
self.total_repair_time[c] = 0
self.total_monitor_time[c] = 0
self.costs[c] = np.zeros(lifetime * 365)
df, fails, monitors, repairs = self.initialize_components(c)
self.comps[c] = df
self.fails[c] += fails
self.monitors[c] += monitors
self.repairs[c] += repairs
# Data from simulation at end of realization
self.timeseries_dc_power = None
self.timeseries_ac_power = None
self.lcoe = None
self.npv = None
self.annual_energy = None
self.tax_cash_flow = None
self.losses = {}
if case.config[ck.TRACKING]:
self.tracker_power_loss_factor = np.zeros(lifetime * 365)
self.tracker_availability = np.zeros(lifetime * 365)
@staticmethod
def compound_failures(function: str, parameters: dict, num_fails: int):
"""
Compounds the failures using the provided function and its parameters to calculate the number of days to reduce the time to detection by
Possible functions and their parameters are:
- step: Failures follow a step function, each step reducing the detection time by a static amount
- threshold (float): fraction 0 <= threshold <= 1 that specifies the fraction of components that must fail before the next step is reached. So if this is 0.2, every 0.2 * total_components components that fail will reduce detection time by step
- step (int): The number of days to reduce detection time for every step. So 2 steps reduce detection time by 2 * step
- exponential: Failures compound on an exponential function
- base (float): The base for the exponential function > 0
- log: Failures compound on a logarithmic function
- base (float): The base for the log function > 0
- linear: Failures compound linearly
- slope (float): Slope of the linear function > 0
- constant: Each failure reduces the time to detection by a static fraction constant
- constant (float): fraction 0 <= frac <= 1 that specifies how much of the overall time each failure reduces. So if fraction is 0.1, a failure will reduce time to detection by "time_to_detection * 0.1"
"""
pass
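# Hedged sketch (not part of the original module, which leaves compound_failures
# as a stub): the "constant" compounding described in the docstring could be
# expressed as a hypothetical helper like
#
#   def _constant_compounding(time_to_detection, constant, num_fails):
#       # each failure removes a static fraction of the detection time
#       return time_to_detection * constant * num_fails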
def summarize_failures(self, component_level: str):
"""
Returns the number of failures per day for every failure defined
Args:
component_level (str): The configuration key for this component level
Returns:
:obj:`dict`: Dictionary containing the failure mode mapped to an np array of fails per each day
"""
fails = {}
for f in self.fails[component_level]:
fails.update(f.fails_per_day)
return fails
def update_labor_rates(self, new_labor: float):
"""
Update labor rates for a all levels for all types of repairs
Args:
new_labor (float): The new labor rate
"""
if self.indep_monitor:
self.indep_monitor.update_labor_rate(new_labor)
for c in ck.component_keys:
if self.case.config.get(c, None):
for r in self.repairs[c]:
r.update_labor_rate(new_labor)
def initialize_components(self, component_level: str) -> pd.DataFrame:
"""
Initializes all components for the first time
Args:
component_level (str): The configuration key for this component level
Returns:
:obj:`pd.DataFrame`: A dataframe containing the initialized values for this component level
Note: Individual components have these columns:
- state (bool): Operational (True) or failed (False)
- defective (bool): Whether the component has a defect. True means the component is also eligible for the defective failure mode
- time_to_failure (float): Number of days until component fails
- failure_type (str): The name of the failure type time_to_failure represents
- time_to_repair (float): Number of days from failure until the component is repaired
- time_to_detection (float): Number of days until the component failure is detected and repairs start
- repair_times (float): the total repair time for this repair
- monitor_times (float): the total monitoring time before repairs start
- time_left_on_warranty (int): Number of days left on warranty (if applicable)
- cumulative_failures (int): Total number of failures for that component
- cumulative_oow_failures (int): Total number of out of warranty failures (if applicable)
- failure_by_type_n (int): Each failure will have its own column with the number of failures of this type
- defective_failures (int): Total number of defective failures
- avail_downtime (int): How many hours of downtime this component has accrued
- degradation_factor (float): 1 - percentage component has degraded at this time (module only)
- days_of_degradation (int): Time that the module has been degrading for
"""
component_info = self.case.config[component_level]
component_ind = np.arange(component_info[ck.NUM_COMPONENT])
df = pd.DataFrame(index=component_ind)
# operational
df["state"] = 1
# degradation gets reset to zero
if component_info.get(ck.DEGRADE, None):
df["days_of_degradation"] = 0
df["degradation_factor"] = 1
if component_info.get(ck.WARRANTY, None):
df["time_left_on_warranty"] = component_info[ck.WARRANTY][ck.DAYS]
else:
df["time_left_on_warranty"] = 0
df["cumulative_failures"] = 0
df["cumulative_oow_failures"] = 0
df["avail_downtime"] = 0
# if the component can't fail, nothing else needs to be initialized
if not component_info[ck.CAN_FAIL]:
return (df, [], [], [])
if component_info.get(ck.FAILURE, None):
fails = [failure.TotalFailure(component_level, df, self.case, self.indep_monitor)]
else:
fails = []
partial_failures = component_info.get(ck.PARTIAL_FAIL, {})
partial_fails = []
for mode in partial_failures.keys():
partial_fails.append(failure.PartialFailure(component_level, df, self.case, mode, self.indep_monitor))
# monitoring times, these will be added to the repair time for each component
# basically, the time until each failure is detected
monitors = []
# for independent monitoring, may not be used if none is defined
df["indep_monitor"] = False
if component_info[ck.CAN_REPAIR]:
if component_info[ck.CAN_MONITOR]:
monitors.append(monitor.LevelMonitor(component_level, df, self.case))
# monitoring across levels; only applies if properly defined and component-level monitoring (can_monitor) is false
elif component_info.get(ck.COMP_MONITOR, None):
monitors.append(monitor.CrossLevelMonitor(component_level, df, self.case))
# only static detection available
elif component_info.get(ck.INDEP_MONITOR, None):
# with only static monitoring, the proper detection time is the gap between the failure and the next static monitoring pass that follows it
# for simplicity's sake this will be set when a component fails, since multiple static monitoring schemes can be defined
# and the time to detection would be the time from when the component fails to the next static monitoring occurrence
# so, set these to None and assume they will be properly updated in the simulation
df["monitor_times"] = None
df["time_to_detection"] = None
# time to replacement/repair in case of failure
if not component_info[ck.CAN_REPAIR]:
repairs = []
df["time_to_repair"] = 1 # just initalize to 1 if no repair modes, means components cannot be repaired
elif component_info.get(ck.REPAIR, None):
repairs = []
repairs.append(
repair.TotalRepair(
component_level,
df,
self.case,
self.costs[component_level],
fails,
repairs,
monitors,
self.indep_monitor,
)
)
else:
repairs = []
df["time_to_repair"] = 1
partial_repairs = component_info.get(ck.PARTIAL_REPAIR, {})
if len(partial_repairs) == 1:
repair_mode = list(component_info[ck.PARTIAL_REPAIR].keys())[0]
for i, fail_mode in enumerate(partial_failures.keys()):
repairs.append(
repair.PartialRepair(
component_level,
df,
self.case,
self.costs[component_level],
partial_fails[i],
fail_mode,
repair_mode,
self.indep_monitor,
)
)
else:
for i, (fail_mode, repair_mode) in enumerate(zip(partial_failures.keys(), partial_repairs.keys())):
repairs.append(
repair.PartialRepair(
component_level,
df,
self.case,
self.costs[component_level],
partial_fails[i],
fail_mode,
repair_mode,
self.indep_monitor,
)
)
fails += partial_fails
return (df, fails, monitors, repairs)
def tracker_power_loss(self, day: int) -> Tuple[float, float]:
"""
Calculates the current loss factor due to failed trackers
Args:
day (int): Current day in the simulation
Returns:
Tuple[float, float]: The fraction of trackers operational and the loss factor for failed trackers
"""
df = self.comps[ck.TRACKER]
day = day % 365
operational_trackers = len(df[df["state"] == 1])
fraction = operational_trackers / len(df)
adjusted_factor = 1
if self.case.config[ck.TRACKER][ck.CAN_FAIL]:
adjusted_factor = min(
1, self.case.daily_tracker_coeffs[day] + fraction * (1 - self.case.daily_tracker_coeffs[day])
)
return fraction, adjusted_factor
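# Worked example with hypothetical values: if daily_tracker_coeffs[day] is 0.3 and
# half of the trackers are operational (fraction = 0.5), the adjusted loss factor is
# min(1, 0.3 + 0.5 * (1 - 0.3)) = 0.65, i.e. roughly a 35% reduction attributable to
# failed trackers on that day.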
def current_degradation(self) -> float:
"""
Calculates | |
+ len(name), 0x1206) + name.encode(
"ascii"
)
if (
(self.rotation is not None)
or (self.magnification is not None)
or self.x_reflection
):
word = 0
values = b""
if self.x_reflection:
word += 0x8000
if self.magnification is not None:
# This flag indicates that the magnification is absolute, not
# relative (not supported).
# word += 0x0004
values += struct.pack(">2H", 12, 0x1B05) + _eight_byte_real(
self.magnification
)
if self.rotation is not None:
# This flag indicates that the rotation is absolute, not
# relative (not supported).
# word += 0x0002
values += struct.pack(">2H", 12, 0x1C05) + _eight_byte_real(
self.rotation
)
data += struct.pack(">3H", 6, 0x1A01, word) + values
return data + struct.pack(
">2H2l2H",
12,
0x1003,
int(round(self.origin[0] * multiplier)),
int(round(self.origin[1] * multiplier)),
4,
0x1100,
)
def area(self, by_spec=False):
"""
Calculate the total area of the referenced cell with the
magnification factor included.
Parameters
----------
by_spec : bool
If True, the return value is a dictionary with the areas
of each individual pair (layer, datatype).
Returns
-------
out : number, dictionary
Area of this cell.
"""
if not isinstance(self.ref_cell, Cell):
return dict() if by_spec else 0
if self.magnification is None:
return self.ref_cell.area(by_spec)
else:
if by_spec:
factor = self.magnification ** 2
cell_area = self.ref_cell.area(True)
for kk in cell_area.keys():
cell_area[kk] *= factor
return cell_area
else:
return self.ref_cell.area() * self.magnification ** 2
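# Usage sketch (hypothetical cell): the area scales with the square of the
# magnification, so a reference with magnification=2 reports four times the
# area of the referenced cell, e.g.
# CellReference(some_cell, magnification=2).area() == 4 * some_cell.area()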
def get_polygons(self, by_spec=False, depth=None):
"""
Return the list of polygons created by this reference.
Parameters
----------
by_spec : bool
If True, the return value is a dictionary with the
polygons of each individual pair (layer, datatype).
depth : integer or None
If not None, defines from how many reference levels to
retrieve polygons. References below this level will result
in a bounding box. If `by_spec` is True the key will be the
name of the referenced cell.
Returns
-------
out : list of array-like[N][2] or dictionary
List containing the coordinates of the vertices of each
polygon, or dictionary with the list of polygons (if
`by_spec` is True).
Note
----
Instances of `FlexPath` and `RobustPath` are also included in
the result by computing their polygonal boundary.
"""
if not isinstance(self.ref_cell, Cell):
return dict() if by_spec else []
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.x_reflection:
xrefl = _pmone_int
if self.magnification is not None:
mag = self.magnification * _one
if self.origin is not None:
orgn = numpy.array(self.origin)
if by_spec:
polygons = self.ref_cell.get_polygons(True, depth)
for kk in polygons.keys():
for ii in range(len(polygons[kk])):
if self.x_reflection:
polygons[kk][ii] = polygons[kk][ii] * xrefl
if self.magnification is not None:
polygons[kk][ii] = polygons[kk][ii] * mag
if self.rotation is not None:
polygons[kk][ii] = (
polygons[kk][ii] * ct + polygons[kk][ii][:, ::-1] * st
)
if self.origin is not None:
polygons[kk][ii] = polygons[kk][ii] + orgn
else:
polygons = self.ref_cell.get_polygons(depth=depth)
for ii in range(len(polygons)):
if self.x_reflection:
polygons[ii] = polygons[ii] * xrefl
if self.magnification is not None:
polygons[ii] = polygons[ii] * mag
if self.rotation is not None:
polygons[ii] = polygons[ii] * ct + polygons[ii][:, ::-1] * st
if self.origin is not None:
polygons[ii] = polygons[ii] + orgn
return polygons
def get_polygonsets(self, depth=None):
"""
Return the list of polygons created by this reference.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve polygons from.
Returns
-------
out : list of `PolygonSet`
List containing the polygons in this cell and its
references.
"""
if not isinstance(self.ref_cell, Cell):
return []
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.x_reflection:
xrefl = _pmone_int
if self.magnification is not None:
mag = self.magnification * _one
if self.origin is not None:
orgn = numpy.array(self.origin)
polygonsets = self.ref_cell.get_polygonsets(depth=depth)
for ps in polygonsets:
for ii in range(len(ps.polygons)):
if self.x_reflection:
ps.polygons[ii] = ps.polygons[ii] * xrefl
if self.magnification is not None:
ps.polygons[ii] = ps.polygons[ii] * mag
if self.rotation is not None:
ps.polygons[ii] = (
ps.polygons[ii] * ct + ps.polygons[ii][:, ::-1] * st
)
if self.origin is not None:
ps.polygons[ii] = ps.polygons[ii] + orgn
return polygonsets
def get_paths(self, depth=None):
"""
Return the list of paths created by this reference.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve paths from.
Returns
-------
out : list of `FlexPath` or `RobustPath`
List containing the paths in this cell and its references.
"""
if not isinstance(self.ref_cell, Cell):
return []
if self.origin is not None:
trans = numpy.array(self.origin)
else:
trans = None
if self.rotation is not None:
rot = self.rotation * numpy.pi / 180.0
else:
rot = None
return [
p.transform(trans, rot, self.magnification, self.x_reflection)
for p in self.ref_cell.get_paths(depth=depth)
]
def get_labels(self, depth=None):
"""
Return the list of labels created by this reference.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve labels from.
Returns
-------
out : list of `Label`
List containing the labels in this cell and its references.
"""
if not isinstance(self.ref_cell, Cell):
return []
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.x_reflection:
xrefl = _pmone_int
if self.magnification is not None:
mag = self.magnification * _one
if self.origin is not None:
orgn = numpy.array(self.origin)
labels = self.ref_cell.get_labels(depth=depth)
for lbl in labels:
if self.x_reflection:
lbl.position = lbl.position * xrefl
if self.magnification is not None:
lbl.position = lbl.position * mag
if self.rotation is not None:
lbl.position = lbl.position * ct + lbl.position[::-1] * st
if self.origin is not None:
lbl.position = lbl.position + orgn
return labels
def get_bounding_box(self):
"""
Calculate the bounding box for this reference.
Returns
-------
out : Numpy array[2, 2] or None
Bounding box of this cell [[x_min, y_min], [x_max, y_max]],
or None if the cell is empty.
"""
if not isinstance(self.ref_cell, Cell):
return None
if (
self.rotation is None
and self.magnification is None
and self.x_reflection is None
):
key = self
else:
key = (self.ref_cell, self.rotation, self.magnification, self.x_reflection)
deps = self.ref_cell.get_dependencies(True)
if not (
self.ref_cell._bb_valid
and all(ref._bb_valid for ref in deps)
and key in _bounding_boxes
):
for ref in deps:
ref.get_bounding_box()
self.ref_cell.get_bounding_box()
tmp = self.origin
self.origin = None
polygons = self.get_polygons()
self.origin = tmp
if len(polygons) == 0:
bb = None
else:
all_points = numpy.concatenate(polygons).transpose()
bb = numpy.array(
(
(all_points[0].min(), all_points[1].min()),
(all_points[0].max(), all_points[1].max()),
)
)
_bounding_boxes[key] = bb
else:
bb = _bounding_boxes[key]
if self.origin is None or bb is None:
return bb
else:
return bb + numpy.array(
((self.origin[0], self.origin[1]), (self.origin[0], self.origin[1]))
)
def translate(self, dx, dy):
"""
Translate this reference.
Parameters
----------
dx : number
Distance to move in the x-direction.
dy : number
Distance to move in the y-direction.
Returns
-------
out : `CellReference`
This object.
"""
self.origin = (self.origin[0] + dx, self.origin[1] + dy)
return self
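# Usage sketch (hypothetical cell): translate() only shifts the origin and returns
# the reference itself, so calls can be chained, e.g.
# CellReference(some_cell, origin=(0, 0)).translate(5, -2).origin == (5, -2)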
class CellArray(object):
"""
Multiple references to an existing cell in an array format.
Parameters
----------
ref_cell : `Cell` or string
The referenced cell or its name.
columns : positive integer
Number of columns in the array.
rows : positive integer
Number of rows in the array.
spacing : array-like[2]
Distances between adjacent columns and adjacent rows.
origin : array-like[2]
Position where the cell is inserted.
rotation : number
Angle of rotation of the reference (in *degrees*).
magnification : number
Magnification factor for the reference.
x_reflection : bool
If True, the reference is reflected parallel to the x
direction before being rotated.
ignore_missing : bool
If False a warning is issued when the referenced cell is not
found.
"""
__slots__ = (
"ref_cell",
"origin",
"rotation",
"magnification",
"x_reflection",
"columns",
"rows",
"spacing",
)
def __init__(
self,
ref_cell,
columns,
rows,
spacing,
origin=(0, 0),
rotation=None,
magnification=None,
x_reflection=False,
ignore_missing=False,
):
self.columns = columns
self.rows = rows
self.spacing = spacing
self.origin = origin
self.ref_cell = current_library.cell_dict.get(ref_cell, ref_cell)
self.rotation = rotation
self.magnification = magnification
self.x_reflection = x_reflection
if not isinstance(self.ref_cell, Cell) and not | |
import time
from random import randint
import board
import simpleio
import busio
import terminalio
import neopixel
from digitalio import DigitalInOut, Direction, Pull
from analogio import AnalogIn
import displayio
import adafruit_imageload
from adafruit_display_text import label
import adafruit_displayio_ssd1306
# uncomment if using USB MIDI
# import usb_midi
from adafruit_display_shapes.rect import Rect
import adafruit_midi
from adafruit_midi.note_on import NoteOn
from adafruit_midi.note_off import NoteOff
from adafruit_midi.control_change import ControlChange
displayio.release_displays()
oled_reset = board.D9
# turn off the on-board neopixel
pixel = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0)
pixel.fill((0, 0, 0))
# Use I2C for the STEMMA OLED
i2c = board.I2C()
display_bus = displayio.I2CDisplay(i2c, device_address=0x3D, reset=oled_reset)
# STEMMA OLED dimensions. Height can be 64, but 32 makes the text larger
WIDTH = 128
HEIGHT = 32
BORDER = 0
# blinka sprite indexes
EMPTY = 0
BLINKA_1 = 1
BLINKA_2 = 2
# setup for STEMMA OLED
display = adafruit_displayio_ssd1306.SSD1306(display_bus, width=WIDTH, height=HEIGHT)
# create the displayio object
splash = displayio.Group(max_size=40)
display.show(splash)
# text for BPM
bpm_text = "BPM: "
bpm_text_area = label.Label(
terminalio.FONT, text=bpm_text, color=0xFFFFFF, x=4, y=6
)
splash.append(bpm_text_area)
bpm_rect = Rect(0, 0, 50, 16, fill=None, outline=0xFFFFFF)
splash.append(bpm_rect)
# text for key
key_text = "Key: "
key_text_area = label.Label(
terminalio.FONT, text=key_text, color=0xFFFFFF, x=4, y=21
)
splash.append(key_text_area)
key_rect = Rect(0, 15, 50, 16, fill=None, outline=0xFFFFFF)
splash.append(key_rect)
# text for mode
mode_text = "Mode: "
mode_text_area = label.Label(
terminalio.FONT, text=mode_text, color=0xFFFFFF, x=54, y=21
)
splash.append(mode_text_area)
mode_rect = Rect(50, 15, 78, 16, fill=None, outline=0xFFFFFF)
splash.append(mode_rect)
# text for beat division
beat_text = "Div: "
beat_text_area = label.Label(
terminalio.FONT, text=beat_text, color=0xFFFFFF, x=54, y=6
)
splash.append(beat_text_area)
beat_rect = Rect(50, 0, 78, 16, fill=None, outline=0xFFFFFF)
splash.append(beat_rect)
# Blinka sprite setup
blinka, blinka_pal = adafruit_imageload.load("/spritesWhite.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette)
# creates a transparent background for Blinka
blinka_pal.make_transparent(7)
blinka_grid = displayio.TileGrid(blinka, pixel_shader=blinka_pal,
width=1, height=1,
tile_height=16, tile_width=16,
default_tile=EMPTY)
blinka_grid.x = 112
blinka_grid.y = 0
splash.append(blinka_grid)
# imports MIDI
# USB MIDI:
# midi = adafruit_midi.MIDI(midi_out=usb_midi.ports[1], out_channel=0)
# UART MIDI:
midi = adafruit_midi.MIDI(midi_out=busio.UART(board.TX, board.RX, baudrate=31250), out_channel=0)
# potentiometer pin setup
key_pot = AnalogIn(board.A1)
mode_pot = AnalogIn(board.A2)
beat_pot = AnalogIn(board.A3)
bpm_slider = AnalogIn(board.A4)
mod_pot = AnalogIn(board.A5)
# run switch setup
run_switch = DigitalInOut(board.D5)
run_switch.direction = Direction.INPUT
run_switch.pull = Pull.UP
# arrays of notes in each key
key_of_C = [60, 62, 64, 65, 67, 69, 71, 72]
key_of_Csharp = [61, 63, 65, 66, 68, 70, 72, 73]
key_of_D = [62, 64, 66, 67, 69, 71, 73, 74]
key_of_Dsharp = [63, 65, 67, 68, 70, 72, 74, 75]
key_of_E = [64, 66, 68, 69, 71, 73, 75, 76]
key_of_F = [65, 67, 69, 70, 72, 74, 76, 77]
key_of_Fsharp = [66, 68, 70, 71, 73, 75, 77, 78]
key_of_G = [67, 69, 71, 72, 74, 76, 78, 79]
key_of_Gsharp = [68, 70, 72, 73, 75, 77, 79, 80]
key_of_A = [69, 71, 73, 74, 76, 78, 80, 81]
key_of_Asharp = [70, 72, 74, 75, 77, 79, 81, 82]
key_of_B = [71, 73, 75, 76, 78, 80, 82, 83]
# array of keys
keys = [key_of_C, key_of_Csharp, key_of_D, key_of_Dsharp, key_of_E, key_of_F, key_of_Fsharp,
key_of_G, key_of_Gsharp, key_of_A, key_of_Asharp, key_of_B]
# array of note indexes for modes
fifths = [0, 4, 3, 7, 2, 6, 4, 7]
major = [4, 2, 0, 3, 5, 7, 6, 4]
minor = [5, 7, 2, 4, 6, 5, 1, 3]
pedal = [5, 5, 5, 6, 5, 5, 5, 7]
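# These pattern arrays hold indexes into the currently selected key array, not MIDI
# note numbers themselves. For example, in the key of C the "fifths" pattern starts
# with key_of_C[0] = 60 (C) followed by key_of_C[4] = 67 (G), a perfect fifth.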
# defining variables for key name strings
C_name = "C"
Csharp_name = "C#"
D_name = "D"
Dsharp_name = "D#"
E_name = "E"
F_name = "F"
Fsharp_name = "F#"
G_name = "G"
Gsharp_name = "G#"
A_name = "A"
Asharp_name = "A#"
B_name = "B"
# array of strings for key names for use with the display
key_names = [C_name, Csharp_name, D_name, Dsharp_name, E_name, F_name, Fsharp_name,
G_name, Gsharp_name, A_name, Asharp_name, B_name]
# function for reading analog inputs
def val(voltage):
return voltage.value
# comparators for the pots' values
mod_val2 = 0
beat_val2 = 0
bpm_val2 = 120
key_val2 = 0
mode_val2 = 0
# time.monotonic for running the modes
run = 0
# state for being on/off
run_state = False
# indexes for modes
r = 0
b = 0
f = 0
p = 0
maj = 0
mi = 0
random = 0
# mode states
play_pedal = False
play_fifths = False
play_maj = False
play_min = False
play_rando = False
play_scale = True
# state for random beat division
rando = False
# comparators for states
last_r = 0
last_f = 0
last_maj = 0
last_min = 0
last_p = 0
last_random = 0
# index for random beat division
hit = 0
# default tempo
tempo = 60
# beat division
sixteenth = 15 / tempo
eighth = 30 / tempo
quarter = 60 / tempo
half = 120 / tempo
whole = 240 / tempo
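# Worked example: the divisions are expressed in seconds per note, so at the default
# tempo of 60 BPM a quarter note lasts 60 / 60 = 1.0 s and a sixteenth 15 / 60 = 0.25 s;
# doubling the tempo to 120 BPM halves both (0.5 s and 0.125 s).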
# time.monotonic for blinka animation
slither = 0
# blinka animation sprite index
g = 1
# array for random beat division values
rando_div = [240, 120, 60, 30, 15]
# array of beat division values
beat_division = [whole, half, quarter, eighth, sixteenth]
# strings for beat division names
beat_division_name = ["1", "1/2", "1/4", "1/8", "1/16", "Random"]
while True:
# mapping analog pot values to the different parameters
# MIDI modulation 0-127
mod_val1 = round(simpleio.map_range(val(mod_pot), 0, 65535, 0, 127))
# BPM range 60-220
bpm_val1 = simpleio.map_range(val(bpm_slider), 0, 65535, 60, 220)
# 6 options for beat division
beat_val1 = round(simpleio.map_range(val(beat_pot), 0, 65535, 0, 5))
# 12 options for key selection
key_val1 = round(simpleio.map_range(val(key_pot), 0, 65535, 0, 11))
# 6 options for mode selection
mode_val1 = round(simpleio.map_range(val(mode_pot), 0, 65535, 0, 5))
# sending MIDI modulation
if abs(mod_val1 - mod_val2) > 2:
# updates previous value to hold current value
mod_val2 = mod_val1
# MIDI data has to be sent as an integer
# this converts the pot data into an int
modulation = int(mod_val2)
# int is stored as a CC message
modWheel = ControlChange(1, modulation)
# CC message is sent
midi.send(modWheel)
print(modWheel)
# delay to settle MIDI data
time.sleep(0.001)
# sets beat division
if abs(beat_val1 - beat_val2) > 0:
# updates previous value to hold current value
beat_val2 = beat_val1
print("beat div is", beat_val2)
# updates display
beat_text_area.text = "Div:%s" % beat_division_name[beat_val2]
# sets random beat division state
if beat_val2 == 5:
rando = True
else:
rando = False
time.sleep(0.001)
# mode selection
if abs(mode_val1 - mode_val2) > 0:
# updates previous value to hold current value
mode_val2 = mode_val1
# scale mode
if mode_val2 == 0:
play_scale = True
play_maj = False
play_min = False
play_fifths = False
play_pedal = False
play_rando = False
# updates display
mode_text_area.text = "Mode:Scale"
print("scale")
# major triads mode
if mode_val2 == 1:
play_scale = False
play_maj = True
play_min = False
play_fifths = False
play_pedal = False
play_rando = False
print("major chords")
# updates display
mode_text_area.text = "Mode:MajorTriads"
# minor triads mode
if mode_val2 == 2:
play_scale = False
play_maj = False
play_min = True
play_fifths = False
play_pedal = False
play_rando = False
print("minor")
# updates display
mode_text_area.text = "Mode:MinorTriads"
# fifths mode
if mode_val2 == 3:
play_scale = False
play_maj = False
play_min = False
play_fifths = True
play_pedal = False
play_rando = False
print("fifths")
# updates display
mode_text_area.text = "Mode:Fifths"
# pedal tone mode
if mode_val2 == 4:
play_scale = False
play_maj = False
play_min = False
play_fifths = False
play_pedal = True
play_rando = False
print("play random")
# updates display
mode_text_area.text = 'Mode:Pedal'
# random mode
if mode_val2 == 5:
play_scale = False
play_maj = False
play_min = False
play_fifths = False
play_pedal = False
play_rando = True
print("play random")
# updates display
mode_text_area.text = 'Mode:Random'
time.sleep(0.001)
# key selection
if abs(key_val1 - key_val2) > 0:
# updates previous value to hold current value
key_val2 = key_val1
# selects the array of notes for the chosen key
o = key_val2
octave = keys[o]
# updates display
key_text_area.text = 'Key:%s' % key_names[key_val2]
print("o is", o)
time.sleep(0.001)
# BPM adjustment
if abs(bpm_val1 - bpm_val2) > 1:
# updates previous value to hold current value
bpm_val2 = bpm_val1
# updates tempo
tempo = int(bpm_val2)
# updates calculations for beat division
sixteenth = 15 / tempo
eighth = 30 / tempo
quarter = 60 / tempo
half = 120 / tempo
whole = 240 / tempo
# updates array of beat divisions
beat_division = [whole, half, quarter, eighth, sixteenth]
# updates display
bpm_text_area.text = "BPM:%d" % tempo
print("tempo is", tempo)
time.sleep(0.05)
# if the run switch is pressed:
if run_switch.value:
run_state = True
# if random beat division, then beat_division index is randomized with index hit
if rando:
divide = beat_division[hit]
# if not random, then beat_division is the value of the pot
else:
divide = beat_division[beat_val2]
# blinka animation in time with BPM and beat division
# she will slither every time a note is played
if (time.monotonic() - slither) >= divide:
blinka_grid[0] = g
g += 1
slither = time.monotonic()
if g > 2:
g = 1
# holds key | |
self.comp_ids = dataset['comp_ids']
except KeyError:
pass
if 'centered' in dataset.keys():
self.centered = dataset['centered'][()]
def load(self, dataset_path):
"""Read data set.
Parameters
----------
dataset_path : :obj:`str`
Path to NumPy ``npz`` file.
"""
dataset_npz = np.load(dataset_path, allow_pickle=True)
npz_type = str(dataset_npz.f.type[()])
if npz_type != 'd':
raise ValueError(f'{npz_type} is not a data set.')
else:
self._update(dict(dataset_npz))
def _get_Rset_id(
self, data, selected_rset_id=None
):
"""Determines the numerical Rset ID for this data set.
Parameters
----------
data : :obj:`mbgdml.data`
A loaded :obj:`mbgdml.data.structureSet` or
:obj:`mbgdml.data.dataSet` object.
selected_rset_id : obj:`int`, optional
Currently dset sampling can only be done for one rset_id at a time.
This specifies which rset structures in the data set to sample from.
Defaults to ``None``.
Returns
-------
:obj:`int`
Numerical ID for the Rset.
"""
# Attempts to match any previous sampling to this structure set.
# If not, adds this structure set to the Rset_md5 information.
Rset_id = None
if self.Rset_md5 != {}:
if data.type == 's':
md5 = data.md5
elif data.type == 'd':
md5 = data.Rset_md5[selected_rset_id]
new_k = 0 # New key should be one more than the last key.
for k,v in self.Rset_md5.items():
if v == md5:
Rset_id = k
break
new_k += 1
# If no matches.
if Rset_id is None:
Rset_id = new_k
else:
Rset_id = 0
return Rset_id
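# Illustration with hypothetical hashes: if self.Rset_md5 is {0: 'aaa...', 1: 'bbb...'}
# and the sampled set's md5 is 'bbb...', the method returns 1; an unseen md5 would
# instead be assigned the next free key, 2.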
def _check_entity_comp_ids(
self, entity_ids, comp_ids, data_entity_ids, data_comp_ids,
sampled_entity_ids_split
):
"""
Parameters
----------
entity_ids : :obj:`numpy.ndarray`
Already sampled entity_ids of the data set. Could be an empty array.
comp_ids : :obj:`numpy.ndarray`
Already sampled comp_ids of the data set. Could be an empty array.
data_entity_ids : :obj:`numpy.ndarray`
entity_ids of a data or structure set being sampled.
data_comp_ids : :obj:`numpy.ndarray`
comp_ids of a data or structure set being sampled.
sampled_entity_ids_split : :obj:`list` [:obj:`numpy.ndarray`]
The unique data entity_ids of each new entity for this data set.
For example, all the data entity_ids (from a structure set) that
are included as entity_id = 0 in this data set.
Returns
-------
:obj:`numpy.ndarray`
The correct entity_ids of this data set.
:obj:`numpy.ndarray`
The correct comp_ids of this data set.
"""
if len(entity_ids) == 0 and comp_ids.shape == (1, 0):
# If there is no previous sampling.
# Just need to check that the new entities are self compatible.
entity_ids = [] # Start a new entity id list.
comp_ids = [] # Start a new component id list.
# Loops through every column/entity that we sampled.
for entity_id in range(len(sampled_entity_ids_split)):
# Gets the size of the entity in the first structure.
ref_entity_size = np.count_nonzero(
data_entity_ids == entity_id
)
# Adds the entity_ids of this entity
entity_ids.extend([entity_id for _ in range(ref_entity_size)])
# Adds comp_id
comp_id = data_comp_ids[entity_id][1]
comp_ids.append(
[str(entity_id), comp_id]
)
# We should not have to check the entities because we already
# check z.
entity_ids = np.array(entity_ids)
comp_ids = np.array(comp_ids)
else:
# If there was previous sampling
# Need to also check if compatible with the data set.
# We should not have to check the entities because we already
# check z.
pass
return entity_ids, comp_ids
def _generate_structure_samples(
self, quantity, size, data_ids, structure_idxs, max_sample_entity_ids
):
"""
Parameters
----------
quantity : :obj:`int` or :obj:`str`
Number of structures to sample, or ``'all'``.
size : :obj:`int`
data_ids : :obj:`numpy.ndarray`
structure_idxs : :obj:`numpy.ndarray`
All structure indices that we could sample from.
max_sample_entity_ids : :obj:`int`
The largest ``entity_id`` from the data or structure set we are
sampling from.
"""
if isinstance(quantity, int) or str(quantity).isdigit():
while True:
struct_num_selection = randrange(len(structure_idxs))
data_id = choice(data_ids)
comp_selection = sorted(sample(range(max_sample_entity_ids + 1), size))
Rset_selection = [data_id, struct_num_selection] + comp_selection
yield Rset_selection
elif quantity == 'all':
comb_list = list(itertools.combinations(range(max_sample_entity_ids + 1), size))
for struct_i in structure_idxs:
for comb in comb_list:
for data_id in data_ids:
data_selection = [data_id, struct_i] + list(comb)
yield data_selection
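# Each yielded selection has the form [data_id, structure_index, entity_id_0, ...,
# entity_id_{size-1}]; for example (hypothetical values), sampling dimers (size=2)
# from data_id 0 might yield [0, 42, 1, 3], i.e. entities 1 and 3 of structure 42.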
def _sample(
self, z, R, E, F, sample_data, quantity, data_ids, Rset_id, Rset_info,
size, criteria=None, z_slice=[], cutoff=[], sampling_updates=False,
copy_EF=True
):
"""Selects all Rset structures for data set.
Generally organized by adding all structures of a single entity_id one at
a time. For example, if we were adding all monomers from a cluster with
two water molecules, we would select the first molecule (``0``), add
all of its information, then add the second molecule (``1``). This
method was chosen due to a previous methodology used to calculate
energy and gradients of partitions.
Parameters
----------
z : :obj:`numpy.ndarray`
Atomic numbers of the atoms in every structure prior to sampling.
R : :obj:`numpy.ndarray`
Cartesian atomic coordinates of data set structures prior to
sampling.
E : :obj:`numpy.ndarray`
Energies of data set structures prior to sampling.
F : :obj:`numpy.ndarray`
Atomic forces of data set structures prior to sampling.
sample_data : :obj:`mbgdml.data`
A loaded structure or data set object.
quantity : :obj:`str`
Number of structures to sample from the structure set. For example,
``'100'``, ``'452'``, or even ``'all'``.
data_ids : :obj:`numpy.ndarray`
Array of :obj:`int` of the structure set used to sampled data
from. For example, if you are sampling from a data set this would be
the ``Rset_id`` in that data set not the new ``Rset_id`` for this
current data set.
Rset_id : :obj:`int`
The :obj:`int` that specifies the Rset (key in ``self.Rset_md5``) in
this current data set.
Rset_info : :obj:`int`
An array specifying where each structure in R originates from.
size : :obj:`int`
Desired number of molecules in each selection.
criteria : :obj:`mbgdml.sample.sampleCritera`, optional
Structure criteria during the sampling procedure. Defaults to
``None`` if no criteria should be used.
z_slice : :obj:`numpy.ndarray`, optional
Indices of the atoms to be used for the cutoff calculation. Defaults
to ``[]`` is no criteria is selected or if it is not required for
the selected criteria.
cutoff : :obj:`list`, optional
Distance cutoff between the atoms selected by ``z_slice``. Must be
in the same units (e.g., Angstrom) as ``R``. Defaults to ``[]`` if
no criteria is selected or a cutoff is not desired.
sampling_updates : :obj:`bool`, optional
Will print something for every 500 successfully sampled structures.
Defaults to ``False``.
Returns
-------
:obj:`numpy.ndarray`
An array specifying where each structure in R originates from.
:obj:`numpy.ndarray`
Atomic coordinates of structure(s).
:obj:`numpy.ndarray`
The energies of structure(s). All are NaN.
:obj:`numpy.ndarray`
Atomic forces of atoms in structure(s). All are NaN.
"""
sample_data_type = sample_data.type
sample_data_z = sample_data.z
sample_data_R = sample_data.R
sample_entity_ids = sample_data.entity_ids
max_sample_entity_ids = max(sample_entity_ids)
if sample_data_type == 'd':
sample_data_Rset_info = sample_data.Rset_info
sample_data_E = sample_data.E
sample_data_F = sample_data.F
structure_idxs = np.array([], dtype=np.int64)
for data_id in data_ids:
structure_idxs = np.concatenate(
(
structure_idxs,
np.where(sample_data_Rset_info[:,0] == data_id)[0]
),
axis=0
)
elif sample_data_type == 's':
structure_idxs = np.arange(0, len(sample_data_R))
num_accepted_r = 0
for data_selection in self._generate_structure_samples(
quantity, size, data_ids, structure_idxs, max_sample_entity_ids
):
# Ends sampling for number quantities.
# The generator does not stop.
if isinstance(quantity, int) or str(quantity).isdigit():
if num_accepted_r == quantity:
break
# Sampling updates
if sampling_updates:
if isinstance(quantity, int) or str(quantity).isdigit():
if num_accepted_r%500 == 0:
print(f'Successfully found {num_accepted_r} structures')
elif quantity == 'all':
if (num_accepted_r+1)%500 == 0:
print(
f'Successfully sampled {num_accepted_r+1} clusters'
)
i_r_sample = data_selection[1]
# Gets Rset_selection instead of data_selection
if sample_data_type == 'd':
i_r_rset = sample_data_Rset_info[i_r_sample][1]
i_r_rset_entity_ids = sample_data_Rset_info[i_r_sample][2:][data_selection[2:]]
Rset_selection = [Rset_id, i_r_rset] + list(i_r_rset_entity_ids)
elif sample_data_type == 's':
Rset_selection = data_selection
# Checks if Rset_info is already present.
if Rset_info.shape[1] == 0: # No previous Rset_info.
Rset_axis = 1
else:
Rset_axis = 0
# Checks to see if combination is already in data set.
if (Rset_info[...]==Rset_selection).all(1).any():
# Does not add the combination.
continue
# Gets atomic indices from entity_ids in the Rset.
atom_idx = []
for entity_id in data_selection[2:]:
atom_idx.extend(
[i for i,x in enumerate(sample_entity_ids) if x == entity_id]
)
# Checks compatibility with atoms.
if len(z) == 0:
z = sample_data_z[atom_idx]
else:
if not np.array_equal(z, sample_data_z[atom_idx]):
print(f'z of data set: {z}')
print(f'Rset_info of selection: {Rset_selection}')
print(f'z of selection: {sample_data_z[atom_idx]}')
raise ValueError(f'z of the selection is incompatible.')
# Checks any structure criteria.
r_selection = np.array([sample_data_R[i_r_sample, atom_idx, :]])
| |
"evap"),
("/staff/semester/1/import", "evap"),
("/staff/questionnaire/create", "evap"),
("/staff/user/create", "evap"),
]
for form in forms:
response = self.get_submit_assert_200(form[0], form[1])
self.assertIn("is required", response)
forms = [
("/student/vote/5", "lazy.student"),
("/staff/semester/1/course/1/email", "evap"),
]
for form in forms:
response = self.get_submit_assert_200(form[0], form[1])
self.assertIn("alert-danger", response)
def test_failing_questionnaire_copy(self):
"""
Tests whether copying and submitting a questionnaire form without entering a new name fails.
"""
response = self.get_submit_assert_200("/staff/questionnaire/2/copy", "evap")
self.assertIn("already exists", response)
"""
The following tests test whether forms that succeed when
submitting them without entering any data actually do that.
They are in individual methods because most of them change the database.
"""
def test_staff_semester_x_edit__nodata_success(self):
self.get_submit_assert_302("/staff/semester/1/edit", "evap")
def test_staff_semester_x_delete__nodata_success(self):
self.get_submit_assert_302("/staff/semester/2/delete", "evap")
def test_staff_semester_x_assign__nodata_success(self):
self.get_submit_assert_302("/staff/semester/1/assign", "evap")
def test_staff_semester_x_lottery__nodata_success(self):
self.get_submit_assert_200("/staff/semester/1/lottery", "evap")
def test_staff_semester_x_course_y_edit__nodata_success(self):
self.get_submit_assert_302("/staff/semester/1/course/1/edit", "evap", name="operation", value="save")
def test_staff_semester_x_course_y_delete__nodata_success(self):
self.get_submit_assert_302("/staff/semester/1/course/1/delete", "evap"),
def test_staff_questionnaire_x_edit__nodata_success(self):
self.get_submit_assert_302("/staff/questionnaire/3/edit", "evap")
def test_staff_questionnaire_x_delete__nodata_success(self):
self.get_submit_assert_302("/staff/questionnaire/3/delete", "evap"),
def test_staff_user_x_delete__nodata_success(self):
self.get_submit_assert_302("/staff/user/4/delete", "evap"),
def test_staff_user_x_edit__nodata_success(self):
self.get_submit_assert_302("/staff/user/4/edit", "evap")
def test_staff_template_x__nodata_success(self):
self.get_submit_assert_200("/staff/template/1", "evap")
def test_staff_faq__nodata_success(self):
self.get_submit_assert_302("/staff/faq/", "evap")
def test_staff_faq_x__nodata_success(self):
self.get_submit_assert_302("/staff/faq/1", "evap")
def test_contributor_settings(self):
self.get_submit_assert_302("/contributor/settings", "responsible")
def test_course_email_form(self):
"""
Tests the CourseEmailForm with one valid and one invalid input dataset.
"""
course = Course.objects.get(pk="1")
data = {"body": "wat", "subject": "some subject", "recipients": ["due_participants"]}
form = CourseEmailForm(instance=course, data=data)
self.assertTrue(form.is_valid())
self.assertTrue(form.missing_email_addresses() == 0)
form.send()
data = {"body": "wat", "subject": "some subject"}
form = CourseEmailForm(instance=course, data=data)
self.assertFalse(form.is_valid())
def test_user_form(self):
"""
Tests the UserForm with one valid and one invalid input dataset.
"""
user = UserProfile.objects.get(pk=1)
another_user = UserProfile.objects.get(pk=2)
data = {"username": "mklqoep50x2", "email": "<EMAIL>"}
form = UserForm(instance=user, data=data)
self.assertTrue(form.is_valid())
data = {"username": another_user.username, "email": "[email protected]"}
form = UserForm(instance=user, data=data)
self.assertFalse(form.is_valid())
def test_contributor_form_set(self):
"""
Tests the ContributionFormset with various input data sets.
"""
course = mommy.make(Course)
ContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=ContributionForm, extra=0)
data = {
'contributions-TOTAL_FORMS': 1,
'contributions-INITIAL_FORMS': 0,
'contributions-MAX_NUM_FORMS': 5,
'contributions-0-course': course.pk,
'contributions-0-questionnaires': [1],
'contributions-0-order': 0,
'contributions-0-responsibility': "RESPONSIBLE",
'contributions-0-comment_visibility': "ALL",
}
# no contributor and no responsible
self.assertFalse(ContributionFormset(instance=course, form_kwargs={'course': course}, data=data.copy()).is_valid())
# valid
data['contributions-0-contributor'] = 1
self.assertTrue(ContributionFormset(instance=course, form_kwargs={'course': course}, data=data.copy()).is_valid())
# duplicate contributor
data['contributions-TOTAL_FORMS'] = 2
data['contributions-1-contributor'] = 1
data['contributions-1-course'] = course.pk
data['contributions-1-questionnaires'] = [1]
data['contributions-1-order'] = 1
self.assertFalse(ContributionFormset(instance=course, form_kwargs={'course': course}, data=data).is_valid())
# two responsibles
data['contributions-1-contributor'] = 2
data['contributions-1-responsibility'] = "RESPONSIBLE"
self.assertFalse(ContributionFormset(instance=course, form_kwargs={'course': course}, data=data).is_valid())
def test_semester_deletion(self):
"""
Tries to delete two semesters via the respective view,
only the second attempt should succeed.
"""
self.assertFalse(Semester.objects.get(pk=1).can_staff_delete)
self.client.login(username='evap', password='<PASSWORD>')
response = self.client.get("/staff/semester/1/delete", follow=True)
self.assertIn("cannot be deleted", list(response.context['messages'])[0].message)
self.assertTrue(Semester.objects.filter(pk=1).exists())
self.assertTrue(Semester.objects.get(pk=2).can_staff_delete)
self.get_submit_assert_302("/staff/semester/2/delete", "evap")
self.assertFalse(Semester.objects.filter(pk=2).exists())
def helper_semester_state_views(self, course_ids, old_state, new_state, operation):
page = self.app.get("/staff/semester/1", user="evap")
form = page.forms["form_" + old_state]
for course_id in course_ids:
self.assertIn(Course.objects.get(pk=course_id).state, old_state)
form['course'] = course_ids
response = form.submit('operation', value=operation)
form = lastform(response)
response = form.submit()
self.assertIn("Successfully", str(response))
for course_id in course_ids:
self.assertEqual(Course.objects.get(pk=course_id).state, new_state)
"""
The following tests make sure the course state transitions are triggerable via the UI.
"""
def test_semester_publish(self):
self.helper_semester_state_views([7], "reviewed", "published", "publish")
def test_semester_reset(self):
self.helper_semester_state_views([2], "prepared", "new", "revertToNew")
def test_semester_approve_1(self):
self.helper_semester_state_views([1], "new", "approved", "approve")
def test_semester_approve_2(self):
self.helper_semester_state_views([2], "prepared", "approved", "approve")
def test_semester_approve_3(self):
self.helper_semester_state_views([3], "editorApproved", "approved", "approve")
def test_semester_contributor_ready_1(self):
self.helper_semester_state_views([1, 10], "new", "prepared", "prepare")
def test_semester_contributor_ready_2(self):
self.helper_semester_state_views([3], "editorApproved", "prepared", "reenableEditorReview")
def test_semester_unpublish(self):
self.helper_semester_state_views([8], "published", "reviewed", "unpublish")
def test_course_create(self):
"""
Tests the course creation view with one valid and one invalid input dataset.
"""
data = dict(name_de="asdf", name_en="asdf", type="asdf", degrees=["1"],
vote_start_date="02/1/2014", vote_end_date="02/1/2099", general_questions=["2"])
response = self.get_assert_200("/staff/semester/1/course/create", "evap")
form = lastform(response)
form["name_de"] = "lfo9e7bmxp1xi"
form["name_en"] = "asdf"
form["type"] = "a type"
form["degrees"] = ["1"]
form["vote_start_date"] = "02/1/2099"
form["vote_end_date"] = "02/1/2014" # wrong order to get the validation error
form["general_questions"] = ["2"]
form['contributions-TOTAL_FORMS'] = 1
form['contributions-INITIAL_FORMS'] = 0
form['contributions-MAX_NUM_FORMS'] = 5
form['contributions-0-course'] = ''
form['contributions-0-contributor'] = 6
form['contributions-0-questionnaires'] = [1]
form['contributions-0-order'] = 0
form['contributions-0-responsibility'] = "RESPONSIBLE"
form['contributions-0-comment_visibility'] = "ALL"
form.submit()
self.assertNotEqual(Course.objects.order_by("pk").last().name_de, "lfo9e7bmxp1xi")
form["vote_start_date"] = "02/1/2014"
form["vote_end_date"] = "02/1/2099" # now do it right
form.submit()
self.assertEqual(Course.objects.order_by("pk").last().name_de, "lfo9e7bmxp1xi")
def test_single_result_create(self):
"""
Tests the single result creation view with one valid and one invalid input dataset.
"""
response = self.get_assert_200("/staff/semester/1/singleresult/create", "evap")
form = lastform(response)
form["name_de"] = "qwertz"
form["name_en"] = "qwertz"
form["type"] = "a type"
form["degrees"] = ["1"]
form["event_date"] = "02/1/2014"
form["answer_1"] = 6
form["answer_3"] = 2
# missing responsible to get a validation error
form.submit()
self.assertNotEqual(Course.objects.order_by("pk").last().name_de, "qwertz")
form["responsible"] = 2 # now do it right
form.submit()
self.assertEqual(Course.objects.order_by("pk").last().name_de, "qwertz")
def test_course_email(self):
"""
Tests whether the course email view actually sends emails.
"""
page = self.get_assert_200("/staff/semester/1/course/5/email", user="evap")
form = lastform(page)
form.get("recipients", index=0).checked = True # send to all participants
form["subject"] = "asdf"
form["body"] = "asdf"
form.submit()
self.assertEqual(len(mail.outbox), 2)
def test_questionnaire_deletion(self):
"""
Tries to delete two questionnaires via the respective view,
only the second attempt should succeed.
"""
self.assertFalse(Questionnaire.objects.get(pk=2).can_staff_delete)
self.client.login(username='evap', password='<PASSWORD>')
page = self.client.get("/staff/questionnaire/2/delete", follow=True)
self.assertIn("cannot be deleted", list(page.context['messages'])[0].message)
self.assertTrue(Questionnaire.objects.filter(pk=2).exists())
self.assertTrue(Questionnaire.objects.get(pk=3).can_staff_delete)
self.get_submit_assert_302("/staff/questionnaire/3/delete", "evap")
self.assertFalse(Questionnaire.objects.filter(pk=3).exists())
def test_create_user(self):
"""
Tests whether the user creation view actually creates a user.
"""
page = self.get_assert_200("/staff/user/create", "evap")
form = lastform(page)
form["username"] = "mflkd862xmnbo5"
form["first_name"] = "asd"
form["last_name"] = "asd"
form["email"] = "[email protected]"
form.submit()
self.assertEqual(UserProfile.objects.order_by("pk").last().username, "mflkd862xmnbo5")
def test_emailtemplate(self):
"""
Tests the emailtemplate view with one valid and one invalid input dataset.
"""
page = self.get_assert_200("/staff/template/1", "evap")
form = lastform(page)
form["subject"] = "subject: mflkd862xmnbo5"
form["body"] = "body: mflkd862xmnbo5"
response = form.submit()
self.assertEqual(EmailTemplate.objects.get(pk=1).body, "body: mflkd862xmnbo5")
form["body"] = " invalid tag: {{}}"
response = form.submit()
self.assertEqual(EmailTemplate.objects.get(pk=1).body, "body: mflkd862xmnbo5")
def test_contributor_course_edit(self):
"""
Tests whether the "save" button in the contributor's course edit view does not
change the course's state, and that the "approve" button does that.
"""
page = self.get_assert_200("/contributor/course/2/edit", user="responsible")
form = lastform(page)
form["vote_start_date"] = "02/1/2098"
form["vote_end_date"] = "02/1/2099"
form.submit(name="operation", value="save")
self.assertEqual(Course.objects.get(pk=2).state, "prepared")
form.submit(name="operation", value="approve")
self.assertEqual(Course.objects.get(pk=2).state, "editorApproved")
# test what happens if the operation is not specified correctly
response = form.submit(expect_errors=True)
self.assertEqual(response.status_code, 403)
def test_student_vote(self):
"""
Submits a student vote for coverage. Verifies that an error message is
displayed if not all rating questions have been answered, that all
given answers stay selected/filled, and that the student cannot vote on
the course a second time.
"""
page = self.get_assert_200("/student/vote/5", user="lazy.student")
form = lastform(page)
form["question_17_2_3"] = "some text"
form["question_17_2_4"] = 1
form["question_17_2_5"] = 6
form["question_18_1_1"] = "some other text"
form["question_18_1_2"] = 1
form["question_19_1_1"] = "some more text"
form["question_19_1_2"] = 1
form["question_20_1_1"] = "and the last text"
response = form.submit()
self.assertIn("vote for all rating questions", response)
form = lastform(page)
self.assertEqual(form["question_17_2_3"].value, "some text")
self.assertEqual(form["question_17_2_4"].value, "1")
self.assertEqual(form["question_17_2_5"].value, "6")
self.assertEqual(form["question_18_1_1"].value, "some other text")
self.assertEqual(form["question_18_1_2"].value, "1")
self.assertEqual(form["question_19_1_1"].value, "some more text")
self.assertEqual(form["question_19_1_2"].value, "1")
self.assertEqual(form["question_20_1_1"].value, "and the last text")
form["question_20_1_2"] = 1 # give missing answer
response = form.submit()
self.get_assert_403("/student/vote/5", user="lazy.student")
class CourseFormTests(TestCase):
def helper_test_course_form_same_name(self, CourseFormClass):
courses = Course.objects.all()
form_data = get_form_data_from_instance(CourseForm, courses[0])
form_data["vote_start_date"] = "02/1/2098" # needed to fix the form
form_data["vote_end_date"] = "02/1/2099" # needed to fix the form
form = CourseForm(form_data, instance=courses[0])
self.assertTrue(form.is_valid())
form_data['name_de'] = courses[1].name_de
form = CourseForm(form_data, instance=courses[0])
self.assertFalse(form.is_valid())
def test_course_form_same_name(self):
"""
Test whether giving a course the same name as another course
in the same semester in the course edit form is invalid.
"""
courses = mommy.make(Course, semester=mommy.make(Semester), degrees=[mommy.make(Degree)], _quantity=2)
courses[0].general_contribution.questionnaires = [mommy.make(Questionnaire)]
courses[1].general_contribution.questionnaires = [mommy.make(Questionnaire)]
self.helper_test_course_form_same_name(CourseForm)
self.helper_test_course_form_same_name(ContributorCourseForm)
def helper_date_validation(self, CourseFormClass, start_date, end_date, expected_result):
course = Course.objects.get()
form_data = get_form_data_from_instance(CourseFormClass, course)
form_data["vote_start_date"] = start_date
form_data["vote_end_date"] = end_date
form = CourseFormClass(form_data, instance=course)
self.assertEqual(form.is_valid(), expected_result)
def test_contributor_course_form_date_validation(self):
"""
Tests validity of various start/end date combinations in
the two course edit forms.
"""
course = mommy.make(Course, degrees=[mommy.make(Degree)])
course.general_contribution.questionnaires = [mommy.make(Questionnaire)]
# contributors: start date must be in the future
self.helper_date_validation(ContributorCourseForm, "02/1/1999", "02/1/2099", False)
# contributors: end date must be in the future
self.helper_date_validation(ContributorCourseForm, "02/1/2099", "02/1/1999", False)
# contributors: start date must be < end date
self.helper_date_validation(ContributorCourseForm, "02/1/2099", "02/1/2098", False)
# contributors: valid data
self.helper_date_validation(ContributorCourseForm, "02/1/2098", "02/1/2099", True)
# staff: neither start nor end date needs to be in the future
self.helper_date_validation(CourseForm, "02/1/1998", "02/1/1999", True)
# staff: valid data in the future
self.helper_date_validation(CourseForm, "02/1/2098", "02/1/2099", True)
# staff: but start date must be < end date
self.helper_date_validation(CourseForm, "02/1/1999", "02/1/1998", False)
class ContributionFormsetTests(TestCase):
def test_dont_validate_deleted_contributions(self):
"""
Tests whether contributions marked for deletion are validated.
Regression test for #415 and #244
"""
course = mommy.make(Course)
user1 = mommy.make(UserProfile)
user2 = mommy.make(UserProfile)
user3 = mommy.make(UserProfile)
questionnaire = mommy.make(Questionnaire, is_for_contributors=True)
ContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=ContributionForm, extra=0)
# here we have two responsibles (one of | |
from __future__ import annotations
import os
import sys
import numpy as np
import openpyxl
import pandas as pd
import printj
from ortools.sat.python import cp_model
from printj import ColorText as ct
# from typing import Union
class TimeVar:
def __init__(self, hours: int, minutes: int):
while minutes >= 60:
minutes -= 60
hours += 1
self.hours = hours
self.minutes = minutes
self.time_str = f'{hours}:{minutes}'
def __str__(self):
return self.time_str
def __add__(self, added_time: TimeVar):
return TimeVar(self.hours + added_time.hours, self.minutes + added_time.minutes)
@classmethod
def by_string(cls, time: str):
time_split_hour_min = time.split(":")
hours = int(time_split_hour_min[0])
minutes = int(time_split_hour_min[1])
return cls(hours, minutes)
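# Minimal usage sketch of TimeVar (illustrative values): addition normalizes
# minute overflow, so 9:30 plus a 1:45 offset becomes 11:15.
def _timevar_example() -> str:
    start = TimeVar(9, 30)
    offset = TimeVar.by_string("1:45")
    return str(start + offset)  # '11:15'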
# # function to get unique values
# def unique(list1):
# # insert the list to the set
# list_set = set(list1)
# # convert the set to the list
# unique_list = (list(list_set))
# # for x in unique_list:
# # print x,
# return unique_list
class Scheduler:
def __init__(self) -> None:
# pass
self.input_data_package = None
self.input_data_worker = None
self.input_data_location = None
self.time_shifts = None
self.num_vehicles = None
def __call__(self, input_data_package, input_data_worker, input_data_location,
time_shifts,
num_vehicles: int = 4, ):
self.input_data_package = input_data_package
self.input_data_worker = input_data_worker
self.input_data_location = input_data_location
self.time_shifts = time_shifts
self.num_vehicles = num_vehicles
self.input_data_package.dropna(subset=["package"], inplace=True)
self.input_data_package.dropna(axis=1, how='all', inplace=True)
self.input_data_package_orig, self.input_data_worker_orig, self.input_data_location_orig = self.input_data_package.copy(
), self.input_data_worker.copy(), self.input_data_location.copy()
def solution_printer(self):
alphabets = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"""
data = []
for p in range(num_packages):
# print('Package %i' % p)
data_i = []
for s in range(num_shifts):
s_val = ct.white('0 ')
for w in range(num_workers):
is_working = False
for v in range(num_vehicles):
if solver.Value(self.shifts[(w, p, v, s)]) == 1:
is_working = True
# print(' Worker %i works shift %i' % (w, s))
text_worker = ct.green(
f'Worker {alphabets[w]}')
# text_shift = ct.purple(f'shift {["9:00", "10:00", "11:00", "12:00", ][s]}')
text_shift = ct.purple(f'shift {time_shifts[s]}')
# text_shift = ct.purple(f'shift {s}')
text_package = ct.cyan(f'package-{p}')
text_vehicle = ct.yellow(
f'vehicle {v+1}')
# text_keiro = ct.yellow(
# f'keiro {["Main2", "Main1", "SUB", ][v]}')
# if p in [2, 4]:
# print(
# f' {text_worker} at {text_shift} moves {text_package} using {text_vehicle}')
s_val = ct.green(f'{alphabets[w]}{v+1} ')
data_i.append(s_val)
data.append(data_i)
# data = pd.DataFrame(data, columns=time_shifts)
data = pd.DataFrame(data, columns=[ct.yellow(f' {s}') for s in time_shifts])
"""
data = []
data_moved = []
for p in range(self.num_packages):
# print('Package %i' % p)
num_packages_moved = 0
data_i = []
for s in range(self.num_shifts):
s_val = '0 '
for w in range(self.num_workers):
is_working = False
for v in range(self.num_vehicles):
# print("self.solver.Value(self.shifts[(w, p, v, s)])", self.solver.Value(self.shifts[(w, p, v, s)]))
if self.solver.Value(self.shifts[(w, p, v, s)]) == 1:
# is_working = True
# # print(' Worker %i works shift %i' % (w, s))
# text_worker = f'Worker {alphabets[w]}'
# # text_shift = ct.purple(f'shift {["9:00", "10:00", "11:00", "12:00", ][s]}')
# text_shift = f'shift {self.time_shifts[s]}'
# # text_shift = ct.purple(f'shift {s}')
# text_package = f'package-{p}'
# text_vehicle = f'vehicle {v+1}'
# # text_keiro = ct.yellow(
# # f'keiro {["Main2", "Main1", "SUB", ][v]}')
# # if p in [2, 4]:
# # print(
# # f' {text_worker} at {text_shift} moves {text_package} using {text_vehicle}')
s_val = f'{alphabets[w]}{v+1} '
num_packages_moved += 1
data_i.append(s_val)
data.append(data_i)
data_moved.append([
num_packages_moved,
self.input_data_package.quantity[p] - num_packages_moved,
self.input_data_package.yesterday[p] + num_packages_moved - self.input_data_package.decay[p]*self.num_shifts])
# data = pd.DataFrame(data, columns=time_shifts)
data = pd.DataFrame(data, columns=[f' {s}' for s in self.time_shifts])
data_moved = pd.DataFrame(data_moved, columns=['moved', 'not_moved', 'q_at_destination'])
# print(data_moved)
self.input_data_package_orig = pd.concat([
self.input_data_package_orig[['package', 'quantity', 'decay',
'location', 'vehicle', 'next', 'yesterday']],
data,
data_moved], axis=1).copy()
# data.index = [f'Package-{p}' for p in range(self.num_packages)]
# self.data = self.data.reset_index(drop=True)
# self.data.dropna(axis=1, how='any')
self.data = self.input_data_package_orig.copy()
# print()
print(self.data)
return self.data
def solution_writer(self):
output_path = 'test/xl.xlsx'
print()
print(self.input_data_package_orig)
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter(output_path, engine='xlsxwriter')
# Write each dataframe to a different worksheet.
self.input_data_package_orig.to_excel(
writer, sheet_name='Sheet_package', index=False)
self.input_data_worker_orig.to_excel(writer, sheet_name='Sheet_worker', index=False)
self.input_data_location_orig.to_excel(
writer, sheet_name='Sheet_location', index=False)
# output_data.to_excel(writer, sheet_name='Sheet_schedule')
writer.save()
def run(self):
# Data.
# package_to_table = [
# [1, 0, 0, 0, 0, 0],
# [1, 1, 0, 0, 0, 0],
# [0, 0, 1, 1, 0, 0],
# [0, 0, 0, 0, 1, 0],
# [0, 0, 0, 1, 0, 1],
# [1, 1, 1, 1, 1, 1],
# ]
# workers_to_table = [
# [1, 1, 1, 1, 0, 1],
# [1, 1, 1, 1, 1, 0],
# [1, 1, 1, 1, 0, 1],
# [1, 1, 1, 1, 1, 0],
# ]
printj.yellow('::::::::::::::::::: preprocess :::::::::::::::::::')
print(self.input_data_package)
print(self.input_data_package.dtypes)
if isinstance(self.input_data_package.vehicle[0], str):
self.input_data_package.vehicle = [
[int(i) for i in v.split(",")] for v in self.input_data_package.vehicle]
self.input_data_package.next = [v if isinstance(
v, int) else None for v in self.input_data_package.next]
if isinstance(self.input_data_worker.location[0], str):
self.input_data_worker.location = [
[int(i) for i in v.split(",")] for v in self.input_data_worker.location]
self.input_data_worker.vehicle = [
[int(i) for i in v.split(",")] for v in self.input_data_worker.vehicle]
self.num_locations = len(self.input_data_location.location)
# package_to_location = pd.crosstab(
# index=self.input_data_package['package'], columns=self.input_data_package['location']).to_numpy()
package_to_location = pd.DataFrame({p: [1 if l in [location_list] else 0 for l in range(self.num_locations)]
for p, location_list in enumerate(self.input_data_package.location)}).T.to_numpy() # num_location
package_to_vehicle = pd.DataFrame({p: [1 if (v+1) in vehicles_list else 0 for v in range(self.num_vehicles)]
for p, vehicles_list in enumerate(self.input_data_package.vehicle)}).T.to_numpy() # num_vehicle = 4
worker_to_vehicle = pd.DataFrame({p: [1 if (v+1) in vehicles_list else 0 for v in range(self.num_vehicles)]
for p, vehicles_list in enumerate(self.input_data_worker.vehicle)}).T.to_numpy() # num_vehicle = 4
location_to_worker = pd.DataFrame({p: [1 if v in worker_list else 0 for v in range(
self.num_locations)] for p, worker_list in enumerate(self.input_data_worker.location)}).to_numpy() # num_keiro = 6
package_orders = [[i, int(next_i)] for (i, next_i) in zip(
self.input_data_package.package, self.input_data_package.next) if pd.notna(next_i)]
print("package_to_vehicle\n", package_to_vehicle)
print("worker_to_vehicle\n", worker_to_vehicle)
print("package_to_location\n", package_to_location)
print("location_to_worker\n", location_to_worker)
print("package_orders\n", package_orders)
print()
print()
# print(package_to_location.to_numpy())
# sys.exit()
# package_orders = [[0, 1], [1, 2], ]
# main2, main1, sub
# package_to_vehicle = np.array([
# [1, 1, 1, 1],
# [1, 0, 0, 0],
# [1, 0, 0, 0],
# [0, 1, 1, 0],
# [0, 0, 1, 1],
# [0, 0, 1, 1],
# ])
# package_to_location = np.array([
# [1, 0, 0],
# [1, 0, 0],
# [1, 0, 0],
# [0, 1, 0],
# [1, 0, 0],
# [0, 0, 1],
# ])
# workers_to_keiro = np.array([
# [1, 0, 1],
# [1, 1, 0],
# [1, 0, 1],
# [1, 1, 0],
# ])
self.num_workers = len(self.input_data_worker.worker) # 4
self.num_packages = len(self.input_data_package.package) # 5
self.num_shifts = len(self.time_shifts)
# num_tables = 6
all_workers = range(self.num_workers)
all_packages = range(self.num_packages)
all_shifts = range(self.num_shifts)
all_vehicles = range(self.num_vehicles)
all_locations = range(self.num_locations)
# print(all_vehicles)
print(
f'\nNo. of package {self.num_packages}, No. of workers {self.num_workers}')
alphabets = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"""
available_workers_per_package = []
for i, item in enumerate(package_to_vehicle):
available_workers_list = []
for j, table in enumerate(item):
if table == 1:
available_workers_list += [k for k in range(len(workers_to_keiro)) if workers_to_keiro[k][j]==1]
available_workers_list = unique(available_workers_list)
print(f'Package-{i} can be moved by workers {"".join(alphabets[l] for l in available_workers_list)}')
available_workers_per_package.append(available_workers_list)
print(available_workers_per_package)
print(np.array(available_workers_per_package))
"""
# package_to_worker = np.matmul(package_to_location, workers_to_keiro.T)
# print(package_to_location.shape, location_to_worker.shape)
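        # Compose the package->location and location->worker incidence matrices, so that
        # package_to_worker[p][w] is nonzero exactly when worker w covers the location of package p.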
package_to_worker = np.matmul(package_to_location, location_to_worker)
available_workers_per_package = [
[i for i, ll in enumerate(l) if ll == 1] for l in package_to_worker]
available_vehicles_per_package = [
[i for i, ll in enumerate(l) if ll == 1] for l in package_to_vehicle]
available_packages_per_location = [
[i for i, ll in enumerate(l) if ll == 1] for l in package_to_location.T]
available_vehicles_per_worker = [
[i for i, ll in enumerate(l) if ll == 1] for l in worker_to_vehicle]
# print()
# for p, item in enumerate(available_workers_per_package):
# text_worker = ct.green(
# f'workers {"".join(alphabets[l] for l in item)}')
# text_package = ct.cyan(f'Package-{p}')
# print(f'{text_package} can be moved by {text_worker}')
print()
for w, item in enumerate(available_vehicles_per_worker):
text_vehicle = ct.green(
f'vehicle {", ".join(f"{l+1}" for l in item)}')
text_worker = ct.cyan(f'worker {alphabets[w]}')
print(f'{text_worker} can use {text_vehicle}')
print()
# for p, item in enumerate(available_vehicles_per_package):
# text_vehicle = ct.yellow(
# f'vehicle {" ".join(["Main2", "Main1", "SUB", ][l] for l in item)}')
# text_package = ct.cyan(f'Package-{p}')
# print(f'{text_package} can be moved to {text_vehicle}')
# print()
for p, (workers, vehicles) in enumerate(zip(available_workers_per_package, available_vehicles_per_package)):
text_worker = ct.green(
f'workers {", ".join(alphabets[l] for l in workers)}')
text_vehicle = ct.yellow(
f'vehicle {", ".join(str(v) for v in vehicles)}')
text_package = ct.cyan(f'Package-{p}')
print(f'{text_package} can be moved by \t{text_worker}\tusing {text_vehicle}')
print()
for l, item in enumerate(available_packages_per_location):
text_package = ct.cyan(f'package {", ".join(f"{i}" for i in item)}')
province in Canada.
Notes
- Creating geolocation and geolocation alias resource record sets in private hosted zones
is not supported.
- If you create separate resource record sets for overlapping geographic regions (for
example, one resource record set for a continent and one for a country on the same
continent), priority goes to the smallest geographic region. This allows you to route
most queries for a continent to one resource and to route queries for a country on that
continent to a different resource.
- You can't create two geolocation resource record sets that specify the same geographic
location.
- The value * in the CountryCode element matches all geographic locations that aren't
specified in other geolocation resource record sets that have the same values for the
Name and Type elements.
- Geolocation works by mapping IP addresses to locations. However, some IP addresses
aren't mapped to geographic locations, so even if you create geolocation resource
record sets that cover all seven continents, Amazon Route 53 will receive some DNS
queries from locations that it can't identify. We recommend that you create a resource
record set for which the value of CountryCode is *, which handles both queries that
come from locations for which you haven't created geolocation resource record sets and
queries from IP addresses that aren't mapped to a location. If you don't create a *
resource record set, Amazon Route 53 returns a "no answer" response for queries from
those locations.
- You can't create non-geolocation resource record sets that have the same values for the
Name and Type elements as geolocation resource record sets.
TTL
The resource record cache time to live (TTL), in seconds.
Note the following:
- If you're creating an alias resource record set, omit TTL. Amazon Route 53 uses the
value of TTL for the alias target.
- If you're associating this resource record set with a health check (if you're adding
a HealthCheckId element), we recommend that you specify a TTL of 60 seconds or less so
clients respond quickly to changes in health status.
- All of the resource record sets in a group of weighted, latency, geolocation, or
failover resource record sets must have the same value for TTL.
- If a group of weighted resource record sets includes one or more weighted alias
resource record sets for which the alias target is an ELB load balancer, we recommend
that you specify a TTL of 60 seconds for all of the non-alias weighted resource record
sets that have the same name and type. Values other than 60 seconds (the TTL for load
balancers) will change the effect of the values that you specify for Weight.
ResourceRecords
A list, containing one or more values for the resource record. No single value can exceed
4,000 characters. For details on how to format values for different record types, see
`Supported DNS Resource Record Types`__ in the Amazon Route 53 Developer Guide.
.. __: http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html
Note: You can specify more than one value for all record types except CNAME and SOA.
It is also possible to pass "magic" strings as resource record values. This functionality
can easily be extended, but for the moment supports the following:
'magic:ec2_instance_tag:some_tag_name:some_string:some_instance_attr'
This tells salt to lookup an EC2 instance with a tag 'some_tag_name' which has the value
'some_string' and substitute the 'some_instance_attr' attribute of that instance as the
resource record value being evaluated.
This should work generally for any EC2 instance tags, as long as the instance attribute
being fetched is available to getattr(instance, 'attribute') as seen in the code below.
Anything else will most likely require this function to be extended to handle it.
    The canonical use-case for this (at least at our site) is to query the Name tag (which
    we always populate with the host's FQDN) to look up the public or private IPs bound to the
    instance, so we can then automagically create Route 53 records for them.
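    For illustration only (the tag value and attribute name here are hypothetical, not from this
    module):
        'magic:ec2_instance_tag:Name:webserver01.internal:private_ip_address'
    would look up the EC2 instance whose Name tag equals 'webserver01.internal' and substitute
    that instance's private_ip_address attribute as the resource record value.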
AliasTarget
The rules governing how to define an AliasTarget for the various supported use-cases are
obtuse beyond reason and attempting to paraphrase them (or even worse, cut-and-paste them
in their entirety) would be silly and counterproductive. If you need this feature, then
Read The Fine Materials at the `Boto 3 Route 53 page`__ and/or the `AWS Route 53 docs`__
    and suss them for yourself - I sure won't claim to understand them particularly well.
.. __: http://boto3.readthedocs.io/en/latest/reference/services/route53.html#Route53.Client.change_resource_record_sets
.. __: http://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
'''
Name = Name if Name else name
Name = _to_aws_encoding(Name)
if Type is None:
        raise SaltInvocationError("'Type' is a required parameter when adding or updating "
                                  "resource records.")
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
args = {'Id': HostedZoneId, 'Name': DomainName, 'PrivateZone': PrivateZone,
'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
zone = __salt__['boto3_route53.find_hosted_zone'](**args)
if not zone:
ret['comment'] = 'Route 53 {} hosted zone {} not found'.format('private' if PrivateZone
else 'public', DomainName)
log.info(ret['comment'])
return ret
zone = zone[0]
HostedZoneId = zone['HostedZone']['Id']
# Convert any magic RR values to something AWS will understand, and otherwise clean them up.
fixed_rrs = []
for rr in ResourceRecords:
if rr.startswith('magic:'):
fields = rr.split(':')
if fields[1] == 'ec2_instance_tag':
if len(fields) != 5:
log.warning("Invalid magic RR value seen: '{}'. Passing as-is.".format(rr))
fixed_rrs += [rr]
continue
tag_name = fields[2]
tag_value = fields[3]
instance_attr = fields[4]
good_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')
r = __salt__['boto_ec2.find_instances'](
tags={tag_name: tag_value}, return_objs=True, in_states=good_states,
region=region, key=key, keyid=keyid, profile=profile)
if len(r) < 1:
ret['comment'] = 'No EC2 instance with tag {} == {} found'.format(tag_name,
tag_value)
log.error(ret['comment'])
ret['result'] = False
return ret
if len(r) > 1:
ret['comment'] = 'Multiple EC2 instances with tag {} == {} found'.format(
tag_name, tag_value)
log.error(ret['comment'])
ret['result'] = False
return ret
instance = r[0]
res = getattr(instance, instance_attr, None)
if res:
log.debug('Found {} {} for instance {}'.format(instance_attr, res, instance.id))
fixed_rrs += [_to_aws_encoding(res)]
else:
ret['comment'] = 'Attribute {} not found on instance {}'.format(instance_attr,
instance.id)
log.error(ret['comment'])
ret['result'] = False
return ret
else:
ret['comment'] = ('Unknown RR magic value seen: {}. Please extend the '
'boto3_route53 state module to add support for your preferred '
'incantation.'.format(fields[1]))
log.error(ret['comment'])
ret['result'] = False
return ret
else:
fixed_rrs += [rr]
ResourceRecords = [{'Value': rr} for rr in sorted(fixed_rrs)]
recordsets = __salt__['boto3_route53.get_resource_records'](HostedZoneId=HostedZoneId,
StartRecordName=Name, StartRecordType=Type, region=region, key=key, keyid=keyid,
profile=profile)
if SetIdentifier and recordsets:
log.debug('Filter recordsets {} by SetIdentifier {}.'.format(recordsets, SetIdentifier))
recordsets = [r for r in recordsets if r.get('SetIdentifier') == SetIdentifier]
log.debug('Resulted in recordsets {}.'.format(recordsets))
create = False
update = False
updatable = ['SetIdentifier', 'Weight', 'Region', 'GeoLocation', 'Failover', 'TTL',
'AliasTarget', 'HealthCheckId', 'TrafficPolicyInstanceId']
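    # Each name in 'updatable' is resolved via locals(), i.e. compared against the keyword
    # arguments passed to this state, to decide below whether an UPSERT is required.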
if not recordsets:
create = True
if __opts__['test']:
ret['comment'] = 'Route 53 resource record {} with type {} would be added.'.format(
Name, Type)
ret['result'] = None
return ret
elif len(recordsets) > 1:
ret['comment'] = 'Given criteria matched more than one ResourceRecordSet.'
log.error(ret['comment'])
ret['result'] = False
return ret
else:
rrset = recordsets[0]
for u in updatable:
if locals().get(u) != rrset.get(u):
update = True
break
if ResourceRecords != sorted(rrset.get('ResourceRecords'), key=lambda x: x['Value']):
update = True
if not create and not update:
ret['comment'] = ('Route 53 resource record {} with type {} is already in the desired state.'
''.format(Name, Type))
log.info(ret['comment'])
return ret
else:
if __opts__['test']:
ret['comment'] = 'Route 53 resource record {} with type {} would be updated.'.format(
Name, Type)
ret['result'] = None
return ret
ResourceRecordSet = {
'Name': Name,
'Type': Type,
'ResourceRecords': ResourceRecords
}
for u in updatable:
        if locals().get(u):
            ResourceRecordSet.update({u: locals().get(u)})
ChangeBatch = {
'Changes': [
{
'Action': 'UPSERT',
'ResourceRecordSet': ResourceRecordSet,
}
]
}
if __salt__['boto3_route53.change_resource_record_sets'](HostedZoneId=HostedZoneId,
ChangeBatch=ChangeBatch, region=region, key=key, keyid=keyid, profile=profile):
ret['comment'] = 'Route 53 resource record {} with type {} {}.'.format(Name,
Type, 'created' if create else 'updated')
log.info(ret['comment'])
# -*- coding: utf-8 -*-
import os
import re
import traceback
import shutil
import urllib
import xml.etree.ElementTree as ET
import xbmc
import xbmcvfs
import xbmcaddon
# Import the common settings
from settings import Settings
from settings import log
from settings import os_path_join
from settings import os_path_split
from settings import dir_exists
import epub
from mobi import Mobi
from kiehinen.ebook import Book as KMobi
from kindleunpack import kindleunpack
from cStringIO import StringIO
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfparser import PDFParser
from pdfminer.pdftypes import resolve1
from pdfminer.pdfpage import PDFPage
from pdfminer.psparser import PSLiteral
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
ADDON = xbmcaddon.Addon(id='script.ebooks')
FANART = ADDON.getAddonInfo('fanart')
CWD = ADDON.getAddonInfo('path').decode("utf-8")
RES_DIR = xbmc.translatePath(os.path.join(CWD, 'resources').encode("utf-8")).decode("utf-8")
MEDIA_DIR = xbmc.translatePath(os.path.join(RES_DIR, 'media').encode("utf-8")).decode("utf-8")
# Generic class for handling EBook details
class EBookBase():
def __init__(self, eBookFilePath, removeFileWhenComplete=False):
log("EBookBase: Loading book %s" % eBookFilePath)
self.filePath = eBookFilePath
self.fileName = os_path_split(eBookFilePath)[-1]
self.isTempBookFile = removeFileWhenComplete
try:
self.filePath = self.filePath.decode("utf-8")
except:
pass
@staticmethod
def createEBookObject(filePath):
localFilePath = filePath
removeWhenComplete = False
if filePath.startswith('smb://') or filePath.startswith('nfs://'):
try:
# Copy the file to the local disk
justFileName = os_path_split(filePath)[-1]
copiedFile = os_path_join(Settings.getTempLocation(), justFileName)
copy = xbmcvfs.copy(filePath, copiedFile)
if copy:
log("EBookBase: copy successful for %s" % copiedFile)
localFilePath = copiedFile
removeWhenComplete = True
else:
log("EBookBase: copy failed from %s to %s" % (filePath, copiedFile))
except:
log("EBookBase: Failed to copy file %s to local directory" % filePath)
elif filePath.startswith('http://') or filePath.startswith('https://'):
log("EBookBase: Book source is %s" % filePath)
try:
justFileName = 'opds.epub'
if '/mobi/' in filePath:
justFileName = 'opds.mobi'
elif '/pdf/' in filePath:
justFileName = 'opds.pdf'
copiedFile = os_path_join(Settings.getTempLocation(), justFileName)
fp, h = urllib.urlretrieve(filePath, copiedFile)
log(h)
localFilePath = copiedFile
removeWhenComplete = True
except:
log("EBookBase: Failed to download file %s to local directory" % filePath)
bookType = None
# Check which type of EBook it is
if localFilePath.lower().endswith('.epub'):
bookType = EPubEBook(localFilePath, removeWhenComplete)
elif localFilePath.lower().endswith('.mobi'):
bookType = MobiEBook(localFilePath, removeWhenComplete)
elif localFilePath.lower().endswith('.pdf'):
bookType = PdfEBook(localFilePath, removeWhenComplete)
else:
log("EBookBase: Unknown book type for %s (%s)" % (filePath, localFilePath))
return bookType
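    # Illustrative usage (the path below is hypothetical):
    #   ebook = EBookBase.createEBookObject('smb://server/books/example.epub')
    #   if ebook is not None:
    #       title = ebook.getTitle()
    #       ebook.tidyUp()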
@staticmethod
def getCoverImage(filePath, eBookFileName):
# Check if there is a cached version
coverTargetName = None
fullpathLocalImage, bookExt = os.path.splitext(filePath)
fullpathLocalImage = "%s.jpg" % fullpathLocalImage
if xbmcvfs.exists(fullpathLocalImage):
log("EBookBase: Found local cached image %s" % fullpathLocalImage)
return fullpathLocalImage
# Check for a cached cover
coverTargetName = EBookBase.getCachedCover(eBookFileName)
# If we reach here, then there was no cached cover image, so we need to extract one
if coverTargetName in [None, ""]:
ebook = EBookBase.createEBookObject(filePath)
coverTargetName = ebook.extractCoverImage()
ebook.tidyUp()
del ebook
# If there is still no cover image, check for folder.jpg in the same directory
if coverTargetName in [None, ""]:
baseDirectory = (os_path_split(filePath))[0]
subdirs, filesInDir = xbmcvfs.listdir(baseDirectory)
for fileInDir in filesInDir:
if fileInDir.lower() in ['folder.jpg', 'cover.jpg', 'folder.png', 'cover.png']:
coverTargetName = os_path_join(baseDirectory, fileInDir)
return coverTargetName
@staticmethod
def getFanArt(filePath):
# Check if there is a cached version
fullpathLocalImage, bookExt = os.path.splitext(filePath)
fullpathLocalImage = "%s-fanart.jpg" % fullpathLocalImage
if xbmcvfs.exists(fullpathLocalImage):
log("EBookBase: Found book fanart image %s" % fullpathLocalImage)
return fullpathLocalImage
# Now check if there is a default fanart file
fanartImage = FANART
baseDirectory = (os_path_split(filePath))[0]
subdirs, filesInDir = xbmcvfs.listdir(baseDirectory)
for fileInDir in filesInDir:
if fileInDir.lower() in ['fanart.jpg', 'fanart.png']:
fanartImage = os_path_join(baseDirectory, fileInDir)
return fanartImage
def tidyUp(self):
# If we had to copy the file locally, make sure we delete it
if self.isTempBookFile:
if xbmcvfs.exists(self.filePath):
xbmcvfs.delete(self.filePath)
def getTitle(self):
return ""
def getAuthor(self):
return ""
def getDescription(self):
return ""
# Checks the cache to see if there is a cover for this ebook
@staticmethod
def getCachedCover(fileName):
cachedCover = None
# check if the directory exists before searching
dirs, files = xbmcvfs.listdir(Settings.getCoverCacheLocation())
for aFile in files:
# Get the filename without extension
coverSrc, ext = os.path.splitext(aFile)
# Get the name that the cached cover will have been stored as
targetSrc, bookExt = os.path.splitext(fileName)
if targetSrc == coverSrc:
cachedCover = os_path_join(Settings.getCoverCacheLocation(), aFile)
log("EBookBase: Cached cover found: %s" % cachedCover)
# There is a special case for PDF files that we have a default image
if (cachedCover is None) and fileName.endswith('.pdf'):
cachedCover = os.path.join(MEDIA_DIR, 'pdf_icon.png')
return cachedCover
# Extracts the cover image from the ebook to the supplied location
def extractCoverImage(self):
return None
def getChapterDetails(self):
return []
def getChapterContents(self, chapterLink):
return ""
    # Convert a chapter's HTML into plain text decorated with Kodi formatting tags
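    # e.g. '<p>He said <b>hello</b></p>' comes out roughly as 'He said [B]hello[/B]'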
def convertHtmlIntoKodiText(self, htmlText):
# Remove the header section of the page
regexHeader = re.compile("<head>.*?</head>", re.IGNORECASE | re.DOTALL)
plainText = regexHeader.sub("", htmlText)
# Remove random spaces in epub files
plainText = re.sub('\s+', ' ', plainText)
# Replace the bold tags
plainText = plainText.replace('<br></br><br></br>', '<p></p>')
plainText = plainText.replace('<b>', '[B]<b>')
plainText = plainText.replace('<b class=', '[B]<b class=')
plainText = plainText.replace('</b>', '</b>[/B]')
plainText = plainText.replace('<B>', '[B]<B>')
plainText = plainText.replace('</B>', '</B>[/B]')
# Replace italic tags
plainText = plainText.replace('<i>', '[I]<i>')
plainText = plainText.replace('</i>', '</i>[/I]')
plainText = plainText.replace('<I>', '[I]<I>')
plainText = plainText.replace('</I>', '</I>[/I]')
# Add an extra line for paragraphs
plainText = plainText.replace('</p>', '</p>\n')
plainText = plainText.replace('<p>', '\n<p>')
plainText = plainText.replace('<p ', '\n<p ')
        # The html &nbsp; entity is not handled well by ElementTree, so replace
        # it with a space before we start
        plainText = plainText.replace('&nbsp;', ' ')
# Replace headers <h2> etc
plainText = plainText.replace('<h1', '[B]<h1')
plainText = plainText.replace('</h1>', '</h1>[/B]')
plainText = plainText.replace('<h2', '[B]<h2')
plainText = plainText.replace('</h2>', '</h2>[/B]')
plainText = plainText.replace('<h3', '[B]<h3')
plainText = plainText.replace('</h3>', '</h3>[/B]')
plainText = plainText.replace('<h4', '[I][B]<h4')
plainText = plainText.replace('</h4>', '</h4>[/B][/I]')
try:
plainText = ''.join(ET.fromstring(plainText).itertext())
except:
log("EBookBase: Failed to strip html text with ElementTree, error: %s" % traceback.format_exc())
log("EBookBase: Using regex for content handling")
plainText = re.sub(r'<[^>]+>', '', plainText)
# Replace any quotes or other escape characters
        plainText = plainText.replace('&quote;', '"')
plainText = plainText.replace(' ', ' ')
# Need to remove double tags as they are not handled very well when
# displayed, they do not get nested, so the first end will close all
# instances of this tag
plainText = plainText.replace('[B][B]', '[B]')
plainText = plainText.replace('[/B][/B]', '[/B]')
plainText = plainText.replace('[I][I]', '[I]')
plainText = plainText.replace('[/I][/I]', '[/I]')
# Remove empty space between tags, where there is no content
plainText = re.sub("\[B]\s*\[/B]", "", plainText)
plainText = re.sub("\[I\]\s*\[/I\]", "", plainText)
# Remove blank lines at the start of the chapter
plainText = plainText.lstrip('\n')
return plainText
def getFallbackTitle(self):
# Remove anything after the final dot
sections = self.fileName.split('.')
sections.pop()
# Replace the dots with spaces
return ' '.join(sections)
# Class to process the mobi formatted books
class MobiEBook(EBookBase):
def __init__(self, filePath, removeFileWhenComplete=False):
EBookBase.__init__(self, filePath, removeFileWhenComplete)
self.book = None
self.bookFallback = None
try:
self.book = KMobi(self.filePath)
except:
log("MobiEBook: Failed to process eBook %s with error: %s" % (self.filePath, traceback.format_exc()), xbmc.LOGERROR)
# A secondary Mobi reader, if the first can not handle the given file
def _getFallbackReader(self):
if self.bookFallback is None:
try:
self.bookFallback = Mobi(str(self.filePath))
# Need to parse all the header data in the book
self.bookFallback.parse()
except:
log("MobiEBook: Expected exception for secondary reader, book %s with error: %s" % (self.filePath, traceback.format_exc()), xbmc.LOGERROR)
return self.bookFallback
def getTitle(self):
# Default the title to the filename - this should be overwritten
title = None
if self.book is not None:
try:
title = self.book.title
except:
log("MobiEBook: Failed to get title for mobi %s with error: %s" % (self.filePath, traceback.format_exc()), xbmc.LOGERROR)
# If we failed to get the title, use the fallback Mobi reader
if title in [None, ""]:
fallback = self._getFallbackReader()
if fallback is not None:
try:
title = fallback.title()
except:
log("MobiEBook: Failed to get title using fallback mobi %s with error: %s" % (self.filePath, traceback.format_exc()), xbmc.LOGERROR)
if title in [None, ""]:
title = self.getFallbackTitle()
try:
log("MobiEBook: Title is %s for book %s" % (title.decode('utf-8', 'ignore'), self.filePath))
except:
pass
return title
def getAuthor(self):
author = ""
if self.book is not None:
try:
author = self.book.author
except:
log("MobiEBook: Failed to get author for mobi %s with error: %s" % (self.filePath, traceback.format_exc()), xbmc.LOGERROR)
# If we failed to get the author, use the fallback Mobi reader
if author in [None, ""]:
fallback = self._getFallbackReader()
if fallback is not None:
try:
author = fallback.author()
except:
log("MobiEBook: Failed to get author using fallback mobi %s with error: %s" % (self.filePath, traceback.format_exc()), xbmc.LOGERROR)
try:
log("MobiEBook: Author is %s for book %s" % (author.decode('utf-8', 'ignore'), self.filePath))
except:
pass
return author
def getDescription(self):
description = ""
if self.book is not None:
try:
description | |
SGTE Data for Pure Elements, NPL Report
DMA(A)195, Rev. August 1990'
REF78 DUP-ALNI '<NAME>, J Alloy and Compounds, (1997); Al-Ni'
87GU3 87Gui3 '<NAME>, Z Metallkde, Vol 78 (1987)
p 639-647
TRITA-MAC 324B (1986); CO-NI'
87GU4 87Gui4 '<NAME>, Int. J of Thermophys vol 8
(1987)
p 481-510, TRITA-MAC 308 (1986); CO'
90DIN1 89Din '<NAME>, SGTE Data for Pure Elements, NPL Report
DMA(A)195, September 1989'
88GUI 88Gui2 '<NAME>, Z. Metallkde. Vol 79(1988)
p.524-536,
TRITA-MAC 362 (1988); C-CO-NI AND C-CO-FE-NI'
93SAU NIG-ALCU 'Nigel Saunders, COST 507 round 1, (1993); Al-Cu'
91SAU NIG-ALTI 'Nigel Saunders, COST 507 round 1, (1991); Al-Ti'
88GUS1 '<NAME>, CALPHAD 12 (1988) 277-292; Cr-W, Cr-Ni-W,
Modified by <NAME>, NIST, Gaithersburg, MD, USA '
98DUP '<NAME>, <NAME>,
Thermodynamic Assessment of the System Al-Co,
Revue de Metallurgie, 95(9), 1121-1129 (1998); Al-Co'
97KUS '<NAME>, <NAME>,
A Thermodynamic Evaluation of the Co-Cr and the C-Co-Cr Systems,
Calphad, 21(3), 321-333 (1997); Co-Cr'
15LIU '<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>,
First-principles calculations, experimental study,
and thermodynamic modeling of the Al-Co-Cr-Ni system
(2015); X.L. Liu Ph.D. Thesis'
!
"""
ISSUE43_TDB = """
$ Database file written 2016- 5-19
$ From database: TTNI5
ELEMENT /- ELECTRON_GAS 0.0000E+00 0.0000E+00 0.0000E+00!
ELEMENT VA VACUUM 0.0000E+00 0.0000E+00 0.0000E+00!
ELEMENT AL FCC_A1 2.6982E+01 4.5400E+03 2.8300E+01!
ELEMENT CR BCC_A2 5.1996E+01 4.0500E+03 2.3543E+01!
ELEMENT NI FCC_A1 5.8690E+01 4.7870E+03 2.9796E+01!
FUNCTION GHSERAL 298.14 -7976.15+137.093038*T-24.3671976*T*LN(T)
-.001884662*T**2-8.77664E-07*T**3+74092*T**(-1); 700 Y
-11276.24+223.048446*T-38.5844296*T*LN(T)+.018531982*T**2
-5.764227E-06*T**3+74092*T**(-1); 933.47 Y
-11278.378+188.684153*T-31.748192*T*LN(T)-1.231E+28*T**(-9); 2900 N !
FUNCTION ZERO 298.15 +0.0; 6000 N !
FUNCTION GFCCCR 298.15 +7284+.163*T+GHSERCR#; 6000 N !
FUNCTION GHSERNI 298.14 -5179.159+117.854*T-22.096*T*LN(T)-.0048407*T**2;
1728 Y
-27840.655+279.135*T-43.1*T*LN(T)+1.12754E+31*T**(-9); 3000 N !
FUNCTION GHSERCR 298.14 -8856.94+157.48*T-26.908*T*LN(T)+.00189435*T**2
-1.47721E-06*T**3+139250*T**(-1); 2180 Y
-34869.344+344.18*T-50*T*LN(T)-2.88526E+32*T**(-9); 6000 N !
FUNCTION UN_ASS 298.15 +0; 300 N !
TYPE_DEFINITION % SEQ *!
DEFINE_SYSTEM_DEFAULT ELEMENT 2 !
DEFAULT_COMMAND DEF_SYS_ELEMENT VA /- !
TYPE_DEFINITION & GES A_P_D FCC_A1 MAGNETIC -3.0 2.80000E-01 !
PHASE FCC_A1 %& 2 1 1!
CONSTITUENT FCC_A1 :AL,CR,NI% :VA: !
PARAMETER G(FCC_A1,AL:VA;0) 298.15 +GHSERAL#; 6000 N REF0 !
PARAMETER TC(FCC_A1,AL:VA;0) 298.15 +ZERO#; 6000 N REF0 !
PARAMETER BMAGN(FCC_A1,AL:VA;0) 298.15 +ZERO#; 6000 N REF0 !
PARAMETER G(FCC_A1,CR:VA;0) 298.15 +GFCCCR#; 6000 N REF0 !
PARAMETER TC(FCC_A1,CR:VA;0) 298.15 -1109; 6000 N REF0 !
PARAMETER BMAGN(FCC_A1,CR:VA;0) 298.15 -2.46; 6000 N REF0 !
PARAMETER G(FCC_A1,NI:VA;0) 298.15 +GHSERNI#; 6000 N REF0 !
PARAMETER TC(FCC_A1,NI:VA;0) 298.15 +633; 6000 N REF0 !
PARAMETER BMAGN(FCC_A1,NI:VA;0) 298.15 +.52; 6000 N REF0 !
PARAMETER G(FCC_A1,AL,CR:VA;0) 298.15 -45900+6*T; 6000 N REF0 !
PARAMETER G(FCC_A1,AL,CR,NI:VA;0) 298.15 +30500-5*T; 6000 N REF0 !
PARAMETER G(FCC_A1,AL,NI:VA;0) 298.15 -173950+19.35*T; 6000 N
REF0 !
PARAMETER G(FCC_A1,AL,NI:VA;1) 298.15 +28500; 6000 N REF0 !
PARAMETER G(FCC_A1,AL,NI:VA;2) 298.15 +47000; 6000 N REF0 !
PARAMETER G(FCC_A1,CR,NI:VA;0) 298.15 +8030-12.8801*T; 6000 N
REF0 !
PARAMETER G(FCC_A1,CR,NI:VA;1) 298.15 +33080-16.0362*T; 6000 N
REF0 !
PARAMETER TC(FCC_A1,CR,NI:VA;0) 298.15 -3605; 6000 N REF0 !
PARAMETER BMAGN(FCC_A1,CR,NI:VA;0) 298.15 -1.91; 6000 N REF0 !
TYPE_DEFINITION ' GES A_P_D GAMMA_PRIME MAGNETIC -3.0 2.80000E-01 !
PHASE GAMMA_PRIME %' 2 .75 .25 !
CONSTITUENT GAMMA_PRIME :AL,CR,NI% : AL%,CR,NI : !
PARAMETER G(GAMMA_PRIME,AL:AL;0) 298.15 +GHSERAL#; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,CR:AL;0) 298.15 -8606+1.125*T+.75*GFCCCR#
+.25*GHSERAL#; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,NI:AL;0) 298.15 -39860+3.175*T+.75*GHSERNI#
+.25*GHSERAL#; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,AL:CR;0) 298.15 -8606+1.125*T+.75*GHSERAL#
+.25*GFCCCR#; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,CR:CR;0) 298.15 +GFCCCR#; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,NI:CR;0) 298.15 -1915-.91*T+.75*GHSERNI#
+.25*GFCCCR#; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,AL:NI;0) 298.15 -35000+5*T+.75*GHSERAL#
+.25*GHSERNI#; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,CR:NI;0) 298.15 +10000-3.92*T+.75*GFCCCR#
+.25*GHSERNI#; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,NI:NI;0) 298.15 +GHSERNI#; 6000 N REF0 !
PARAMETER TC(GAMMA_PRIME,NI:NI;0) 298.15 +633; 6000 N REF0 !
PARAMETER BMAGN(GAMMA_PRIME,NI:NI;0) 298.15 +.52; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,AL,NI:AL;0) 298.15 -59500+20*T; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,AL,NI:AL;1) 298.15 +53350; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,AL:AL,NI;0) 298.15 +10000; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,AL:AL,CR;0) 298.15 -2869+.375*T; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,CR:AL,NI;0) 298.15 +10000; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,CR:AL,CR;0) 298.15 -2869+.375*T; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,NI:AL,NI;0) 298.15 +9100-1.5*T; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,NI:AL,NI;1) 298.15 -5400; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,NI:AL,CR;0) 298.15 -6250+T; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,AL,NI:CR;0) 298.15 -60000+20*T; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,AL,NI:NI;0) 298.15 -50000; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,AL,CR:*;0) 298.15 -25819+3.375*T; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,*:CR,NI;0) 298.15 +3000; 6000 N REF0 !
PARAMETER G(GAMMA_PRIME,CR,NI:*;0) 298.15 -1500; 6000 N REF0 !
LIST_OF_REFERENCES
NUMBER SOURCE
!
"""
FEMN_TDB = """
$ FOR TESTING PURPOSES ONLY -- NOT FOR RESEARCH
$ NO INTERACTION PARAMETERS
ELEMENT /- ELECTRON_GAS 0.0000E+00 0.0000E+00 0.0000E+00!
ELEMENT VA VACUUM 0.0000E+00 0.0000E+00 0.0000E+00!
ELEMENT FE BCC_A2 5.5847E+01 4.4890E+03 2.7280E+01!
ELEMENT MN CBCC_A12 5.4938E+01 4.9960E+03 3.2008E+01!
FUNCTION TEMP 0.99 +T**(-1); 6000 N !
FUNCTION RTEMP 0.99 +R#**(-1)*TEMP#; 6000 N !
FUNCTION ZERO 0.99 +0.0; 6000 N !
FUNCTION INTR 0.99 +1000; 6000 N !
FUNCTION INFINITE 0.99 +1000000; 6000 N !
FUNCTION G2STFCC1 0.99 +9023.52375-2.4952226*T; 6000 N !
FUNCTION G2STFCC2 0.99 -G2STFCC1#*RTEMP#; 6000 N !
FUNCTION G2STFCC3 0.99 +1+1*EXP(G2STFCC2#); 6000 N !
FUNCTION G2STFCC4 0.99 +1*LN(G2STFCC3#); 6000 N !
FUNCTION G2STFCC 0.99 -R#*T*G2STFCC4#; 6000 N !
FUNCTION GGBCCL 0.99 -8410.26596-.0032390815*T**2-3.3191338E-14*T**5;
6000 N !
FUNCTION GGBCCH 0.99 -33208.4173+165.40504*T-21.0474823*T*LN(T)
-2.781828E+18*T**(-5)+4.2811788E+37*T**(-11); 6000 N !
FUNCTION GEIN309 0.99 +0.0; 6000 N !
FUNCTION GGFCCL 0.99 -2615.00904-.0027933375*T**2-2.1239087E-14*T**5
+G2STFCC#; 6000 N !
FUNCTION GGFCCH 0.99 -32389.5438+168.31394*T-20.9834791*T*LN(T)
+9.2444598E+18*T**(-5)-1.4750509E+37*T**(-11)+G2STFCC#; 6000 N !
FUNCTION GHSERFE 0.99 +GGBCCL#+8927.2831-9.7228331*T; 1811 Y
+GGBCCH#+8927.2831-9.7228331*T; 6000 N !
FUNCTION GGHCPL 0.99 -3307.07993-.00431797569*T**2-2.1239086E-14*T**5;
6000 N !
FUNCTION GGHCPH 0.99 -24924.6659+161.764208*T-21.0325858*T*LN(T)
-4.71013343E+18*T**(-5); 6000 N !
FUNCTION TECBCCMN 0.99 +287.60608; 6000 N !
FUNCTION TECUBMN 0.99 +247.87895; 6000 N !
FUNCTION TEFCCMN 0.99 +257.58049; 6000 N !
FUNCTION TEBCCMN 0.99 +265.03333; 6000 N !
FUNCTION TELIQMN 0.99 +119.579259; 6000 N !
FUNCTION TEHCPMN 0.99 +438.579784; 6000 N !
FUNCTION BETA1 0.99 +1.22; 6000 N !
FUNCTION BETA2 0.99 +1.62; 6000 N !
FUNCTION BETA3 0.99 +1.27; 6000 N !
FUNCTION MNCBCCB 0.99 +1*LN(BETA1#); 6000 N !
FUNCTION MNFCCB 0.99 +1*LN(BETA2#); 6000 N !
FUNCTION MNBCCB 0.99 +1*LN(BETA3#); 6000 N !
FUNCTION GGMNCBCL 0.99 -8621.707-.0054695069*T**2+3.6645644E-08*T**3
-2.588845E-14*T**5; 6000 N !
FUNCTION GGMNCBCH 0.99 -25954.328+130.73341*T+20.178863*T
-20.178863*T*LN(T)-7.4173873E+17*T**(-5)+3.5722E+36*T**(-11); 6000 N !
FUNCTION GGMNCUBL 0.99 -6181.4708-.0057218755*T**2+8.9632678E-12*T**3
-1.9360399E-14*T**5; 6000 N !
FUNCTION GGMNCUBH 0.99 -24465.161+136.70198*T+21.095719*T
-21.095719*T*LN(T)-3.967E+17*T**(-5)+2.9953E+36*T**(-11); 6000 N !
FUNCTION GGMNFCCL 0.99 -5356.328-.0044627084*T**2-3.7906096E-10*T**4
+8.1018765E-14*T**5; 6000 N !
FUNCTION GGMNFCCH 0.99 -22440.434+135.25456*T+20.797951*T
-20.797951*T*LN(T)-1.8964E+18*T**(-5)+5.208E+36*T**(-11); 6000 N !
FUNCTION GGMNBCCL 0.99 -4692.6774-.006846492*T**2-3.2311349E-15*T**5;
6000 N !
FUNCTION GGMNBCCH 0.99 -18620.432+127.31728*T+20.301009*T
-20.301009*T*LN(T)-2.2035E+18*T**(-5)+5.1886E+36*T**(-11); 6000 N !
FUNCTION GGMNHCPL 0.99 -9755.88935-.00699764166*T**2+4.12888434E-15*T**5;
6000 N !
FUNCTION GGMNHCPH 0.99 -25379.581+133.19315*T+21.0957581*T
-21.0957581*T*LN(T)-1.1043E+18*T**(-5)+3.3993E+36*T**(-11); 6000 N !
FUNCTION UN_ASS 0.99 +0.0; 300 N !
TYPE_DEFINITION % SEQ *!
DEFINE_SYSTEM_DEFAULT ELEMENT 2 !
DEFAULT_COMMAND DEF_SYS_ELEMENT VA /- !
TYPE_DEFINITION * GES A_P_D LIQUID MAGNETIC 0.0 0.25 !
PHASE LIQUID %* 2 1 1 !
CONSTITUENT LIQUID :FE,MN : VA : !
PARAMETER G(LIQUID,FE:VA;0) 0.99 +7103.20801
-.0019730116*T**2+1392.1182-8.2584018*T; 6000 N REF0 !
PARAMETER TC(LIQUID,FE:VA;0) 0.99 +200; 6000 N REF0 !
PARAMETER NT(LIQUID,FE:VA;0) 0.99 -200; 6000 N REF0 !
PARAMETER BMAGN(LIQUID,FE:VA;0) 0.99 +1.7; 6000 N REF0 !
PARAMETER THETA(LIQUID,FE:VA;0) 0.99 +1*LN(245); 6000 N REF0 !
PARAMETER GD(LIQUID,FE:VA;0) 0.99 +42754.9478-7.624*T
-1.08230446*T*LN(T); 6000 N REF0 !
PARAMETER G(LIQUID,MN:VA;0) 0.99 +13256.8283
-.0017557494*T**2; 6000 N REF0 !
PARAMETER THETA(LIQUID,MN:VA;0) 0.99 +1*LN(TELIQMN#); 6000 N
REF0 !
PARAMETER GD(LIQUID,MN:VA;0) 0.99 +50493.6966-8.314*T
-1.07778322*T*LN(T); 6000 N REF0 !
LIST_OF_REFERENCES
NUMBER SOURCE
!
"""
ALNI_TOUGH_CHEMPOT_TDB = """
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
$ Date: 2016-12-14 15:39
$ Components: AL, NI, VA
$ Phases: AL3NI1, AL3NI2, AL3NI5, BCC_B2, FCC_L12, LIQUID
$ Generated by rotis (pycalphad 0.4.2+61.g2a7ce13.dirty)
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
ELEMENT AL BLANK 0 0 0 !
ELEMENT NI BLANK 0 0 0 !
ELEMENT VA BLANK 0 0 0 !
FUNCTION GBCCAL 298.15 -4.813*T + GHSERAL# + 10083; 2900.0 N !
FUNCTION GBCCNI 298.15 -3.556*T + GHSERNI# + 8715.084; 3000.0 N !
FUNCTION GFCCAL 298.15 GHSERAL#; 2900.0 N !
FUNCTION GFCCNI 298.15 GHSERNI#; 3000.0 N !
FUNCTION GHSERAL 298.15 -8.77664E-7*T**3 - 0.001884662*T**2 -
24.3671976*T*LN(T) + 137.093038*T - 7976.15 + 74092*T**(-1); 700.0 Y
-5.764227E-6*T**3 + 0.018531982*T**2 - 38.5844296*T*LN(T) + 223.048446*T -
11276.24 + 74092*T**(-1); 933.47 Y -31.748192*T*LN(T) + 188.684153*T -
11278.378 - 1.230524E+28*T**(-9); 2900.0 N !
FUNCTION GHSERNI 298.15 -0.0048407*T**2 - 22.096*T*LN(T) + 117.854*T -
5179.159; 1728.0 Y -43.1*T*LN(T) + 279.135*T - 27840.655 +
1.12754E+31*T**(-9); 3000.0 N !
FUNCTION GHSERVA 1 0; 10000 N !
FUNCTION GLIQAL 298.15 7.9337E-20*T**7 - 11.841867*T + GHSERAL# + 11005.029;
700.0 Y | |
# import mysql.connector as mysql
import datetime
from types import resolve_bases
from dotenv import load_dotenv
import os
from flaskext.mysql import MySQL
import pymysql
from datetime import date
load_dotenv()
SQL_HOST = os.getenv("SQL_HOST")
SQL_USER = os.getenv('SQL_USER')
SQL_PASSWORD = os.getenv('SQL_PASSWORD')
class DB:
def __init__(self, app):
# app.config['MYSQL-HOST'] = SQL_HOST
# app.config['MYSQL_USER'] = SQL_USER
# app.config['MYSQL_PASSWORD'] = SQL_PASSWORD
# app.config['MYSQL_DB'] = "sli_database"
# app.config['MYSQL_AUTOCOMMIT'] = True
# app.config['MYSQL_AUTH_PLUGIN'] = "mysql_native_password"
# self.mysql = MySQL(app, host = SQL_HOST, user = SQL_USER, password = <PASSWORD>, db = "sli_database", autocommit = True)
# self.db = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
# # self.mysql.init_app(app)
# # self.db = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>_PASSWORD, database='sli_database', autocommit=True)
# print(self.db)
# print(self.db.ping(reconnect=False))
# self.cursor = self.db.cursor()
# self.cursor.close()
"""
def create_connection(self):
return pymysql.connect(
host=SQL_HOST,
db='sli_database',
user=SQL_USER,
password=<PASSWORD>,
cursorclass=pymysql.cursors.DictCursor
)
"""
# Gets login information to verify password based on username input if username is present in database.
#
# Parameters:
# username: user's username
# Returns:
# results: list of singular entry retrieved from database in the form [username, password, role]
def getLogin(self, username):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
# print(username)
        # Parameterised query; the leading '%' keeps the original prefix-wildcard LIKE match
        cursor.execute("SELECT username, password, role, fname FROM `User` WHERE username LIKE %s", ("%" + str(username), ))
result = cursor.fetchall()
connection.close()
# print("get login done")
return result
'''def getTeacherLogin(self, email):
self.cursor = self.db.cursor()
sql = "SELECT id, email, password FROM teacher WHERE email = %s"
get_email = (str(email), )
self.cursor.execute(sql, get_email)
result = self.cursor.fetchall()
self.cursor.close()
return result'''
# Gets user's first name to display in welcome message on dashboard.
#
# Parameters:
# username: user's username
#
# Returns:
# results: list of singular tuple entry retrieved from database in the form (fname)
def getUserInfo(self, username):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "SELECT fname FROM `User` WHERE username = %s"
get_teacher_info = (str(username), )
cursor.execute(sql, get_teacher_info)
result = cursor.fetchall()
connection.close()
return result
def getUserToken(self, token):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
# print("entered sql")
        # Use a parameterised query so the token value cannot inject SQL
        sql = "SELECT `user`, token_val FROM Token WHERE token_val = %s"
        get_token = (str(token), )
        try:
            cursor.execute(sql, get_token)
except Exception as e:
# print(e)
return e
# print("printing")
result = cursor.fetchall()
# print(result)
connection.close()
return result
def insertToken(self, username, token):
# print("ADB")
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
        delete_old_token = "DELETE FROM Token WHERE `user`=%s"
        del_input = (username, )
        cursor.execute(delete_old_token, del_input)
connection.commit()
result = cursor.fetchall()
# print(result)
#print("insert new token")
connection.close()
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
# print("insert new token")
sql = "INSERT INTO Token VALUES (%s, %s)"
input = (str(username), str(token))
cursor.execute(sql, input)
connection.close()
return "ok"
def createNewClass(self, teacher, class_name):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
insert_sql = "INSERT INTO Class VALUES (%s, %s)"
insert_input = (str(teacher), str(class_name))
cursor.execute(insert_sql, insert_input)
# print("created new class successful")
connection.close()
def deleteToken(self, username):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
# print("deleting...")
sql = "DELETE FROM Token WHERE `user` = %s"
del_input = (str(username), )
cursor.execute(sql, del_input)
# print("deleted!")
connection.close()
def createAccount(self, username, password, role, fname=None, lname=None):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "INSERT INTO `User` VALUES (%s, %s, %s, %s, %s)"
inputs = (str(username), str(password), str(role), str(fname), str(lname))
cursor.execute(sql, inputs)
connection.close()
return "success"
def getClasses(self, teacher):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "SELECT name FROM Class WHERE teacher = %s"
get_id = (str(teacher), )
cursor.execute(sql, get_id)
result = cursor.fetchall()
connection.close()
return result
def getStudentsOfClass(self, teacher, class_name):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
# print("here")
# print(teacher, class_name)
sql = "SELECT student FROM InClass WHERE teacher LIKE %s AND class LIKE %s ORDER BY student ASC"
input_sql = (str(teacher), str(class_name))
# print("good")
cursor.execute(sql, input_sql)
# print("haha")
result = cursor.fetchall()
# print(type(result))
connection.close()
return result
def addStudentToClass(self, teacher, class_name, student):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "INSERT INTO InClass VALUES (%s, %s, %s)"
value = (str(teacher), str(class_name), str(student))
cursor.execute(sql, value)
connection.close()
def logWork(self, username, project, sdg, date, hours, description):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "INSERT INTO `Work` (user, project, SDG, date, hours, description) VALUES (%s, %s, %s, %s, %s, %s)"
inputs = (str(username), str(project), str(sdg), str(date), int(hours), str(description))
# print(inputs)
cursor.execute(sql, inputs)
connection.close()
'''
    Gets all campaigns for the class that the given student is in.
    Parameters:
        student: username of the student whose campaigns we want
Returns:
results: list of tuple entries retrieved from database in the form (campaign name, total_hours, start_date, due_date)
in ascending order of due date
'''
def studentGetCampaigns(self, student):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "SELECT name, total_hours, start_date, due_date FROM Campaign WHERE (teacher, class) in (SELECT teacher, class FROM InClass WHERE student LIKE %s) ORDER BY due_date desc"
inputs = (str(student), )
cursor.execute(sql, inputs)
results = cursor.fetchall()
connection.close()
return results
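    # Illustrative usage (values are hypothetical):
    #   db.studentGetCampaigns("jdoe")
    #   # -> (("Food Drive", 20, datetime.date(2021, 9, 1), datetime.date(2021, 12, 1)),)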
def getGoal(self, student):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "SELECT total_hours, target_date FROM Goal WHERE user like %s"
inputs = (str(student), )
cursor.execute(sql, inputs)
results = cursor.fetchall()
connection.close()
return (results[0][0], str(results[0][1]))
def createCampaign(self, teacher, class_name, name, total_hours, start_date, due_date):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "INSERT INTO Campaign VALUES (%s, %s, %s, %s, %s, %s)"
inputs = (str(teacher), str(class_name), str(name), str(total_hours), str(start_date), str(due_date))
cursor.execute(sql, inputs)
connection.close()
def createGoal(self, student, total_hours, target_date):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "DELETE FROM Goal WHERE user LIKE %s"
inputs = (str(student), )
cursor.execute(sql, inputs)
sql = "INSERT INTO Goal VALUES (%s, %s, %s)"
inputs = (str(student), str(total_hours), str(target_date))
cursor.execute(sql, inputs)
connection.close()
def createPasswordLink(self, email, link):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "DELETE FROM ResetLink WHERE user LIKE %s"
inputs = (str(email), )
cursor.execute(sql, inputs)
sql = "INSERT INTO ResetLink VALUES (%s, %s)"
inputs = (str(email), link)
cursor.execute(sql, inputs)
connection.close()
def getPasswordLink(self, link):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "SELECT user, link FROM ResetLink WHERE link LIKE %s"
inputs = (link, )
cursor.execute(sql, inputs)
results = cursor.fetchall()
connection.close()
return results
def resetPassword(self, username, password):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "UPDATE User SET password = %s WHERE username LIKE %s"
inputs = (str(password), str(username))
cursor.execute(sql, inputs)
sql = "DELETE FROM ResetLink WHERE user LIKE %s"
inputs = (str(username), )
cursor.execute(sql, inputs)
connection.close()
# Gets all campaigns for a given class that a teacher owns to be displayed on the teacher's dashboard.
#
# Parameters:
# username: teacher's username
# className: name of teacher's class
#
# Returns:
# results: list of tuple entries retrieved from database in the form (campaign name, total_hours, start_date, due_date)
# in ascending order of due date
def teacherGetCampaigns(self, username, className):
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "SELECT name, total_hours, start_date, due_date FROM Campaign WHERE teacher LIKE %s AND class LIKE %s ORDER BY due_date desc"
inputs = (str(username), str(className))
cursor.execute(sql, inputs)
results = cursor.fetchall()
connection.close()
return results
# Gets total hours of logged work between the given start and due dates for a specific user.
#
# Parameters:
# start_date: earliest date threshold
# due_date: latest date threshold
# username: user whose total hours we want to retrieve
#
# Returns:
# results: list of tuple entries retrieved from database in the form (user, total_hours)
def getStudentProgress(self, username, start_date, due_date):
if not start_date:
start_date = "1900-01-01"
if not due_date:
due_date = str(date.today())
connection = pymysql.connect(host=SQL_HOST, user=SQL_USER, password=<PASSWORD>, database='sli_database', autocommit=True)
cursor = connection.cursor()
sql = "SELECT user, sum(hours) FROM Work WHERE date BETWEEN %s AND %s AND user LIKE %s GROUP BY user;"
inputs = (str(start_date), str(due_date), str(username))
cursor.execute(sql, inputs)
| |
for reduction
if post_reduction:
stage += 1
stage_type = "red_out"
placeholders.add("ops%d" % stage)
post_reduction = False
stage_map[arg_i] = (stage,stage_type)
for i in range(2):
operand_i, operand = stack.pop()
if operand[0] is ng.GPUTensor:
stage_map[operand_i] = (stage,stage_type)
elif arg_type in _float_ops:
            # For each tensor argument assign the stage that it belongs to.
for i in range(_float_ops[arg_type][0]):
operand_i, operand = stack.pop()
if operand[0] is ng.GPUTensor:
# If we are in post reduction and see a tensor we need to
# switch stages to an ew loop.
if post_reduction:
stage += 1
stage_type = "loop"
placeholders.add("loads%d" % stage)
placeholders.add("ops%d" % stage)
post_reduction = False
stage_map[operand_i] = (stage,stage_type)
# just append the temp float as a placeholder
stack.append((-1,(float,)))
# Tie this operation to a stage
stage_map[arg_i] = (stage,stage_type)
# Each time we do a reduction we need to setup a new elementwise loop
elif arg_type in _reduction_ops:
# It's possible to have back to back reductions.
# If so start a new ew loop stage.
if post_reduction:
stage += 1
stage_type = "loop"
placeholders.add("loads%d" % stage)
placeholders.add("ops%d" % stage)
# Tie this operation to a stage
stage_map[arg_i] = (stage,stage_type)
# Tie a tensor to the stage if one precedes the reduction.
operand_i, operand = stack.pop()
if operand[0] is ng.GPUTensor:
stage_map[operand_i] = (stage,stage_type)
# just append the temp float as a placeholder
stack.append((-1,(float,)))
# generate a unique signature for this reduction op
red_sig = []
for i, a in enumerate(type_args):
# find everything tied to this stage
if i in stage_map and stage_map[i][0] == stage:
# for operations, just append the name
if type(a[0]) is str:
red_sig.append(a[0])
# For tensor or constant, append type and id.
# Note that constants have unique ids and will prevent
# duplicate detection. Need to know diff between contants that
# can change or are actually static... save for another day.
# TODO: this has implications for cached execution plans.
else:
red_sig.append(tuple(a[0:2]))
red_sig = tuple(red_sig)
# Look for duplicate reductions
if red_sig in dup_reduction:
# remove duplicate placeholders
placeholders.remove("loads%d" % stage)
placeholders.remove("ops%d" % stage)
# print "dup: ", stage, arg[1], red_sig, dup_reduction[red_sig]
# link the dup stage with the original
dup_reduction[stage] = dup_reduction[red_sig]
else:
# tie each reduction signature to its stage
dup_reduction[red_sig] = stage
# finish building the reduction stage
placeholders.add("reduction%d" % stage)
# The ops section begins a new stage
# We could try and find the longest common op string and reuse these ops
# along with the reduction but it's not worth the complication.
stage += 1
stage_type = "red_ops"
placeholders.add("ops%d" % stage)
post_reduction = True
else:
# build the stack with the operands
stack.append((arg_i, arg))
# print "\n".join(str(stage_map[i]) + " " + str(s) for i,s in enumerate(type_args))
# print "\n"
# print "\n".join(str(s) for s in placeholders)
# exit()
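    # 'sig' accumulates the format string handed to prepare() below: 'P' (a pointer) for the
    # random state, then 'Pii' per distinct tensor (data pointer plus two int parameters),
    # 'f' per float constant and 'i' per loop-bound argument added as stages are emitted.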
sig = "P" # first param for rand_state
stack = []
array_ids = set()
fp16In = False
rand_init = False
rand_func = False
current_stage = None
stack = []
    red_registers = {}
template = _ew_template
template_vals = { "name" : "kernel_" }
for key in placeholders:
template_vals[key] = []
for arg_i, arg in enumerate(type_args):
arg_type, arg_id = arg[0:2]
stage, stage_type = stage_map[arg_i]
# build out the template as we process operations (strings)
if type(arg_type) is str:
# don't build duplicate stages
if stage not in dup_reduction:
# build the template as the stage and stage_type combination changes
if current_stage != stage_map[arg_i]:
current_stage = stage_map[arg_i]
template += _stage_template[stage_type].format(stage)
# the reduction op shares the stage with its loop
# so append that separately here.
if arg_type in _reduction_ops:
template += _stage_template["red"].format(stage)
else:
current_stage = stage_map[arg_i]
# Array operands
if arg_type is ng.GPUTensor:
dtype = arg[2]
#TODO: need to be able to handle more than 26 params..
template_vals["name"] += dtype + chr(ord("A") + arg_id)
if stage not in dup_reduction:
# first arg is output array, don't put on stack
if arg_i > 0:
stack.append("a%d" % arg_id)
else:
out_dtype = dtype
# 0: arg_id, 1: stage, 2: type, 3: cvt
ew_dtype = _ew_types[dtype]
fmt = (arg_id, stage, ew_dtype["type"], ew_dtype["cvt"])
# First time we see a tensor initialize everything
if arg_id not in array_ids:
array_ids.add(arg_id)
array_ids.add((arg_id,stage))
sig += "Pii"
# input tensors
if arg_i > 0:
ew_in = _ew_strings["in"]
loads = "loads%d" % stage
template_vals["arguments"].append(ew_in["arguments"].format(*fmt))
template_vals["inits" ].append(ew_in["inits" ].format(*fmt))
template_vals[loads ].append(ew_in["loads" ].format(*fmt))
# output tensor
else:
for key in ("arguments","inits"):
template_vals[key].append(_ew_strings["out"][key].format(*fmt))
if dtype == 'f2' and not fp16In:
template_vals["common"].append(_common_fp16_to_fp32)
fp16In = True
# Subsequent times we see a tensor just initialize inits and loads
            # But only for arrays of different non-dup stages
elif (arg_id,stage) not in array_ids:
array_ids.add((arg_id,stage))
ew_in = _ew_strings["in"]
loads = "loads%d" % stage
template_vals["inits"].append(ew_in["inits"].format(*fmt))
template_vals[loads ].append(ew_in["loads"].format(*fmt))
# Constant operands
elif arg_type is float:
sig += "f"
template_vals["name"] += "f4" + chr(ord("a") + arg_id)
if stage not in dup_reduction:
stack.append("c%d" % arg_id)
ew_const = _ew_strings["const"]
template_vals["arguments"].append(ew_const["arguments"].format(arg_id))
# Operations (arg_type = op_name)
else:
template_vals["name"] += "_%s_" % arg_type
if arg_type == "assign":
rounding = arg[2]
ops = "ops%d" % stage
# loop end condition for last stage
sig += "i"
template_vals["arguments"].append("int n%d" % stage)
out_val = stack.pop()
# rounding
if out_dtype != "f4":
round_val = "r%d" % arg_id
ew_round = _ew_strings["round"]
if out_dtype == "f2":
# random rounding
if rounding > 0:
if not rand_init:
rand_init = _init_rand(template_vals)
template_vals["common"].append(_common_random_round)
template_vals["name"] += "rr"
template_vals[ops].append(ew_round["random"].format(round_val, out_val))
# nearest rounding (unbiased)
else:
template_vals["common"].append(_common_fp32_to_fp16)
template_vals["name"] += "rn"
template_vals[ops].append(ew_round["nearest"].format(round_val, out_val))
# int8 and uint8:
else:
if out_dtype == "i1":
template_vals["common"].append(_common_fp32_to_i1)
else:
template_vals["common"].append(_common_fp32_to_u1)
template_vals[ops].append(ew_round[out_dtype].format(round_val, out_val))
out_val = round_val
template_vals[ops].append(_float_ops[arg_type][1].format(out_val))
elif arg_type in _float_ops:
if stage not in dup_reduction:
ops = "ops%d" % stage
(num_ops, op_code) = _float_ops[arg_type]
if arg_type == "rand":
if not rand_init:
rand_init = _init_rand(template_vals)
if not rand_func:
template_vals["common"].append(_common_frand)
rand_func = True
op_list = [ "r%d" % arg_id ]
#build the operands from the stack
for i in range(num_ops):
op_list.append(stack.pop())
template_vals[ops].append(op_code.format(*op_list))
stack.append(op_list[0])
elif arg_type in _reduction_ops:
# loop end condition for current stage
# add regardless of duplicate reduction stage
sig += "i"
template_vals["arguments"].append("int n%d" % stage)
# if this is a duplicate reduction just push the previous
# result back onto the stack.
if stage in dup_reduction:
                stack.append(red_registers[dup_reduction[stage]])
# Otherwise fill out the reduction template
else:
ops = "ops%d" % stage
red = "reduction%d" % stage
red_arg = "r%d" % arg_id
red_strings = _reduction_ops[arg_type]
stack_arg = stack.pop()
template_vals["inits"].append(red_strings["inits" ].format(red_arg))
template_vals[ops ].append(red_strings["ops" ].format(red_arg, stack_arg))
template_vals[red ].append(red_strings["reduction"].format(red_arg))
stack.append(red_arg)
# remember this register in case a duplicate needs it.
                red_registers[stage] = red_arg
else:
raise ValueError("Bad op type.")
template += _fin_template
# convert lists to strings
template_vals["common"] = "\n".join(template_vals["common"])
template_vals["arguments"] = ",\n ".join(template_vals["arguments"])
template_vals["inits"] = "\n ".join(template_vals["inits"])
template_vals["finish"] = "\n".join(template_vals["finish"])
for key in ("common","arguments","inits","finish"):
placeholders.remove(key)
# add the dynamic placeholders: loads#, ops#, reduction#
for key in placeholders:
template_vals[key] = "\n ".join(template_vals[key])
module = _get_module(template, template_vals)
kernel = module.get_function(template_vals["name"])
kernel.prepare(sig)
return kernel
# TODO: build a program wide DAG and only call this once at startup per assignment.
# TODO: allow multiple shape compatible assignments.
def call_compound_kernel(rand_state, *args):
"""
Pass in a list of GPUTensor objects, constants and operators in postfix notation.
C += 2.5 * A * B + 1
call_compound_kernel(C, 2.5, A, "mul", B, "mul", 1, "add", C, "add", "assign")
"""
out = None
arg_cnt = 0
op_cnt = 0
array_ids = {}
kernel_args = [ rand_state, ]
type_args = []
shape_stack = []
# Apply reduction constraints and determine thread axis
# Blocks will be allocated counter to this axis
reduction = False
axis = 1
for arg in args:
if type(arg) is dict:
if arg["op"] in _reduction_ops:
# To reduce a whole tensor (axis=None) reduce along each axis in succession.
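# e.g. a full-tensor sum would be expressed as two stacked reductions,
# a sum over axis 1 followed by a sum over axis 0, each its own "sum" op.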
if arg.get("axis",None) not in (0,1):
raise ValueError("Only reduction along an axis currently supported")
# Keep axis values consistent within the same kernel
if reduction is True:
if arg["axis"] != axis:
raise ValueError("Reduction only allowed along one | |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe, erpnext
import frappe.model
from erpnext.accounts.doctype.payment_entry.payment_entry import get_payment_entry
from frappe.utils import cint, flt, today, nowdate, add_days, getdate
import frappe.defaults
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import make_purchase_receipt, get_taxes
from erpnext.controllers.accounts_controller import get_payment_terms
from erpnext.exceptions import InvalidCurrency
from erpnext.stock.doctype.stock_entry.test_stock_entry import get_qty_after_transaction
from erpnext.projects.doctype.project.test_project import make_project
from erpnext.accounts.doctype.account.test_account import get_inventory_account, create_account
from erpnext.stock.doctype.item.test_item import create_item
test_dependencies = ["Item", "Cost Center", "Payment Term", "Payment Terms Template"]
test_ignore = ["Serial No"]
class TestPurchaseInvoice(unittest.TestCase):
@classmethod
def setUpClass(self):
unlink_payment_on_cancel_of_invoice()
frappe.db.set_value("Buying Settings", None, "allow_multiple_items", 1)
@classmethod
def tearDownClass(self):
unlink_payment_on_cancel_of_invoice(0)
def test_gl_entries_without_perpetual_inventory(self):
frappe.db.set_value("Company", "_Test Company", "round_off_account", "Round Off - _TC")
pi = frappe.copy_doc(test_records[0])
self.assertTrue(not cint(erpnext.is_perpetual_inventory_enabled(pi.company)))
pi.insert()
pi.submit()
expected_gl_entries = {
"_Test Payable - _TC": [0, 1512.0],
"_Test Account Cost for Goods Sold - _TC": [1250, 0],
"_Test Account Shipping Charges - _TC": [100, 0],
"_Test Account Excise Duty - _TC": [140, 0],
"_Test Account Education Cess - _TC": [2.8, 0],
"_Test Account S&H Education Cess - _TC": [1.4, 0],
"_Test Account CST - _TC": [29.88, 0],
"_Test Account VAT - _TC": [156.25, 0],
"_Test Account Discount - _TC": [0, 168.03],
"Round Off - _TC": [0, 0.3]
}
gl_entries = frappe.db.sql("""select account, debit, credit from `tabGL Entry`
where voucher_type = 'Purchase Invoice' and voucher_no = %s""", pi.name, as_dict=1)
for d in gl_entries:
self.assertEqual([d.debit, d.credit], expected_gl_entries.get(d.account))
def test_gl_entries_with_perpetual_inventory(self):
pi = make_purchase_invoice(company="_Test Company with perpetual inventory",
warehouse= "Stores - TCP1", cost_center = "Main - TCP1",
expense_account ="_Test Account Cost for Goods Sold - TCP1",
get_taxes_and_charges=True, qty=10)
self.assertEqual(cint(erpnext.is_perpetual_inventory_enabled(pi.company)), 1)
self.check_gle_for_pi(pi.name)
def test_terms_added_after_save(self):
pi = frappe.copy_doc(test_records[1])
pi.insert()
self.assertTrue(pi.payment_schedule)
self.assertEqual(pi.payment_schedule[0].due_date, pi.due_date)
def test_payment_entry_unlink_against_purchase_invoice(self):
from erpnext.accounts.doctype.payment_entry.test_payment_entry import get_payment_entry
unlink_payment_on_cancel_of_invoice(0)
pi_doc = make_purchase_invoice()
pe = get_payment_entry("Purchase Invoice", pi_doc.name, bank_account="_Test Bank - _TC")
pe.reference_no = "1"
pe.reference_date = nowdate()
pe.paid_from_account_currency = pi_doc.currency
pe.paid_to_account_currency = pi_doc.currency
pe.source_exchange_rate = 1
pe.target_exchange_rate = 1
pe.paid_amount = pi_doc.grand_total
pe.save(ignore_permissions=True)
pe.submit()
pi_doc = frappe.get_doc('Purchase Invoice', pi_doc.name)
pi_doc.load_from_db()
self.assertEqual(pi_doc.status, "Paid")
self.assertRaises(frappe.LinkExistsError, pi_doc.cancel)
unlink_payment_on_cancel_of_invoice()
def test_purchase_invoice_for_blocked_supplier(self):
supplier = frappe.get_doc('Supplier', '_Test Supplier')
supplier.on_hold = 1
supplier.save()
self.assertRaises(frappe.ValidationError, make_purchase_invoice)
supplier.on_hold = 0
supplier.save()
def test_purchase_invoice_for_blocked_supplier_invoice(self):
supplier = frappe.get_doc('Supplier', '_Test Supplier')
supplier.on_hold = 1
supplier.hold_type = 'Invoices'
supplier.save()
self.assertRaises(frappe.ValidationError, make_purchase_invoice)
supplier.on_hold = 0
supplier.save()
def test_purchase_invoice_for_blocked_supplier_payment(self):
supplier = frappe.get_doc('Supplier', '_Test Supplier')
supplier.on_hold = 1
supplier.hold_type = 'Payments'
supplier.save()
pi = make_purchase_invoice()
self.assertRaises(
frappe.ValidationError, get_payment_entry, dt='Purchase Invoice', dn=pi.name, bank_account="_Test Bank - _TC")
supplier.on_hold = 0
supplier.save()
def test_purchase_invoice_for_blocked_supplier_payment_today_date(self):
supplier = frappe.get_doc('Supplier', '_Test Supplier')
supplier.on_hold = 1
supplier.hold_type = 'Payments'
supplier.release_date = nowdate()
supplier.save()
pi = make_purchase_invoice()
self.assertRaises(
frappe.ValidationError, get_payment_entry, dt='Purchase Invoice', dn=pi.name,
bank_account="_Test Bank - _TC")
supplier.on_hold = 0
supplier.save()
def test_purchase_invoice_for_blocked_supplier_payment_past_date(self):
# this test is meant to fail only if something fails in the try block
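# The try/except/else below inverts the outcome: a clean run of the try
# block raises in the else clause (satisfying assertRaises), while any
# failure inside the try block is swallowed and the outer assertion fails.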
with self.assertRaises(Exception):
try:
supplier = frappe.get_doc('Supplier', '_Test Supplier')
supplier.on_hold = 1
supplier.hold_type = 'Payments'
supplier.release_date = '2018-03-01'
supplier.save()
pi = make_purchase_invoice()
get_payment_entry('Purchase Invoice', dn=pi.name, bank_account="_Test Bank - _TC")
supplier.on_hold = 0
supplier.save()
except:
pass
else:
raise Exception
def test_purchase_invoice_blocked_invoice_must_be_in_future(self):
pi = make_purchase_invoice(do_not_save=True)
pi.release_date = nowdate()
self.assertRaises(frappe.ValidationError, pi.save)
pi.release_date = ''
pi.save()
def test_purchase_invoice_temporary_blocked(self):
pi = make_purchase_invoice(do_not_save=True)
pi.release_date = add_days(nowdate(), 10)
pi.save()
pi.submit()
pe = get_payment_entry('Purchase Invoice', dn=pi.name, bank_account="_Test Bank - _TC")
self.assertRaises(frappe.ValidationError, pe.save)
def test_purchase_invoice_explicit_block(self):
pi = make_purchase_invoice()
pi.block_invoice()
self.assertEqual(pi.on_hold, 1)
pi.unblock_invoice()
self.assertEqual(pi.on_hold, 0)
def test_gl_entries_with_perpetual_inventory_against_pr(self):
pr = make_purchase_receipt(company="_Test Company with perpetual inventory", supplier_warehouse="Work In Progress - TCP1", warehouse="Stores - TCP1", cost_center="Main - TCP1", get_taxes_and_charges=True)
pi = make_purchase_invoice(company="_Test Company with perpetual inventory", supplier_warehouse="Work In Progress - TCP1", warehouse="Stores - TCP1", cost_center="Main - TCP1", expense_account="_Test Account Cost for Goods Sold - TCP1", get_taxes_and_charges=True, qty=10, do_not_save=True)
for d in pi.items:
d.purchase_receipt = pr.name
pi.insert()
pi.submit()
pi.load_from_db()
self.assertEqual(pi.status, "Unpaid")
self.check_gle_for_pi(pi.name)
def check_gle_for_pi(self, pi):
gl_entries = frappe.db.sql("""select account, sum(debit) as debit, sum(credit) as credit
from `tabGL Entry` where voucher_type='Purchase Invoice' and voucher_no=%s
group by account""", pi, as_dict=1)
self.assertTrue(gl_entries)
expected_values = dict((d[0], d) for d in [
["Creditors - TCP1", 0, 720],
["Stock Received But Not Billed - TCP1", 500.0, 0],
["_Test Account Shipping Charges - TCP1", 100.0, 0.0],
["_Test Account VAT - TCP1", 120.0, 0]
])
for i, gle in enumerate(gl_entries):
self.assertEqual(expected_values[gle.account][0], gle.account)
self.assertEqual(expected_values[gle.account][1], gle.debit)
self.assertEqual(expected_values[gle.account][2], gle.credit)
def test_purchase_invoice_change_naming_series(self):
pi = frappe.copy_doc(test_records[1])
pi.insert()
pi.naming_series = 'TEST-'
self.assertRaises(frappe.CannotChangeConstantError, pi.save)
pi = frappe.copy_doc(test_records[0])
pi.insert()
pi.load_from_db()
self.assertEqual(pi.status, "Draft")
pi.naming_series = 'TEST-'
self.assertRaises(frappe.CannotChangeConstantError, pi.save)
def test_gl_entries_for_non_stock_items_with_perpetual_inventory(self):
pi = make_purchase_invoice(item_code = "_Test Non Stock Item",
company = "_Test Company with perpetual inventory", warehouse= "Stores - TCP1",
cost_center = "Main - TCP1", expense_account ="_Test Account Cost for Goods Sold - TCP1")
self.assertEqual(pi.status, "Unpaid")
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type='Purchase Invoice' and voucher_no=%s
order by account asc""", pi.name, as_dict=1)
self.assertTrue(gl_entries)
expected_values = [
["_Test Account Cost for Goods Sold - TCP1", 250.0, 0],
["Creditors - TCP1", 0, 250]
]
for i, gle in enumerate(gl_entries):
self.assertEqual(expected_values[i][0], gle.account)
self.assertEqual(expected_values[i][1], gle.debit)
self.assertEqual(expected_values[i][2], gle.credit)
def test_purchase_invoice_calculation(self):
pi = frappe.copy_doc(test_records[0])
pi.insert()
pi.load_from_db()
expected_values = [
["_Test Item Home Desktop 100", 90, 59],
["_Test Item Home Desktop 200", 135, 177]
]
for i, item in enumerate(pi.get("items")):
self.assertEqual(item.item_code, expected_values[i][0])
self.assertEqual(item.item_tax_amount, expected_values[i][1])
self.assertEqual(item.valuation_rate, expected_values[i][2])
self.assertEqual(pi.base_net_total, 1250)
# tax amounts
expected_values = [
["_Test Account Shipping Charges - _TC", 100, 1350],
["_Test Account Customs Duty - _TC", 125, 1350],
["_Test Account Excise Duty - _TC", 140, 1490],
["_Test Account Education Cess - _TC", 2.8, 1492.8],
["_Test Account S&H Education Cess - _TC", 1.4, 1494.2],
["_Test Account CST - _TC", 29.88, 1524.08],
["_Test Account VAT - _TC", 156.25, 1680.33],
["_Test Account Discount - _TC", 168.03, 1512.30],
]
for i, tax in enumerate(pi.get("taxes")):
self.assertEqual(tax.account_head, expected_values[i][0])
self.assertEqual(tax.tax_amount, expected_values[i][1])
self.assertEqual(tax.total, expected_values[i][2])
def test_purchase_invoice_with_subcontracted_item(self):
wrapper = frappe.copy_doc(test_records[0])
wrapper.get("items")[0].item_code = "_Test FG Item"
wrapper.insert()
wrapper.load_from_db()
expected_values = [
["_Test FG Item", 90, 59],
["_Test Item Home Desktop 200", 135, 177]
]
for i, item in enumerate(wrapper.get("items")):
self.assertEqual(item.item_code, expected_values[i][0])
self.assertEqual(item.item_tax_amount, expected_values[i][1])
self.assertEqual(item.valuation_rate, expected_values[i][2])
self.assertEqual(wrapper.base_net_total, 1250)
# tax amounts
expected_values = [
["_Test Account Shipping Charges - _TC", 100, 1350],
["_Test Account Customs Duty - _TC", 125, 1350],
["_Test Account Excise Duty - _TC", 140, 1490],
["_Test Account Education Cess - _TC", 2.8, 1492.8],
["_Test Account S&H Education Cess - _TC", 1.4, 1494.2],
["_Test Account CST - _TC", 29.88, 1524.08],
["_Test Account VAT - _TC", 156.25, 1680.33],
["_Test Account Discount - _TC", 168.03, 1512.30],
]
for i, tax in enumerate(wrapper.get("taxes")):
self.assertEqual(tax.account_head, expected_values[i][0])
self.assertEqual(tax.tax_amount, expected_values[i][1])
self.assertEqual(tax.total, expected_values[i][2])
def test_purchase_invoice_with_advance(self):
from erpnext.accounts.doctype.journal_entry.test_journal_entry \
import test_records as jv_test_records
jv = frappe.copy_doc(jv_test_records[1])
jv.insert()
jv.submit()
pi = frappe.copy_doc(test_records[0])
pi.disable_rounded_total = 1
pi.allocate_advances_automatically = 0
pi.append("advances", {
"reference_type": "Journal Entry",
"reference_name": jv.name,
"reference_row": jv.get("accounts")[0].name,
"advance_amount": 400,
"allocated_amount": 300,
"remarks": jv.remark
})
pi.insert()
self.assertEqual(pi.outstanding_amount, 1212.30)
pi.disable_rounded_total = 0
pi.get("payment_schedule")[0].payment_amount = 1512.0
pi.save()
self.assertEqual(pi.outstanding_amount, 1212.0)
pi.submit()
pi.load_from_db()
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where reference_type='Purchase Invoice'
and reference_name=%s and debit_in_account_currency=300""", pi.name))
pi.cancel()
self.assertFalse(frappe.db.sql("""select name from `tabJournal Entry Account`
where reference_type='Purchase Invoice' and reference_name=%s""", pi.name))
def test_invoice_with_advance_and_multi_payment_terms(self):
from erpnext.accounts.doctype.journal_entry.test_journal_entry \
import test_records as jv_test_records
jv = frappe.copy_doc(jv_test_records[1])
jv.insert()
jv.submit()
pi = frappe.copy_doc(test_records[0])
pi.disable_rounded_total = 1
pi.allocate_advances_automatically = 0
pi.append("advances", {
"reference_type": "Journal Entry",
"reference_name": jv.name,
"reference_row": jv.get("accounts")[0].name,
"advance_amount": 400,
"allocated_amount": 300,
"remarks": jv.remark
})
pi.insert()
pi.update({
"payment_schedule": get_payment_terms("_Test Payment Term Template",
pi.posting_date, pi.grand_total)
})
pi.save()
pi.submit()
self.assertEqual(pi.payment_schedule[0].payment_amount, 606.15)
self.assertEqual(pi.payment_schedule[0].due_date, pi.posting_date)
self.assertEqual(pi.payment_schedule[1].payment_amount, 606.15)
self.assertEqual(pi.payment_schedule[1].due_date, add_days(pi.posting_date, 30))
pi.load_from_db()
self.assertTrue(
frappe.db.sql(
"select name from `tabJournal Entry Account` where reference_type='Purchase Invoice' and "
"reference_name=%s and debit_in_account_currency=300", pi.name)
)
self.assertEqual(pi.outstanding_amount, 1212.30)
pi.cancel()
self.assertFalse(
frappe.db.sql(
"select name from `tabJournal Entry Account` where reference_type='Purchase Invoice' and "
"reference_name=%s", pi.name)
)
def test_total_purchase_cost_for_project(self):
if not frappe.db.exists("Project", {"project_name": "_Test Project for Purchase"}):
project = make_project({'project_name':'_Test Project for Purchase'})
else:
project = frappe.get_doc("Project", {"project_name": "_Test Project for Purchase"})
existing_purchase_cost = frappe.db.sql("""select sum(base_net_amount)
from `tabPurchase Invoice Item`
where project = '{0}'
and docstatus=1""".format(project.name))
existing_purchase_cost = existing_purchase_cost and existing_purchase_cost[0][0] or 0
pi = make_purchase_invoice(currency="USD", conversion_rate=60, project=project.name)
self.assertEqual(frappe.db.get_value("Project", project.name, "total_purchase_cost"),
existing_purchase_cost + 15000)
pi1 = make_purchase_invoice(qty=10, project=project.name)
self.assertEqual(frappe.db.get_value("Project", project.name, "total_purchase_cost"),
existing_purchase_cost + 15500)
pi1.cancel()
self.assertEqual(frappe.db.get_value("Project", project.name, "total_purchase_cost"),
existing_purchase_cost + 15000)
pi.cancel()
self.assertEqual(frappe.db.get_value("Project", project.name, "total_purchase_cost"), existing_purchase_cost)
def test_return_purchase_invoice_with_perpetual_inventory(self):
pi = make_purchase_invoice(company = "_Test Company with perpetual inventory", warehouse= "Stores - TCP1",
cost_center = "Main - TCP1", expense_account ="_Test Account Cost for Goods Sold - TCP1")
return_pi = make_purchase_invoice(is_return=1, return_against=pi.name, qty=-2,
company = "_Test Company with perpetual inventory", warehouse= "Stores - TCP1",
cost_center = "Main - TCP1", expense_account ="_Test Account Cost for Goods Sold - TCP1")
# check gl entries for return
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type=%s and voucher_no=%s
order by account desc""", ("Purchase Invoice", return_pi.name), as_dict=1)
self.assertTrue(gl_entries)
expected_values = {
"Creditors - TCP1": [100.0, 0.0],
"Stock Received But Not Billed - TCP1": [0.0, 100.0],
}
for gle in gl_entries:
self.assertEqual(expected_values[gle.account][0], gle.debit)
self.assertEqual(expected_values[gle.account][1], gle.credit)
def test_multi_currency_gle(self):
pi = make_purchase_invoice(supplier="_Test Supplier USD", credit_to="_Test Payable USD - _TC",
currency="USD", conversion_rate=50)
gl_entries = frappe.db.sql("""select account, account_currency, debit, credit,
debit_in_account_currency, credit_in_account_currency
from `tabGL Entry` where voucher_type='Purchase Invoice' and voucher_no=%s
order by account asc""", pi.name, as_dict=1)
self.assertTrue(gl_entries)
expected_values = {
"_Test Payable USD - _TC": {
"account_currency": "USD",
"debit": 0,
"debit_in_account_currency": 0,
"credit": 12500,
"credit_in_account_currency": 250
},
"_Test Account Cost for Goods Sold - _TC": {
"account_currency": "INR",
"debit": 12500,
"debit_in_account_currency": 12500,
"credit": 0,
"credit_in_account_currency": 0
}
}
for field in ("account_currency", "debit", "debit_in_account_currency", "credit", "credit_in_account_currency"):
for i, gle in enumerate(gl_entries):
self.assertEqual(expected_values[gle.account][field], gle[field])
# Check for valid currency
pi1 = make_purchase_invoice(supplier="_Test Supplier USD", credit_to="_Test Payable USD - _TC",
do_not_save=True)
self.assertRaises(InvalidCurrency, pi1.save)
# cancel
pi.cancel()
gle = frappe.db.sql("""select name from `tabGL Entry`
where voucher_type='Sales Invoice' and voucher_no=%s""", pi.name)
self.assertFalse(gle)
def test_purchase_invoice_update_stock_gl_entry_with_perpetual_inventory(self):
pi = make_purchase_invoice(update_stock=1, posting_date=frappe.utils.nowdate(),
posting_time=frappe.utils.nowtime(), cash_bank_account="Cash - TCP1", company="_Test Company with perpetual inventory", supplier_warehouse="Work In Progress - TCP1", warehouse= "Stores - TCP1", cost_center = "Main - TCP1", expense_account ="_Test Account Cost for Goods Sold - TCP1")
gl_entries = frappe.db.sql("""select account, account_currency, debit, credit,
debit_in_account_currency, credit_in_account_currency
from `tabGL Entry` where voucher_type='Purchase Invoice' and voucher_no=%s
order by account asc""", pi.name, as_dict=1)
self.assertTrue(gl_entries)
stock_in_hand_account = get_inventory_account(pi.company, pi.get("items")[0].warehouse)
expected_gl_entries = dict((d[0], d) for d in [
[pi.credit_to, 0.0, 250.0],
[stock_in_hand_account, 250.0, 0.0]
])
for i, gle in enumerate(gl_entries):
self.assertEqual(expected_gl_entries[gle.account][0], gle.account)
self.assertEqual(expected_gl_entries[gle.account][1], gle.debit)
self.assertEqual(expected_gl_entries[gle.account][2], gle.credit)
def test_purchase_invoice_for_is_paid_and_update_stock_gl_entry_with_perpetual_inventory(self):
pi = make_purchase_invoice(update_stock=1, posting_date=frappe.utils.nowdate(),
posting_time=frappe.utils.nowtime(), cash_bank_account="Cash - TCP1", is_paid=1, company="_Test Company with perpetual inventory", supplier_warehouse="Work In Progress - TCP1", warehouse= "Stores - TCP1", cost_center = "Main - TCP1", expense_account ="_Test Account Cost for Goods Sold - TCP1")
gl_entries = frappe.db.sql("""select account, account_currency, sum(debit) as debit,
sum(credit) as credit, debit_in_account_currency, credit_in_account_currency
from `tabGL Entry` where voucher_type='Purchase Invoice' and voucher_no=%s
group by account, voucher_no order by account asc;""", pi.name, as_dict=1)
stock_in_hand_account = get_inventory_account(pi.company, pi.get("items")[0].warehouse)
self.assertTrue(gl_entries)
expected_gl_entries = dict((d[0], d) for d in [
[pi.credit_to, 250.0, 250.0],
[stock_in_hand_account, 250.0, 0.0],
["Cash - TCP1", 0.0, 250.0]
])
for i, gle in enumerate(gl_entries):
self.assertEqual(expected_gl_entries[gle.account][0], gle.account)
self.assertEqual(expected_gl_entries[gle.account][1], gle.debit)
self.assertEqual(expected_gl_entries[gle.account][2], gle.credit)
def test_auto_batch(self):
item_code = frappe.db.get_value('Item',
{'has_batch_no': 1,
<filename>python/cm/tests_upgrade.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.test import TestCase
import cm.api
import cm.job
import cm.upgrade
from cm.models import Cluster, Host, ClusterObject, ServiceComponent, HostComponent
from cm.models import Bundle, Upgrade, Prototype, PrototypeConfig, ConfigLog
from cm.errors import AdcmEx
from cm import adcm_config
class TestUpgradeVersion(TestCase):
def cook_cluster(self):
b = Bundle(name="ADH", version="1.0")
proto = Prototype(type="cluster", name="ADH", bundle=b)
return Cluster(prototype=proto, issue={})
def cook_upgrade(self):
return Upgrade(
min_version="1.0",
max_version="2.0",
min_strict=False,
max_strict=False,
state_available='any',
)
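# With min_strict/max_strict left False these bounds are inclusive, so
# prototype versions 1.0 and 2.0 both qualify for the upgrade; the strict
# variants are exercised in test_strict_version below.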
def check_upgrade(self, obj, upgrade, result):
ok, msg = cm.upgrade.check_upgrade(obj, upgrade)
if not ok:
print("check_upgrade msg: ", msg)
self.assertEqual(ok, result)
def test_version(self):
obj = self.cook_cluster()
upgrade = self.cook_upgrade()
obj.prototype.version = "1.5"
self.check_upgrade(obj, upgrade, True)
obj.prototype.version = "2.5"
self.check_upgrade(obj, upgrade, False)
obj.prototype.version = "2.0"
self.check_upgrade(obj, upgrade, True)
obj.prototype.version = "1.0"
self.check_upgrade(obj, upgrade, True)
def test_strict_version(self):
obj = self.cook_cluster()
upgrade = self.cook_upgrade()
upgrade.min_strict = True
upgrade.max_strict = True
obj.prototype.version = "1.5"
self.check_upgrade(obj, upgrade, True)
obj.prototype.version = "2.5"
self.check_upgrade(obj, upgrade, False)
obj.prototype.version = "1.0"
self.check_upgrade(obj, upgrade, False)
obj.prototype.version = "2.0"
self.check_upgrade(obj, upgrade, False)
def test_state(self):
obj = self.cook_cluster()
upgrade = self.cook_upgrade()
upgrade.state_available = ["installed", "any"]
obj.prototype.version = "1.5"
obj.state = "created"
self.check_upgrade(obj, upgrade, False)
obj.state = "installed"
self.check_upgrade(obj, upgrade, True)
def test_issue(self):
obj = self.cook_cluster()
obj.issue = {"config": False}
upgrade = self.cook_upgrade()
self.check_upgrade(obj, upgrade, False)
class SetUp:
def cook_cluster_bundle(self, ver):
b = Bundle.objects.create(name='ADH', version=ver)
b.save()
Prototype.objects.create(type="cluster", name="ADH", version=ver, bundle=b)
sp2 = Prototype.objects.create(type="service", name="hive", bundle=b)
Prototype.objects.create(parent=sp2, type='component', name='server', bundle=b)
sp1 = Prototype.objects.create(type="service", name="hadoop", version=ver, bundle=b)
Prototype.objects.create(parent=sp1, type='component', name='server', bundle=b)
Prototype.objects.create(parent=sp1, type='component', name='node', bundle=b)
return b
def cook_provider_bundle(self, ver):
b = Bundle.objects.create(name='DF', version=ver)
b.save()
Prototype.objects.create(type="provider", name="DF", version=ver, bundle=b)
Prototype.objects.create(type="host", name="DfHost", version=ver, bundle=b)
return b
def cook_provider(self, bundle, name):
pp = Prototype.objects.get(type="provider", bundle=bundle)
provider = cm.api.add_host_provider(pp, name)
host_proto = Prototype.objects.get(bundle=provider.prototype.bundle, type='host')
cm.api.add_host(host_proto, provider, 'server02.inter.net')
cm.api.add_host(host_proto, provider, 'server01.inter.net')
return provider
def cook_cluster(self, bundle, name):
cp = Prototype.objects.get(type="cluster", bundle=bundle)
cluster = cm.api.add_cluster(cp, name)
sp2 = Prototype.objects.get(type="service", name="hive", bundle=bundle)
cm.api.add_service_to_cluster(cluster, sp2)
sp1 = Prototype.objects.get(type="service", name="hadoop", bundle=bundle)
cm.api.add_service_to_cluster(cluster, sp1)
return cluster
def cook_upgrade(self, bundle):
return Upgrade.objects.create(
bundle=bundle, min_version="1.0", max_version="2.0", state_available=['created']
)
def get_config(obj):
attr = {}
cl = ConfigLog.objects.get(obj_ref=obj.config, id=obj.config.current)
if cl.attr:
attr = cl.attr
return cl.config, attr
class TestConfigUpgrade(TestCase):
add_conf = PrototypeConfig.objects.create
def cook_proto(self):
b = Bundle.objects.create(name='AD1', version='1.0')
proto1 = Prototype.objects.create(type="cluster", name="AD1", version="1.0", bundle=b)
proto2 = Prototype.objects.create(type="cluster", name="AD1", version="2.0", bundle=b)
return (proto1, proto2)
def test_empty_config(self):
(proto1, proto2) = self.cook_proto()
cluster = cm.api.add_cluster(proto1, 'Cluster1')
self.assertEqual(cluster.config, None)
cm.adcm_config.switch_config(cluster, proto2, proto1)
self.assertEqual(cluster.config, None)
def test_empty_first_config(self):
(proto1, proto2) = self.cook_proto()
self.add_conf(prototype=proto2, name='port', type='integer', default=42)
cluster = cm.api.add_cluster(proto1, 'Cluster1')
self.assertEqual(cluster.config, None)
cm.adcm_config.switch_config(cluster, proto2, proto1)
new_config, _ = get_config(cluster)
self.assertEqual(new_config['port'], 42)
def test_adding_parameter(self):
(proto1, proto2) = self.cook_proto()
self.add_conf(prototype=proto1, name='host', type='string', default='arenadata.com')
self.add_conf(prototype=proto2, name='host', type='string', default='arenadata.com')
self.add_conf(prototype=proto2, name='port', type='integer', default=42)
cluster = cm.api.add_cluster(proto1, 'Cluster1')
old_conf, _ = get_config(cluster)
self.assertEqual(old_conf, {'host': 'arenadata.com'})
cm.adcm_config.switch_config(cluster, proto2, proto1)
new_config, _ = get_config(cluster)
self.assertEqual(new_config, {'host': 'arenadata.com', 'port': 42})
def test_deleting_parameter(self):
(proto1, proto2) = self.cook_proto()
self.add_conf(prototype=proto1, name='host', type='string', default='arenadata.com')
self.add_conf(prototype=proto2, name='port', type='integer', default=42)
cluster = cm.api.add_cluster(proto1, 'Cluster1')
old_conf, _ = get_config(cluster)
self.assertEqual(old_conf, {'host': 'arenadata.com'})
cm.adcm_config.switch_config(cluster, proto2, proto1)
new_config, _ = get_config(cluster)
self.assertEqual(new_config, {'port': 42})
def test_default(self):
(proto1, proto2) = self.cook_proto()
self.add_conf(prototype=proto1, name='port', type='integer', default=42)
self.add_conf(prototype=proto2, name='port', type='integer', default=43)
cluster = cm.api.add_cluster(proto1, 'Cluster1')
old_conf, _ = get_config(cluster)
self.assertEqual(old_conf, {'port': 42})
cm.adcm_config.switch_config(cluster, proto2, proto1)
new_config, _ = get_config(cluster)
self.assertEqual(new_config, {'port': 43})
def test_non_default(self):
(proto1, proto2) = self.cook_proto()
self.add_conf(prototype=proto1, name='port', type='integer', default=42)
self.add_conf(prototype=proto2, name='port', type='integer', default=43)
cluster = cm.api.add_cluster(proto1, 'Cluster1')
old_conf, _ = get_config(cluster)
old_conf['port'] = 100500
cm.adcm_config.save_obj_config(cluster.config, old_conf, {})
cm.adcm_config.switch_config(cluster, proto2, proto1)
new_config, _ = get_config(cluster)
self.assertEqual(new_config, {'port': 100500})
def test_add_group(self):
(proto1, proto2) = self.cook_proto()
self.add_conf(prototype=proto1, name='host', type='string', default='arenadata.com')
self.add_conf(prototype=proto2, name='host', type='string', default='arenadata.com')
self.add_conf(prototype=proto2, name='advance', type='group')
self.add_conf(prototype=proto2, name='advance', subname='port', type='integer', default=42)
cluster = cm.api.add_cluster(proto1, 'Cluster1')
old_conf, _ = get_config(cluster)
self.assertEqual(old_conf, {'host': 'arenadata.com'})
cm.adcm_config.switch_config(cluster, proto2, proto1)
new_config, _ = get_config(cluster)
self.assertEqual(new_config, {'host': 'arenadata.com', 'advance': {'port': 42}})
def test_add_non_active_group(self):
(proto1, proto2) = self.cook_proto()
# Old config with one key "host"
self.add_conf(prototype=proto1, name='host', type='string', default='arenadata.com')
# New config with key "host" and activatable group "advance"
self.add_conf(prototype=proto2, name='host', type='string', default='arenadata.com')
limits = {"activatable": True, "active": False}
self.add_conf(prototype=proto2, name='advance', type='group', limits=limits)
self.add_conf(prototype=proto2, name='advance', subname='port', type='integer', default=42)
# Create cluster with old config
cluster = cm.api.add_cluster(proto1, 'Cluster1')
old_conf, _ = get_config(cluster)
self.assertEqual(old_conf, {'host': 'arenadata.com'})
# Upgrade
cm.adcm_config.switch_config(cluster, proto2, proto1)
new_config, new_attr = get_config(cluster)
# Check that new activatable but inactive group default values are added to new config
self.assertEqual(new_config, {'host': 'arenadata.com', 'advance': {'port': 42}})
self.assertEqual(new_attr, {'advance': {'active': False}})
def test_add_active_group(self):
(proto1, proto2) = self.cook_proto()
self.add_conf(prototype=proto1, name='host', type='string', default='arenadata.com')
self.add_conf(prototype=proto2, name='host', type='string', default='arenadata.com')
limits = {"activatable": True, "active": True}
self.add_conf(prototype=proto2, name='advance', type='group', limits=limits)
self.add_conf(prototype=proto2, name='advance', subname='port', type='integer', default=42)
cluster = cm.api.add_cluster(proto1, 'Cluster1')
old_conf, _ = get_config(cluster)
self.assertEqual(old_conf, {'host': 'arenadata.com'})
cm.adcm_config.switch_config(cluster, proto2, proto1)
new_config, new_attr = get_config(cluster)
self.assertEqual(new_config, {'host': 'arenadata.com', 'advance': {'port': 42}})
self.assertEqual(new_attr, {'advance': {'active': True}})
def test_from_active_group_to_not_active_group(self):
"""Scenario:
* Create prototype1 with activatable group, active=False
* Create prototype2 with activatable group, active=False
* Create cluster from prototype1
* Update cluster config, activate group, set value
* Update cluster config from prototype2
* Expect that the cluster configuration has not changed
"""
proto1, proto2 = self.cook_proto()
self.add_conf(
prototype=proto1,
name='advance',
type='group',
limits={"activatable": True, "active": False},
)
self.add_conf(prototype=proto1, name='advance', subname='port', type='integer', default=11)
self.add_conf(
prototype=proto2,
name='advance',
type='group',
limits={"activatable": True, "active": False},
)
self.add_conf(prototype=proto2, name='advance', subname='port', type='integer', default=22)
cluster = cm.api.add_cluster(proto1, 'Cluster1')
cm.api.update_obj_config(
cluster.config, {'advance': {'port': 33}}, {'advance': {'active': True}}
)
old_conf, old_attr = get_config(cluster)
self.assertEqual(old_conf, {'advance': {'port': 33}})
self.assertEqual(old_attr, {'advance': {'active': True}})
adcm_config.switch_config(cluster, proto2, proto1)
new_conf, new_attr = get_config(cluster)
self.assertEqual(new_conf, {'advance': {'port': 33}})
self.assertEqual(new_attr, {'advance': {'active': True}})
def test_non_active_group(self):
proto1, proto2 = self.cook_proto()
# Old config with activatable group "advance"
self.add_conf(
prototype=proto1,
name='advance',
type='group',
limits={"activatable": True, "active": False},
)
self.add_conf(prototype=proto1, name='advance', subname='port', type='integer', default=11)
# New config with the same activatable group "advance"
self.add_conf(
prototype=proto2,
name='advance',
type='group',
limits={"activatable": True, "active": False},
)
self.add_conf(prototype=proto2, name='advance', subname='port', type='integer', default=11)
cluster = cm.api.add_cluster(proto1, 'Cluster1')
old_conf, old_attr = get_config(cluster)
self.assertEqual(old_conf, {'advance': {'port': 11}})
self.assertEqual(old_attr, {'advance': {'active': False}})
# Upgrade
adcm_config.switch_config(cluster, proto2, proto1)
new_conf, new_attr = get_config(cluster)
# Check that activatable but not active group does not disappear from new config
self.assertEqual(new_conf, {'advance': {'port': 11}})
self.assertEqual(new_attr, {'advance': {'active': False}})
class TestUpgrade(TestCase):
def test_cluster_upgrade(self):
setup = SetUp()
b1 = setup.cook_cluster_bundle('1.0')
b2 = setup.cook_cluster_bundle('2.0')
setup.cook_cluster(b1, 'Test0')
cluster = setup.cook_cluster(b1, 'Test1')
upgrade = setup.cook_upgrade(b2)
co1 = ClusterObject.objects.get(cluster=cluster, prototype__name='hadoop')
try:
r = cm.upgrade.do_upgrade(co1, upgrade)
self.assertEqual(r, 'ok')
except AdcmEx as e:
self.assertEqual(e.code, 'UPGRADE_ERROR')
self.assertEqual(e.msg, 'can upgrade only cluster or host provider')
old_proto = Prototype.objects.get(type="service", name="hadoop", bundle=b1)
new_proto = Prototype.objects.get(type="service", name="hadoop", bundle=b2)
self.assertEqual(co1.prototype.id, old_proto.id)
cm.upgrade.do_upgrade(cluster, upgrade)
co2 = ClusterObject.objects.get(cluster=cluster, prototype__name='hadoop')
self.assertEqual(co1.id, co2.id)
self.assertEqual(co2.prototype.id, new_proto.id)
def test_hc(self): # pylint: disable=too-many-locals
setup = SetUp()
b1 = setup.cook_cluster_bundle('1.0')
b2 = setup.cook_cluster_bundle('2.0')
b3 = setup.cook_provider_bundle('1.0')
cluster = setup.cook_cluster(b1, 'Test1')
provider = setup.cook_provider(b3, "DF01")
co = ClusterObject.objects.get(cluster=cluster, prototype__name='hadoop')
sc1 = ServiceComponent.objects.get(cluster=cluster, service=co, prototype__name='server')
sc2 = ServiceComponent.objects.get(cluster=cluster, service=co, prototype__name='node')
h1 = Host.objects.get(provider=provider, fqdn='server01.inter.net')
h2 = Host.objects.get(provider=provider, fqdn='server02.inter.net')
cm.api.add_host_to_cluster(cluster, h1)
cm.api.add_host_to_cluster(cluster, h2)
hc = [
{'service_id': co.id, 'host_id': h1.id, 'component_id': sc1.id},
{'service_id': co.id, 'host_id': h2.id, 'component_id': sc2.id},
]
cm.api.add_hc(cluster, hc)
hc1 = HostComponent.objects.get(cluster=cluster, service=co, component=sc2)
self.assertEqual(hc1.component.id, sc2.id)
new_co_proto = Prototype.objects.get(type="service", name="hadoop", bundle=b2)
new_comp_node = Prototype.objects.get(name='node', type='component', parent=new_co_proto)
new_comp_node.delete()
upgrade = setup.cook_upgrade(b2)
cm.upgrade.do_upgrade(cluster, upgrade)
hc2 = HostComponent.objects.get(cluster=cluster, service=co, component=sc1)
self.assertEqual(hc2.component.id, sc1.id)
r = HostComponent.objects.filter(cluster=cluster, service=co, component=sc2)
self.assertEqual(len(r), 0)
def test_component(self): # pylint: disable=too-many-locals
setup = SetUp()
b1 = setup.cook_cluster_bundle('1.0')
b2 = setup.cook_cluster_bundle('2.0')
sp = Prototype.objects.get(bundle=b2, type="service", name="hadoop")
Prototype.objects.create(parent=sp, type='component', name='data', bundle=b2)
setup.cook_cluster(b1, 'Test0')
cluster = setup.cook_cluster(b1, 'Test1')
co = ClusterObject.objects.get(cluster=cluster, prototype__name='hadoop')
sc11 = ServiceComponent.objects.get(cluster=cluster, service=co, prototype__name='server')
self.assertEqual(sc11.prototype.parent, co.prototype)
sc12 = ServiceComponent.objects.get(cluster=cluster, service=co, prototype__name='node')
self.assertEqual(sc12.prototype.parent, co.prototype)
new_co_proto = Prototype.objects.get(type="service", name="hadoop", bundle=b2)
cm.upgrade.switch_components(cluster, co, new_co_proto)
new_comp1 = Prototype.objects.get(name='server', type='component', parent=new_co_proto)
sc21 = ServiceComponent.objects.get(cluster=cluster, service=co, prototype__name='server')
self.assertEqual(sc11.id, sc21.id)
self.assertEqual(sc21.prototype, new_comp1)
new_comp2 = Prototype.objects.get(name='node', type='component', parent=new_co_proto)
sc22 = ServiceComponent.objects.get(cluster=cluster, service=co, prototype__name='node')
self.assertEqual(sc12.id, sc22.id)
<reponame>IBM/networking-services-python-sdk
# coding: utf-8
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This document describes CIS WAF Rule Groups API.
"""
from enum import Enum
from typing import Dict, List
import json
from ibm_cloud_sdk_core import BaseService, DetailedResponse
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from .common import get_sdk_headers
##############################################################################
# Service
##############################################################################
class WafRuleGroupsApiV1(BaseService):
"""The WAF Rule Groups API V1 service."""
DEFAULT_SERVICE_URL = 'https://api.cis.cloud.ibm.com'
DEFAULT_SERVICE_NAME = 'waf_rule_groups_api'
@classmethod
def new_instance(cls,
crn: str,
zone_id: str,
service_name: str = DEFAULT_SERVICE_NAME,
) -> 'WafRuleGroupsApiV1':
"""
Return a new client for the WAF Rule Groups API service using the specified
parameters and external configuration.
:param str crn: cloud resource name.
:param str zone_id: Zone ID.
"""
if crn is None:
raise ValueError('crn must be provided')
if zone_id is None:
raise ValueError('zone_id must be provided')
authenticator = get_authenticator_from_environment(service_name)
service = cls(
crn,
zone_id,
authenticator
)
service.configure_service(service_name)
return service
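# Example (illustrative sketch; the CRN, zone id and package id below are
# placeholders, and credentials are assumed to come from the environment):
#   client = WafRuleGroupsApiV1.new_instance(crn="crn:v1:...", zone_id="abc123")
#   groups = client.list_waf_rule_groups(pkg_id="pkg_001", page=1, per_page=50)
#   print(groups.get_result())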
def __init__(self,
crn: str,
zone_id: str,
authenticator: Authenticator = None,
) -> None:
"""
Construct a new client for the WAF Rule Groups API service.
:param str crn: cloud resource name.
:param str zone_id: Zone ID.
:param Authenticator authenticator: The authenticator specifies the authentication mechanism.
Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md
about initializing the authenticator of your choice.
"""
if crn is None:
raise ValueError('crn must be provided')
if zone_id is None:
raise ValueError('zone_id must be provided')
BaseService.__init__(self,
service_url=self.DEFAULT_SERVICE_URL,
authenticator=authenticator)
self.crn = crn
self.zone_id = zone_id
#########################
# WAF Rule Groups
#########################
def list_waf_rule_groups(self,
pkg_id: str,
*,
name: str = None,
mode: str = None,
rules_count: str = None,
page: int = None,
per_page: int = None,
order: str = None,
direction: str = None,
match: str = None,
**kwargs
) -> DetailedResponse:
"""
List all WAF rule groups.
List all WAF rule groups contained within a package.
:param str pkg_id: Package ID.
:param str name: (optional) Name of the firewall package.
:param str mode: (optional) Whether or not the rules contained within this
group are configurable/usable.
:param str rules_count: (optional) How many rules are contained within this
group.
:param int page: (optional) Page number of paginated results.
:param int per_page: (optional) Number of packages per page.
:param str order: (optional) Field to order packages by.
:param str direction: (optional) Direction to order packages.
:param str match: (optional) Whether to match all search requirements or at
least one (any).
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `WafGroupsResponse` object
"""
if pkg_id is None:
raise ValueError('pkg_id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_waf_rule_groups')
headers.update(sdk_headers)
params = {
'name': name,
'mode': mode,
'rules_count': rules_count,
'page': page,
'per_page': per_page,
'order': order,
'direction': direction,
'match': match
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v1/{0}/zones/{1}/firewall/waf/packages/{2}/groups'.format(
*self.encode_path_vars(self.crn, self.zone_id, pkg_id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def get_waf_rule_group(self,
pkg_id: str,
group_id: str,
**kwargs
) -> DetailedResponse:
"""
Get WAF rule group.
Get a single WAF rule group.
:param str pkg_id: Package ID.
:param str group_id: Group ID.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `WafGroupResponse` object
"""
if pkg_id is None:
raise ValueError('pkg_id must be provided')
if group_id is None:
raise ValueError('group_id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_waf_rule_group')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v1/{0}/zones/{1}/firewall/waf/packages/{2}/groups/{3}'.format(
*self.encode_path_vars(self.crn, self.zone_id, pkg_id, group_id))
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
def update_waf_rule_group(self,
pkg_id: str,
group_id: str,
*,
mode: str = None,
**kwargs
) -> DetailedResponse:
"""
Update WAF rule group.
Update the state of a WAF rule group.
:param str pkg_id: Package ID.
:param str group_id: Group ID.
:param str mode: (optional) Whether or not the rules contained within this
group are configurable/usable.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `WafGroupResponse` object
"""
if pkg_id is None:
raise ValueError('pkg_id must be provided')
if group_id is None:
raise ValueError('group_id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='update_waf_rule_group')
headers.update(sdk_headers)
data = {
'mode': mode
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v1/{0}/zones/{1}/firewall/waf/packages/{2}/groups/{3}'.format(
*self.encode_path_vars(self.crn, self.zone_id, pkg_id, group_id))
request = self.prepare_request(method='PATCH',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
class ListWafRuleGroupsEnums:
"""
Enums for list_waf_rule_groups parameters.
"""
class Mode(str, Enum):
"""
Whether or not the rules contained within this group are configurable/usable.
"""
ON = 'on'
OFF = 'off'
class Direction(str, Enum):
"""
Direction to order packages.
"""
DESC = 'desc'
ASC = 'asc'
class Match(str, Enum):
"""
Whether to match all search requirements or at least one (any).
"""
ALL = 'all'
ANY = 'any'
##############################################################################
# Models
##############################################################################
class WafGroupResponseResultInfo():
"""
Statistics of results.
:attr int page: Page number.
:attr int per_page: Number of results per page.
:attr int count: Number of results.
:attr int total_count: Total number of results.
"""
def __init__(self,
page: int,
per_page: int,
count: int,
total_count: int) -> None:
"""
Initialize a WafGroupResponseResultInfo object.
:param int page: Page number.
:param int per_page: Number of results per page.
:param int count: Number of results.
:param int total_count: Total number of results.
"""
self.page = page
self.per_page = per_page
self.count = count
self.total_count = total_count
@classmethod
def from_dict(cls, _dict: Dict) -> 'WafGroupResponseResultInfo':
"""Initialize a WafGroupResponseResultInfo object from a json dictionary."""
args = {}
if 'page' in _dict:
args['page'] = _dict.get('page')
else:
raise ValueError('Required property \'page\' not present in WafGroupResponseResultInfo JSON')
if 'per_page' in _dict:
args['per_page'] = _dict.get('per_page')
else:
raise ValueError('Required property \'per_page\' not present in WafGroupResponseResultInfo JSON')
if 'count' in _dict:
args['count'] = _dict.get('count')
else:
raise ValueError('Required property \'count\' not present in WafGroupResponseResultInfo JSON')
if 'total_count' in _dict:
args['total_count'] = _dict.get('total_count')
else:
raise ValueError('Required property \'total_count\' not present in WafGroupResponseResultInfo JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a WafGroupResponseResultInfo object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'page') and self.page is not None:
_dict['page'] = self.page
if hasattr(self, 'per_page') and self.per_page is not None:
_dict['per_page'] = self.per_page
if hasattr(self, 'count') and self.count is not None:
_dict['count'] = self.count
if hasattr(self, 'total_count') and self.total_count is not None:
_dict['total_count'] = self.total_count
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this WafGroupResponseResultInfo object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'WafGroupResponseResultInfo') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'WafGroupResponseResultInfo') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
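# Example (illustrative): round-trip a result_info payload with from_dict/to_dict.
#   info = WafGroupResponseResultInfo.from_dict(
#       {"page": 1, "per_page": 50, "count": 10, "total_count": 10})
#   assert info.to_dict()["total_count"] == 10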
class WafGroupsResponseResultInfo():
"""
Statistics of results.
:attr int page: Page number.
:attr int per_page: Number of results per page.
:attr int count: Number of results.
:attr int total_count: Total number of results.
"""
def __init__(self,
page: int,
per_page: int,
count: int,
total_count: int) -> None:
"""
Initialize a WafGroupsResponseResultInfo object.
:param int page: Page number.
:param int per_page: Number of results per page.
:param int count: Number of results.
:param int total_count: Total number of results.
"""
self.page = page
self.per_page = per_page
self.count = count
self.total_count = total_count
"""pywizlight integration."""
import asyncio
import json
import logging
import socket
from time import time
from typing import Any, Dict, Tuple, Optional, Union, List
from asyncio_dgram.aio import connect as connect_dgram, DatagramStream
from pywizlight.bulblibrary import BulbClass, BulbType
from pywizlight.exceptions import (
WizLightConnectionError,
WizLightMethodNotFound,
WizLightTimeOutError,
)
from pywizlight.rgbcw import hs2rgbcw, rgb2rgbcw
from pywizlight.scenes import SCENES
from pywizlight.utils import hex_to_percent, percent_to_hex
from pywizlight.vec import Vector
_LOGGER = logging.getLogger(__name__)
TW_SCENES = [6, 9, 10, 11, 12, 13, 14, 15, 16, 18, 29, 30, 31, 32]
DW_SCENES = [9, 10, 13, 14, 29, 30, 31, 32]
BulbResponse = Dict[str, Any]
class PilotBuilder:
"""Get information from the bulb."""
def __init__(
self,
warm_white: Optional[int] = None,
cold_white: Optional[int] = None,
speed: Optional[int] = None,
scene: Optional[int] = None,
rgb: Optional[Tuple[float, float, float]] = None,
hucolor: Optional[Tuple[float, float]] = None,
brightness: Optional[int] = None,
colortemp: Optional[int] = None,
state: bool = True,
) -> None:
"""Set the parameter."""
self.pilot_params: Dict[str, Any] = {"state": state}
if warm_white is not None:
self._set_warm_white(warm_white)
if cold_white is not None:
self._set_cold_white(cold_white)
if speed is not None:
self._set_speed(speed)
if scene is not None:
self._set_scene(scene)
if rgb is not None:
self._set_rgb(rgb)
if brightness is not None:
self._set_brightness(brightness)
if colortemp is not None:
self._set_colortemp(colortemp)
if hucolor is not None:
self._set_hs_color(hucolor)
def set_pilot_message(self) -> str:
"""Return the pilot message."""
return json.dumps({"method": "setPilot", "params": self.pilot_params})
def set_state_message(self, state: bool) -> str:
"""Return the setState message. It doesn't change the current status of the light."""
self.pilot_params["state"] = state
return json.dumps({"method": "setState", "params": self.pilot_params})
def _set_warm_white(self, value: int) -> None:
"""Set the value of the warm white led."""
if 0 <= value < 256:
self.pilot_params["w"] = value
else:
raise ValueError("Value must be between 0 and 255")
def _set_cold_white(self, value: int) -> None:
"""Set the value of the cold white led."""
if 0 <= value < 256:
self.pilot_params["c"] = value
else:
raise ValueError("Value must be between 0 and 255")
def _set_speed(self, value: int) -> None:
"""Set the color changing speed in precent (0-100)."""
# This applies only to changing effects.
if 0 < value < 101:
self.pilot_params["speed"] = value
else:
raise ValueError("Value must be between 0 and 100")
def _set_scene(self, scene_id: int) -> None:
"""Set the scene by id."""
if scene_id in SCENES:
self.pilot_params["sceneId"] = scene_id
else:
# id not in SCENES !
raise ValueError("Scene is not available. Only 0 to 32 are supported")
def _set_rgb(self, values: Tuple[float, float, float]) -> None:
"""Set the RGB color state of the bulb."""
# Setup the tuples for the RGB values
red, green, blue = values
if 0 <= red < 256:
self.pilot_params["r"] = red
else:
raise ValueError("Red is not in range between 0-255.")
if 0 <= green < 256:
self.pilot_params["g"] = green
else:
raise ValueError("Green is not in range between 0-255.")
if 0 <= blue < 256:
self.pilot_params["b"] = blue
else:
raise ValueError("Blue is not in range between 0-255.")
# Get CW from RAW
rgb_out, cw = rgb2rgbcw(values)
# cw is None for a fully saturated RGB color; otherwise apply the derived white component
if cw is not None:
# Use the existing set_warm_white function to set the CW values
self._set_warm_white(cw)
# Use the existing set_cold_white function to set the CW values
self._set_cold_white(cw)
def _set_hs_color(self, values: Tuple[float, float]) -> None:
"""Set the HS color state of the bulb."""
# Transform the HS values to RGB+CW values
rgb, cw = hs2rgbcw(values)
red, green, blue = rgb
if 0 <= red < 256:
self.pilot_params["r"] = red
else:
raise ValueError("Red is not in range between 0-255.")
if 0 <= green < 256:
self.pilot_params["g"] = green
else:
raise ValueError("Green is not in range between 0-255.")
if 0 <= blue < 256:
self.pilot_params["b"] = blue
else:
raise ValueError("Blue is not in range between 0-255.")
if cw is not None:
# Use the existing set_warm_white function to set the CW values
self._set_warm_white(cw)
# Use the existing set_cold_white function to set the CW values
self._set_cold_white(cw)
def _set_brightness(self, value: int) -> None:
"""Set the value of the brightness 0-255."""
percent = hex_to_percent(value)
# hardware limitation - values less than 10% are not supported
if percent < 10:
percent = 10
if percent > 101:
raise ValueError("Max value can be 100% with 255.")
self.pilot_params["dimming"] = percent
def _set_colortemp(self, kelvin: int) -> None:
"""Set the color temperature for the white led in the bulb."""
# normalize the kelvin values - should be removed
if kelvin < 1000:
kelvin = 1000
if kelvin > 10000:
kelvin = 10000
self.pilot_params["temp"] = kelvin
class PilotParser:
"""Interpret the message from the bulb."""
def __init__(self, pilotResult: BulbResponse) -> None:
"""Init the class."""
self.pilotResult = pilotResult
def get_state(self) -> Optional[bool]:
"""Return the state of the bulb."""
if "state" in self.pilotResult:
return bool(self.pilotResult["state"])
else:
return None
def get_mac(self) -> Optional[str]:
"""Return MAC from the bulb."""
if "mac" in self.pilotResult:
return str(self.pilotResult["mac"])
else:
return None
def get_warm_white(self) -> Optional[int]:
"""Get the value of the warm white led."""
if "w" in self.pilotResult:
return int(self.pilotResult["w"])
else:
return None
def get_white_range(self) -> Optional[List[float]]:
"""Get the value of the whiteRange property."""
if "whiteRange" in self.pilotResult:
return [float(x) for x in self.pilotResult["whiteRange"]]
else:
return None
def get_extended_white_range(self) -> Optional[List[float]]:
"""Get the value of the extended whiteRange property."""
if "extRange" in self.pilotResult:
return [float(x) for x in self.pilotResult["extRange"]]
# New after v1.22 FW - "cctRange":[2200,2700,6500,6500]
elif "cctRange" in self.pilotResult:
return [float(x) for x in self.pilotResult["cctRange"]]
else:
return None
def get_speed(self) -> Optional[int]:
"""Get the color changing speed."""
if "speed" in self.pilotResult:
return int(self.pilotResult["speed"])
else:
return None
def get_scene(self) -> Optional[str]:
"""Get the current scene name."""
if "schdPsetId" in self.pilotResult: # rhythm
return SCENES[1000]
scene_id = self.pilotResult["sceneId"]
if scene_id in SCENES:
return SCENES[scene_id]
else:
return None
def get_cold_white(self) -> Optional[int]:
"""Get the value of the cold white led."""
if "c" in self.pilotResult:
return int(self.pilotResult["c"])
else:
return None
def get_rgb(self) -> Union[Tuple[None, None, None], Vector]:
"""Get the RGB color state of the bulb and turns it on."""
if (
"r" in self.pilotResult
and "g" in self.pilotResult
and "b" in self.pilotResult
):
r = self.pilotResult["r"]
g = self.pilotResult["g"]
b = self.pilotResult["b"]
return float(r), float(g), float(b)
else:
# no RGB color value was set
return None, None, None
def get_brightness(self) -> Optional[int]:
"""Get the value of the brightness 0-255."""
if "dimming" in self.pilotResult:
return percent_to_hex(self.pilotResult["dimming"])
return None
def get_colortemp(self) -> Optional[int]:
"""Get the color temperature from the bulb."""
if "temp" in self.pilotResult:
return int(self.pilotResult["temp"])
else:
return None
async def receiveUDPwithTimeout(stream: DatagramStream, timeout: float) -> bytes:
"""Get message with timeout value."""
data, remote_addr = await asyncio.wait_for(stream.recv(), timeout)
return data
class wizlight:
"""Create an instance of a WiZ Light Bulb."""
# default port for WiZ lights - 38899
def __init__(
self,
ip: str,
connect_on_init: bool = False,
port: int = 38899,
mac: Optional[str] = None,
) -> None:
"""Create instance with the IP address of the bulb."""
self.ip = ip
self.port = port
self.state: Optional[PilotParser] = None
self.mac = mac
self.bulbtype: Optional[BulbType] = None
self.whiteRange: Optional[List[float]] = None
self.extwhiteRange: Optional[List[float]] = None
# check the state on init
if connect_on_init:
self._check_connection()
@property
def status(self) -> Optional[bool]:
"""Return the status of the bulb: true = on, false = off."""
if self.state is None:
return None
return self.state.get_state()
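# Example (illustrative sketch; the IP address is a placeholder and an
# asyncio event loop is assumed):
#   light = wizlight("192.168.1.27")
#   bulb_type = await light.get_bulbtype()   # queries the bulb over UDP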
# ------------------ Non properties -------------- #
def _check_connection(self) -> None:
"""Check the connection to the bulb."""
message = r'{"method":"getPilot","params":{}}'
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(2)
try:
# send a udp package
sock.sendto(bytes(message, "utf-8"), (self.ip, self.port))
# get response data
data, addr = sock.recvfrom(1024)
if data:
return
except socket.timeout:
raise WizLightTimeOutError(
"No connection could be established during initialization."
)
async def get_bulbtype(self) -> BulbType:
"""Return the bulb type as BulbType object."""
if self.bulbtype is not None:
return self.bulbtype
bulb_config = await self.getBulbConfig()
if "moduleName" not in bulb_config["result"]:
raise ValueError("Unable to determine bulb type.")
white_range = await self.getExtendedWhiteRange()
module_name = bulb_config["result"]["moduleName"]
self.bulbtype = BulbType.from_data(module_name, white_range)
return self.bulbtype
async def getWhiteRange(self) -> Optional[List[float]]:
"""Read the white range from the bulb."""
if self.whiteRange is not None:
return self.whiteRange
resp = await self.getUserConfig()
if resp is not None and "result" in resp:
self.whiteRange = PilotParser(resp["result"]).get_white_range()
return self.whiteRange
async def getExtendedWhiteRange(self) -> Optional[List[float]]:
"""Read extended white | |
the object's current state. If
# any object in the transaction does not fit that rule, refuse
# to undo. In theory this means arbitrary transactions can be
# undone (because we actually match the MD5 of the state); in practice it
# means that it must be the most recent transaction those
# objects were involved in.
# (Note that this prevents conflict-resolving undo as described
# by ZODB.tests.ConflictResolution.ConflictResolvingTransUndoStorage.
# Do people need that? If so, we can probably support it, but it
# will require additional code.)
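# For example (hypothetical tids): if object 42 was changed in tid 1000 and again
# in tid 1005, undoing tid 1000 finds that the current state's MD5 no longer
# matches the state written by tid 1000, so the query below returns a row and the
# undo is refused; undoing tid 1005, the most recent change, is still allowed.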
stmt = """
SELECT prev_os.zoid, current_object.tid
FROM object_state prev_os
INNER JOIN object_state cur_os
ON (prev_os.zoid = cur_os.zoid)
INNER JOIN current_object
ON (cur_os.zoid = current_object.zoid
AND cur_os.tid = current_object.tid)
WHERE prev_os.tid = %(undo_tid)s
AND cur_os.md5 != prev_os.md5
ORDER BY prev_os.zoid
"""
self.runner.run_script_stmt(cursor, stmt, {'undo_tid': undo_tid})
if cursor.fetchmany():
raise UndoError(
"Some data were modified by a later transaction")
# Rule: don't allow the creation of the root object to
# be undone. It's hard to get it back.
stmt = """
SELECT 1
FROM object_state
WHERE tid = %(undo_tid)s
AND zoid = 0
AND prev_tid = 0
"""
self.runner.run_script_stmt(cursor, stmt, {'undo_tid': undo_tid})
if cursor.fetchall():
raise UndoError("Can't undo the creation of the root object")
@metricmethod
def undo(self, cursor, undo_tid, self_tid):
"""Undo a transaction.
Parameters: "undo_tid", the integer tid of the transaction to undo,
and "self_tid", the integer tid of the current transaction.
Returns the states copied forward by the undo operation as a
list of (oid, old_tid).
"""
stmt = self._script_create_temp_undo
if stmt:
self.runner.run_script(cursor, stmt)
stmt = """
DELETE FROM temp_undo;
-- Put into temp_undo the list of objects to be undone and
-- the tid of the transaction that has the undone state.
INSERT INTO temp_undo (zoid, prev_tid)
SELECT zoid, prev_tid
FROM object_state
WHERE tid = %(undo_tid)s
ORDER BY zoid;
-- Override previous undo operations within this transaction
-- by resetting the current_object pointer and deleting
-- copied states from object_state.
UPDATE current_object
SET tid = (
SELECT prev_tid
FROM object_state
WHERE zoid = current_object.zoid
AND tid = %(self_tid)s
)
WHERE zoid IN (SELECT zoid FROM temp_undo ORDER BY zoid)
AND tid = %(self_tid)s;
DELETE FROM object_state
WHERE zoid IN (SELECT zoid FROM temp_undo ORDER BY zoid)
AND tid = %(self_tid)s;
-- Copy old states forward.
INSERT INTO object_state (zoid, tid, prev_tid, md5, state_size, state)
SELECT temp_undo.zoid, %(self_tid)s, current_object.tid,
md5, COALESCE(state_size, 0), state
FROM temp_undo
INNER JOIN current_object ON (temp_undo.zoid = current_object.zoid)
LEFT OUTER JOIN object_state
ON (object_state.zoid = temp_undo.zoid
AND object_state.tid = temp_undo.prev_tid)
ORDER BY current_object.zoid;
-- Copy old blob chunks forward.
INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
SELECT temp_undo.zoid, %(self_tid)s, chunk_num, chunk
FROM temp_undo
JOIN blob_chunk
ON (blob_chunk.zoid = temp_undo.zoid
AND blob_chunk.tid = temp_undo.prev_tid);
-- List the copied states.
SELECT zoid, prev_tid
FROM temp_undo;
"""
self.runner.run_script(cursor, stmt,
{'undo_tid': undo_tid, 'self_tid': self_tid})
res = list(cursor)
stmt = self._script_reset_temp_undo
if stmt:
self.runner.run_script(cursor, stmt)
return res
def on_filling_object_refs(self):
"""Test injection point"""
def fill_object_refs(self, conn, cursor, get_references):
"""Update the object_refs table by analyzing new transactions."""
stmt = """
SELECT transaction.tid
FROM transaction
LEFT OUTER JOIN object_refs_added
ON (transaction.tid = object_refs_added.tid)
WHERE object_refs_added.tid IS NULL
ORDER BY transaction.tid
"""
self.runner.run_script_stmt(cursor, stmt)
tids = [tid for (tid,) in cursor]
log_at = time.time() + 60
tid_count = len(tids)
txns_done = 0
if tids:
self.on_filling_object_refs()
log.info(
"pre_pack: analyzing references from objects in %d new "
"transaction(s)", tid_count)
for tid in tids:
self._add_refs_for_tid(cursor, tid, get_references)
txns_done += 1
now = time.time()
if now >= log_at:
# save the work done so far
self.connmanager.commit(conn, cursor)
log_at = now + 60
log.info(
"pre_pack: transactions analyzed: %d/%d",
txns_done, tid_count)
self.connmanager.commit(conn, cursor)
log.info("pre_pack: transactions analyzed: %d/%d", txns_done, tid_count)
def _add_refs_for_tid(self, cursor, tid, get_references):
"""Fill object_refs with all states for a transaction.
Returns the number of references added.
"""
log.debug("pre_pack: transaction %d: computing references ", tid)
from_count = 0
stmt = """
SELECT zoid, state
FROM object_state
WHERE tid = %(tid)s
ORDER BY zoid
"""
self.runner.run_script_stmt(cursor, stmt, {'tid': tid})
add_rows = [] # [(from_oid, tid, to_oid)]
for from_oid, state in self._fetchmany(cursor):
state = self.driver.binary_column_as_state_type(state)
if state:
assert isinstance(state, self.driver.state_types), type(state)
from_count += 1
try:
to_oids = get_references(state)
except:
log.error(
"pre_pack: can't unpickle "
"object %d in transaction %d; state length = %d",
from_oid, tid, len(state))
raise
for to_oid in to_oids:
add_rows.append((from_oid, tid, to_oid))
# A previous pre-pack may have been interrupted. Delete rows
# from the interrupted attempt.
stmt = "DELETE FROM object_ref WHERE tid = %(tid)s"
self.runner.run_script_stmt(cursor, stmt, {'tid': tid})
# Add the new references.
# TODO: Use RowBatcher
stmt = """
INSERT INTO object_ref (zoid, tid, to_zoid)
VALUES (%s, %s, %s)
"""
self.runner.run_many(cursor, stmt, add_rows)
# The references have been computed for this transaction.
stmt = """
INSERT INTO object_refs_added (tid)
VALUES (%(tid)s)
"""
self.runner.run_script_stmt(cursor, stmt, {'tid': tid})
to_count = len(add_rows)
log.debug("pre_pack: transaction %d: has %d reference(s) "
"from %d object(s)", tid, to_count, from_count)
return to_count
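# A minimal sketch of the get_references callable threaded through
# fill_object_refs/_add_refs_for_tid above and pre_pack below (an illustration,
# not RelStorage's own wiring): it only has to map a pickled state to the OIDs
# that state references, e.g. built on ZODB.serialize.referencesf:
#
#     from ZODB.serialize import referencesf
#
#     def get_references(state):
#         # referencesf yields the raw 8-byte OIDs; whatever integer conversion
#         # the surrounding adapter expects is assumed to happen elsewhere.
#         return set(referencesf(state))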
@metricmethod
def pre_pack(self, pack_tid, get_references):
"""Decide what to pack.
pack_tid specifies the most recent transaction to pack.
get_references is a function that accepts a pickled state and
returns a set of OIDs that state refers to.
The self.options.pack_gc flag indicates whether
to run garbage collection.
If pack_gc is false, at least one revision of every object is kept,
even if nothing refers to it. Packing with pack_gc disabled can be
much faster.
"""
conn, cursor = self.connmanager.open_for_pre_pack()
try:
# The pre-pack functions are responsible for managing
# their own commits; when they return, the transaction
# should be committed.
#
# ``pack_object`` should be populated,
# essentially with the distinct list of all objects and their
# maximum (newest) transaction ids.
if self.options.pack_gc:
log.info("pre_pack: start with gc enabled")
self._pre_pack_with_gc(
conn, cursor, pack_tid, get_references)
else:
log.info("pre_pack: start without gc")
self._pre_pack_without_gc(
conn, cursor, pack_tid)
log.info("pre_pack: enumerating states to pack")
stmt = "%(TRUNCATE)s pack_state"
self.runner.run_script_stmt(cursor, stmt)
to_remove = 0
if self.options.pack_gc:
# Mark all objects we said not to keep as something
# we should discard.
stmt = """
INSERT INTO pack_state (tid, zoid)
SELECT tid, zoid
FROM object_state
INNER JOIN pack_object USING (zoid)
WHERE keep = %(FALSE)s
AND tid > 0
AND tid <= %(pack_tid)s
ORDER BY zoid
"""
self.runner.run_script_stmt(
cursor, stmt, {'pack_tid': pack_tid})
to_remove += cursor.rowcount
else:
# Support for IExternalGC. Also remove deleted objects.
stmt = """
INSERT INTO pack_state (tid, zoid)
SELECT t.tid, t.zoid
FROM (
SELECT zoid, tid
FROM object_state
WHERE state IS NULL
AND tid = (
SELECT MAX(i.tid)
FROM object_state i
WHERE i.zoid = object_state.zoid
)
) t
"""
self.runner.run_script_stmt(cursor, stmt)
to_remove += cursor.rowcount
# Pack object states with the keep flag set to true,
# excluding their current TID.
stmt = """
INSERT INTO pack_state (tid, zoid)
SELECT tid, zoid
FROM object_state
INNER JOIN pack_object USING (zoid)
WHERE keep = %(TRUE)s
AND tid > 0
AND tid != keep_tid
AND tid <= %(pack_tid)s
ORDER BY zoid
"""
self.runner.run_script_stmt(
cursor, stmt, {'pack_tid': pack_tid})
to_remove += cursor.rowcount
# Make a simple summary of the transactions to examine.
log.info("pre_pack: enumerating transactions to pack")
stmt = "%(TRUNCATE)s pack_state_tid"
self.runner.run_script_stmt(cursor, stmt)
stmt = """
INSERT INTO pack_state_tid (tid)
SELECT DISTINCT tid
FROM pack_state
"""
cursor.execute(stmt)
log.info("pre_pack: will remove %d object state(s)",
to_remove)
log.info("pre_pack: finished successfully")
self.connmanager.commit(conn, cursor)
except:
self.connmanager.rollback_quietly(conn, cursor)
conn, cursor = None, None
raise
finally:
self.connmanager.close(conn, cursor)
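# Concrete example of what the keep = TRUE branch above collects (hypothetical
# tids): if object 7 has states at tids 900, 950 and 980, keep_tid is 980 and
# pack_tid is 1000, then (900, 7) and (950, 7) land in pack_state and the current
# state at tid 980 is preserved.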
def __initial_populate_pack_object(self, conn, cursor, pack_tid, keep):
"""
Put all objects into ``pack_object`` that have revisions equal
to or below *pack_tid*, setting their initial ``keep`` status
to *keep*.
Commits the transaction to release locks.
"""
# Access the tables that are used by online transactions
# in a short transaction and immediately commit to release any
# locks.
# TRUNCATE may or may not cause implicit commits. (MySQL: Yes,
# PostgreSQL: No)
self.runner.run_script(cursor, "%(TRUNCATE)s pack_object;")
affected_objects = """
SELECT zoid, tid
FROM object_state
WHERE tid > 0 AND tid <= %(pack_tid)s
ORDER BY zoid
"""
# Take the | |
't_3'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_5', 't_2'), ('t_3', 'v_9'), ('t_4', 'v_8'),
('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_3', 'v_6'), ('v_10', 't_2'), ('t_2', 'v_10'),
('v_7', 't_4'), ('v_9', 't_3'), ('t_4', 'v_7'), ('c_1', 't_3'), ('v_8', 't_4'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('t_3', 'v_8'), ('v_5', 't_2'),
('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_4', 'v_6'), ('v_6', 't_4'), ('v_10', 't_2'),
('t_2', 'v_10'), ('t_4', 'v_10'), ('v_10', 't_4'), ('v_8', 't_3'), ('v_7', 't_3'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_9', 't_4'), ('t_3', 'v_8'),
('v_5', 't_2'), ('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('v_10', 't_2'), ('t_2', 'v_10'),
('t_4', 'v_10'), ('v_10', 't_4'), ('v_8', 't_3'), ('v_7', 't_3'), ('c_1', 't_3'), ('t_4', 'v_9')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_9', 't_4'), ('t_3', 'v_8'),
('v_5', 't_2'), ('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_4', 'v_6'), ('v_6', 't_4'),
('v_10', 't_2'), ('t_2', 'v_10'), ('v_8', 't_3'), ('v_7', 't_3'), ('c_1', 't_3'), ('t_4', 'v_9')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_5', 't_2'), ('t_3', 'v_9'),
('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_4', 'v_6'), ('v_6', 't_4'), ('v_10', 't_2'),
('t_2', 'v_10'), ('t_4', 'v_10'), ('v_10', 't_4'), ('v_9', 't_3'), ('v_7', 't_3'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_5', 't_2'), ('t_3', 'v_9'),
('t_4', 'v_8'), ('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('v_10', 't_2'), ('t_2', 'v_10'),
('t_4', 'v_10'), ('v_10', 't_4'), ('v_9', 't_3'), ('v_7', 't_3'), ('c_1', 't_3'), ('v_8', 't_4')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_5', 't_2'), ('t_3', 'v_9'),
('t_4', 'v_8'), ('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_4', 'v_6'), ('v_6', 't_4'),
('v_10', 't_2'), ('t_2', 'v_10'), ('v_9', 't_3'), ('v_7', 't_3'), ('c_1', 't_3'), ('v_8', 't_4')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('v_5', 't_2'),
('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_4', 'v_6'), ('v_6', 't_4'), ('v_10', 't_2'),
('t_2', 'v_10'), ('t_4', 'v_10'), ('v_10', 't_4'), ('v_8', 't_3'), ('v_9', 't_3'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('v_5', 't_2'),
('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('v_9', 't_3'), ('v_10', 't_2'), ('t_2', 'v_10'),
('t_4', 'v_10'), ('v_10', 't_4'), ('v_7', 't_4'), ('v_8', 't_3'), ('t_4', 'v_7'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('v_5', 't_2'),
('c_1', 't_2'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_4', 'v_6'), ('v_6', 't_4'), ('v_10', 't_2'),
('t_2', 'v_10'), ('v_7', 't_4'), ('v_8', 't_3'), ('v_9', 't_3'), ('t_4', 'v_7'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_3', 'v_5'), ('t_3', 'c_1'), ('t_4', 'c_1'), ('c_1', 't_2'), ('c_1', 't_4'),
('v_10', 't_3'), ('t_2', 'v_6'), ('v_6', 't_2'), ('t_3', 'v_10'), ('v_10', 't_2'), ('t_2', 'v_10'),
('t_4', 'v_10'), ('v_10', 't_4'), ('v_7', 't_4'), ('v_5', 't_3'), ('t_4', 'v_7'), ('c_1', 't_3')},
'>': | |
<gh_stars>0
import time
import tkinter as tk
from tkinter import ttk, messagebox
import socket
import chat_utils as utils
import threading
import json
import sys
class Main_GUI(tk.Tk):
def __init__(self):
super(Main_GUI, self).__init__()
self.client = Client(self)
self.client.init_chat()
self.LOGIN_IMG = tk.PhotoImage(file='res/login.png')
self.POEM_IMG = tk.PhotoImage(file='res/poem.png')
self.TIME_IMG = tk.PhotoImage(file='res/time.png')
self.LOGOUT_IMG = tk.PhotoImage(file='res/logout.png')
self.SEARCH_IMG = tk.PhotoImage(file='res/search.png')
self.ADD_GROUP_IMG = tk.PhotoImage(file='res/join_group.png')
self.font_normal = ["Comic Sans MS", 10]
def exit(self):
answer = messagebox.askquestion(title="Exit?",
message="Are you sure you want to exit?")
if answer == "yes": # first logout, then exit
if utils.VERBOSE:
print(self.chat_frame.cache)
self.client.exit()
sys.exit()
def app(self):
"""
Put root in Main_GUI.app(), and configure other frames
"""
self.title("ICS Chatroom")
self.geometry("530x400")
self.config(bg=utils.DEEPGREY)
self.func_frame = Func_frame(self)
self.func_frame.config(bg=utils.DEEPGREY)
self.func_frame.grid(row=0, column=0, sticky='n')
self.group_frame = Group_frame(self)
self.group_frame.config(bg=utils.GREY)
self.group_frame.grid(row=0, column=1, sticky="nswe")
self.chat_frame = Chat_frame(self)
self.chat_frame.config(bg=utils.WHITE)
self.chat_frame.grid(row=0, column=2,
sticky="nswe")
self.status_frame = Status_frame(self)
self.status_frame.config(bg=utils.WHITE)
self.status_frame.grid(row=0, column=0,
columnspan=2, sticky="nswe")
self.rowconfigure(0, weight=1)
self.columnconfigure(2, weight=1)
self.group_frame.rowconfigure(0, weight=1)
self.group_frame.rowconfigure(1, weight=1)
self.group_frame.columnconfigure(0, weight=1)
self.group_frame.columnconfigure(1, weight=1)
self.chat_frame.rowconfigure(1, weight=1)
self.chat_frame.columnconfigure(0, weight=3)
self.leave_group_btn = Leave_group_button(self)
self.leave_group_btn.grid(row=0, column=2, sticky="nswe", padx=10, pady=6)
self.status_frame.columnconfigure(0, weight=1)
self.status_frame.columnconfigure(1, weight=1)
# an easter egg :)
self.bind("<|><t><h>", lambda e: (self.chat_frame.chat_box.config(state=tk.NORMAL),
                                  self.chat_frame.chat_box.insert('end', 'ᗜˬᗜ '),
                                  self.chat_frame.chat_box.config(state=tk.DISABLED)))
self.protocol("WM_DELETE_WINDOW", self.exit)
self.chat_frame.switch_to(utils.SYS_GRP_ID)
class Client:
def __init__(self, root: Main_GUI):
self.state = utils.S_OFFLINE
self.name = None
self.root = root
self.group_count = 0
def init_chat(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
svr = utils.SERVER
self.socket.connect(svr)
new_thread = threading.Thread(target=self.receive_msg_in_new_thread)
new_thread.start()
def login(self, name, password):
if self.state == utils.S_OFFLINE:
if name.isspace() or password.isspace(): # if name or password is empty
self.root.chat_frame.new_msg.set(new_content="[SYSTEM] Login fail: Username and password can't be empty!",
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
else:
msg_to_server = json.dumps({"action": "login",
"username": name,
"password": password})
utils.mysend(self.socket, msg_to_server)
if utils.VERBOSE:
print("message send in login")
print(msg_to_server)
else:
self.root.chat_frame.new_msg.set(new_content=f"[SYSTEM] You have already logged in as: {self.name}",
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
def register(self, name, password):
if self.state != utils.S_OFFLINE: # if user has already logged in
self.root.chat_frame.new_msg.set(new_content="[SYSTEM] You have already logged in as: {self.name}",
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
else:
if name.isspace() or password.isspace(): # if name is empty
self.root.chat_frame.new_msg.set(new_content="[SYSTEM] Register fail: Username and password can't be empty!",
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
else:
msg = json.dumps({"action": "register",
"username": name,
"password": password})
utils.mysend(self.socket, msg)
if utils.VERBOSE:
print(f"message send in Client.register():\n{msg}")
def load_history(self):
if self.state == utils.S_LOGGEDIN:
msg = json.dumps({
"action": "load_history",
"send_from": self.name
})
utils.mysend(self.socket, msg)
def clear_history(self):
# clear history of previous user
self.group_count = 0
self.root.group_frame.clear_history()
# clear groups cache except for system group
self.root.chat_frame.cache = {
utils.SYS_GRP_ID: self.root.chat_frame.cache[utils.SYS_GRP_ID]
}
def logout(self):
if self.state != utils.S_OFFLINE:
msg = json.dumps({"action": "logout",
"send_from": self.name})
utils.mysend(self.socket, msg)
def exit(self):
msg = json.dumps({"action": "exit",
"send_from": self.name})
utils.mysend(self.socket, msg)
def get_time(self):
msg_json = json.dumps({"action": "time",
"send_from": self.name})
utils.mysend(self.socket, msg_json)
def get_peom(self, poem_idx):
try:
if int(poem_idx) > 0:
msg_json = json.dumps({"action": "poem",
"send_from": self.name,
"index": poem_idx})
utils.mysend(self.socket, msg_json)
else:
self.root.chat_frame.new_msg.set(new_content="[SYSTEM] Poem index must be a positive integer!",
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
except ValueError:
self.root.chat_frame.new_msg.set(new_content="[SYSTEM] Poem index must be a positive integer!",
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
def join_group(self, group_name):
msg_json = json.dumps({
"action": "join_group",
"send_from": self.name,
"group_name": group_name})
utils.mysend(self.socket, msg_json)
def create_group(self, group_name):
msg_json = json.dumps({
"action": "create_group",
"group_name": group_name})
utils.mysend(self.socket, msg_json)
def chat(self, message, group_id):
msg_json = json.dumps({"action": "send_chat",
"send_from": self.name,
"send_to": group_id,
"message": message})
utils.mysend(self.socket, msg_json)
if utils.VERBOSE:
print("chat message send to server.")
def search_chat_history(self, term: str):
msg_json = json.dumps({"action": "search_chat_history",
"send_from": self.name,
"term": term})
utils.mysend(self.socket, msg_json)
def leave_group(self, group_name: str):
msg_json = json.dumps({"action": "leave_group",
"send_from": self.name,
"group_name": group_name})
utils.mysend(self.socket, msg_json)
# a typical application is:
# send_msg(msg)
# result = receive_msg()
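# For example, the login exchange handled below uses the field names from this
# file (values illustrative):
#   client -> server: {"action": "login", "username": "...", "password": "..."}
#   server -> client: {"action": "login", "status": true, "username": "..."} on
#                     success, or {"action": "login", "status": false, "message": "..."} on failure.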
def receive_msg_in_new_thread(self):
while True:
msg_json = utils.myrecv(self.socket)
if not msg_json: # if msg_json is empty
print("Disconnected")
break
msg_from_server = json.loads(msg_json)
if utils.VERBOSE:
print('msg_from_server: ' + str(msg_from_server))
if msg_from_server["action"] == "login":
if msg_from_server["status"] == True: # login successfully
self.state = utils.S_LOGGEDIN
self.name = msg_from_server["username"]
self.root.status_frame.client_state = self.root.client.state
self.root.status_frame.client_name = self.root.client.name
self.root.func_frame.popup_login_frame.destroy()
self.load_history() # if login succeed, load user's history
else:
self.root.func_frame.username_ety.delete(0, "end")
self.root.func_frame.password_ety.delete(0, "end")
self.root.func_frame.username_ety.focus()
msg = msg_from_server["message"]
self.root.chat_frame.new_msg.set(new_content="[SYSTEM] " + msg,
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
elif msg_from_server["action"] == "register":
msg = msg_from_server["message"]
self.root.chat_frame.new_msg.set(new_content="[SYSTEM] " + msg,
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
if msg_from_server["status"] is True: # if register succeed, login
name = msg_from_server["username"]
password = msg_from_server["password"]
self.login(name, password)
elif msg_from_server["action"] == "load_history":
no_error = msg_from_server["status"]
if no_error:
result = msg_from_server["result"]
self.root.group_frame.load_history(result)
else:
msg = msg_from_server["message"]
self.root.chat_frame.new_msg.set(new_content='[SYSTEM] ' + msg,
group_id=utils.SYS_GRP_ID)
elif msg_from_server["action"] == "time":
cur_time = msg_from_server["result"]
self.root.chat_frame.new_msg.set(new_content=f"[SYSTEM] The current server time is {cur_time}.",
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
self.root.chat_frame.switch_to(utils.SYS_GRP_ID)
elif msg_from_server["action"] == "poem":
no_error = msg_from_server["status"]
poem_idx = msg_from_server["index"]
if no_error:
self.root.chat_frame.new_msg.set(new_content=f"[SYSTEM] Poem #{poem_idx} retrieved successfully.",
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
poem = msg_from_server["result"]
self.root.func_frame.popup_show_poem(poem)
else:
self.root.chat_frame.new_msg.set(new_content=f"[SYSTEM] Poem #{poem_idx} not found in the sonnet database..",
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
self.root.chat_frame.switch_to(utils.SYS_GRP_ID)
elif msg_from_server["action"] == "recv_chat":
send_from = msg_from_server["send_from"]
group_id = msg_from_server["send_to"]
new_msg = msg_from_server['message']
self.root.chat_frame.new_msg.set(new_content=new_msg,
group_id=group_id,
send_from=send_from)
self.root.chat_frame.put_up_new_msg()
elif msg_from_server["action"] == "create_group":
no_error = msg_from_server["status"]
group_name = msg_from_server["group_name"]
msg = '[SYSTEM] ' + msg_from_server["message"]
self.root.chat_frame.new_msg.set(new_content=msg,
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
if no_error:
self.join_group(group_name)
elif msg_from_server["action"] == "join_group":
no_error = msg_from_server["status"]
msg = f"[SYSTEM] {msg_from_server['message']}"
group_id = msg_from_server["group_id"]
group_name = msg_from_server["group_name"]
self.root.chat_frame.new_msg.set(new_content=msg,
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
if no_error:
self.root.group_frame.put_up_group(group_id, group_name)
self.root.group_frame.popup_join_group_frame.destroy()
elif msg_from_server["action"] == "search_chat_history":
no_error = msg_from_server["status"]
msg = "[SYSTEM] " + msg_from_server["message"]
self.root.chat_frame.new_msg.set(new_content=msg,
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
if no_error:
term = msg_from_server["term"]
result = msg_from_server["result"]
self.root.group_frame.popup_search_result(term, result)
elif msg_from_server["action"] == "leave_group":
no_error = msg_from_server["status"]
group_name = msg_from_server["group_name"]
msg = '[SYSTEM] ' + msg_from_server["message"]
if no_error:
group_id = self.root.group_frame.group_name2id[group_name]
self.root.chat_frame.switch_to(utils.SYS_GRP_ID)
self.root.group_frame.group_list_dict[group_name].destroy()
self.root.group_frame.group_list_dict.pop(group_name)
self.root.group_frame.group_name2id.pop(group_name)
self.root.group_frame.group_id2name.pop(group_id)
self.root.chat_frame.new_msg.set(new_content=msg,
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
elif msg_from_server["action"] == "logout":
if msg_from_server["status"]:
self.state = utils.S_OFFLINE
self.name = None
self.root.status_frame.client_state = utils.S_OFFLINE
self.root.status_frame.client_name = " "
self.root.chat_frame.chat_box.delete(0, 'end')
self.clear_history()
self.root.chat_frame.new_msg.set(new_content="[SYSTEM] " + msg_from_server["message"],
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
elif msg_from_server["action"] == "exit":
return None, None
else:
return False, ""
time.sleep(utils.CHAT_WAIT)
class New_message: # the newest message to be displayed on the screen
"""
The idea is that there is always exactly one new message at a time, and it always
comes directly from the SYSTEM (even if you are chatting with other users).
So we can implement different functionality by **updating New_message** accordingly, together with the other responses.
"""
def __init__(self):
self.__content = "NULL"
self.__group_id = utils.SYS_GRP_ID
self.__send_from = "NULL"
def get(self):
return self.__content, self.__group_id, self.__send_from
def set(self, new_content, group_id=utils.SYS_GRP_ID, send_from=None): # None is msg send from system
self.__content = new_content
self.__group_id = group_id
self.__send_from = send_from
def clear(self):
self.__content = "NULL"
self.__group_id = utils.SYS_GRP_ID
self.__send_from = "NULL"
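# Typical use of New_message, as seen throughout Client above: set() the pending
# message and then ask the chat frame to render it, e.g.
#     self.root.chat_frame.new_msg.set(new_content="[SYSTEM] ...", group_id=utils.SYS_GRP_ID)
#     self.root.chat_frame.put_up_new_msg()
# so there is always exactly one pending message for the chat frame to display.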
class Leave_group_button(tk.Button):
def __init__(self, root: Main_GUI):
self.root = root
super(Leave_group_button, self).__init__(
self.root.chat_frame,
text='leave',
width=5, height=1,
bg=utils.WHITE,
relief='flat',
font=utils.FONT_LARGE
)
print("BUTTON LOADED")
self.set_command()
def set_command(self):
if self.root.chat_frame.cur_group_id == utils.SYS_GRP_ID or self.root.chat_frame.cur_group_id == -1:
# you can't leave the system group
self.config(command=lambda: "", # do nothing
fg=utils.WHITE, bg=utils.WHITE,
relief='flat')
else:
self.config(state=tk.NORMAL)
# suspicious
self.config(command=self.leave_group,
fg=utils.DEEPGREY, bg=utils.WHITE,
relief='raised')
if utils.VERBOSE:
print(f"Inside leave_group_btn: leave group name: {self.root.group_frame.group_id2name[self.root.chat_frame.cur_group_id]}")
def leave_group(self):
group_name = self.root.group_frame.group_id2name[self.root.chat_frame.cur_group_id]
answer = messagebox.askquestion(title="Leave group?",
message=f"Are you sure you want to leave group: '{group_name}'?"
f"\n(All the messages you sent in this group will be deleted)")
if answer == "yes":
self.root.chat_frame.leave_group(group_name)
class Group_button(tk.Button):
def __init__(self, root: Main_GUI, group_id: int, group_name: str, chat_frame):
if not isinstance(group_id, int):
raise TypeError(f"group_id of Group_Button must be int (not {type(group_id)})")
if not isinstance(group_name, str):
raise TypeError(f"group_name of Group_Button must be str (not {type(group_name)})")
self.root = root
self.group_id = group_id
# reformat group_name: insert a line break every 20 characters so long names wrap
self.group_name = '\n'.join(group_name[i:i + 20] for i in range(0, len(group_name), 20))
text = f"#{group_id}\n{self.group_name}"
super(Group_button, self).__init__(self.root.group_frame.group_list_frame,
text=text,
command=lambda: self.root.chat_frame.switch_to(group_id=group_id),
bg=utils.GREY,
fg=utils.WHITE,
font=utils.FONT_LARGE,
relief="flat"
)
class Group_frame(tk.Frame):
def __init__(self, root: Main_GUI):
super(Group_frame, self).__init__(root)
self.root = root
self.group_list_frame = tk.Frame(self, bg=utils.GREY)
self.group_list_frame.grid(row=0, column=0, columnspan=2, sticky="n")
self.group_list = []
self.group_list_dict = {} # name2btn
self.group_id2name = {}
self.group_name2id = {}
# system info
tk.Button(self.group_list_frame,
text="System",
command=lambda: self.root.chat_frame.switch_to(utils.SYS_GRP_ID),
bg=utils.GREY,
fg=utils.WHITE,
font=utils.FONT_LARGE,
relief="flat").grid(row=0, column=0, sticky="new")
search_btn = tk.Button(self,
image=self.root.SEARCH_IMG,
bg=utils.WHITE,
command=self.popup_search_chat)
search_btn.grid(row=1, column=0,
padx=2, pady=4,
ipadx=2, ipady=2,
sticky="swe")
join_group_btn = tk.Button(self,
image=self.root.ADD_GROUP_IMG,
bg=utils.WHITE,
command=self.popup_join_group)
join_group_btn.grid(row=1, column=1,
padx=2, pady=4,
ipadx=2, ipady=2,
sticky="swe")
self.group_list_frame.columnconfigure(0, weight=1)
def load_history(self, history):
if utils.VERBOSE:
print(f"history: {history}")
for h in history:
self.put_up_group(group_id=h[0],
group_name=h[1])
if utils.VERBOSE:
print("All history are loaded")
def put_up_group(self, group_id, group_name):
if self.root.client.group_count > utils.MAX_GROUP_NUM:
self.root.chat_frame.new_msg.set(new_content="[SYSTEM] You can only join at most 10 groups!",
group_id=utils.SYS_GRP_ID)
self.root.chat_frame.put_up_new_msg()
else:
self.root.chat_frame.cache[group_id] = f"Group #{group_id}: {group_name}"
btn = Group_button(root=self.root,
group_id=group_id,
group_name=group_name,
chat_frame=self.root.chat_frame)
btn.grid(row=1+self.root.client.group_count,
column=0,
sticky="nwe")
self.group_list.append(btn)
if utils.VERBOSE:
print(self.group_list_dict, self.group_id2name, self.group_name2id)
if group_name not in self.group_list_dict:
self.group_list_dict[group_name] = btn
self.group_id2name[group_id] = | |
<reponame>mogorman/openpilot-1
from selfdrive.car import apply_toyota_steer_torque_limits
from selfdrive.car.chrysler.chryslercan import create_lkas_hud, create_lkas_command, \
create_wheel_buttons_command, create_lkas_heartbit, \
acc_command, acc_command_v2, acc_log
from selfdrive.car.chrysler.values import CAR, CarControllerParams
from opendbc.can.packer import CANPacker
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MIN, V_CRUISE_MIN_IMPERIAL
from common.cached_params import CachedParams
from common.op_params import opParams
from common.params import Params
from cereal import car
import math
from random import randrange
ButtonType = car.CarState.ButtonEvent.Type
LongCtrlState = car.CarControl.Actuators.LongControlState
V_CRUISE_MIN_IMPERIAL_MS = V_CRUISE_MIN_IMPERIAL * CV.KPH_TO_MS
V_CRUISE_MIN_MS = V_CRUISE_MIN * CV.KPH_TO_MS
AUTO_FOLLOW_LOCK_MS = 3 * CV.MPH_TO_MS
ACC_BRAKE_THRESHOLD = 2 * CV.MPH_TO_MS
class CarController():
def __init__(self, dbc_name, CP, VM):
self.apply_steer_last = 0
self.prev_frame = -1
self.lkas_frame = -1
self.prev_lkas_counter = -1
self.hud_count = 0
self.car_fingerprint = CP.carFingerprint
self.torq_enabled = False
self.steer_rate_limited = False
self.last_button_counter = -1
self.button_frame = -1
self.last_acc_2_counter = 0
self.last_brake = None
self.last_torque = 0.
self.accel_steady = 0.
self.last_aTarget = 0.
self.packer = CANPacker(dbc_name)
self.params = Params()
self.cachedParams = CachedParams()
self.opParams = opParams()
self.auto_resume = self.params.get_bool('jvePilot.settings.autoResume')
self.minAccSetting = V_CRUISE_MIN_MS if self.params.get_bool("IsMetric") else V_CRUISE_MIN_IMPERIAL_MS
self.round_to_unit = CV.MS_TO_KPH if self.params.get_bool("IsMetric") else CV.MS_TO_MPH
self.autoFollowDistanceLock = None
self.moving_fast = False
self.min_steer_check = self.opParams.get("steer.checkMinimum")
self.moneyPlaneOpLong = (self.cachedParams.get('moneyPlane.settings.opLong', 5000) == "1")
self.gone_fast_yet = False
def update(self, enabled, CS, actuators, pcm_cancel_cmd, hud_alert, gas_resume_speed, c):
if CS.button_pressed(ButtonType.lkasToggle, False):
c.jvePilotState.carControl.useLaneLines = not c.jvePilotState.carControl.useLaneLines
self.params.put("EndToEndToggle", "1" if self.moneyPlaneOpLong else "0")
c.jvePilotState.notifyUi = True
#*** control msgs ***
can_sends = []
self.lkas_control(CS, actuators, can_sends, enabled, hud_alert, c.jvePilotState)
self.wheel_button_control(CS, can_sends, enabled, gas_resume_speed, c.jvePilotState, pcm_cancel_cmd)
self.acc(CS, actuators, can_sends, enabled, c.jvePilotState)
return can_sends
# T = (mass x accel x velocity x 1000)/(.105 x Engine rpm)
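# Worked example with made-up numbers (the real constants come from cached params):
# with VEHICLE_MASS = 2000 kg, a target accel of 1.0 m/s^2, vEgo = 10 m/s and
# rpm = 2000, the cruise torque computed in acc() below is
#     (2000 * 1.0 * 10) / (0.105 * 2000) ~= 95
# which is then clamped into [ACCEL_TORQ_MIN, ACCEL_TORQ_MAX].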
def acc(self, CS, actuators, can_sends, enabled, jvepilot_state):
if CS.hybrid:
ACCEL_TORQ_MIN = CS.hybridAxle["AXLE_TORQ_MIN"]
ACCEL_TORQ_MAX = CS.hybridAxle["AXLE_TORQ_MAX"]
else:
ACCEL_TORQ_MIN = 20
ACCEL_TORQ_MAX = self.cachedParams.get_float('jvePilot.settings.longControl.maxAccelTorq', 500)
VEHICLE_MASS = self.cachedParams.get_float('jvePilot.settings.longControl.vehicleMass', 500)
ACCEL_TORQ_START = self.cachedParams.get_float('jvePilot.settings.longControl.torqStart', 500)
acc_2_counter = CS.acc_2['COUNTER']
if acc_2_counter == self.last_acc_2_counter:
return
self.last_acc_2_counter = acc_2_counter
if not enabled or not self.moneyPlaneOpLong:
self.last_brake = None
self.last_torque = ACCEL_TORQ_START
self.last_aTarget = CS.aEgoRaw
if CS.acc_2['ACC_DECEL_REQ'] == 1:
self.last_brake = round(CS.acc_2['ACC_DECEL'], 2) # start here since ACC was already active
return
vTarget = jvepilot_state.carControl.vTargetFuture
# ECO
aTarget, self.accel_steady = self.accel_hysteresis(actuators.accel, self.accel_steady)
COAST_WINDOW = CV.MPH_TO_MS * 3
LOW_WINDOW = CV.MPH_TO_MS * 3
brake_press = False
brake_target = 0
torque = 0
long_starting = actuators.longControlState == LongCtrlState.starting
go_req = long_starting and CS.out.standstill
long_stopping = actuators.longControlState == LongCtrlState.stopping
stop_req = long_stopping or (CS.out.standstill and aTarget == 0 and not go_req)
speed_to_far_off = CS.out.vEgo - vTarget > COAST_WINDOW # speed gap is large, start braking
not_slowing_fast_enough = speed_to_far_off and vTarget < CS.out.vEgo + CS.aEgoRaw # not going to get there, start braking
slow_speed_brake = aTarget <= 0 and CS.out.vEgo < LOW_WINDOW
already_braking = aTarget <= 0 and self.last_brake is not None
spoof_brake = long_stopping or already_braking or slow_speed_brake or not_slowing_fast_enough
if CS.acc_2['ACC_DECEL_REQ'] == 1 and (CS.acc_2['ACC_DECEL'] < aTarget or not spoof_brake):
brake_press = True
brake_target = CS.acc_2['ACC_DECEL']
elif spoof_brake or stop_req:
brake_press = True
if stop_req and CS.out.standstill:
brake_target = -2.
else:
brake_target = max(-4., round(aTarget, 2))
if CS.acc_2['ACC_DECEL_REQ'] == 1:
acc = CS.acc_2['ACC_DECEL']
brake_target = min(brake_target, acc)
if self.last_brake is None:
self.last_brake = acc # start here since ACC was already active
else:
vSmoothTarget = (vTarget + CS.out.vEgo) / 2
accelerating = vTarget - COAST_WINDOW * CV.MS_TO_MPH > CS.out.vEgo and aTarget > 0 and CS.aEgoRaw > 0 and CS.aEgoRaw > self.last_aTarget
if accelerating:
aSmoothTarget = (aTarget + CS.aEgoRaw) / 2
else:
aSmoothTarget = aTarget
rpm = (VEHICLE_MASS * CS.aEgoRaw * CS.out.vEgo) / (.105 * CS.hybridAxle["AXLE_TORQ"]) if CS.hybrid else CS.gasRpm
if CS.out.vEgo < LOW_WINDOW:
cruise = (VEHICLE_MASS * aSmoothTarget * vSmoothTarget) / (.105 * rpm)
if aTarget > 0.5:
cruise = max(cruise, ACCEL_TORQ_START) # give it some oomph
elif CS.out.vEgo < 20 * CV.MPH_TO_MS:
cruise = (VEHICLE_MASS * aTarget * vTarget) / (.105 * rpm)
else:
cruise = (VEHICLE_MASS * aSmoothTarget * vSmoothTarget) / (.105 * rpm)
self.last_torque = max(ACCEL_TORQ_MIN, min(ACCEL_TORQ_MAX, cruise))
torque = math.floor(self.last_torque * 100) / 100 if cruise > ACCEL_TORQ_MIN else 0.
print(f"torq={self.last_torque}, rpm={rpm}. aEgoRaw={CS.aEgoRaw}, aTarget={aTarget}, aSmoothTarget={aSmoothTarget}, vEgo={CS.out.vEgo}, vTarget={vTarget}")
if brake_press:
self.last_torque = None
if self.last_brake is None:
self.last_brake = min(0., brake_target / 2)
else:
lBrake = self.last_brake
tBrake = brake_target
if tBrake < lBrake:
self.last_brake = max(self.last_brake - 0.2, tBrake)
elif tBrake > lBrake:
self.last_brake = min(self.last_brake + 0.2, tBrake)
print(f"last_brake={self.last_brake}, brake_target={brake_target}")
else:
self.last_brake = None
brake = math.floor(self.last_brake * 100) / 100 if self.last_brake is not None else 4
if CS.out.gasPressed or CS.out.brakePressed: # stop sending ACC requests
torque = 0
brake = 4
self.last_aTarget = CS.aEgoRaw
can_sends.append(acc_log(self.packer, actuators.accel, vTarget, long_starting, long_stopping))
can_sends.append(acc_command(self.packer, acc_2_counter + 1, go_req, torque, stop_req, brake, CS.acc_2))
can_sends.append(acc_command_v2(self.packer, acc_2_counter + 1, torque, CS.acc_1))
def lkas_control(self, CS, actuators, can_sends, enabled, hud_alert, jvepilot_state):
if self.prev_frame == CS.frame:
return
self.prev_frame = CS.frame
self.lkas_frame += 1
lkas_counter = CS.lkas_counter
if self.prev_lkas_counter == lkas_counter:
lkas_counter = (self.prev_lkas_counter + 1) % 16 # Predict the next frame
self.prev_lkas_counter = lkas_counter
# *** compute control surfaces ***
# steer torque
new_steer = int(round(actuators.steer * CarControllerParams.STEER_MAX))
apply_steer = apply_toyota_steer_torque_limits(new_steer, self.apply_steer_last,
CS.out.steeringTorqueEps, CarControllerParams)
self.steer_rate_limited = new_steer != apply_steer
low_steer_models = self.car_fingerprint in (CAR.JEEP_CHEROKEE, CAR.PACIFICA_2017_HYBRID, CAR.PACIFICA_2018, CAR.PACIFICA_2018_HYBRID)
if low_steer_models:
    self.gone_fast_yet = self.gone_fast_yet or CS.torq_status > 1
if not self.min_steer_check:
self.moving_fast = True
self.torq_enabled = enabled or low_steer_models
elif low_steer_models:
self.moving_fast = not CS.out.steerError and CS.lkas_active
self.torq_enabled = self.torq_enabled or CS.torq_status > 1
else:
self.moving_fast = CS.out.vEgo > CS.CP.minSteerSpeed # for status message
if CS.out.vEgo > (CS.CP.minSteerSpeed - 0.5): # for command high bit
self.gone_fast_yet = True
self.torq_enabled = True
elif CS.out.vEgo < (CS.CP.minSteerSpeed - 3.0):
self.gone_fast_yet = False
self.torq_enabled = False # < 14.5m/s stock turns off this bit, but fine down to 13.5
lkas_active = self.moving_fast and enabled
if not lkas_active:
apply_steer = 0
self.apply_steer_last = apply_steer
if self.lkas_frame % 10 == 0: # 0.1s period
new_msg = create_lkas_heartbit(self.packer, 0 if jvepilot_state.carControl.useLaneLines else 1, CS.lkasHeartbit)
can_sends.append(new_msg)
if self.lkas_frame % 25 == 0: # 0.25s period
if CS.lkas_car_model != -1:
new_msg = create_lkas_hud(
self.packer, CS.out.gearShifter, lkas_active, hud_alert,
self.hud_count, CS.lkas_car_model)
can_sends.append(new_msg)
self.hud_count += 1
wp_mod_enabled = (self.cachedParams.get('moneyPlane.settings.pandaModEnabled', 5000) == "1")
moving_fast_check = (self.gone_fast_yet and enabled) if wp_mod_enabled else self.gone_fast_yet
new_msg = create_lkas_command(self.packer, int(apply_steer), moving_fast_check, lkas_counter)
can_sends.append(new_msg)
def wheel_button_control(self, CS, can_sends, enabled, gas_resume_speed, jvepilot_state, pcm_cancel_cmd):
button_counter = jvepilot_state.carState.buttonCounter
if button_counter == self.last_button_counter:
return
self.last_button_counter = button_counter
self.button_frame += 1
button_counter_offset = 1
buttons_to_press = []
if pcm_cancel_cmd:
buttons_to_press = ['ACC_CANCEL']
elif not CS.button_pressed(ButtonType.cancel):
follow_inc_button = CS.button_pressed(ButtonType.followInc)
follow_dec_button = CS.button_pressed(ButtonType.followDec)
if jvepilot_state.carControl.autoFollow:
follow_inc_button = CS.button_pressed(ButtonType.followInc, False)
follow_dec_button = CS.button_pressed(ButtonType.followDec, False)
if (follow_inc_button and follow_inc_button.pressedFrames < 50) or \
(follow_dec_button and follow_dec_button.pressedFrames < 50):
jvepilot_state.carControl.autoFollow = False
jvepilot_state.notifyUi = True
elif (follow_inc_button and follow_inc_button.pressedFrames >= 50) or \
(follow_dec_button and follow_dec_button.pressedFrames >= 50):
jvepilot_state.carControl.autoFollow = True
jvepilot_state.notifyUi = True
if enabled and not CS.out.brakePressed:
button_counter_offset = [1, 1, 0, None][self.button_frame % 4]
if button_counter_offset is not None:
if (not CS.out.cruiseState.enabled) or CS.out.standstill: # Stopped and waiting to resume
buttons_to_press = [self.auto_resume_button(CS, gas_resume_speed)]
elif CS.out.cruiseState.enabled: # Control ACC
buttons_to_press = [self.auto_follow_button(CS, jvepilot_state), self.hybrid_acc_button(CS, jvepilot_state)]
buttons_to_press = list(filter(None, buttons_to_press))
if buttons_to_press is not None and len(buttons_to_press) > 0:
new_msg = create_wheel_buttons_command(self.packer, button_counter + button_counter_offset, buttons_to_press)
can_sends.append(new_msg)
def auto_resume_button(self, CS, gas_resume_speed):
if self.auto_resume and CS.out.vEgo <= gas_resume_speed: # Keep trying while under gas_resume_speed
return 'ACC_RESUME'
def hybrid_acc_button(self, CS, jvepilot_state):
if not self.moneyPlaneOpLong:
target = jvepilot_state.carControl.vTargetFuture + 3 * CV.MPH_TO_MS # add extra speed so ACC does the limiting
# Move the adaptive curse control to the target speed
eco_limit = None
if jvepilot_state.carControl.accEco == 1: # if eco mode
eco_limit = self.cachedParams.get_float('jvePilot.settings.accEco.speedAheadLevel1', 1000)
elif jvepilot_state.carControl.accEco == 2: # if eco mode
eco_limit = self.cachedParams.get_float('jvePilot.settings.accEco.speedAheadLevel2', 1000)
if eco_limit:
target = min(target, CS.out.vEgo + (eco_limit * CV.MPH_TO_MS))
else:
target = jvepilot_state.carControl.vMaxCruise
# ACC Braking
diff = CS.out.vEgo - target
if diff > ACC_BRAKE_THRESHOLD and abs(target - jvepilot_state.carControl.vMaxCruise) > ACC_BRAKE_THRESHOLD: # ignore change in max cruise speed
target -= diff
# round to nearest unit
target = round(min(jvepilot_state.carControl.vMaxCruise, target) * self.round_to_unit)
current = round(CS.out.cruiseState.speed * self.round_to_unit)
if target < current and current > self.minAccSetting:
return 'ACC_SPEED_DEC'
elif target > current:
return 'ACC_SPEED_INC'
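# Example (illustrative numbers): with imperial units round_to_unit is MS_TO_MPH,
# so a computed target of 28.8 m/s (~64 mph) against a current set speed of 66 mph
# returns 'ACC_SPEED_DEC'; the button keeps being sent on later button frames until
# the rounded set speed matches the rounded target.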
def auto_follow_button(self, CS, jvepilot_state):
if | |
number of satisfied requests
self.n_satisfied_interval = 0.0
self.n_sat_cloud_interval = 0
self.n_instantiations_interval = 0
# Cache and storage
self.cache_hits = 0
self.storage_hits = 0
self.cache_misses = 0
self.storage_misses = 0
self.serv_hits = 0
self.flow_start = {} # flow_id to start time
self.flow_cloud = {} # True if flow reched cloud
self.flow_service = {} # flow id to service
self.flow_deadline = {} # flow id to deadline
self.flow_labels = {} # flow id to service-associated labels
self.flow_feedback = {} # flow id to service-associated labels
self.service_requests = {} # number of requests per service
self.service_satisfied = {} # number of satisfied requests per service
# Time series for various metrics
self.satrate_times = {}
self.latency_times = {}
self.idle_times = {}
self.node_idle_times = {}
self.deadline_metric_times = {}
self.cloud_sat_times = {}
self.instantiations_times = {}
# Log-specific paths TODO: Maybe set up in the same way that the result output is set up.
# self.logs_path = logs_path
# if LOGGING_PARAMETERS:
# self.logs_path = LOGGING_PARAMETERS['logs_path']
# self.sampling_size = LOGGING_PARAMETERS['sampling_interval']
if cdf:
self.latency_data = collections.deque()
self.css = self.view.service_nodes()
# self.n_services = self.css.items()[0][1].numOfVMs
@inheritdoc(DataCollector)
def execute_service(self, flow_id, service, node, timestamp, is_cloud):
self.flow_cloud[flow_id] = bool(is_cloud)
@inheritdoc(DataCollector)
def reassign_vm(self, node, serviceToReplace, serviceToAdd):
self.n_instantiations_interval += 1
@inheritdoc(DataCollector)
def replacement_interval_over(self, replacement_interval, timestamp):
if self.interval_sess_count == 0:
self.satrate_times[timestamp] = 0.0
else:
self.satrate_times[timestamp] = self.n_satisfied_interval / self.interval_sess_count
print ("Number of requests in interval: " + repr(self.interval_sess_count))
self.instantiations_times[timestamp] = self.n_instantiations_interval
self.n_instantiations_interval = 0
total_idle_time = 0.0
total_cores = 0 # total number of cores in the network
for node, cs in self.css.items():
if cs.is_cloud:
continue
idle_time = cs.getIdleTime(timestamp)
idle_time /= cs.numOfCores
total_idle_time += idle_time
total_cores += cs.numOfCores
if node not in self.node_idle_times.keys():
self.node_idle_times[node] = []
self.node_idle_times[node].append(idle_time)
# self.idle_times[timestamp] = total_idle_time
self.idle_times[timestamp] = total_idle_time / (total_cores * replacement_interval)
if self.n_satisfied_interval == 0:
self.latency_times[timestamp] = 0.0
self.deadline_metric_times[timestamp] = 0.0
self.cloud_sat_times[timestamp] = 0.0
else:
self.latency_times[timestamp] = self.latency_interval / self.n_satisfied_interval
self.deadline_metric_times[timestamp] = self.deadline_metric_interval / self.n_satisfied_interval
self.cloud_sat_times[timestamp] = (1.0 * self.n_sat_cloud_interval) / self.n_satisfied_interval
# self.per_service_idle_times[timestamp] = [avg_idle_times[x]/replacement_interval for x in range(0, self.n_services)]
# Initialise interval counts
self.n_sat_cloud_interval = 0
self.interval_sess_count = 0
self.n_satisfied_interval = 0
self.latency_interval = 0.0
self.deadline_metric_interval = 0.0
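# Example of the per-interval bookkeeping above (hypothetical numbers): if 100
# sessions started in the interval and 80 were satisfied with a summed latency of
# 4.0 s, then satrate_times[t] = 0.8 and latency_times[t] = 0.05, after which the
# interval counters are reset for the next replacement interval.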
@inheritdoc(DataCollector)
def start_session(self, timestamp, receiver, content, labels, flow_id=0, deadline=0):
self.sess_count += 1
self.sess_latency = 0.0
self.flow_start[flow_id] = timestamp
self.flow_deadline[flow_id] = deadline
self.flow_service[flow_id] = content
self.flow_cloud[flow_id] = False
self.interval_sess_count += 1
self.flow_labels[flow_id] = labels
@inheritdoc(DataCollector)
def cache_hit(self, node):
self.cache_hits += 1
@inheritdoc(DataCollector)
def storage_hit(self, node):
self.storage_hits += 1
@inheritdoc(DataCollector)
def cache_miss(self, node):
self.cache_misses += 1
@inheritdoc(DataCollector)
def storage_miss(self, node):
self.storage_misses += 1
@inheritdoc(DataCollector)
def request_hop(self, u, v, main_path=True):
if main_path:
self.sess_latency += self.view.link_delay(u, v)
@inheritdoc(DataCollector)
def content_hop(self, u, v, main_path=True):
if main_path:
self.sess_latency += self.view.link_delay(u, v)
@inheritdoc(DataCollector)
def end_session(self, success=True, timestamp=0, flow_id=0):
sat = False
if flow_id in self.flow_deadline:
if not success:
return
if self.cdf:
self.latency_data.append(self.sess_latency)
if self.flow_deadline[flow_id] >= timestamp:
# Request is satisfied
if self.flow_cloud[flow_id]:
self.n_sat_cloud_interval += 1
self.n_satisfied += 1
# print "Request satisfied"
self.n_satisfied_interval += 1
sat = True
self.latency_interval += timestamp - self.flow_start[flow_id]
self.deadline_metric_interval += self.flow_deadline[flow_id] - timestamp
service = self.flow_service[flow_id]
if service['content'] not in self.service_requests.keys():
self.service_requests[service['content']] = 1
self.service_satisfied[service['content']] = 0
else:
self.service_requests[service['content']] += 1
if sat:
if service['content'] in self.service_satisfied.keys():
self.service_satisfied[service['content']] += 1
else:
self.service_satisfied[service['content']] = 1
del self.flow_deadline[flow_id]
del self.flow_start[flow_id]
del self.flow_service[flow_id]
del self.flow_cloud[flow_id]
else:
pass
@inheritdoc(DataCollector)
def results(self):
# TODO: Maybe revise the below and make it even more customisable
if self.view.model.strategy == 'HYBRID':
res_file = "/hybrid.txt"
overhead_file = "/hybrid_overheads.txt"
elif self.view.model.strategy == 'HYBRIDS_REPO_APP':
res_file = "/hybrid_repo.txt"
r_replicas_file = "/gen_r_replicas.txt"
s_replicas_file = "/gen_s_replicas.txt"
r_labels_dist_file = "/gen_r_labels.txt"
s_labels_dist_file = "/gen_s_labels.txt"
overhead_file = "/gen_overheads.txt"
elif self.view.model.strategy == 'HYBRIDS_PRO_REPO_APP':
res_file = "/hybrid_pro_repo.txt"
r_replicas_file = "/pro_r_replicas.txt"
s_replicas_file = "/pro_s_replicas.txt"
r_labels_dist_file = "/pro_r_labels.txt"
s_labels_dist_file = "/pro_s_labels.txt"
overhead_file = "/pro_overheads.txt"
elif self.view.model.strategy == 'HYBRIDS_RE_REPO_APP':
res_file = "/hybrid_repo.txt"
r_replicas_file = "/re_r_replicas.txt"
s_replicas_file = "/re_s_replicas.txt"
r_labels_dist_file = "/re_r_labels.txt"
s_labels_dist_file = "/re_s_labels.txt"
overhead_file = "/re_overheads.txt"
elif self.view.model.strategy == 'HYBRIDS_SPEC_REPO_APP':
res_file = "/hybrid_repo.txt"
r_replicas_file = "/spec_r_replicas.txt"
s_replicas_file = "/spec_s_replicas.txt"
r_labels_dist_file = "/spec_r_labels.txt"
s_labels_dist_file = "/spec_s_labels.txt"
overhead_file = "/spec_overheads.txt"
if self.view.model.strategy == 'HYBRID':
res = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/hybrid.txt", 'a')
overhead = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/hybrid_overheads.txt", 'a')
elif self.view.model.strategy == 'HYBRIDS_REPO_APP':
res = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/hybrid_repo.txt", 'a')
r_replicas = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/gen_r_replicas.txt", 'a')
s_replicas = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/gen_s_replicas.txt", 'a')
r_labels_dist = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/gen_r_labels.txt", 'a')
s_labels_dist = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/gen_s_labels.txt", 'a')
overhead = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/gen_overheads.txt", 'a')
elif self.view.model.strategy == 'HYBRIDS_PRO_REPO_APP':
res = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/hybrid_pro_repo.txt", 'a')
r_replicas = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/pro_r_replicas.txt", 'a')
s_replicas = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/pro_s_replicas.txt", 'a')
r_labels_dist = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/pro_r_labels.txt", 'a')
s_labels_dist = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/pro_s_labels.txt", 'a')
overhead = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/pro_overheads.txt", 'a')
elif self.view.model.strategy == 'HYBRIDS_RE_REPO_APP':
res = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/hybrid_re_repo.txt", 'a')
r_replicas = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/re_r_replicas.txt", 'a')
s_replicas = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/re_s_replicas.txt", 'a')
r_labels_dist = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/re_r_labels.txt", 'a')
s_labels_dist = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/re_s_labels.txt", 'a')
overhead = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/re_overheads.txt", 'a')
elif self.view.model.strategy == 'HYBRIDS_SPEC_REPO_APP':
res = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/hybrid_spec_repo.txt", 'a')
r_replicas = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/spec_r_replicas.txt", 'a')
s_replicas = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/spec_s_replicas.txt", 'a')
r_labels_dist = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/spec_r_labels.txt", 'a')
s_labels_dist = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/spec_s_labels.txt", 'a')
overhead = open("/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt/spec_overheads.txt", 'a')
results = Tree({'SATISFACTION': 1.0 * self.n_satisfied / self.sess_count})
if self.cdf:
results['CDF'] = cdf(self.latency_data)
# TODO: Possibly create another file, specifically for tracking repo/service-specific performance!!!!!!!!!!!!!!!
per_service_sats = {}
per_node_r_replicas_requested = {}
per_node_s_replicas_stored = {}
per_label_node_storage = {}
per_label_node_requests = {}
# res.write(str(100*self.n_satisfied/self.sess_count) + " " + str(self.n_satisfied) + " " + str(self.sess_count) + ": \n")
for service in self.service_requests.keys():
per_service_sats[service] = 1.0 * self.service_satisfied[service] / self.service_requests[service]
res.write(str(100 * self.service_satisfied[service] / self.service_requests[service]) + ", ")
res.write("\n")
for content in range(0, 1000):
# overhead.write(str(content) + ": ")
msg = dict()
msg['content'] = content
msg['msg_size'] = 1000000
for node in self.view.model.storageSize:
if self.view.storage_nodes()[node].hasMessage(content, []):
msg = self.view.storage_nodes()[node].hasMessage(content, [])
break
if msg['content'] in self.view.model.replication_overheads:
self.view.model.replication_overheads[msg['content']] = self.view.model.replication_overheads[
msg['content']] + \
self.view.model.replication_hops[
msg['content']] * msg['msg_size']
else:
self.view.model.replication_overheads[msg['content']] = self.view.model.replication_hops[
msg['content']] * msg['msg_size']
overhead.write(str(self.view.replication_overhead(content)) + ", ")
self.view.model.replication_hops[msg['content']] = 0
overhead.write("\n")
if self.view.model.strategy != 'HYBRID':
for node in self.view.model.storageSize:
per_node_r_replicas_requested[node] = self.view.replications_requests(node)
r_replicas.write(str(per_node_r_replicas_requested[node]) + ", ")
self.view.model.replications_from[node] = 0
r_replicas.write("\n")
for node in self.view.model.storageSize:
per_node_s_replicas_stored[node] = self.view.replications_destination(node)
s_replicas.write(str(per_node_s_replicas_stored[node]) + ", ")
self.view.model.replications_to[node] = 0
s_replicas.write("\n")
# TODO: Modify the following, to include ALL NODES, no matter what!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
for label in self.view.model.labels_sources:
# r_labels_dist.write(label + ": ")
if label in self.view.model.request_labels_nodes:
for node in self.view.model.request_labels_nodes[label]:
per_label_node_requests[node] = self.view.model.request_labels_nodes[label][node]
r_labels_dist.write(str(per_label_node_requests[node]) + ", ")
r_labels_dist.write("\n ")
# r_labels_dist.write("r\n")
for label in self.view.model.labels_sources:
# s_labels_dist.write(label + ": ")
for node in self.view.labels_sources([label]):
per_label_node_storage[node] = self.view.labels_sources([label])[node]
s_labels_dist.write(str(per_label_node_storage[node]) + ", ")
s_labels_dist.write("\n ")
results['PER_SERVICE_SATISFACTION'] = per_service_sats
results['PER_SERVICE_REQUESTS'] = self.service_requests
results['PER_SERVICE_SAT_REQUESTS'] = self.service_satisfied
results['SAT_TIMES'] = self.satrate_times
results['IDLE_TIMES'] = self.idle_times
results['NODE_IDLE_TIMES'] = self.node_idle_times
results['LATENCY'] = self.latency_times
results['DEADLINE_METRIC'] = self.deadline_metric_times
results['CLOUD_SAT_TIMES'] = self.cloud_sat_times
results['INSTANTIATION_OVERHEAD'] = self.instantiations_times
print "Printing Sat. rate times:"
for key in sorted(self.satrate_times):
print (repr(key) + " " + repr(self.satrate_times[key]))
print "Printing Idle times:"
for key in sorted(self.idle_times):
print (repr(key) + " " + repr(self.idle_times[key]))
# results['VMS_PER_SERVICE'] = self.vms_per_service
res.close()
return results
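# A minimal, hedged sketch (not part of the collector above; names are hypothetical):
# the replication-overhead bookkeeping in results() reduces to "overhead accrues as
# replication hops times message size, per content".
def _accumulate_replication_overhead(overheads, hops, content, msg_size):
    # Mirror of the accumulation in results(): add hops*msg_size for this content.
    overheads[content] = overheads.get(content, 0) + hops.get(content, 0) * msg_size
    return overheads
# Example: _accumulate_replication_overhead({}, {7: 3}, 7, 1000000) returns {7: 3000000}.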
# @register_log_writer('REPO_STORAGE')
@register_data_collector('REPO_STATS_W_LATENCY')
class RepoStatsLatencyCollector(DataCollector):
"""Data collector measuring latency, i.e. the delay taken to delivery a
content.
"""
def __init__(self, view, logs_path='/home/chrisys/Icarus-repos/IcarusEdgeSim/examples/repos-mgmt', sampling_interval=500, cdf=False):
"""Constructor
Parameters
----------
view : NetworkView
The network view instance
cdf : bool, optional
If *True*, also collects a cdf of the latency
"""
self.cdf = cdf
self.view = view
self.sess_count = 0
self.interval_sess_count = 0
self.latency_interval = 0.0
self.deadline_metric_interval = 0.0
self.n_satisfied = 0.0 # number of satisfied requests
self.n_satisfied_interval = 0.0
self.n_sat_cloud_interval = 0
self.n_instantiations_interval = 0
# Cache and storage
self.cache_hits = 0
self.storage_hits = 0
self.cache_misses = 0
self.storage_misses = 0
self.serv_hits = 0
self.flow_start = {} # flow_id to start time
self.flow_cloud = {} # True if flow reached cloud
self.flow_service = {} # flow id to service
self.flow_deadline = {} # flow id to deadline
self.flow_labels = {} # flow id to service-associated labels
self.flow_feedback = {} # flow id to service-associated feedback
self.service_requests = {} # number of requests per service
self.service_satisfied = {} # number of satisfied requests per service
# Time series for various metrics
self.satrate_times = {}
self.latency_times = {}
self.idle_times = {}
self.node_idle_times = {}
self.deadline_metric_times = {}
self.cloud_sat_times = {}
self.instantiations_times = {}
# Log-specific variables TODO: Maybe set up in the same way that the result output is set up.
# self.logs_path = self.view.get_logs_path
# self.sampling_size =
snapshot['SnapshotType'] == 'shared' and pattern == 'ALL_SNAPSHOTS' and snapshot['Engine'] in _SUPPORTED_ENGINES:
            filtered[get_snapshot_identifier(snapshot)] = {
                'Arn': snapshot['DBClusterSnapshotIdentifier'], 'StorageEncrypted': snapshot['StorageEncrypted'], 'DBClusterIdentifier': snapshot['DBClusterIdentifier']}
            if snapshot['StorageEncrypted'] is True:
                filtered[get_snapshot_identifier(
                    snapshot)]['KmsKeyId'] = snapshot['KmsKeyId']
    return filtered


def get_own_snapshots_dest(pattern, response):
    # Returns a dict with local snapshots, filtered by pattern, with DBClusterSnapshotIdentifier as key and Arn, Status as attributes
    filtered = {}
    for snapshot in response['DBClusterSnapshots']:

        if snapshot['SnapshotType'] == 'manual' and re.search(pattern, snapshot['DBClusterSnapshotIdentifier']) and snapshot['Engine'] in _SUPPORTED_ENGINES:
            filtered[snapshot['DBClusterSnapshotIdentifier']] = {
                'Arn': snapshot['DBClusterSnapshotArn'], 'Status': snapshot['Status'], 'StorageEncrypted': snapshot['StorageEncrypted'], 'DBClusterIdentifier': snapshot['DBClusterIdentifier']}

            if snapshot['StorageEncrypted'] is True:
                filtered[snapshot['DBClusterSnapshotIdentifier']
                         ]['KmsKeyId'] = snapshot['KmsKeyId']

        elif snapshot['SnapshotType'] == 'manual' and pattern == 'ALL_SNAPSHOTS' and snapshot['Engine'] in _SUPPORTED_ENGINES:
            filtered[snapshot['DBClusterSnapshotIdentifier']] = {
                'Arn': snapshot['DBClusterSnapshotArn'], 'Status': snapshot['Status'], 'StorageEncrypted': snapshot['StorageEncrypted'], 'DBClusterIdentifier': snapshot['DBClusterIdentifier']}

            if snapshot['StorageEncrypted'] is True:
                filtered[snapshot['DBClusterSnapshotIdentifier']
                         ]['KmsKeyId'] = snapshot['KmsKeyId']

    return filtered


def copy_local(snapshot_identifier, snapshot_object):
    client = boto3.client('rds', region_name=_REGION)

    tags = [{
        'Key': 'CopiedBy',
        'Value': 'Snapshot Tool for Aurora'
    }]

    if snapshot_object['StorageEncrypted']:
        logger.info('Copying encrypted snapshot %s locally' %
                    snapshot_identifier)

        response = client.copy_db_cluster_snapshot(
            SourceDBClusterSnapshotIdentifier=snapshot_object['Arn'],
            TargetDBClusterSnapshotIdentifier=snapshot_identifier,
            KmsKeyId=_KMS_KEY_SOURCE_REGION,
            Tags=tags)

    else:
        logger.info('Copying snapshot %s locally' % snapshot_identifier)

        response = client.copy_db_cluster_snapshot(
            SourceDBClusterSnapshotIdentifier=snapshot_object['Arn'],
            TargetDBClusterSnapshotIdentifier=snapshot_identifier,
            Tags=tags)

    return response


def copy_remote(snapshot_identifier, snapshot_object):
    client = boto3.client('rds', region_name=_DESTINATION_REGION)

    if snapshot_object['StorageEncrypted']:
        logger.info('Copying encrypted snapshot %s to remote region %s' %
                    (snapshot_object['Arn'], _DESTINATION_REGION))
timestep=timestep,
lower=lower,
upper=upper,
minsize=conf_idfy["minsize"],
maxsize=conf_idfy["maxsize"],
split_levels=conf_split["levels"],
split_seed_minsize=conf_split["seed_minsize"],
split_seed_minstrength=conf_split["seed_minstrength"],
topo_filter_apply=conf_topo["filter_apply"],
topo_filter_mode=conf_topo["filter_mode"],
topo_filter_threshold=conf_topo["filter_threshold"],
topo_filter_min_overlap=conf_topo["filter_min_overlap"],
topo_fld=conf_topo["fld"],
silent=silent,
grid=grid,
)
if timings_measure:
timings["core_lst"] = [timer() - timings.pop("core_start")]
if not silent:
log.info("identified {} '{}' features".format(len(new_features), name))
if timings_measure:
timings["core_start"] = timer()
postproc_features(new_features, flds_named, infile, lon, lat, conf_in)
if conf_idfy["grow_features_n"]:
const = default_constants(nx=conf_in["nx"], ny=conf_in["ny"])
features_grow(
conf_idfy["grow_features_n"],
new_features,
const,
inplace=True,
retain_orig=True,
)
if timings_measure:
timings["core_lst"] = [timer() - timings.pop("core_start")]
return new_features, timings
def get_infile(conf_in, timestep):
for timesteps in conf_in["infiles_tss"]:
if timestep in timesteps:
infile = conf_in["infiles_tss"][timesteps]
break
else:
err = "no infile found for timestep {}:\n{}".format(
timestep, pformat(conf_in["infiles_tss"])
)
raise Exception(err)
return infile
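# Hedged usage sketch for get_infile: based on the loop above, conf_in["infiles_tss"]
# is assumed to map a collection of timesteps to the file covering them. The key and
# file name below are hypothetical.
#
#     conf_in = {"infiles_tss": {(2007102300, 2007102306): "input_2007102300.nc"}}
#     get_infile(conf_in, 2007102306)   # -> "input_2007102300.nc"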
def import_fields(infile, lon, lat, conf_in, conf_preproc):
"""Read a field from disk (netCDF, npz) and preprocess it."""
# Fetch some arguments
informat = conf_in["input_format"]
fld_name = conf_in["varname"]
transpose = conf_in["infield_transpose"]
fld_mirror = conf_in["infield_mirror"]
level = conf_in["infield_lvl"]
reduce_stride = conf_in["reduce_grid_stride"]
reduce_mode = conf_in["reduce_grid_mode"]
# Read field (or compute raw fields in special cases)
fld = read_field(
infile,
fld_name,
level,
transpose,
fld_mirror,
informat,
reduce_stride,
reduce_mode,
lon,
lat,
)
flds_named = {fld_name: fld}
# Masking
mask_name = conf_preproc["mask_varname"]
if mask_name:
fld_mask = read_field(
infile,
mask_name,
level,
transpose,
conf_preproc["mask_mirror"],
informat,
reduce_stride,
reduce_mode,
lon,
lat,
)
mask_field(
fld,
fld_mask,
conf_preproc["mask_threshold_gt"],
conf_preproc["mask_threshold_lt"],
)
# Boundary filter
trim_boundaries(fld, conf_preproc["trim_boundaries_n"])
# Add refval
if conf_preproc["add_refval"]:
fld += conf_preproc["refval"]
replace_varname = conf_in["replace_varname"]
if replace_varname:
# Read field used to replace feature values in the end
informat = conf_in["input_format"]
fld_name = conf_in["replace_varname"]
level = conf_in["infield_lvl"]
transpose = conf_in["infield_transpose"]
mirror = False
fld_repl = read_field(
infile,
replace_varname,
level,
transpose,
mirror,
informat,
reduce_stride,
reduce_mode,
lon,
lat,
)
flds_named[replace_varname] = fld_repl
return flds_named
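# Note: import_fields returns a dict keyed by variable name; it always contains
# conf_in["varname"], and additionally conf_in["replace_varname"] when that is set.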
def compute_front_fields(infile, conf_in, conf_preproc, conf_comp_fronts):
"""Read a field from disk (netCDF, npz) and preprocess it."""
# Fetch some input arguments
fld_name = conf_in["varname"]
informat = conf_in["input_format"]
level = conf_in["infield_lvl"]
name_lon, name_lat = conf_in["lonlat_names"]
reduce_mode = conf_in["reduce_grid_mode"]
reduce_stride = conf_in["reduce_grid_stride"]
# Fetch some preprocessing arguments
add_refval = conf_preproc["add_refval"]
mask_mirror = conf_preproc["mask_mirror"]
mask_name = conf_preproc["mask_varname"]
mask_threshold_gt = conf_preproc["mask_threshold_gt"]
mask_threshold_lt = conf_preproc["mask_threshold_lt"]
refval = conf_preproc["refval"]
trim_boundaries_n = conf_preproc["trim_boundaries_n"]
if informat != "field":
err = ("input format must be 'field' to compute fronts, not '{}'").format(
informat
)
raise ValueError(err)
# Read raw input fields
iflds = fronts_read_raw_fields(
infile,
level,
name_lon=name_lon,
name_lat=name_lat,
name_p=conf_comp_fronts["var_name_p"],
name_t=conf_comp_fronts["var_name_t"],
name_qv=conf_comp_fronts["var_name_qv"],
name_u=conf_comp_fronts["var_name_u"],
name_v=conf_comp_fronts["var_name_v"],
uv_stag=conf_comp_fronts["var_uv_stag"],
)
if reduce_stride > 1:
# Reduce grid resolution by striding
for name, fld in iflds.items():
iflds[name] = reduce_grid_resolution(fld, reduce_stride, reduce_mode)
# Compute front fields
kwas = dict(iflds)
# SR_TMP < TODO cleaner solution
# kwas.update(conf_comp_fronts)
kwas.update(
{
k: v
for k, v in conf_comp_fronts.items()
if not k.startswith("var_name_") and k != "var_uv_stag" # SR_TMP
}
)
# SR_TMP >
# kwas["print_prefix"] = ""
# kwas["verbose"] = False
oflds = identify_fronts(**kwas)
_fmt = lambda s: "{}_{}".format(s, conf_comp_fronts["tvar"])
oflds = {_fmt(k): v for k, v in oflds.items()}
fld = oflds[fld_name]
# Masking
if mask_name:
fld_mask = oflds[mask_name]
if mask_mirror:
fld_mask = -fld_mask
mask_field(fld, fld_mask, mask_threshold_gt, mask_threshold_lt)
# Add reference value
if add_refval:
fld += refval
# Boundary filter
trim_boundaries(fld, trim_boundaries_n)
return oflds
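# Note: the fields returned by compute_front_fields are keyed as "<name>_<tvar>"
# (see _fmt above), so the lookups of conf_in["varname"] and conf_preproc["mask_varname"]
# are expected to already carry that suffix.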
def read_field(
infile,
varname,
level,
transpose,
mirror,
input_format="field",
reduce_stride=1,
reduce_mode="mean",
lon=None,
lat=None,
):
# Read field
if "field" in input_format:
fld = read_field__field(infile, varname, level)
if reduce_stride > 1:
fld = reduce_grid_resolution(fld, reduce_stride, reduce_mode)
elif "list" in input_format:
if lon is None or lat is None:
err = "must pass lon, lat to read list input"
raise ValueError(err)
# SR_TMP<
if reduce_stride > 1:
# Note: original lon, lat not available
raise NotImplementedError("reduce stride for list input")
# SR_TMP>
fld = read_field__list(infile, varname, lon, lat)
else:
err = "invalid input format '{}'".format(input_format)
raise ValueError(err)
# Transpose if necessary
if transpose:
fld = fld.T
# Mirror if necessary
if mirror:
fld = -fld
return fld
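# Note: read_field dispatches on input_format ("field" -> gridded 2D/3D arrays via
# read_field__field, "list" -> sparse point lists regridded via read_field__list),
# then applies the optional transpose and mirror flags, in that order.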
def read_field__field(infile, varname, level):
"""Read normal 2D field."""
# Read field ...
if infile.endswith(".npz"):
# ...from numpy archive
with np.load(infile) as fi:
fld = fi[varname]
elif infile.endswith(".h5"):
# ...from hdf5 archive
with h5py.File(infile, "r") as fi:
fld = fi[varname][:]
else:
# ... from netcdf file
try:
with nc4.Dataset(infile, "r") as fi:
vi = fi.variables[varname]
fld = vi[:]
if vi.dimensions[0] == "time":
fld = fld[0]
except Exception as e:
err = ("error reading field '{}' from file '{}':\n{}({})").format(
varname, infile, e.__class__.__name__, e
)
raise Exception(err)
if np.ma.is_masked(fld):
fld = fld.filled()
# Check dimensions
if len(fld.shape) != 2:
if len(fld.shape) == 3:
if level is None:
err = "must pass level for 3d input field: {}".format(varname)
raise ValueError(err)
fld = fld[level, :, :]
else:
err = "wrong number of dimensions: {} != 2 {}".format(
len(fld.shape), fld.shape
)
raise Exception(err)
return fld
def read_field__list(infile, varname, lon, lat):
"""Read point list and initialize field."""
# Read field ...
if infile.endswith(".npz"):
# ... from numpy archive
with np.load(infile) as fi:
pts_lon, pts_lat, pts_fld = fi["lon"], fi["lat"], fi[varname]
fld = point_list_to_field(pts_lon, pts_lat, pts_fld, lon, lat)
elif infile.endswith(".h5"):
# .. from hdf5 file
with h5py.File(infile, "r") as fi:
pts_lon, pts_lat, pts_fld = fi["lon"], fi["lat"], fi[varname]
fld = point_list_to_field(pts_lon, pts_lat, pts_fld, lon, lat)
else:
# ... from netcdf file
fld = nc_read_var_list(infile, varname, lon, lat)
return fld
def mask_field(fld, fld_mask, lower, upper):
if upper is None:
mask = fld_mask > lower
elif lower is None:
mask = fld_mask < upper
else:
mask = (fld_mask > lower) & (fld_mask < upper)
fld[~mask] = 0
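# Worked example: with lower=0.3 and upper=None, mask_field keeps fld where
# fld_mask > 0.3 and zeroes the rest, e.g.
#     fld = np.array([1.0, 2.0, 3.0]); fld_mask = np.array([0.1, 0.5, 0.9])
#     mask_field(fld, fld_mask, 0.3, None)   # fld becomes [0.0, 2.0, 3.0]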
def trim_boundaries(fld, n):
if n > 0:
fld[:n, :] = 0
fld[:, :n] = 0
fld[-n:, :] = 0
fld[:, -n:] = 0
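# Worked example: trim_boundaries(fld, 1) zeroes a one-cell frame around the domain,
# so for a 4x4 field only the inner 2x2 block keeps its original values.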
def postproc_features(features, flds_named, infile, lon, lat, conf_in):
# If field is mirrored, reverse values
if conf_in["infield_mirror"]:
for feature in features:
feature.mirror_values()
if conf_in["replace_varname"]:
# Replace feature values
fld = flds_named[conf_in["replace_varname"]]
for feature in features:
feature.replace_values(fld)
if conf_in["minsize_km2"] and conf_in["minsize_km2"] > 1:
for feature in [f for f in features]:
if feature.area_lonlat(lon, lat, "km2") < conf_in["minsize_km2"]:
features.remove(feature)
if conf_in["maxsize_km2"] and conf_in["maxsize_km2"] > 0:
for feature in [f for f in features]:
if feature.area_lonlat(lon, lat, "km2") > conf_in["maxsize_km2"]:
features.remove(feature)
def identify_cyclones(
infile, name, conf_in, conf_preproc, timestep, anti=False
):
fld_name = conf_in["varname"]
inifile = conf_in["cycl_inifile"]
if inifile is None:
raise Exception("must pass cyclones inifile")
# SR_TMP< TODO use command line arguments
fact = 0.01 # convert Pa to hPa or geopotential to gpdm
# SR_TMP>
# Set up config: merge inifile config into default config
conf_def = cycl_cfg.get_config_default()
conf_ini = cycl_cfg.get_config_inifile(inifile)
conf = cycl_cfg.merge_configs([conf_def, conf_ini])
conf["IDENTIFY"]["timings-identify"] = None
conf["IDENTIFY"]["datetime"] = timestep
def read_topo(file, var):
with nc4.Dataset(file) as fi:
fld = fi.variables[var][:]
lon = fi.variables[conf_in["lonlat_names"][0]][:]
lat = fi.variables[conf_in["lonlat_names"][1]][:]
if len(fld.shape) == 3:
if fld.shape[0] == 1:
fld = fld[0]
else:
raise Exception(f"topo field {var} has unexpected shape: {fld.shape}")
if len(lon.shape) == 1:
topo_lonlat_shape = (lon.size, lat.size)
else:
topo_lonlat_shape = lon.shape
if fld.shape == topo_lonlat_shape[::-1]:
fld = fld.T
return Field2D(fld, lon=lon, lat=lat, name=var)
if conf["IDENTIFY"]["topo-cutoff-level"] < 0:
topo = None
else:
topo = read_topo(
f"{conf['GENERAL']['topofile-path'] or '.'}/{conf['GENERAL']['topofile']}",
conf["GENERAL"]["topo-field-name"],
)
# Fetch some config values
level = conf_in["infield_lvl"]
if level is None:
level = conf["GENERAL"]["input-field-level"]
fld_name = conf_in["varname"]
if fld_name is None:
fld_name = conf["GENERAL"]["input-field-name"]
# Read pressure or height field
fld = read_input_field_lonlat(
infile,
fld_name,
level,
conf_in,
conv_fact=fact,
crop=conf_preproc["crop_domain_n"],
)
refval = conf_preproc["refval"]
# if refval is not None:
# refval *= fact
if conf_preproc["add_refval"]:
fld += refval
if anti:
# Mirror field around reference value (e.g., 1015 hPa for SLP)
if refval is None:
err = (
"must provide reference value to identify anticyclones "
"('refval' in conf_preproc)"
)
raise Exception(err)
_m = ~np.isnan(fld)
fld[_m] = -1 * (fld[_m] - refval) + refval
# SR_TMP<
if conf_preproc["trim_boundaries_n"] > 0:
err = "cyclones: --trim-boundaries; consider --shrink-domain"
raise NotImplementedError(err)
if conf_preproc["crop_domain_n"] > 0:
pass
# SR_TMP>
# Identify cyclone features
_r = identify_cyclones_core(fld, topo, conf["IDENTIFY"])
cyclones = _r["cyclones"]
depressions = _r["depressions"]
# Add type code to cyclone features
tcode = DEFAULT_TYPE_CODES[name]
for cyclone in cyclones:
assert str(cyclone._id).startswith(str(timestep))
_core = str(cyclone._id)[len(str(timestep)) :]
cyclone._id = int(str(timestep) + str(tcode) + _core)
# Plot cyclones, depressions, and/or extrema
if conf["GENERAL"]["make-plots"]:
plot_cyclones_depressions_extrema(infile, cyclones, depressions, fld, conf)
if anti:
# Mirror field around reference value (e.g., 1015 hPa for SLP)
_m = ~np.isnan(fld)
fld[_m] | |
following verifications are available for this
keyword. They are exposed as additional methods on this object.
- :meth:`energy_cutoff`
- :meth:`forces`
- :meth:`kpoints`
- :meth:`pressure_residual`
- :meth:`stress_tensor`
Returns:
list: Nearest-neighbor bond lengths of the relaxed structure per ordered pair of species (Ai, Aj) with j greater than or equal to i.
Examples:
You can expect the *content* of the result to be something like:
`nbondxx=1.2599,1.0911,1.0911,1.7818,1.2599,1.7818`
"""
return self._lazy_load("nbondxx")
@property
def node_CPU_Cores(self):
"""available CPU cores (`optional`). Units: ``.
Returns:
float: Information about the number of cores in the node/cluster where the calculation was performed.
Examples:
You can expect the *content* of the result to be something like:
`node_CPU_Cores=12`
"""
return self._lazy_load("node_CPU_Cores")
@property
def node_CPU_MHz(self):
"""CPU rate (`optional`). Units: `Megahertz`.
Returns:
float: Information about the CPU speed in the node/cluster where the calculation was performed.
Examples:
You can expect the *content* of the result to be something like:
`node_CPU_MHz=12`
"""
return self._lazy_load("node_CPU_MHz")
@property
def node_CPU_Model(self):
"""CPU model (`optional`). Units: ``.
Returns:
str: Information about the CPU model in the node/cluster where the calculation was performed.
Examples:
You can expect the *content* of the result to be something like:
`node_CPU_Model=12`
"""
return self._lazy_load("node_CPU_Model")
@property
def node_RAM_GB(self):
"""available RAM (`optional`). Units: `Gigabytes`.
Returns:
float: Information about the RAM in the node/cluster where the calculation was performed.
Examples:
You can expect the *content* of the result to be something like:
`node_RAM_GB=12`
"""
return self._lazy_load("node_RAM_GB")
@property
def nspecies(self):
"""species count (`mandatory`). Units: ``.
Returns:
float: Returns the number of species in the system (e.g., binary = 2, ternary = 3, etc.).
Examples:
You can expect the *content* of the result to be something like:
`nspecies=3`
"""
return self._lazy_load("nspecies")
@property
def positions_cartesian(self):
"""relaxed absolute positions (`mandatory`). Units: `Å`.
.. warning:: This keyword is still listed as development level. Use it
knowing that it is subject to change or removal.
.. note:: The following verifications are available for this
keyword. They are exposed as additional methods on this object.
- :meth:`energy_cutoff`
- :meth:`forces`
- :meth:`kpoints`
- :meth:`pressure_residual`
- :meth:`stress_tensor`
Returns:
numpy.ndarray: Final Cartesian positions (xi,xj,xk) in the notation of the code.
Examples:
You can expect the *content* of the result to be something like:
`positions_cartesian=0,0,0;18.18438,0,2.85027;...`
"""
return self._lazy_load("positions_cartesian")
@property
def positions_fractional(self):
"""relaxed relative positions (`mandatory`). Units: ``.
.. warning:: This keyword is still listed as development level. Use it
knowing that it is subject to change or removal.
.. note:: The following verifications are available for this
keyword. They are exposed as additional methods on this object.
- :meth:`energy_cutoff`
- :meth:`forces`
- :meth:`kpoints`
- :meth:`pressure_residual`
- :meth:`stress_tensor`
Returns:
numpy.ndarray: Final fractional positions (xi,xj,xk) with respect to the unit cell as specified in $geometry.
Examples:
You can expect the *content* of the result to be something like:
`positions_fractional=0,0,0;0.25,0.25,0.25;...`
"""
return self._lazy_load("positions_fractional")
@property
def pressure(self):
"""external pressure (`mandatory`). Units: `kbar`.
Returns:
float: Returns the target pressure selected for the simulation.
Examples:
You can expect the *content* of the result to be something like:
`pressure=10.0`
"""
return self._lazy_load("pressure")
@property
def pressure_residual(self):
"""residual pressure (`mandatory`). Units: `kbar`.
.. warning:: This keyword is still listed as development level. Use it
knowing that it is subject to change or removal.
Returns:
float: Returns the external pressure achieved by the simulation.
Examples:
You can expect the *content* of the result to be something like:
`pressure_residual=10.0`
"""
return self._lazy_load("pressure_residual")
@property
def prototype(self):
"""original prototype (`mandatory`). Units: ``.
Returns:
str: Returns the AFLOW unrelaxed prototype which was used for the calculation.
Examples:
You can expect the *content* of the result to be something like:
`prototype=T0001.A2BC`
"""
return self._lazy_load("prototype")
@property
def scintillation_attenuation_length(self):
"""attenuation length (`mandatory`). Units: `cm`.
.. note:: The following verifications are available for this
keyword. They are exposed as additional methods on this object.
- :meth:`energy_cutoff`
- :meth:`kpoints`
Returns:
float: Returns the scintillation attenuation length of the compound in cm.
Examples:
You can expect the *content* of the result to be something like:
`scintillation_attenuation_length=2.21895`
"""
return self._lazy_load("scintillation_attenuation_length")
@property
def sg(self):
"""space group of compound (`mandatory`). Units: ``.
.. note:: The following verifications are available for this
keyword. They are exposed as additional methods on this object.
- :meth:`energy_cutoff`
- :meth:`forces`
- :meth:`kpoints`
- :meth:`stress_tensor`
Returns:
list: Evolution of the space group of the compound. The first, second and third string represent space group name/number before the first, after the first, and after the last relaxation of the calculation.
Examples:
You can expect the *content* of the result to be something like:
`sg=Fm-3m#225,Fm-3m#225,Fm-3m#225`
"""
return self._lazy_load("sg")
@property
def sg2(self):
"""refined space group of compound (`mandatory`). Units: ``.
.. note:: The following verifications are available for this
keyword. They are exposed as additional methods on this object.
- :meth:`energy_cutoff`
- :meth:`forces`
- :meth:`kpoints`
- :meth:`stress_tensor`
Returns:
list: Evolution of the space group of the compound. The first, second and third string represent space group name/number before the first, after the first, and after the last relaxation of the calculation.
Examples:
You can expect the *content* of the result to be something like:
`sg2=Fm-3m#225,Fm-3m#225,Fm-3m#225`
"""
return self._lazy_load("sg2")
@property
def spacegroup_orig(self):
"""original space group number (`mandatory`). Units: ``.
Returns:
float: Returns the spacegroup number of the original-unrelaxed structure before the calculation.
Examples:
You can expect the *content* of the result to be something like:
`spacegroup_orig=225`
"""
return self._lazy_load("spacegroup_orig")
@property
def spacegroup_relax(self):
"""relaxed space group number (`mandatory`). Units: ``.
.. note:: The following verifications are available for this
keyword. They are exposed as additional methods on this object.
- :meth:`energy_cutoff`
- :meth:`forces`
- :meth:`kpoints`
- :meth:`stress_tensor`
Returns:
float: Returns the spacegroup number of the relaxed structure after the calculation.
Examples:
You can expect the *content* of the result to be something like:
`spacegroup_relax=225`
"""
return self._lazy_load("spacegroup_relax")
@property
def species(self):
"""atomic species (`mandatory`). Units: ``.
Returns:
list: Species of the atoms in this material.
Examples:
You can expect the *content* of the result to be something like:
`species=Y,Zn,Zr`
"""
return self._lazy_load("species")
@property
def species_pp(self):
"""pseudopotential of chemical speciess (`mandatory`). Units: ``.
Returns:
list: Pseudopotentials of the atomic species.
Examples:
You can expect the *content* of the result to be something like:
`species_pp=Y,Zn,Zr`
"""
return self._lazy_load("species_pp")
@property
def species_pp_ZVAL(self):
"""valence atoms per species (`optional`). Units: `electrons`.
Returns:
list: Returns the number of valence electrons of the atomic species.
Examples:
You can expect the *content* of the result to be something like:
`species_pp_ZVAL=3`
"""
return self._lazy_load("species_pp_ZVAL")
@property
def species_pp_version(self):
"""pseudopotential version and species (`mandatory`). Units: ``.
Returns:
list: Species of the atoms, pseudopotentials species, and pseudopotential versions.
Examples:
You can expect the *content* of the result to be something like:
`species_pp_version=Y,Zn,Zr`
"""
return self._lazy_load("species_pp_version")
@property
def spinD(self):
"""spin decomposition over unit cell (`mandatory`). Units: `μ<sub>B</sub>`.
.. note:: The following verifications are available for this
keyword. They are exposed as additional methods on this object.
- :meth:`energy_cutoff`
- :meth:`kpoints`
Returns:
list: For spin polarized calculations, the spin decomposition over the atoms of the cell.
Examples:
You can expect the *content* of the result to be something like:
`spinD=0.236,0.236,-0.023,1.005`
"""
return self._lazy_load("spinD")
@property
def spinF(self):
"""magnetization of unit cell at Fermi level (`mandatory`). Units: `μ<sub>B</sub>`.
.. note:: The following verifications are available for this
keyword. They are exposed as additional methods on this object.
- :meth:`energy_cutoff`
- :meth:`kpoints`
Returns:
float: For spin polarized calculations, the magnetization | |
pipeline is not None else self.pipeline
kvy = kvy if kvy is not None else self.kvy
tvy = tvy if tvy is not None else self.tvy
exc = exc if exc is not None else self.exc
rvy = rvy if rvy is not None else self.rvy
while True: # continuous stream processing never stop
try:
done = yield from self.msgParsator(ims=ims,
framed=framed,
pipeline=pipeline,
kvy=kvy,
tvy=tvy,
exc=exc,
rvy=rvy)
except kering.SizedGroupError as ex: # error inside sized group
# processOneIter already flushed group so do not flush stream
if logger.isEnabledFor(logging.DEBUG):
logger.exception("Parser msg extraction error: %s\n", ex.args[0])
else:
logger.error("Parser msg extraction error: %s\n", ex.args[0])
except (kering.ColdStartError, kering.ExtractionError) as ex: # some extraction error
if logger.isEnabledFor(logging.DEBUG):
logger.exception("Parser msg extraction error: %s\n", ex.args[0])
else:
logger.error("Parser msg extraction error: %s\n", ex.args[0])
del ims[:] # delete rest of stream to force cold restart
except (kering.ValidationError, Exception) as ex: # non Extraction Error
# Non extraction errors happen after successfully extracted from stream
# so we don't flush rest of stream just resume
if logger.isEnabledFor(logging.DEBUG):
logger.exception("Parser msg non-extraction error: %s\n", ex.args[0])
else:
logger.error("Parser msg non-extraction error: %s\n", ex.args[0])
yield
return True # should never return
def msgParsator(self, ims=None, framed=True, pipeline=False, kvy=None, tvy=None, exc=None, rvy=None, vry=None):
"""
Returns generator that upon each iteration extracts and parses msg
with attached crypto material (signature etc) from incoming message
stream, ims, and dispatches processing of message with attachments.
Uses .ims when ims is not provided.
Iterator yields when not enough bytes in ims to finish one msg plus
attachments. Returns (which raises StopIteration) when finished.
Parameters:
ims (bytearray) of serialized incoming message stream.
May contain one or more sets each of a serialized message with
attached cryptographic material such as signatures or receipts.
framed (bool) True means ims contains only one frame of msg plus
counted attachments instead of stream with multiple messages
pipeline (bool) True means use pipeline processor to process
ims msgs when stream includes pipelined count codes.
kvy (Kevery) route KERI KEL message types to this instance
tvy (Tevery) route TEL message types to this instance
exc (Exchanger) route EXN message types to this instance
rvy (Revery): reply (RPY) message handler
vry (Verifier) ACDC credential processor
Logic:
Currently only support couters on attachments not on combined or
on message
Attachments must all have counters so know if txt or bny format for
attachments. So even when framed==True must still have counter.
Do While loop
sniff to set up first extraction
raise exception and flush full stream if stream start is a counter
must be message
extract message
sniff for counter
if group counter extract and discard but keep track of count
so if error while processing attachments then only need to flush
attachment count not full stream.
"""
if ims is None:
ims = self.ims
while not ims:
yield
cold = self.sniff(ims) # check for spurious counters at front of stream
if cold in (Colds.txt, Colds.bny): # not message error out to flush stream
# replace with pipelining here once CESR message format supported.
raise kering.ColdStartError("Expecting message counter tritet={}"
"".format(cold))
# Otherwise it's a message cold start
while True: # extract and deserialize message from ims
try:
sadder = Sadder(raw=ims)
except kering.ShortageError as ex: # need more bytes
yield
else: # extracted successfully
del ims[:sadder.size] # strip off event from front of ims
break
sigers = [] # list of Siger instances of attached indexed controller signatures
wigers = [] # list of Siger instance of attached indexed witness signatures
cigars = [] # List of cigars to hold nontrans rct couplets
# List of tuples from extracted transferable receipt (vrc) quadruples
trqs = [] # each converted quadruple is (prefixer, seqner, diger, siger)
# List of tuples from extracted transferable indexed sig groups
tsgs = [] # each converted group is tuple of (i,s,d) triple plus list of sigs
# List of tuples from extracted signer seals sig groups
ssgs = [] # each converted group is the identifier prefix plus list of sigs
# List of tuples from extracted first seen replay couples
frcs = [] # each converted couple is (seqner, dater)
# List of tuples from extracted source seal couples (delegator or issuer)
sscs = [] # each converted couple is (seqner, diger) for delegating/issuing event
# List of tuples from extracted SAD path sig groups from transferable identifiers
sadsigs = [] # each converted group is tuple of (path, i, s, d) quad plus list of sigs
# List of tuples from extracted SAD path sig groups from non-trans identifiers
sadcigs = [] # each converted group is path plus list of non-trans sigs
pipelined = False # all attachments in one big pipeline counted group
# extract and deserialize attachments
try: # catch errors here to flush only counted part of stream
# extract attachments must start with counter so know if txt or bny.
while not ims:
yield
cold = self.sniff(ims) # expect counter at front of attachments
if cold != Colds.msg: # not new message so process attachments
ctr = yield from self._extractor(ims=ims, klas=Counter, cold=cold)
if ctr.code == CtrDex.AttachedMaterialQuadlets: # pipeline ctr?
pipelined = True
# compute pipelined attached group size based on txt or bny
pags = ctr.count * 4 if cold == Colds.txt else ctr.count * 3
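# (a quadlet is four Base64 characters in the text domain and three bytes in the
#  binary domain, hence count*4 for txt vs count*3 for bny)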
while len(ims) < pags: # wait until rx full pipelined group
yield
pims = ims[:pags] # copy out substream pipeline group
del ims[:pags] # strip off from ims
ims = pims # now just process substream as one counted frame
if pipeline:
pass # pass extracted ims to pipeline processor
return
ctr = yield from self._extractor(ims=ims,
klas=Counter,
cold=cold,
abort=pipelined)
# iteratively process attachment counters (all non pipelined)
while True: # do while already extracted first counter is ctr
if ctr.code == CtrDex.ControllerIdxSigs:
for i in range(ctr.count): # extract each attached signature
siger = yield from self._extractor(ims=ims,
klas=Siger,
cold=cold,
abort=pipelined)
sigers.append(siger)
elif ctr.code == CtrDex.WitnessIdxSigs:
for i in range(ctr.count): # extract each attached signature
wiger = yield from self._extractor(ims=ims,
klas=Siger,
cold=cold,
abort=pipelined)
wigers.append(wiger)
elif ctr.code == CtrDex.NonTransReceiptCouples:
# extract attached rct couplets into list of sigvers
# verfer property of cigar is the identifier prefix
# cigar itself has the attached signature
for cigar in self._nonTransReceiptCouples(ctr=ctr, ims=ims, cold=cold, pipelined=pipelined):
cigars.append(cigar)
elif ctr.code == CtrDex.TransReceiptQuadruples:
# extract attached trans receipt vrc quadruple
# spre+ssnu+sdig+sig
# spre is pre of signer of vrc
# ssnu is sn of signer's est evt when signed
# sdig is dig of signer's est event when signed
# sig is indexed signature of signer on this event msg
for i in range(ctr.count): # extract each attached quadruple
prefixer = yield from self._extractor(ims,
klas=Prefixer,
cold=cold,
abort=pipelined)
seqner = yield from self._extractor(ims,
klas=Seqner,
cold=cold,
abort=pipelined)
saider = yield from self._extractor(ims,
klas=Saider,
cold=cold,
abort=pipelined)
siger = yield from self._extractor(ims=ims,
klas=Siger,
cold=cold,
abort=pipelined)
trqs.append((prefixer, seqner, saider, siger))
elif ctr.code == CtrDex.TransIdxSigGroups:
# extract attached trans indexed sig groups each made of
# triple pre+snu+dig plus indexed sig group
# pre is pre of signer (endorser) of msg
# snu is sn of signer's est evt when signed
# dig is dig of signer's est event when signed
# followed by counter for ControllerIdxSigs with attached
# indexed sigs from trans signer (endorser).
for (prefixer, seqner, saider, isigers) in self._transIdxSigGroups(ctr, ims, cold=cold,
pipelined=pipelined):
tsgs.append((prefixer, seqner, saider, isigers))
elif ctr.code == CtrDex.TransLastIdxSigGroups:
# extract attached signer seal indexed sig groups each made of
# identifier pre plus indexed sig group
# pre is pre of signer (endorser) of msg
# followed by counter for ControllerIdxSigs with attached
# indexed sigs from trans signer (endorser).
for i in range(ctr.count): # extract each attached groups
prefixer = yield from self._extractor(ims,
klas=Prefixer,
cold=cold,
abort=pipelined)
ictr = ctr = yield from self._extractor(ims=ims,
klas=Counter,
cold=cold,
abort=pipelined)
if ctr.code != CtrDex.ControllerIdxSigs:
raise kering.UnexpectedCountCodeError("Wrong | |
the list.
"""
hl7V2Stores = _messages.MessageField('Hl7V2Store', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListLocationsResponse(_messages.Message):
r"""The response message for Locations.ListLocations.
Fields:
locations: A list of locations that matches the specified filter in the
request.
nextPageToken: The standard List next-page token.
"""
locations = _messages.MessageField('Location', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListMessagesResponse(_messages.Message):
r"""Lists the messages in the specified HL7v2 store.
Fields:
messages: The returned message names. There won't be more values than the
value of page_size in the request.
nextPageToken: Token to retrieve the next page of results or empty if
there are no more results in the list.
"""
messages = _messages.StringField(1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListOperationsResponse(_messages.Message):
r"""The response message for Operations.ListOperations.
Fields:
nextPageToken: The standard List next-page token.
operations: A list of operations that matches the specified filter in the
request.
"""
nextPageToken = _messages.StringField(1)
operations = _messages.MessageField('Operation', 2, repeated=True)
class Location(_messages.Message):
r"""A resource that represents Google Cloud Platform location.
Messages:
LabelsValue: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
MetadataValue: Service-specific metadata. For example the available
capacity at the given location.
Fields:
displayName: The friendly name for this location, typically a nearby city
name. For example, "Tokyo".
labels: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
locationId: The canonical id for this location. For example: `"us-east1"`.
metadata: Service-specific metadata. For example the available capacity at
the given location.
name: Resource name for the location, which may vary between
implementations. For example: `"projects/example-project/locations/us-
east1"`
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata. For example the available capacity at the
given location.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
displayName = _messages.StringField(1)
labels = _messages.MessageField('LabelsValue', 2)
locationId = _messages.StringField(3)
metadata = _messages.MessageField('MetadataValue', 4)
name = _messages.StringField(5)
class Message(_messages.Message):
r"""A complete HL7v2 message. See
http://www.hl7.org/implement/standards/index.cfm?ref=common for details on
the standard.
Messages:
LabelsValue: User-supplied key-value pairs used to organize HL7v2 stores.
Label keys must be between 1 and 63 characters long, have a UTF-8
encoding of maximum 128 bytes, and must conform to the following PCRE
regular expression: \p{Ll}\p{Lo}{0,62} Label values are optional, must
be between 1 and 63 characters long, have a UTF-8 encoding of maximum
128 bytes, and must conform to the following PCRE regular expression:
[\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 64 labels can be associated
with a given store.
Fields:
createTime: The datetime when the message was created. Set by the server.
data: Raw message bytes.
labels: User-supplied key-value pairs used to organize HL7v2 stores.
Label keys must be between 1 and 63 characters long, have a UTF-8
encoding of maximum 128 bytes, and must conform to the following PCRE
regular expression: \p{Ll}\p{Lo}{0,62} Label values are optional, must
be between 1 and 63 characters long, have a UTF-8 encoding of maximum
128 bytes, and must conform to the following PCRE regular expression:
[\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 64 labels can be associated
with a given store.
messageType: The message type and trigger event for this message. MSH-9.
name: Resource name of the Message, of the form `projects/{project_id}/dat
asets/{dataset_id}/hl7V2Stores/{hl7_v2_store_id}/messages/{message_id}`.
Assigned by the server.
parsedData: The parsed version of the raw message data.
patientIds: All patient IDs listed in the PID-2, PID-3, and PID-4 segments
of this message.
sendFacility: The hospital that this message came from. MSH-4.
sendTime: The datetime the sending application sent this message. MSH-7.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""User-supplied key-value pairs used to organize HL7v2 stores. Label
keys must be between 1 and 63 characters long, have a UTF-8 encoding of
maximum 128 bytes, and must conform to the following PCRE regular
expression: \p{Ll}\p{Lo}{0,62} Label values are optional, must be between
1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and
must conform to the following PCRE regular expression:
[\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 64 labels can be associated with
a given store.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
createTime = _messages.StringField(1)
data = _messages.BytesField(2)
labels = _messages.MessageField('LabelsValue', 3)
messageType = _messages.StringField(4)
name = _messages.StringField(5)
parsedData = _messages.MessageField('ParsedData', 6)
patientIds = _messages.MessageField('PatientId', 7, repeated=True)
sendFacility = _messages.StringField(8)
sendTime = _messages.StringField(9)
class NotificationConfig(_messages.Message):
r"""Specifies where notifications should be sent upon changes to a data
store.
Fields:
pubsubTopic: The [Cloud Pub/Sub](https://cloud.google.com/pubsub/docs/)
topic that notifications of changes are published on. Supplied by the
client. PubsubMessage.Data will contain the resource name.
PubsubMessage.MessageId is the ID of this message. It is guaranteed to
be unique within the topic. PubsubMessage.PublishTime is the time at
which the message was published. Notifications are only sent if the
topic is non-empty. [Topic
names](https://cloud.google.com/pubsub/docs/overview#names) must be
scoped to a project. <EMAIL> must
have publisher permissions on the given Cloud Pub/Sub topic. Not having
adequate permissions will cause the calls that send notifications to
fail.
"""
pubsubTopic = _messages.StringField(1)
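# Illustrative usage sketch (not part of the generated module); the project and
# topic names below are invented:
#   config = NotificationConfig(
#       pubsubTopic='projects/my-project/topics/hl7v2-notifications')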
class Operation(_messages.Message):
r"""This resource represents a long-running operation that is the result of
a network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation.
It typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success.
If the original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Fields:
done: If the value is `false`, it means the operation is still in
progress. If `true`, the operation is completed, and either `error` or
`response` is available.
error: The error result of the operation in case of failure or
cancellation.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping,
the `name` should have the format of `operations/some/unique/name`.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with | |
defaults
tests.append("""
Sub MySub(x As Single, y, z As Boolean, a, Optional b = 10, Optional c="hello")
a=10
n=20
c="hello"
End Sub
""")
# simple sub with optional arguments and defaults
tests.append("""
Sub MySub(x As Single, y, z As Boolean, a, Optional b = 10, Optional c As String = "hello")
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (40 of 61)
# ByVal, ByRef args
tests.append("""
Sub MySub(ByVal a, ByRef y)
a=10
n=20
c="hello"
End Sub
""")
tests.append("""
Sub MySub(a, ByRef y)
a=10
n=20
c="hello"
End Sub
""")
tests.append("""
Sub MySub(ByVal a, y)
a=10
n=20
c="hello"
End Sub
""")
tests.append("""
Sub MySub(ByVal a As Single, y)
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (41 of 61)
# 852166 Sub X<spc>(a,b,c) fails to parse
tests.append("""
Sub MySub (ByVal a, ByRef y)
a=10
n=20
c="hello"
End Sub
""")
# 880612 Continuation character inside call
tests.append("""
Sub MySub _
(ByVal a, ByRef y)
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (42 of 61)
# simple fn
tests.append("""
Function MyFn()
a=10
n=20
c="hello"
MyFn = 20
End Function
""")
# simple fn with exit
tests.append("""
Function MyFn()
a=10
n=20
MyFn = 20
Exit Function
c="hello"
End Function
""")
# simple sub with scope
tests.extend(["""
Private Function MyFn()
a=10
n=20
c="hello"
MyFn = 20
End Function""",
"""
Public Function MyFn()
a=10
n=20
c="hello"
MyFn = 20
End Function
""",
"""
Friend Function MyFn()
a=10
n=20
c="hello"
MyFn = 20
End Function
""",
])
# simple fn with gap in ()
tests.append("""
Function MyFn( )
a=10
n=20
c="hello"
MyFn = 20
End Function
""")
# << Parsing tests >> (43 of 61)
# simple sub
tests.append("""
Function MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Function
""")
# simple sub with exit
tests.append("""
Function MySub(x, y, z, a, b, c)
a=10
n=20
Exit Sub
c="hello"
End Function
""")
# simple sub with scope
tests.append("""
Private Function MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Function
Public Function fn(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Function
""")
# << Parsing tests >> (44 of 61)
# simple sub
tests.append("""
Function fn(x As Single, y, z As Boolean, a, b As Variant, c) As Single
a=10
n=20
c="hello"
End Function
""")
# simple sub with exit
tests.append("""
Function fc(x As Single, y, z As Object, a, b As MyThing.Object, c) As Object.Obj
a=10
n=20
Exit Function
c="hello"
End Function
""")
# simple sub with scope
tests.append("""
Private Function MySub(x, y As Variant, z, a As Boolena, b, c As Long) As Variant
a=10
n=20
c="hello"
End Function
Public Function MySub(x, y, z, a, b, c) As String
a=10
n=20
c="hello"
End Function
""")
# function returning an array
tests.append("""
Function fn(x As Single, y, z As Boolean, a, b As Variant, c) As Single()
a=10
n=20
c="hello"
End Function
""")
# << Parsing tests >> (45 of 61)
# simple sub
tests.append("""
Function fn(x As Single, y, z As Boolean, a, Optional b As Variant, c) As Single
a=10
n=20
c="hello"
End Function
""")
# simple sub with exit
tests.append("""
Function MySub(x() As Single, y, z As Object, Optional a, b As MyThing.Object, Optional c) As Integer
a=10
n=20
Exit Function
c="hello"
End Function
""")
# simple sub with scope
tests.append("""
Private Function MySub(x, Optional y As Variant, Optional z, a As Boolena, b, c As Long) As Long
a=10
n=20
c="hello"
End Function
Public Function MySub(x, y, z, a, b, c) As Control.Buttons.BigButtons.ThisOne
a=10
n=20
c="hello"
End Function
""")
# simple fn with optional arguments and defaults
tests.append("""
Function MySub(x As Single, y, z As Boolean, a, Optional b = 10, Optional c="hello")
a=10
n=20
c="hello"
End Function
""")
# simple fn with optional arguments and defaults
tests.append("""
Function MySub(x As Single, y, z As Boolean, a, Optional b = 10, Optional c As String = "hello")
a=10
n=20
c="hello"
End Function
""")
# << Parsing tests >> (46 of 61)
# ByVal, ByRef args
tests.append("""
Function MySub(ByVal a, ByRef y)
a=10
n=20
c="hello"
End Function
""")
tests.append("""
Function MySub(a, ByRef y)
a=10
n=20
c="hello"
End Function
""")
tests.append("""
Function MySub(ByVal a, y)
a=10
n=20
c="hello"
End Function
""")
tests.append("""
Function MySub(ByVal a As Single, y)
a=10
n=20
c="hello"
End Function
""")
# << Parsing tests >> (47 of 61)
# Simple property let/get/set
tests.extend(["""
Property Let MyProp(NewVal As String)
a = NewVal
Exit Property
End Property
""",
"""
Property Get MyProp() As Long
MyProp = NewVal
Exit Property
End Property
""",
"""
Property Set MyProp(NewObject As Object)
Set MyProp = NewVal
Exit Property
End Property
"""
"""
Public Property Let MyProp(NewVal As String)
a = NewVal
End Property
""",
"""
Public Property Get MyProp() As Long
MyProp = NewVal
End Property
""",
"""
Public Property Set MyProp(NewObject As Object)
Set MyProp = NewVal
End Property
""",
"""
Public Property Get MyProp( ) As Long
MyProp = NewVal
End Property
""",
])
# Simple property let/get/set with labels
tests.extend(["""
1: Property Let MyProp(NewVal As String)
1: a = NewVal
1: End Property
""",
"""
1: Property Get MyProp() As Long
1: MyProp = NewVal
1: End Property
""",
"""
1: Property Set MyProp(NewObject As Object)
1: Set MyProp = NewVal
1: End Property
"""
])
# Simple property let/get/set with labels and comment
tests.extend(["""
1: Property Let MyProp(NewVal As String) ' comment
1: a = NewVal ' comment
1: End Property ' comment
""",
"""
1: Property Get MyProp() As Long ' comment
1: MyProp = NewVal ' comment
1: End Property ' comment
""",
"""
1: Property Set MyProp(NewObject As Object) ' comment
1: Set MyProp = NewVal ' comment
1: End Property ' comment
"""
])
# << Parsing tests >> (48 of 61)
# Simple case
tests.append("""
Select Case x
Case "one"
y = 1
Case "two"
y = 2
Case "three"
z = 3
End Select
""")
# Simple case with else
tests.append("""
Select Case x
Case "one"
y = 1
Case "two"
y = 2
Case "three"
z = 3
Case Else
z = -1
End Select
""")
# Simple case with else and trailing colons
tests.append("""
Select Case x
Case "one":
y = 1
Case "two":
y = 2
Case "three":
z = 3
Case Else:
z = -1
End Select
""")
# Multiple case with else
tests.append("""
Select Case x
Case "one"
y = 1
Case "two"
y = 2
Case "three", "four", "five"
z = 3
Case Else
z = -1
End Select
""")
# Single line case with else
tests.append("""
Select Case x
Case "one": y = 1
Case "two": y = 2
Case "three", "four", "five": z = 3
Case Else: z = -1
End Select
""")
# Range case
tests.append("""
Select Case x
Case "a" To "m"
z = 1
Case "n" To "z"
z = 20
End Select
""")
# Range case with Is and Like
tests.append("""
Select Case x
Case Is < "?", "a" To "m"
z = 1
Case "n" To "z", Is > 10, Is Like "*blah"
z = 20
End Select
""")
# Multiple Range case
tests.append("""
Select Case x
Case "a" To "m", "A" To "G", "K" To "P"
z = 1
Case "n" To "z", 10 To this.that(10,20)
z = 20
End Select
""")
# Empty case
tests.append("""
Select Case a
Case 10
Case 20
End Select
""")
# Case with comments
tests.append("""
Select Case x
' Here is a nasty comment
Case "one"
y = 1
Case "two"
y = 2
Case "three"
z = 3
End Select
""")
# << Parsing tests >> (49 of 61)
# Simple for
tests.append("""
For i = 0 To 1000
a = a + 1
Next i
""")
# Simple for
tests.append("""
For i=0 To 1000
a = a + 1
Next i
""")
# Empty for
tests.append("""
For i = 0 To 1000
Next i
""")
# Simple for with unnamed Next
tests.append("""
For i = 0 To 1000
a = a + 1
Next
""")
# For with step
tests.append("""
For i = 0 To 1000 Step 2
a = a + 1
Next i
""")
# For with exit
tests.append("""
For i = 0 To 1000
a = a + 1
Exit For
Next i
""")
# Nested for
tests.append("""
For i = 0 To 1000
a = a + 1
For j = 1 To i
b = b + j
Next j
Next i
""")
# Dotted names - what does this even mean?
tests.append("""
For me.you = 0 To 1000 Step 2
a = a + 1
Next me.you
""")
# << Parsing tests >> (50 of 61)
# Simple for
tests.append("""
For Each i In coll
a = a + 1
Next i
""")
# Empty for
tests.append("""
For Each i In coll
Next i
""")
# Simple for with unnamed Next
tests.append("""
For Each i In coll
a = a + 1
Next
""")
# For with exit
tests.append("""
For Each i In coll
a = a + 1
Exit For
Next i
""")
# Nested for
tests.append("""
For Each i In coll
a = a + 1
For Each j In coll
b = b + j
Next j
Next i
""")
# << Parsing tests >> (51 of 61)
# Simple while wend
tests.append("""
a = 0
While a < 10
g = 10
a = a + 1
Wend
""")
# Nested while wend
tests.append("""
a = 0
While a < 10
g = 10
a = a + 1
While b < 40
doit
Wend
Wend
""")
# Simple while wend with line numbers
tests.append("""
1: a = 0
2: While a < 10
3: g = 10
4: a = a + 1
5: Wend
""")
# << Parsing tests >> (52 of 61)
# Simple do while loop
tests.append("""
a = 0
Do While a < 10
g = 10
a = a + 1
Loop
""")
# Simple do while with exit
tests.append("""
a = 0
Do While a < 10
g = 10
a = a + 1
Exit Do
Loop
""")
# Nested do while loop
tests.append("""
a = 0
Do While a < 10
g = 10
a = a + 1
Do While b < 40
doit
Loop
Loop
""")
# << Parsing tests >> (53 of 61)
# Simple do loop
tests.append("""
a = 0
Do
g = 10
a = a + 1
Loop
""")
# Simple do with exit
tests.append("""
a = 0
Do
g = 10
a = a + 1
Exit Do
Loop
""")
# Nested do loop
tests.append("""
a = 0
Do
g = 10
a = a + 1
Do
doit
Loop
Loop
""")
# << | |
flavor = "variable-font"
_attrs = ('filename', 'axisSubsets', 'lib')
filename = posixpath_property("_filename")
def __init__(self, *, name, filename=None, axisSubsets=None, lib=None):
self.name: str = name
"""string, required. Name of this variable to identify it during the
build process and from other parts of the document, and also as a
filename in case the filename property is empty.
VarLib.
"""
self.filename: str = filename
"""string, optional. Relative path to the variable font file, **as it is
in the document**. The file may or may not exist.
If not specified, the :attr:`name` will be used as a basename for the file.
"""
self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or []
"""Axis subsets to include in this variable font.
If an axis is not mentioned, assume that we only want the default
location of that axis (same as a :class:`ValueAxisSubsetDescriptor`).
"""
self.lib: MutableMapping[str, Any] = lib or {}
"""Custom data associated with this variable font."""
class RangeAxisSubsetDescriptor(SimpleDescriptor):
"""Subset of a continuous axis to include in a variable font.
.. versionadded:: 5.0
"""
flavor = "axis-subset"
_attrs = ('name', 'userMinimum', 'userDefault', 'userMaximum')
def __init__(self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf):
self.name: str = name
"""Name of the :class:`AxisDescriptor` to subset."""
self.userMinimum: float = userMinimum
"""New minimum value of the axis in the target variable font.
If not specified, assume the same minimum value as the full axis.
(default = ``-math.inf``)
"""
self.userDefault: Optional[float] = userDefault
"""New default value of the axis in the target variable font.
If not specified, assume the same default value as the full axis.
(default = ``None``)
"""
self.userMaximum: float = userMaximum
"""New maximum value of the axis in the target variable font.
If not specified, assume the same maximum value as the full axis.
(default = ``math.inf``)
"""
class ValueAxisSubsetDescriptor(SimpleDescriptor):
"""Single value of a discrete or continuous axis to use in a variable font.
.. versionadded:: 5.0
"""
flavor = "axis-subset"
_attrs = ('name', 'userValue')
def __init__(self, *, name, userValue):
self.name: str = name
"""Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor`
to "snapshot" or "freeze".
"""
self.userValue: float = userValue
"""Value in user coordinates at which to freeze the given axis."""
class BaseDocWriter(object):
_whiteSpace = " "
axisDescriptorClass = AxisDescriptor
discreteAxisDescriptorClass = DiscreteAxisDescriptor
axisLabelDescriptorClass = AxisLabelDescriptor
locationLabelDescriptorClass = LocationLabelDescriptor
ruleDescriptorClass = RuleDescriptor
sourceDescriptorClass = SourceDescriptor
variableFontDescriptorClass = VariableFontDescriptor
valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
instanceDescriptorClass = InstanceDescriptor
@classmethod
def getAxisDecriptor(cls):
return cls.axisDescriptorClass()
@classmethod
def getSourceDescriptor(cls):
return cls.sourceDescriptorClass()
@classmethod
def getInstanceDescriptor(cls):
return cls.instanceDescriptorClass()
@classmethod
def getRuleDescriptor(cls):
return cls.ruleDescriptorClass()
def __init__(self, documentPath, documentObject: DesignSpaceDocument):
self.path = documentPath
self.documentObject = documentObject
self.effectiveFormatTuple = self._getEffectiveFormatTuple()
self.root = ET.Element("designspace")
def write(self, pretty=True, encoding="UTF-8", xml_declaration=True):
self.root.attrib['format'] = ".".join(str(i) for i in self.effectiveFormatTuple)
if self.documentObject.axes or self.documentObject.elidedFallbackName is not None:
axesElement = ET.Element("axes")
if self.documentObject.elidedFallbackName is not None:
axesElement.attrib['elidedfallbackname'] = self.documentObject.elidedFallbackName
self.root.append(axesElement)
for axisObject in self.documentObject.axes:
self._addAxis(axisObject)
if self.documentObject.locationLabels:
labelsElement = ET.Element("labels")
for labelObject in self.documentObject.locationLabels:
self._addLocationLabel(labelsElement, labelObject)
self.root.append(labelsElement)
if self.documentObject.rules:
if getattr(self.documentObject, "rulesProcessingLast", False):
attributes = {"processing": "last"}
else:
attributes = {}
self.root.append(ET.Element("rules", attributes))
for ruleObject in self.documentObject.rules:
self._addRule(ruleObject)
if self.documentObject.sources:
self.root.append(ET.Element("sources"))
for sourceObject in self.documentObject.sources:
self._addSource(sourceObject)
if self.documentObject.variableFonts:
variableFontsElement = ET.Element("variable-fonts")
for variableFont in self.documentObject.variableFonts:
self._addVariableFont(variableFontsElement, variableFont)
self.root.append(variableFontsElement)
if self.documentObject.instances:
self.root.append(ET.Element("instances"))
for instanceObject in self.documentObject.instances:
self._addInstance(instanceObject)
if self.documentObject.lib:
self._addLib(self.root, self.documentObject.lib, 2)
tree = ET.ElementTree(self.root)
tree.write(
self.path,
encoding=encoding,
method='xml',
xml_declaration=xml_declaration,
pretty_print=pretty,
)
def _getEffectiveFormatTuple(self):
"""Try to use the version specified in the document, or a sufficiently
recent version to be able to encode what the document contains.
"""
minVersion = self.documentObject.formatTuple
if (
any(
isinstance(axis, DiscreteAxisDescriptor) or
axis.axisOrdering is not None or
axis.axisLabels
for axis in self.documentObject.axes
) or
self.documentObject.locationLabels or
any(
source.localisedFamilyName
for source in self.documentObject.sources
) or
self.documentObject.variableFonts or
any(
instance.locationLabel or
instance.userLocation
for instance in self.documentObject.instances
)
):
if minVersion < (5, 0):
minVersion = (5, 0)
return minVersion
def _makeLocationElement(self, locationObject, name=None):
""" Convert Location dict to a locationElement."""
locElement = ET.Element("location")
if name is not None:
locElement.attrib['name'] = name
validatedLocation = self.documentObject.newDefaultLocation()
for axisName, axisValue in locationObject.items():
if axisName in validatedLocation:
# only accept values we know
validatedLocation[axisName] = axisValue
for dimensionName, dimensionValue in validatedLocation.items():
dimElement = ET.Element('dimension')
dimElement.attrib['name'] = dimensionName
if type(dimensionValue) == tuple:
dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue[0])
dimElement.attrib['yvalue'] = self.intOrFloat(dimensionValue[1])
else:
dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue)
locElement.append(dimElement)
return locElement, validatedLocation
def intOrFloat(self, num):
if int(num) == num:
return "%d" % num
return ("%f" % num).rstrip('0').rstrip('.')
def _addRule(self, ruleObject):
# if none of the conditions have minimum or maximum values, do not add the rule.
ruleElement = ET.Element('rule')
if ruleObject.name is not None:
ruleElement.attrib['name'] = ruleObject.name
for conditions in ruleObject.conditionSets:
conditionsetElement = ET.Element('conditionset')
for cond in conditions:
if cond.get('minimum') is None and cond.get('maximum') is None:
# neither is defined, don't add this condition
continue
conditionElement = ET.Element('condition')
conditionElement.attrib['name'] = cond.get('name')
if cond.get('minimum') is not None:
conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum'))
if cond.get('maximum') is not None:
conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum'))
conditionsetElement.append(conditionElement)
if len(conditionsetElement):
ruleElement.append(conditionsetElement)
for sub in ruleObject.subs:
subElement = ET.Element('sub')
subElement.attrib['name'] = sub[0]
subElement.attrib['with'] = sub[1]
ruleElement.append(subElement)
if len(ruleElement):
self.root.findall('.rules')[0].append(ruleElement)
def _addAxis(self, axisObject):
axisElement = ET.Element('axis')
axisElement.attrib['tag'] = axisObject.tag
axisElement.attrib['name'] = axisObject.name
self._addLabelNames(axisElement, axisObject.labelNames)
if axisObject.map:
for inputValue, outputValue in axisObject.map:
mapElement = ET.Element('map')
mapElement.attrib['input'] = self.intOrFloat(inputValue)
mapElement.attrib['output'] = self.intOrFloat(outputValue)
axisElement.append(mapElement)
if axisObject.axisOrdering or axisObject.axisLabels:
labelsElement = ET.Element('labels')
if axisObject.axisOrdering is not None:
labelsElement.attrib['ordering'] = str(axisObject.axisOrdering)
for label in axisObject.axisLabels:
self._addAxisLabel(labelsElement, label)
axisElement.append(labelsElement)
if isinstance(axisObject, AxisDescriptor):
axisElement.attrib['minimum'] = self.intOrFloat(axisObject.minimum)
axisElement.attrib['maximum'] = self.intOrFloat(axisObject.maximum)
elif isinstance(axisObject, DiscreteAxisDescriptor):
axisElement.attrib['values'] = " ".join(self.intOrFloat(v) for v in axisObject.values)
axisElement.attrib['default'] = self.intOrFloat(axisObject.default)
if axisObject.hidden:
axisElement.attrib['hidden'] = "1"
self.root.findall('.axes')[0].append(axisElement)
def _addAxisLabel(self, axisElement: ET.Element, label: AxisLabelDescriptor) -> None:
labelElement = ET.Element('label')
labelElement.attrib['uservalue'] = self.intOrFloat(label.userValue)
if label.userMinimum is not None:
labelElement.attrib['userminimum'] = self.intOrFloat(label.userMinimum)
if label.userMaximum is not None:
labelElement.attrib['usermaximum'] = self.intOrFloat(label.userMaximum)
labelElement.attrib['name'] = label.name
if label.elidable:
labelElement.attrib['elidable'] = "true"
if label.olderSibling:
labelElement.attrib['oldersibling'] = "true"
if label.linkedUserValue is not None:
labelElement.attrib['linkeduservalue'] = self.intOrFloat(label.linkedUserValue)
self._addLabelNames(labelElement, label.labelNames)
axisElement.append(labelElement)
def _addLabelNames(self, parentElement, labelNames):
for languageCode, labelName in sorted(labelNames.items()):
languageElement = ET.Element('labelname')
languageElement.attrib[XML_LANG] = languageCode
languageElement.text = labelName
parentElement.append(languageElement)
def _addLocationLabel(self, parentElement: ET.Element, label: LocationLabelDescriptor) -> None:
labelElement = ET.Element('label')
labelElement.attrib['name'] = label.name
if label.elidable:
labelElement.attrib['elidable'] = "true"
if label.olderSibling:
labelElement.attrib['oldersibling'] = "true"
self._addLabelNames(labelElement, label.labelNames)
self._addLocationElement(labelElement, userLocation=label.userLocation)
parentElement.append(labelElement)
def _addLocationElement(
self,
parentElement,
*,
designLocation: AnisotropicLocationDict = None,
userLocation: SimpleLocationDict = None
):
locElement = ET.Element("location")
for axis in self.documentObject.axes:
if designLocation is not None and axis.name in designLocation:
dimElement = ET.Element('dimension')
dimElement.attrib['name'] = axis.name
value = designLocation[axis.name]
if isinstance(value, tuple):
dimElement.attrib['xvalue'] = self.intOrFloat(value[0])
dimElement.attrib['yvalue'] = self.intOrFloat(value[1])
else:
dimElement.attrib['xvalue'] = self.intOrFloat(value)
locElement.append(dimElement)
elif userLocation is not None and axis.name in userLocation:
dimElement = ET.Element('dimension')
dimElement.attrib['name'] = axis.name
value = userLocation[axis.name]
dimElement.attrib['uservalue'] = self.intOrFloat(value)
locElement.append(dimElement)
if len(locElement) > 0:
parentElement.append(locElement)
def _addInstance(self, instanceObject):
instanceElement = ET.Element('instance')
if instanceObject.name is not None:
instanceElement.attrib['name'] = instanceObject.name
if instanceObject.locationLabel is not None:
instanceElement.attrib['location'] = instanceObject.locationLabel
if instanceObject.familyName is not None:
instanceElement.attrib['familyname'] = instanceObject.familyName
if instanceObject.styleName is not None:
instanceElement.attrib['stylename'] = instanceObject.styleName
# add localisations
if instanceObject.localisedStyleName:
languageCodes = list(instanceObject.localisedStyleName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedStyleNameElement = ET.Element('stylename')
localisedStyleNameElement.attrib[XML_LANG] = code
localisedStyleNameElement.text = instanceObject.getStyleName(code)
instanceElement.append(localisedStyleNameElement)
if instanceObject.localisedFamilyName:
languageCodes = list(instanceObject.localisedFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedFamilyNameElement = ET.Element('familyname')
localisedFamilyNameElement.attrib[XML_LANG] = code
localisedFamilyNameElement.text = instanceObject.getFamilyName(code)
instanceElement.append(localisedFamilyNameElement)
if instanceObject.localisedStyleMapStyleName:
languageCodes = list(instanceObject.localisedStyleMapStyleName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue
localisedStyleMapStyleNameElement = ET.Element('stylemapstylename')
localisedStyleMapStyleNameElement.attrib[XML_LANG] = code
localisedStyleMapStyleNameElement.text = instanceObject.getStyleMapStyleName(code)
instanceElement.append(localisedStyleMapStyleNameElement)
if instanceObject.localisedStyleMapFamilyName:
languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue
localisedStyleMapFamilyNameElement = ET.Element('stylemapfamilyname')
localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code
localisedStyleMapFamilyNameElement.text = instanceObject.getStyleMapFamilyName(code)
instanceElement.append(localisedStyleMapFamilyNameElement)
if self.effectiveFormatTuple >= (5, 0):
if instanceObject.locationLabel is None:
self._addLocationElement(
instanceElement,
designLocation=instanceObject.designLocation,
userLocation=instanceObject.userLocation
)
else:
# Pre-version 5.0 code was validating and filling in the location
# dict while writing it out, as preserved below.
if instanceObject.location is not None:
locationElement, instanceObject.location = self._makeLocationElement(instanceObject.location)
instanceElement.append(locationElement)
if instanceObject.filename is not None:
instanceElement.attrib['filename'] = instanceObject.filename
if instanceObject.postScriptFontName is not None:
instanceElement.attrib['postscriptfontname'] = instanceObject.postScriptFontName
if instanceObject.styleMapFamilyName is not | |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """
import logging
import os
import json
from utils import get_labels, write_file
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, id, words, start_labels, end_labels, event_type=None, role=None):
"""Constructs a InputExample.
Args:
id: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.id = id
self.words = words
self.event_type = event_type
self.role = role
self.start_labels = start_labels
self.end_labels = end_labels
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, attention_mask, token_type_ids, start_label_ids, end_label_ids):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.start_label_ids = start_label_ids
self.end_label_ids = end_label_ids
## ccks format
def trigger_process_bin_ccks(input_file, schema_file, is_predict=False):
event_type_list = []
rows = open(schema_file, encoding='utf-8').read().splitlines()
for row in rows:
row = json.loads(row)
event_type = row['event_type']
event_type_list.append(event_type)
rows = open(input_file, encoding='utf-8').read().splitlines()
results = []
for row in rows:
if len(row)==1: print(row)
row = json.loads(row)
start_labels = [0]*len(row["content"])
end_labels = [0]*len(row["content"])
if is_predict:
results.append({"id":row["id"], "words":list(row["content"]), "start_labels":start_labels, "end_labels":end_labels, "event_type":None})
continue
for gold_event_type in event_type_list:
start_labels = [0]*len(row["content"])
end_labels = [0]*len(row["content"])
for event in row["events"]:
event_type = event["type"]
if event_type != gold_event_type: continue
for mention in event["mentions"]:
if mention["role"]=="trigger":
trigger = mention["word"]
trigger_start_index, trigger_end_index = mention["span"]
trigger_end_index -= 1
start_labels[trigger_start_index]= 1
end_labels[trigger_end_index]= 1
break
results.append({"id":row["id"], "words":list(row["content"]), "start_labels":start_labels, "end_labels":end_labels, "event_type":gold_event_type})
# write_file(results,output_file)
return results
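# Each returned item matches the InputExample fields, e.g. (shape only, values illustrative):
# {"id": ..., "words": [one entry per character of "content"],
#  "start_labels": [0/1 flags at trigger start positions],
#  "end_labels": [0/1 flags at trigger end positions],
#  "event_type": one event type from the schema}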
## lic format
def trigger_process_bin_lic(input_file, schema_file, is_predict=False):
event_type_list = []
rows = open(schema_file, encoding='utf-8').read().splitlines()
for row in rows:
row = json.loads(row)
event_type = row['event_type']
event_type_list.append(event_type)
rows = open(input_file, encoding='utf-8').read().splitlines()
results = []
for row in rows:
if len(row)==1: print(row)
row = json.loads(row)
start_labels = [0]*len(row["text"])
end_labels = [0]*len(row["text"])
if is_predict:
results.append({"id":row["id"], "words":list(row["text"]), "start_labels":start_labels, "end_labels":end_labels, "event_type":None})
continue
for gold_event_type in event_type_list:
start_labels = [0]*len(row["content"])
end_labels = [0]*len(row["content"])
for event in row["event_list"]:
trigger = event["trigger"]
event_type = event["event_type"]
if event_type != gold_event_type: continue
trigger_start_index = event["trigger_start_index"]
trigger_end_index = trigger_start_index + len(trigger) - 1
start_labels[trigger_start_index]= 1
end_labels[trigger_end_index]= 1
results.append({"id":row["id"], "words":list(row["text"]), "start_labels":start_labels, "end_labels":end_labels, "event_type":gold_event_type})
# write_file(results,output_file)
return results
## ccks format
def role_process_bin_ccks(input_file, schema_file, is_predict=False):
role_dict = {}
rows = open(schema_file, encoding='utf-8').read().splitlines()
for row in rows:
row = json.loads(row)
event_type = row['event_type']
role_dict[event_type] = []
for role in row["role_list"]:
role_dict[event_type].append(role["role"])
rows = open(input_file, encoding='utf-8').read().splitlines()
results = []
count = 0
for row in rows:
if len(row)==1: print(row)
row = json.loads(row)
count += 1
if "id" not in row:
row["id"]=count
# arguments = []
if is_predict:
start_labels = [0]*len(row["content"])
end_labels = [0]*len(row["content"])
results.append({"id":row["id"], "words":list(row["content"]), "start_labels":start_labels, "end_labels":end_labels})
continue
# for gold_event_type in role_dict.keys():
# for gold_role in role_dict[gold_event_type]:
# for event in row["events"]:
# start_labels = [0]*len(row["content"])
# end_labels = [0]*len(row["content"])
# event_type = event["type"]
# if event_type != gold_event_type: continue
# for arg in event["mentions"]:
# role = arg['role']
# if role=="trigger": continue
# if role!=gold_role: continue
# argument_start_index, argument_end_index = arg["span"]
# argument_end_index -= 1
# start_labels[argument_start_index] = 1
# end_labels[argument_end_index] = 1
# results.append({"id":row["id"], "words":list(row["content"]), "event_type":gold_event_type, "role":gold_role, \
# "start_labels":start_labels, "end_labels":end_labels})
# assume the event types are all correct
for event in row["events"]:
event_type = event["type"]
for gold_role in role_dict[event_type]:
start_labels = [0]*len(row["content"])
end_labels = [0]*len(row["content"])
for arg in event["mentions"]:
role = arg['role']
if role=="trigger": continue
if role!=gold_role: continue
argument_start_index, argument_end_index = arg["span"]
argument_end_index -= 1
start_labels[argument_start_index] = 1
end_labels[argument_end_index] = 1
results.append({"id":row["id"], "words":list(row["content"]), "event_type":event_type, "role":gold_role, \
"start_labels":start_labels, "end_labels":end_labels})
return results
## lic format
def role_process_bin_lic(input_file, schema_file, is_predict=False):
role_dict = {}
rows = open(schema_file, encoding='utf-8').read().splitlines()
for row in rows:
row = json.loads(row)
event_type = row['event_type']
role_dict[event_type] = []
for role in row["role_list"]:
role_dict[event_type].append(role["role"])
rows = open(input_file, encoding='utf-8').read().splitlines()
results = []
count = 0
for row in rows:
if len(row)==1: print(row)
row = json.loads(row)
count += 1
if "id" not in row:
row["id"]=count
# arguments = []
if is_predict:
start_labels = [0]*len(row["text"])
end_labels = [0]*len(row["text"])
results.append({"id":row["id"], "words":list(row["text"]), "start_labels":start_labels, "end_labels":end_labels})
continue
# # assume the event types are all correct
for event in row["event_list"]:
event_type = event["event_type"]
for gold_role in role_dict[event_type]:
start_labels = [0]*len(row["text"])
end_labels = [0]*len(row["text"])
for arg in event["arguments"]:
role = arg['role']
if role!=gold_role: continue
argument = arg['argument']
argument_start_index = arg["argument_start_index"]
argument_end_index = argument_start_index + len(argument) -1
start_labels[argument_start_index] = 1
end_labels[argument_end_index] = 1
results.append({"id":row["id"], "words":list(row["text"]), "event_type":event_type, "role":gold_role, \
"start_labels":start_labels, "end_labels":end_labels})
return results
## ace format
def role_process_bin_ace(input_file, schema_file, is_predict=False):
role_dict = {}
rows = open(schema_file, encoding='utf-8').read().splitlines()
for row in rows:
row = json.loads(row)
event_type = row['event_type']
role_dict[event_type] = []
for role in row["role_list"]:
role_dict[event_type].append(role["role"])
results = []
count = 0
file = open(input_file,'r',encoding='utf-8')
rows = json.load(file)
for row in rows:
count += 1
if "id" not in row:
row["id"]=count
# arguments = []
if is_predict:
start_labels = [0]*len(row["words"])
end_labels = [0]*len(row["words"])
results.append({"id":row["id"], "words":list(row["words"]), "start_labels":start_labels, "end_labels":end_labels})
continue
entities = row['entities']
# # assume the event types are all correct
for event in row["event-mentions"]:
event_type = event["event_type"]
for gold_role in role_dict[event_type]:
start_labels = [0]*len(row["words"])
end_labels = [0]*len(row["words"])
for i, role in enumerate(event["arguments"]):
if role!=gold_role: continue
entity = entities[i]
# argument = entity['text']
# if entity['text'] != entity['head']["text"]:
# print(entity['text'], '\n', entity['head']["text"])
# assert entity['text'] == entity['head']["text"]
argument_start_index = entity['head']["start"]
argument_end_index = entity['head']["end"] - 1
start_labels[argument_start_index] = 1
end_labels[argument_end_index] = 1
results.append({"id":row["id"], "words":list(row["words"]), "event_type":event_type, "role":gold_role, \
"start_labels":start_labels, "end_labels":end_labels})
return results
def read_examples_from_file(data_dir, schema_file, mode, task, dataset="ccks"):
file_path = os.path.join(data_dir, "{}.json".format(mode))
if dataset=="ccks":
if task=='trigger': items = trigger_process_bin_ccks(file_path, schema_file,)
if task=='role': items = role_process_bin_ccks(file_path, schema_file,)
elif dataset=="lic":
if task=='trigger': items = trigger_process_bin_lic(file_path, schema_file,)
if task=='role': items = role_process_bin_lic(file_path, schema_file,)
elif dataset=="ace":
if task=='role': items = role_process_bin_ace(file_path, schema_file,)
return [InputExample(**item) for item in items]
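# Illustrative call (directory and schema paths are hypothetical):
# examples = read_examples_from_file("./data/ccks", "./data/ccks/event_schema.json", "train", task="role", dataset="ccks")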
def get_query_templates_trigger(dataset):
query_file = "./query_template/trigger/"+dataset+".csv"
query_templates = dict()
with open(query_file, "r", encoding='utf-8') as f:
next(f)
for line in f:
if dataset == "ccks":
event_type, description = line.strip().split(",")
elif dataset == 'lic':
event_type, description = line.strip().split(",")
if event_type not in query_templates:
query_templates[event_type] = list()
# 0
query_templates[event_type].append(event_type)
# 1
query_templates[event_type].append(event_type + " "+ description)
# 2
query_templates[event_type].append(event_type + "的触发词是什么?" + "(" + description + ")" )
# 3
query_templates[event_type].append(event_type + " " + description+ " "+ description)
# query_templates[event_type][role].append(role + " in [trigger]")
# query_templates[event_type][role].append(query[:-1] + " in [trigger]?")
return query_templates
def get_query_templates_role(dataset):
"""Load query templates"""
query_file = "./query_template/role/"+dataset+".csv"
query_templates = dict()
with open(query_file, "r", encoding='utf-8') as f:
next(f)
for line in f:
if dataset == "ccks":
event_type, role, role_chinese, description, role_type = line.strip().split(",")
elif dataset == 'lic':
event_type, role = line.strip().split(",")
role_chinese, description, role_type = role, "", ""
if event_type not in query_templates:
query_templates[event_type] = dict()
if role not in query_templates[event_type]:
query_templates[event_type][role] = list()
# 0
query_templates[event_type][role].append(role_chinese)
# 1
query_templates[event_type][role].append(event_type + " "+ role_chinese)
# 2
query_templates[event_type][role].append(role_chinese+ " "+ description)
# 3
query_templates[event_type][role].append(event_type + " " + role_chinese+ " "+ description)
# 4
query_templates[event_type][role].append(event_type + "中的" + role_chinese+ " "+ description+ " 是什么?")
# 5
query_templates[event_type][role].append(["[unused2]", "[unused3]"] +list(event_type) + ["[unused4]", "[unused5]"] + list(role_chinese)+ ["[unused6]", "[unused7]"]+ list(description) + ["[unused8]", "[unused9]"])
# query_templates[event_type][role].append(role + " in [trigger]")
# query_templates[event_type][role].append(query[:-1] + " in [trigger]?")
return query_templates
def get_query_templates(dataset, task):
if task=='role': return get_query_templates_role(dataset)
elif task=="trigger": return get_query_templates_trigger(dataset)
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
pad_token_label_id=-100,
sequence_a_segment_id=0,
sequence_b_segment_id=1,
mask_padding_with_zero=True,
nth_query=2,
dataset='ccks',
task='trigger'
):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id | |
import database as d
import numpy as np
import random
from transitions import Machine
#Conversations are markov chains. Works as follows: a column vector for each CURRENT state j, a row vector for each TARGET state i.
#Each entry i,j = the probability of moving to state i from state j.
#target state D = end of conversation. We start in state D when initializing conversation.
#row vectors sum to 1, internal lists are columns.
#Conversation is a singleton. DO NOT CREATE NEW CONVERSATION OBJECTS.
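# Illustrative sketch, not taken from the handlers below (they are defined elsewhere):
# with the layout described above, the distribution for leaving the option you are
# currently on is read across the inner lists at the current option's index; those
# entries sum to 1 in every matrix, so they can be passed directly to numpy.
def _sample_next_option(matrix, current_index):
    probabilities = [targets[current_index] for targets in matrix]
    return np.random.choice(len(probabilities), p=probabilities)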
class Conversation(object):
#a. stores, b.manufacturers, c.friends, d. myself, e.end conversation
topicMatrix = [
[0.00,0.20,0.15,0.15,0.25],
[0.20,0.00,0.15,0.15,0.25],
[0.15,0.15,0.00,0.20,0.25],
[0.15,0.15,0.20,0.00,0.25],
[0.50,0.50,0.50,0.50,0.00]
]
#a. different store, b. new topic, c. end convo, d. prices
storeMatrix = [
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.50],
[1.0,1.0,0.25,0.00]
]
#a. different manufacturer, b. new topic, c. end convo, d. prices
manuMatrix = [
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.50],
[1.0,1.0,0.25,0.00]
]
#a. different friend, b. new topic, c. end convo, d. family, e. job, /f. skills
friendMatrix = [
[0.0,0.0,0.2,0.1,0.1],
[0.0,0.0,0.2,0.2,0.2],
[0.0,0.0,0.2,0.5,0.5],
[0.5,0.5,0.2,0.0,0.2],
[0.5,0.5,0.2,0.2,0.0]
]
# friendMatrix = [
# [0.00,0.00,0.15,0.1,0.1,0.1],
# [0.00,0.00,0.15,0.2,0.2,0.2],
# [0.00,0.00,0.15,0.5,0.5,0.5],
# [0.34,0.34,0.15,0.0,0.1,0.1],
# [0.33,0.33,0.15,0.1,0.0,0.1],
# [0.33,0.33,0.25,0.1,0.1,0.0]
# ]
#a. introduction, b. new topic, c. end convo, d. myfamily, e. myjob, /f. myskills
myselfMatrix = [
[0.00,1,0.2,0.0,0.0],
[0.25,0,0.2,0.2,0.2],
[0.25,0,0.2,0.5,0.5],
[0.25,0,0.2,0.0,0.3],
[0.25,0,0.2,0.3,0.0]
]
# myselfMatrix = [
# [0.0,1,0.15,0.00,0.00,0.00],
# [0.2,0,0.15,0.20,0.20,0.20],
# [0.2,0,0.15,0.50,0.50,0.50],
# [0.2,0,0.15,0.00,0.15,0.15],
# [0.2,0,0.15,0.15,0.00,0.15],
# [0.2,0,0.15,0.15,0.15,0.00]
# ]
states = ['topic','store','manu','friend', 'myself', 'exit']
transitions = [
{'trigger' : 'toTopic', 'source' : '*', 'dest' : 'topic'},
{'trigger' : 'toStore', 'source' : 'topic', 'dest' : 'store'},
{'trigger' : 'toManu' , 'source' : 'topic', 'dest' : 'manu' },
{'trigger' : 'toFriend', 'source' : 'topic', 'dest' : 'friend' },
{'trigger' : 'toMyself', 'source' : 'topic', 'dest' : 'myself'},
{'trigger' : 'toExit', 'source' : '*', 'dest' : 'exit'}
]
def __init__(self):
self.isPlayer = False
self.firstPerson = None
self.secondPerson = None
self.target = None
self.machine = Machine(model=self, states=Conversation.states, transitions=Conversation.transitions, initial='exit')
self.menuDict = {
'topic' : [self.toStore, self.toManu, self.toFriend, self.toMyself, self.toExit],
'store' : [self.different, self.toTopic, self.toExit, self.prices],
'manu' : [self.different, self.toTopic, self.toExit, self.prices],
'friend' : [self.different, self.toTopic, self.toExit, self.family, self.job],
'myself' : [self.introduction, self.toTopic, self.toExit, self.myfamily, self.myjob]
}
self.machine.on_enter_topic('topicHandler')
self.machine.on_enter_store('storeHandler')
self.machine.on_enter_manu('manuHandler')
self.machine.on_enter_friend('friendHandler')
self.machine.on_enter_myself('myselfHandler')
self.machine.on_enter_exit('exitHandler')
def beginConversation(self, firstPerson, secondPerson, isPlayer=False):
self.isPlayer = isPlayer
self.firstPerson = firstPerson
self.secondPerson = secondPerson
self.introduction()
self.toTopic()
def introduction(self):
p2 = self.firstPerson.peopleManager(self.secondPerson)
p1 = self.secondPerson.peopleManager(self.firstPerson)
p2.name = self.secondPerson.name
p1.name = self.firstPerson.name
p2.updateOpinion(1)
p1.updateOpinion(1)
def different(self):
if self.state == 'friend':
testTarget = self.firstPerson.randomPerson(self.target)
if testTarget is not None:
self.target = testTarget.person
else:
self.target = None
elif self.state == 'manu':
testTarget = self.firstPerson.randomManu(self.target)
if testTarget is not None:
self.target = testTarget.store
else:
self.target = None
elif self.state == 'store':
testTarget = self.firstPerson.randomStore(self.target)
if testTarget is not None:
self.target = testTarget.store
else:
self.target = None
def prices(self):
if self.target is not None:
firstProfile = self.firstPerson.unitManager(self.target, self.secondPerson)
secondProfile = self.secondPerson.unitManager(self.target, self.firstPerson)
firstPrices = firstProfile.getPricesWithDayNum()
secondPrices = secondProfile.getPricesWithDayNum()
firstDayNum = firstPrices[1]
secondDayNum = secondPrices[1]
if firstDayNum > secondDayNum:
prices = firstPrices[0]
secondProfile.updatePrices(prices, firstDayNum)
#thoughts
self.firstPerson.think("I told " + self.secondPerson.name + " about the prices at " + self.target.name + ".")
self.secondPerson.think(self.firstPerson.name + " told me about the prices at " + self.target.name + ".")
elif secondDayNum > firstDayNum:
prices = secondPrices[0]
firstProfile.updatePrices(prices, secondDayNum)
#thoughts
self.firstPerson.think(self.secondPerson.name + " told me about the prices at " + self.target.name + ".")
self.secondPerson.think("I told " + self.firstPerson.name + " about the prices at " + self.target.name + ".")
else:
self.firstPerson.think(self.secondPerson.name + " and I talked about " + self.target.name + "'s prices.")
self.secondPerson.think(self.firstPerson.name + " and I talked about " + self.target.name + "'s prices.")
else:
if self.state == 'store':
self.firstPerson.think(self.secondPerson.name + " listened to me gripe about how I can't find anywhere to shop.")
self.secondPerson.think(self.firstPerson.name + " told me that they can't find anywhere to shop.")
elif self.state == 'manu':
self.firstPerson.think("I mentioned to " + self.secondPerson.name + " that I don't know anything about the local industry.")
self.secondPerson.think(self.firstPerson.name + " told me that they don't know much about the local industry.")
else:
self.firstPerson.think("There is a bug in conversation.prices. (not manu or store)")
self.secondPerson.think("There is a bug in conversation.prices. (not manu or store)")
def family(self):
if self.target is not None:
#info: family, people
#profiles
p1 = self.firstPerson.peopleManager(self.target)
p2 = self.secondPerson.peopleManager(self.target)
#variables
f1 = p1.getFamily()
f2 = p2.getFamily()
ff = []
#update profiles
for a, b in zip(f1, f2):
if a[-1] >= b[-1]:
ff.append(a)
else:
ff.append(b)
p1.updateFamily(*ff)
p2.updateFamily(*ff)
#thoughts
self.firstPerson.think(self.secondPerson.name + " and I gossipped about " + self.target.name + "'s family.")
self.secondPerson.think(self.firstPerson.name + " and I gossipped about " + self.target.name + "'s family.")
else:
self.firstPerson.think("I don't really know anything about my friends' families.")
self.secondPerson.think("I don't really know anything about my friends' families.")
def job(self):
if self.target is not None:
#profiles
firstProfile = self.firstPerson.peopleManager(self.target)
secondProfile = self.secondPerson.peopleManager(self.target)
#variables
firstJob = firstProfile.getJob()
secondJob = secondProfile.getJob()
#update profiles
if firstJob[1] > secondJob[1]:
secondProfile.updateJob(*firstJob)
self.firstPerson.think("I told " + self.secondPerson.name + " what " + self.target.name + " does for a living.")
self.secondPerson.think(self.firstPerson.name + " told me what " + self.target.name + " does for a living.")
elif secondJob[1] > firstJob[1]:
firstProfile.updateJob(*secondJob)
self.firstPerson.think(self.secondPerson.name + " told me what " + self.target.name + " does for a living.")
self.secondPerson.think("I told " + self.firstPerson.name + " about " + self.target.name + " does for a living.")
else:
self.firstPerson.think(self.secondPerson.name + " and I talked about " + self.target.name + "'s job.")
self.secondPerson.think(self.firstPerson.name + " and I talked about " + self.target.name + "'s job.")
else:
self.firstPerson.think("I don't know what any of my friends do for a living!")
self.secondPerson.think("I don't know what any of my friends do for a living!")
# def skills(self):
# #info: skills
# if self.target is not None:
# #profiles
# firstProfile = self.firstPerson.peopleManager(self.target)
# secondProfile = self.secondPerson.peopleManager(self.target)
# #variables
# firstSkills = firstProfile.getSkills()
# secondSkills = secondProfile.getSkills()
# #update profiles
# if firstSkills[1] > secondSkills[1]:
# secondProfile.updateSkills(*firstSkills)
# self.firstPerson.think("I told " + self.secondPerson.name + " about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think(self.firstPerson.name + " told me about how good " + self.target.name + " is with their hands.")
# elif secondSkills[1] > firstSkills[1]:
# firstProfile.updateSkills(*secondSkills)
# self.firstPerson.think(self.secondPerson.name + " told me about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think("I told " + self.firstPerson.name + " about how good " + self.target.name + " is with their hands.")
# else:
# self.firstPerson.think(self.secondPerson.name + " and I talked about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think(self.firstPerson.name + " and I talked about how good " + self.target.name + " is with their hands.")
# else:
# self.firstPerson.think("I should spend more time doing things with my friends.")
# self.secondPerson.think("I should spend more time doing things with my friends.")
def myfamily(self):
#info: family, people
#profiles
firstProfile = self.secondPerson.peopleManager(self.firstPerson)
secondProfile = self.firstPerson.peopleManager(self.secondPerson)
firstOwn = self.firstPerson.peopleManager(self.firstPerson)
secondOwn = self.secondPerson.peopleManager(self.secondPerson)
#update profiles
firstProfile.updateFamily(firstOwn.getFather(), firstOwn.getMother(), firstOwn.getSpouse(), firstOwn.getSiblings(), firstOwn.getChildren())
secondProfile.updateFamily(secondOwn.getFather(), secondOwn.getMother(), secondOwn.getSpouse(), secondOwn.getSiblings(), secondOwn.getChildren())
#thoughts
self.firstPerson.think(self.secondPerson.name + " caught me up on their family life.")
self.secondPerson.think(self.firstPerson.name + " caught me up on their family life.")
def myjob(self):
#info: jobs, jobUnits, *salaries
#profiles
firstProfile = self.secondPerson.peopleManager(self.firstPerson)
secondProfile = self.firstPerson.peopleManager(self.secondPerson)
#variables
firstJob = self.firstPerson.getJob()
secondJob = self.secondPerson.getJob()
dayNum = self.firstPerson.model.getDayNum()
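        #note: the bare except blocks below presumably cover people without a job
        #(getJob() returning None), falling back to the "Jobhunter" placeholder values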
try:
firstJobType = firstJob.getJobType()
firstJobUnit = firstJob.getUnit()
firstJobLoc = firstJobUnit.getName()
firstSalary = firstJob.getSalary()
except:
firstJobType = "Jobhunter"
firstJobUnit = None
firstJobLoc = "home"
firstSalary = 0
try:
secondJobType = secondJob.getJobType()
secondJobUnit = secondJob.getUnit()
secondJobLoc = secondJobUnit.getName()
secondSalary = secondJob.getSalary()
except:
secondJobType = "Jobhunter"
secondJobUnit = None
secondJobLoc = "home"
secondSalary = 0
#update profiles
if dayNum > firstProfile.getJob()[1]:
firstProfile.updateJob(firstJob, dayNum)
if dayNum > firstProfile.getSalary()[1]:
firstProfile.updateSalary(firstSalary, dayNum)
if dayNum > secondProfile.getJob()[1]:
secondProfile.updateJob(secondJob, dayNum)
if dayNum > secondProfile.getSalary()[1]:
            secondProfile.updateSalary(secondSalary, dayNum)
if firstJobUnit is not None:
self.secondPerson.unitManager(firstJobUnit, self.firstPerson)
if secondJobUnit is not None:
self.firstPerson.unitManager(secondJobUnit, self.secondPerson)
#thoughts
self.firstPerson.think(self.secondPerson.name + " told me about their job as a " + secondJobType + " at " + secondJobLoc + ".")
        self.secondPerson.think(self.firstPerson.name + " told me about their job as a " + firstJobType + " at " + firstJobLoc + ".")
from dataclasses import dataclass
from datetime import date, datetime
import mock
from pdfminer.layout import LTChar, LTCurve, LTFigure, LTImage, LTTextBoxHorizontal, LTTextLineHorizontal
from typing import List
from rdr_service.services.consent import files
from tests.helpers.unittest_base import BaseTestCase
class ConsentFileParsingTest(BaseTestCase):
def __init__(self, *args, **kwargs):
super(ConsentFileParsingTest, self).__init__(*args, **kwargs)
self.uses_database = False
def test_vibrent_primary_consent(self):
for consent_example in self._get_vibrent_primary_test_data():
consent_file = consent_example.file
self.assertEqual(consent_example.expected_signature, consent_file.get_signature_on_file())
self.assertEqual(consent_example.expected_sign_date, consent_file.get_date_signed())
self.assertEqual(consent_example.expected_to_be_va_file, consent_file.get_is_va_consent())
def test_vibrent_cabor_consent(self):
for consent_example in self._get_vibrent_cabor_test_data():
consent_file = consent_example.file
self.assertEqual(consent_example.expected_signature, consent_file.get_signature_on_file())
self.assertEqual(consent_example.expected_sign_date, consent_file.get_date_signed())
def test_vibrent_ehr_consent(self):
for consent_example in self._get_vibrent_ehr_test_data():
consent_file = consent_example.file
self.assertEqual(consent_example.expected_signature, consent_file.get_signature_on_file())
self.assertEqual(consent_example.expected_sign_date, consent_file.get_date_signed())
self.assertEqual(consent_example.expected_to_be_va_file, consent_file.get_is_va_consent())
def test_vibrent_gror_consent(self):
for consent_example in self._get_vibrent_gror_test_data():
consent_file = consent_example.file
self.assertEqual(consent_example.expected_signature, consent_file.get_signature_on_file())
self.assertEqual(consent_example.expected_sign_date, consent_file.get_date_signed())
self.assertEqual(consent_example.has_yes_selected, consent_file.is_confirmation_selected())
def test_vibrent_primary_update_consent(self):
for consent_example in self._get_vibrent_primary_update_test_data():
consent_file = consent_example.file
self.assertEqual(consent_example.expected_signature, consent_file.get_signature_on_file())
self.assertEqual(consent_example.expected_sign_date, consent_file.get_date_signed())
self.assertEqual(consent_example.has_yes_selected, consent_file.is_agreement_selected())
self.assertEqual(consent_example.expected_to_be_va_file, consent_file.get_is_va_consent())
def _get_primary_consent_elements(self):
return [
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='understand the information in this form. All of my questions\n'
),
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='have been answered. I freely and willingly choose to take part in\n'
),
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='the All of Us Research Program.\n'
)
]
),
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(cls=LTTextLineHorizontal, text='Sign Your Full Name: \n')
]
)
]
def _get_vibrent_primary_test_data(self) -> List['PrimaryConsentTestData']:
"""
Builds a list of PDFs that represent the different layouts of Vibrent's primary consent
that have been encountered. Add to this if the code incorrectly parses any Vibrent primary pdf
"""
test_data = []
# elements that usually appear on the signature page
description_elements = self._get_primary_consent_elements()
# Build basic file with signature of Test Name and signing date of August 17, 2019
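        # (bbox values below follow pdfminer's (x0, y0, x1, y1) convention in PDF points;
        # the parser presumably locates the signature and date fields by these coordinates)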
pdf = self._build_pdf(pages=[
[
*description_elements,
self._build_form_element(text='<NAME>', bbox=(116, 147, 517, 169)),
self._build_form_element(text='Aug 17, 2019', bbox=(116, 97, 266, 119))
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
expected_signature='<NAME>',
expected_sign_date=date(2019, 8, 17)
)
)
# Build an older style of primary layout, with signature box higher up on the page
pdf = self._build_pdf(pages=[
[
*description_elements,
self._build_form_element(text='Nick', bbox=(116, 585, 517, 605)),
self._build_form_element(text='Dec 25, 2017', bbox=(116, 565, 266, 585))
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
expected_signature='Nick',
expected_sign_date=date(2017, 12, 25)
)
)
# Build basic VA primary file
pdf = self._build_pdf(pages=[
[
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(cls=LTTextLineHorizontal, text='you will get care at a VA facility')
]
)
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
expected_signature=None,
expected_sign_date=None,
expected_to_be_va_file=True
)
)
# Build file with an empty text element instead of a signature and date
pdf = self._build_pdf(pages=[
[
*description_elements,
self._build_form_element(text='', bbox=(116, 147, 521, 171)),
self._build_form_element(text='', bbox=(116, 97, 266, 119))
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
expected_signature=None,
expected_sign_date=None
)
)
# Build consent with an image instead of a typed signature
pdf = self._build_pdf(pages=[
[
*description_elements,
self._build_form_element(
bbox=(200, 125, 400, 191),
children=[
self._build_pdf_element(cls=LTImage, bbox=(200, 125, 400, 191))
]
),
self._build_form_element(text='December 7, 2018', bbox=(116, 97, 266, 119))
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
expected_signature=True,
expected_sign_date=date(2018, 12, 7)
)
)
# Build older style consent with different signature description formatting
pdf = self._build_pdf(pages=[
[
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='this form. All of my questions have been answered. I freely and\n'
),
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='willingly choose to take part in the All of Us Research Program.\n'
),
]
),
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(
cls=LTTextLineHorizontal,
children=[
self._build_pdf_element(LTTextLineHorizontal, text='Sign Your \n'),
self._build_pdf_element(LTTextLineHorizontal, text='Full Name: \n')
]
)
]
),
self._build_form_element(text='2018 Participant', bbox=(116, 147, 521, 171)),
self._build_form_element(text='Feb 19, 2018', bbox=(116, 96, 521, 120))
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
expected_signature='2018 Participant',
expected_sign_date=date(2018, 2, 19)
)
)
# Build Spanish version of the Primary file
pdf = self._build_pdf(pages=[
[
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='Decido participar libremente y por voluntad propia'
)
]
),
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(
cls=LTTextLineHorizontal,
children=[
self._build_pdf_element(LTTextLineHorizontal, text='Firme con su nombre completo:')
]
)
]
),
self._build_form_element(text='Spanish Participant', bbox=(116, 147, 517, 169)),
self._build_form_element(text='Mar 3, 2021', bbox=(116, 97, 266, 119))
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
expected_signature='Spanish Participant',
expected_sign_date=date(2021, 3, 3)
)
)
return test_data
def _get_vibrent_cabor_test_data(self) -> List['ConsentTestData']:
"""Builds a list of PDFs that represent the different layouts of Vibrent's CaBOR consent"""
basic_cabor_pdf = self._build_pdf(pages=[
[
self._build_form_element(text='Test cabor', bbox=(116, 100, 517, 140)),
self._build_form_element(text='April 27, 2020', bbox=(500, 100, 600, 140))
]
])
basic_cabor_case = ConsentTestData(
file=files.VibrentCaborConsentFile(pdf=basic_cabor_pdf, blob=mock.MagicMock()),
expected_signature='Test cabor',
expected_sign_date=date(2020, 4, 27)
)
older_cabor_pdf = self._build_pdf(pages=[
[
self._build_form_element(text='2017 Cabor', bbox=(150, 150, 350, 188)),
self._build_form_element(text='Sep 8, 2017', bbox=(434, 153, 527, 182))
]
])
older_cabor_case = ConsentTestData(
file=files.VibrentCaborConsentFile(pdf=older_cabor_pdf, blob=mock.MagicMock()),
expected_signature='2017 Cabor',
expected_sign_date=date(2017, 9, 8)
)
return [basic_cabor_case, older_cabor_case]
def _get_vibrent_ehr_test_data(self) -> List['EhrConsentTestData']:
six_empty_pages = [[], [], [], [], [], []] # The EHR signature is expected to be on the 7th page
basic_ehr_pdf = self._build_pdf(pages=[
*six_empty_pages,
[
self._build_form_element(text='Test ehr', bbox=(125, 150, 450, 180)),
self._build_form_element(text='Dec 21, 2019', bbox=(125, 100, 450, 130))
]
])
basic_ehr_case = EhrConsentTestData(
file=files.VibrentEhrConsentFile(pdf=basic_ehr_pdf, blob=mock.MagicMock()),
expected_signature='Test ehr',
expected_sign_date=date(2019, 12, 21)
)
va_ehr_pdf = self._build_pdf(pages=[
*six_empty_pages,
[
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='We may ask you to go to a local clinic to be measured'
),
self._build_form_element(text='Test va ehr', bbox=(125, 150, 450, 180)),
self._build_form_element(text='Oct 10, 2020', bbox=(125, 100, 450, 130))
]
])
va_ehr_case = EhrConsentTestData(
file=files.VibrentEhrConsentFile(pdf=va_ehr_pdf, blob=mock.MagicMock()),
expected_signature='Test va ehr',
expected_sign_date=date(2020, 10, 10),
expected_to_be_va_file=True
)
return [basic_ehr_case, va_ehr_case]
def _get_vibrent_gror_test_data(self) -> List['GrorConsentTestData']:
# The GROR signature is expected to be on the 10th page
nine_empty_pages = [
[], [], [], [], [], [], [], [], []
]
basic_gror_pdf = self._build_pdf(pages=[
*nine_empty_pages,
[
self._build_form_element(
children=[self._build_pdf_element(LTCurve)],
bbox=(65, 470, 75, 480)
),
self._build_form_element(text='Test gror', bbox=(140, 150, 450, 180)),
self._build_form_element(text='Jan 1st, 2021', bbox=(125, 100, 450, 130))
]
])
basic_gror_case = GrorConsentTestData(
file=files.VibrentGrorConsentFile(pdf=basic_gror_pdf, blob=mock.MagicMock()),
expected_signature='Test gror',
expected_sign_date=date(2021, 1, 1),
has_yes_selected=True
)
gror_missing_check = self._build_pdf(pages=[
*nine_empty_pages,
[
self._build_form_element(text='no confirmation', bbox=(140, 150, 450, 180)),
self._build_form_element(text='Feb 1st, 2021', bbox=(125, 100, 450, 130))
]
])
no_confirmation_case = GrorConsentTestData(
file=files.VibrentGrorConsentFile(pdf=gror_missing_check, blob=mock.MagicMock()),
expected_signature='no confirmation',
expected_sign_date=date(2021, 2, 1),
has_yes_selected=False
)
spanish_gror_pdf = self._build_pdf(pages=[
*nine_empty_pages,
[
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='¿Desea conocer alguno de sus resultados de ADN?'
),
self._build_form_element(
children=[self._build_pdf_element(LTCurve)],
bbox=(30, 478, 40, 488)
),
self._build_form_element(text='spanish gror', bbox=(140, 150, 450, 180)),
self._build_form_element(text='May 1st, 2018', bbox=(125, 100, 450, 130))
]
])
spanish_gror_case = GrorConsentTestData(
file=files.VibrentGrorConsentFile(pdf=spanish_gror_pdf, blob=mock.MagicMock()),
expected_signature='spanish gror',
expected_sign_date=date(2018, 5, 1),
has_yes_selected=True
)
return [basic_gror_case, no_confirmation_case, spanish_gror_case]
def _get_vibrent_primary_update_test_data(self) -> List['PrimaryUpdateConsentTestData']:
basic_update_pdf = self._build_pdf(pages=[
[
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(cls=LTTextLineHorizontal, text='Do you agree to this updated consent?')
]
),
self._build_form_element(
children=[self._build_pdf_element(LTChar, text='4')],
bbox=(34, 669, 45, 683)
),
self._build_form_element(text='Test update', bbox=(116, 146, 521, 170)),
self._build_form_element(text='Jan 1st, 2021', bbox=(116, 96, 521, 120))
]
])
basic_update_case = PrimaryUpdateConsentTestData(
file=files.VibrentPrimaryConsentUpdateFile(
pdf=basic_update_pdf,
blob=mock.MagicMock(),
consent_date=datetime.now()
),
expected_signature='Test update',
expected_sign_date=date(2021, 1, 1),
has_yes_selected=True,
expected_to_be_va_file=False
)
va_update_pdf = self._build_pdf(pages=[
[
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(cls=LTTextLineHorizontal, text='Do you agree to this updated consent?')
]
),
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(cls=LTTextLineHorizontal, text='you will get care at a VA facility')
]
),
self._build_form_element(text='Test update', bbox=(116, 146, 521, 170)),
self._build_form_element(text='Jan 1st, 2021', bbox=(116, 96, 521, 120))
]
])
va_update_case = PrimaryUpdateConsentTestData(
file=files.VibrentPrimaryConsentUpdateFile(
pdf=va_update_pdf,
blob=mock.MagicMock(),
consent_date=datetime.now()
),
expected_signature='Test update',
expected_sign_date=date(2021, 1, 1),
has_yes_selected=False,
expected_to_be_va_file=True
)
# Build basic primary file for older version of PrimaryUpdate
pdf = self._build_pdf(pages=[
[
*self._get_primary_consent_elements(),
self._build_form_element(text='<NAME>', bbox=(116, 147, 517, 169)),
self._build_form_element(text='Aug 9, 2020', bbox=(116, 97, 266, 119))
]
])
older_update_case = PrimaryUpdateConsentTestData(
file=files.VibrentPrimaryConsentUpdateFile(
pdf=pdf,
blob=mock.MagicMock(),
consent_date=datetime(2020, 8, 9)
),
expected_signature='Test Name',
expected_sign_date=date(2020, 8, 9),
has_yes_selected=True
)
return [basic_update_case, va_update_case, older_update_case]
@classmethod
def _build_pdf(cls, pages) -> files.Pdf:
"""
Builds a consent_files.Pdf object
:param pages A list where each item represents a page,
and each item is a list of pdf elements for what should be on that page
"""
page_mocks = []
for page_elements in pages:
page_mock = mock.MagicMock()
page_mock.__iter__.return_value = page_elements
page_mocks.append(page_mock)
return files.Pdf(pages=page_mocks)
def _build_pdf_element(self, cls, text: str = None, children: list = None, bbox=None):
"""Create a generic pdf element to add to the page"""
element = mock.MagicMock(spec=cls)
self._set_bbox(bbox, element)
if children:
element.__iter__.return_value = children
if hasattr(element, 'get_text'):
if text is None:
get_text_result = ''.join([child.get_text() for child in children])
else:
get_text_result = text
element.get_text.return_value = get_text_result
return element
def _build_form_element(self, bbox, text: str = None, children: list = None):
"""
Form elements don't have a get_text method, and (at least with the Vibrent PDFs) any text within them is
laid out character by character
"""
element = mock.MagicMock(spec=LTFigure)
self._set_bbox(bbox, element)
if children:
element.__iter__.return_value = children
else:
char_list = []
for char_str in text:
char_element = mock.MagicMock(spec=LTChar)
                char_element.get_text.return_value = char_str
                char_list.append(char_element)
            element.__iter__.return_value = char_list
        return element
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
import tool_shed.base.test_db_util as test_db_util
emboss_datatypes_repository_name = 'emboss_datatypes_0050'
emboss_datatypes_repository_description = "Datatypes for emboss"
emboss_datatypes_repository_long_description = "Long description of Emboss' datatypes"
emboss_repository_name = 'emboss_0050'
emboss_repository_description = "Galaxy's emboss tool"
emboss_repository_long_description = "Long description of Galaxy's emboss tool"
filtering_repository_name = 'filtering_0050'
filtering_repository_description = "Galaxy's filtering tool"
filtering_repository_long_description = "Long description of Galaxy's filtering tool"
freebayes_repository_name = 'freebayes_0050'
freebayes_repository_description = "Galaxy's freebayes tool"
freebayes_repository_long_description = "Long description of Galaxy's freebayes tool"
column_repository_name = 'column_maker_0050'
column_repository_description = "Add column"
column_repository_long_description = "Compute an expression on every row"
convert_repository_name = 'convert_chars_0050'
convert_repository_description = "Convert delimiters"
convert_repository_long_description = "Convert delimiters to tab"
bismark_repository_name = 'bismark_0050'
bismark_repository_description = "A flexible aligner."
bismark_repository_long_description = "A flexible aligner and methylation caller for Bisulfite-Seq applications."
category_name = 'Test 0050 Circular Dependencies 5 Levels'
category_description = 'Test circular dependency features'
running_standalone = False
class TestInstallRepositoryCircularDependencies( ShedTwillTestCase ):
'''Verify that the code correctly handles circular dependencies down to n levels.'''
def test_0000_initiate_users( self ):
"""Create necessary user accounts."""
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = test_db_util.get_user( common.test_user_1_email )
        assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
test_user_1_private_role = test_db_util.get_private_role( test_user_1 )
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = test_db_util.get_user( common.admin_email )
        assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
admin_user_private_role = test_db_util.get_private_role( admin_user )
def test_0005_create_convert_repository( self ):
'''Create and populate convert_chars_0050.'''
category = self.create_category( name=category_name, description=category_description )
global running_standalone
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
repository = self.get_or_create_repository( name=convert_repository_name,
description=convert_repository_description,
long_description=convert_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
running_standalone = True
self.upload_file( repository,
'convert_chars/convert_chars.tar',
strings_displayed=[],
commit_message='Uploaded convert_chars.tar.' )
def test_0010_create_column_repository( self ):
        '''Create and populate column_maker_0050.'''
category = self.create_category( name=category_name, description=category_description )
repository = self.get_or_create_repository( name=column_repository_name,
description=column_repository_description,
long_description=column_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
self.upload_file( repository,
'column_maker/column_maker.tar',
strings_displayed=[],
commit_message='Uploaded column_maker.tar.' )
def test_0015_create_emboss_datatypes_repository( self ):
'''Create and populate emboss_datatypes_0050.'''
category = self.create_category( name=category_name, description=category_description )
repository = self.get_or_create_repository( name=emboss_datatypes_repository_name,
description=emboss_datatypes_repository_description,
long_description=emboss_datatypes_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
self.upload_file( repository,
'emboss/datatypes/datatypes_conf.xml',
strings_displayed=[],
commit_message='Uploaded datatypes_conf.xml.' )
def test_0020_create_emboss_repository( self ):
'''Create and populate emboss_0050.'''
category = self.create_category( name=category_name, description=category_description )
repository = self.get_or_create_repository( name=emboss_repository_name,
description=emboss_repository_description,
long_description=emboss_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
self.upload_file( repository,
'emboss/emboss.tar',
strings_displayed=[],
commit_message='Uploaded tool tarball.' )
def test_0025_create_filtering_repository( self ):
'''Create and populate filtering_0050.'''
category = self.create_category( name=category_name, description=category_description )
repository = self.get_or_create_repository( name=filtering_repository_name,
description=filtering_repository_description,
long_description=filtering_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
self.upload_file( repository,
'filtering/filtering_1.1.0.tar',
strings_displayed=[],
commit_message='Uploaded filtering.tar.' )
def test_0030_create_freebayes_repository( self ):
'''Create and populate freebayes_0050.'''
category = self.create_category( name=category_name, description=category_description )
repository = self.get_or_create_repository( name=freebayes_repository_name,
description=freebayes_repository_description,
long_description=freebayes_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
self.upload_file( repository,
'freebayes/freebayes.tar',
strings_displayed=[],
commit_message='Uploaded freebayes.tar.' )
def test_0035_create_bismark_repository( self ):
'''Create and populate bismark_0050.'''
category = self.create_category( name=category_name, description=category_description )
repository = self.get_or_create_repository( name=bismark_repository_name,
description=bismark_repository_description,
long_description=bismark_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
self.upload_file( repository,
'bismark/bismark.tar',
strings_displayed=[],
valid_tools_only=False,
commit_message='Uploaded bismark.tar.' )
def test_0040_create_and_upload_dependency_definitions( self ):
'''Set up the dependency structure.'''
global running_standalone
if running_standalone:
column_repository = test_db_util.get_repository_by_name_and_owner( column_repository_name, common.test_user_1_name )
convert_repository = test_db_util.get_repository_by_name_and_owner( convert_repository_name, common.test_user_1_name )
emboss_datatypes_repository = test_db_util.get_repository_by_name_and_owner( emboss_datatypes_repository_name, common.test_user_1_name )
emboss_repository = test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
filtering_repository = test_db_util.get_repository_by_name_and_owner( filtering_repository_name, common.test_user_1_name )
freebayes_repository = test_db_util.get_repository_by_name_and_owner( freebayes_repository_name, common.test_user_1_name )
bismark_repository = test_db_util.get_repository_by_name_and_owner( bismark_repository_name, common.test_user_1_name )
dependency_xml_path = self.generate_temp_path( 'test_1050', additional_paths=[ 'dependencies' ] )
# convert_chars depends on column_maker
# column_maker depends on convert_chars
# emboss depends on emboss_datatypes
# emboss_datatypes depends on bismark
# freebayes depends on freebayes, emboss, emboss_datatypes, and column_maker
# filtering depends on emboss
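            # note the two cycles exercised here: convert_chars <-> column_maker, and
            # freebayes listing itself among its own dependencies below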
self.create_repository_dependency( convert_repository, depends_on=[ column_repository ], filepath=dependency_xml_path )
self.create_repository_dependency( column_repository, depends_on=[ convert_repository ], filepath=dependency_xml_path )
self.create_repository_dependency( emboss_datatypes_repository, depends_on=[ bismark_repository ], filepath=dependency_xml_path )
self.create_repository_dependency( emboss_repository, depends_on=[ emboss_datatypes_repository ], filepath=dependency_xml_path )
self.create_repository_dependency( freebayes_repository,
depends_on=[ freebayes_repository, emboss_datatypes_repository, emboss_repository, column_repository ],
filepath=dependency_xml_path )
self.create_repository_dependency( filtering_repository, depends_on=[ emboss_repository ], filepath=dependency_xml_path )
def test_0045_verify_repository_dependencies( self ):
'''Verify that the generated dependency circle does not cause an infinite loop.
Expected structure:
id: 2 key: http://toolshed.local:10001__ESEP__filtering__ESEP__test__ESEP__871602b4276b
['http://toolshed.local:10001', 'emboss_5', 'test', '8de5fe0d7b04']
id: 3 key: http://toolshed.local:10001__ESEP__emboss_datatypes__ESEP__test__ESEP__dbd4f68bf507
['http://toolshed.local:10001', 'freebayes', 'test', 'f40028114098']
id: 4 key: http://toolshed.local:10001__ESEP__freebayes__ESEP__test__ESEP__f40028114098
['http://toolshed.local:10001', 'emboss_datatypes', 'test', 'dbd4f68bf507']
['http://toolshed.local:10001', 'emboss_5', 'test', '8de5fe0d7b04']
['http://toolshed.local:10001', 'column_maker', 'test', '83e956bdbac0']
id: 5 key: http://toolshed.local:10001__ESEP__column_maker__ESEP__test__ESEP__83e956bdbac0
['http://toolshed.local:10001', 'convert_chars', 'test', 'b28134220c8a']
id: 6 key: http://toolshed.local:10001__ESEP__convert_chars__ESEP__test__ESEP__b28134220c8a
['http://toolshed.local:10001', 'column_maker', 'test', '83e956bdbac0']
id: 7 key: http://toolshed.local:10001__ESEP__emboss_5__ESEP__test__ESEP__8de5fe0d7b04
['http://toolshed.local:10001', 'emboss_datatypes', 'test', 'dbd4f68bf507']
'''
emboss_datatypes_repository = test_db_util.get_repository_by_name_and_owner( emboss_datatypes_repository_name, common.test_user_1_name )
emboss_repository = test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
filtering_repository = test_db_util.get_repository_by_name_and_owner( filtering_repository_name, common.test_user_1_name )
freebayes_repository = test_db_util.get_repository_by_name_and_owner( freebayes_repository_name, common.test_user_1_name )
column_repository = test_db_util.get_repository_by_name_and_owner( column_repository_name, common.test_user_1_name )
convert_repository = test_db_util.get_repository_by_name_and_owner( convert_repository_name, common.test_user_1_name )
bismark_repository = test_db_util.get_repository_by_name_and_owner( bismark_repository_name, common.test_user_1_name )
self.check_repository_dependency( convert_repository, column_repository )
self.check_repository_dependency( column_repository, convert_repository )
self.check_repository_dependency( emboss_datatypes_repository, bismark_repository )
self.check_repository_dependency( emboss_repository, emboss_datatypes_repository )
self.check_repository_dependency( filtering_repository, emboss_repository )
for repository in [ emboss_datatypes_repository, emboss_repository, column_repository ]:
self.check_repository_dependency( freebayes_repository, repository )
freebayes_dependencies = [ freebayes_repository, emboss_datatypes_repository, emboss_repository, column_repository ]
strings_displayed = [ '%s depends on %s.' % ( freebayes_repository.name, ', '.join( repo.name for repo in freebayes_dependencies ) ) ]
self.display_manage_repository_page( freebayes_repository, strings_displayed=strings_displayed )
def test_0050_verify_tool_dependencies( self ):
'''Check that freebayes and emboss display tool dependencies.'''
freebayes_repository = test_db_util.get_repository_by_name_and_owner( freebayes_repository_name, common.test_user_1_name )
emboss_repository = test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
self.display_manage_repository_page( freebayes_repository,
strings_displayed=[ 'freebayes', '0.9.4_9696d0ce8a9', 'samtools', '0.1.18', 'Tool dependencies' ] )
self.display_manage_repository_page( emboss_repository, strings_displayed=[ 'Tool dependencies', 'emboss', '5.0.0', 'package' ] )
def test_0055_install_column_repository( self ):
'''Install column_maker with repository dependencies.'''
self.galaxy_logout()
self.galaxy_login( email=common.admin_email, username=common.admin_username )
self.install_repository( column_repository_name,
common.test_user_1_name,
category_name,
install_tool_dependencies=False,
install_repository_dependencies=True,
new_tool_panel_section='column_maker' )
# This should result in column_maker and convert_chars being installed, and the rest never installed.
installed_repositories = [ ( column_repository_name, common.test_user_1_name ),
( convert_repository_name, common.test_user_1_name ) ]
uninstalled_repositories = [ ( emboss_datatypes_repository_name, common.test_user_1_name ),
( emboss_repository_name, common.test_user_1_name ),
( filtering_repository_name, common.test_user_1_name ),
( freebayes_repository_name, common.test_user_1_name ),
( bismark_repository_name, common.test_user_1_name ) ]
self.verify_installed_uninstalled_repositories( installed_repositories=installed_repositories, uninstalled_repositories=uninstalled_repositories )
def test_0060_install_emboss_repository( self ):
'''Install emboss_5 with repository dependencies.'''
global running_standalone
original_datatypes = self.get_datatypes_count()
self.install_repository( emboss_repository_name,
common.test_user_1_name,
category_name,
install_tool_dependencies=False,
install_repository_dependencies=True,
new_tool_panel_section='emboss_5_0050' )
if running_standalone:
assert original_datatypes < self.get_datatypes_count(), 'Installing a repository that depends on emboss_datatypes did not add datatypes.'
# Now we have emboss_datatypes, emboss, bismark, column_maker, and convert_chars installed, filtering and freebayes never installed.
installed_repositories = [ ( emboss_datatypes_repository_name, common.test_user_1_name ),
( column_repository_name, common.test_user_1_name ),
( emboss_repository_name, common.test_user_1_name ),
( convert_repository_name, common.test_user_1_name ),
( bismark_repository_name, common.test_user_1_name ) ]
uninstalled_repositories = [ ( filtering_repository_name, common.test_user_1_name ),
( freebayes_repository_name, common.test_user_1_name ) ]
self.verify_installed_uninstalled_repositories( installed_repositories=installed_repositories, uninstalled_repositories=uninstalled_repositories )
def test_0065_deactivate_datatypes_repository( self ):
'''Deactivate emboss_datatypes and verify that the datatypes count is reduced.'''
original_datatypes = self.get_datatypes_count()
repository = test_db_util.get_installed_repository_by_name_owner( emboss_datatypes_repository_name, common.test_user_1_name )
self.uninstall_repository( repository, remove_from_disk=False )
assert original_datatypes > self.get_datatypes_count(), 'Deactivating emboss_datatypes did not remove datatypes.'
# Now we have emboss, bismark, column_maker, and convert_chars installed, filtering and freebayes never installed, and emboss_datatypes deactivated.
installed_repositories = [ ( column_repository_name, common.test_user_1_name ),
( emboss_repository_name, common.test_user_1_name ),
( convert_repository_name, common.test_user_1_name ),
( bismark_repository_name, common.test_user_1_name ) ]
uninstalled_repositories = [ ( emboss_datatypes_repository_name, common.test_user_1_name ),
( filtering_repository_name, common.test_user_1_name ),
( freebayes_repository_name, common.test_user_1_name ) ]
self.verify_installed_uninstalled_repositories( installed_repositories=installed_repositories, uninstalled_repositories=uninstalled_repositories )
strings_not_displayed = [ repository.name, repository.installed_changeset_revision ]
self.display_galaxy_browse_repositories_page( strings_not_displayed=strings_not_displayed )
def test_0070_uninstall_emboss_repository( self ):
'''Uninstall the emboss_5 repository.'''
repository = test_db_util.get_installed_repository_by_name_owner( emboss_repository_name, common.test_user_1_name )
self.uninstall_repository( repository, remove_from_disk=True )
strings_not_displayed = [ repository.name, repository.installed_changeset_revision ]
self.display_galaxy_browse_repositories_page( strings_not_displayed=strings_not_displayed )
test_db_util.ga_refresh( repository )
self.check_galaxy_repository_tool_panel_section( repository, 'emboss_5_0050' )
# Now we have bismark, column_maker, and convert_chars installed, filtering and freebayes never installed, emboss_datatypes deactivated,
# and emboss uninstalled.
installed_repositories = [ ( column_repository_name, common.test_user_1_name ),
( convert_repository_name, common.test_user_1_name ),
( bismark_repository_name, common.test_user_1_name ) ]
uninstalled_repositories = [ ( emboss_datatypes_repository_name, common.test_user_1_name ),
( emboss_repository_name, common.test_user_1_name ),
( filtering_repository_name, common.test_user_1_name ),
                                     ( freebayes_repository_name, common.test_user_1_name ) ]
        self.verify_installed_uninstalled_repositories( installed_repositories=installed_repositories, uninstalled_repositories=uninstalled_repositories )
import hashlib
import importlib
import os
import os.path as osp
import pkg_resources
import re
import subprocess
import tarfile
import typing
from collections import defaultdict
from distutils.version import LooseVersion
from pkg_resources import parse_version
from typing import Any, List, Optional, Tuple, Union
import click
import requests
from requests.exceptions import InvalidURL, RequestException, Timeout
from requests.models import Response
from .default import DEFAULT_URL, MMPACKAGE_PATH, PKG2MODULE, PKG2PROJECT
def parse_url(url: str) -> Tuple[str, str]:
"""Parse username and repo from url.
Args:
url (str): Url for parsing username and repo name.
Example:
>>> parse_url('https://github.com/open-mmlab/mmcv.git')
'open-mmlab', 'mmcv'
        >>> parse_url('<EMAIL>:open-mmlab/mmcv.git')
'open-mmlab', 'mmcv'
"""
if url.startswith('git@'):
res = url.split(':')[-1].split('/')
elif 'git' in url:
res = url.split('/')[-2:]
else:
raise ValueError(highlighted_error(f'{url} is invalid.'))
username = res[0]
repo = res[1].split('.')[0]
return username, repo
def get_github_url(package: str) -> str:
"""Get github url.
Args:
package (str): Name of package, like mmcls.
Example:
>>> get_github_url('mmcls')
'https://github.com/open-mmlab/mmclassification.git'
"""
for _package, _, _url in read_installation_records():
if _package == package and _url != 'local':
github_url = _url
break
else:
if package not in PKG2PROJECT:
raise ValueError(
highlighted_error(f'Failed to get url of {package}.'))
github_url = f'{DEFAULT_URL}/{PKG2PROJECT[package]}.git'
return github_url
def get_content_from_url(url: str,
timeout: int = 15,
stream: bool = False) -> Response:
"""Get content from url.
Args:
url (str): Url for getting content.
timeout (int): Set the socket timeout. Default: 15.
"""
try:
response = requests.get(url, timeout=timeout, stream=stream)
except InvalidURL as err:
raise highlighted_error(err) # type: ignore
except Timeout as err:
raise highlighted_error(err) # type: ignore
except RequestException as err:
raise highlighted_error(err) # type: ignore
except Exception as err:
raise highlighted_error(err) # type: ignore
return response
@typing.no_type_check
def download_from_file(url: str,
dest_path: str,
hash_prefix: Optional[str] = None) -> None:
"""Download object at the given URL to a local path.
Args:
url (str): URL of the object to download.
dest_path (str): Path where object will be saved.
hash_prefix (string, optional): If not None, the SHA256 downloaded
file should start with `hash_prefix`. Default: None.
"""
if hash_prefix is not None:
sha256 = hashlib.sha256()
response = get_content_from_url(url, stream=True)
size = int(response.headers.get('content-length'))
with open(dest_path, 'wb') as fw:
content_iter = response.iter_content(chunk_size=1024)
with click.progressbar(content_iter, length=size / 1024) as chunks:
for chunk in chunks:
if chunk:
fw.write(chunk)
fw.flush()
if hash_prefix is not None:
sha256.update(chunk)
if hash_prefix is not None:
digest = sha256.hexdigest()
if digest[:len(hash_prefix)] != hash_prefix:
raise RuntimeError(
highlighted_error(
f'invalid hash value, expected "{hash_prefix}", but got '
f'"{digest}"'))
def split_package_version(package: str) -> Tuple[str, ...]:
"""Split the package which maybe contains version info.
Args:
package (str): Name of package to split.
Example:
>>> split_package_version('mmcls')
'mmcls', ''
>>> split_package_version('mmcls=0.11.0')
'mmcls', '0.11.0'
>>> split_package_version('mmcls==0.11.0')
'mmcls', '0.11.0'
"""
if '=' in package:
return tuple(re.split(r'=+', package))
else:
return package, ''
def is_installed(package: str) -> Any:
"""Check package whether installed.
Args:
package (str): Name of package to be checked.
"""
module_name = PKG2MODULE.get(package, package)
return importlib.util.find_spec(module_name) # type: ignore
def get_package_version(repo_root: str) -> Tuple[str, str]:
"""Get package and version from local repo.
Args:
repo_root (str): Directory of repo.
"""
for file_name in os.listdir(repo_root):
version_path = osp.join(repo_root, file_name, 'version.py')
if osp.exists(version_path):
with open(version_path, 'r', encoding='utf-8') as f:
exec(compile(f.read(), version_path, 'exec'))
return file_name, locals()['__version__']
return '', ''
def get_installed_version(package: str) -> str:
"""Get the version of package from local environment.
Args:
package (str): Name of package.
"""
module_name = PKG2MODULE.get(package, package)
if not is_installed(module_name):
raise RuntimeError(highlighted_error(f'{package} is not installed.'))
module = importlib.import_module(module_name)
return module.__version__ # type: ignore
def get_release_version(package: str, timeout: int = 15) -> List[str]:
"""Get release version from pypi.
The return list of versions is sorted by ascending order.
Args:
package (str): Package to get version.
timeout (int): Set the socket timeout. Default: 15.
"""
pkg_url = f'https://pypi.org/pypi/{package}/json'
response = get_content_from_url(pkg_url, timeout)
content = response.json()
releases = content['releases']
return sorted(releases, key=parse_version)
def get_latest_version(package: str, timeout: int = 15) -> str:
"""Get latest version of package.
Args:
package (str): Package to get latest version.
timeout (int): Set the socket timeout. Default: 15.
Example:
>>> get_latest_version('mmcv-full')
'0.11.0'
"""
release_version = get_release_version(package, timeout)
return release_version[-1]
def is_version_equal(version1: str, version2: str) -> bool:
return LooseVersion(version1) == LooseVersion(version2)
def get_installed_path(package: str) -> str:
"""Get installed path of package.
Args:
package (str): Name of package.
Example:
>>> get_installed_path('mmcls')
>>> '.../lib/python3.7/site-packages/mmcls'
"""
module_name = PKG2MODULE.get(package, package)
module = importlib.import_module(module_name)
return module.__path__[0] # type: ignore
def get_torch_cuda_version() -> Tuple[str, str]:
"""Get PyTorch version and CUDA version if it is available.
Example:
>>> get_torch_cuda_version()
'1.8.0', '102'
"""
try:
import torch
except ImportError as err:
raise err
torch_v = torch.__version__
if '+' in torch_v: # 1.8.1+cu111 -> 1.8.1
torch_v = torch_v.split('+')[0]
if torch.cuda.is_available():
# torch.version.cuda like 10.2 -> 102
cuda_v = ''.join(torch.version.cuda.split('.'))
else:
cuda_v = 'cpu'
return torch_v, cuda_v
def read_installation_records() -> list:
"""Read installed packages from mmpackage.txt."""
if not osp.isfile(MMPACKAGE_PATH):
return []
seen = set()
pkgs_info = []
with open(MMPACKAGE_PATH, 'r') as fr:
for line in fr:
line = line.strip()
package, version, source = line.split(',')
if not is_installed(package):
continue
pkgs_info.append((package, version, source))
seen.add(package)
# handle two cases
# 1. install mmrepos by other ways not mim, such as pip install mmcls
# 2. existed mmrepos
for pkg in pkg_resources.working_set:
pkg_name = pkg.project_name
if pkg_name not in seen and (pkg_name in PKG2PROJECT
or pkg_name in PKG2MODULE):
pkgs_info.append((pkg_name, pkg.version, ''))
return pkgs_info
def write_installation_records(package: str,
version: str,
source: str = '') -> None:
"""Write installed package to mmpackage.txt."""
pkgs_info = read_installation_records()
with open(MMPACKAGE_PATH, 'w') as fw:
if pkgs_info:
for _package, _version, _source in pkgs_info:
if _package != package:
fw.write(f'{_package},{_version},{_source}\n')
fw.write(f'{package},{version},{source}\n')
def remove_installation_records(package: str) -> None:
"""Remove package from mmpackage.txt."""
pkgs_info = read_installation_records()
if not pkgs_info:
with open(MMPACKAGE_PATH, 'w') as fw:
for _package, _version, _source in pkgs_info:
if _package != package:
fw.write(f'{_package},{_version},{_source}\n')
def cast2lowercase(input: Union[list, tuple, str]) -> Any:
"""Cast input into lowercase.
Example:
>>> cast2lowercase('Hello World')
'hello world'
>>> cast2lowercase(['Hello', 'World'])
['hello', 'world']
"""
inputs = []
outputs = []
if isinstance(input, str):
inputs = [input]
else:
inputs = input # type: ignore
for _input in inputs:
outputs.append(_input.lower())
if isinstance(input, str):
return outputs[0]
elif isinstance(input, tuple):
return tuple(outputs)
else:
return outputs
def recursively_find(root: str, base_name: str) -> list:
"""Recursive list a directory, return all files with a given base_name.
Args:
root (str): The root directory to list.
base_name (str): The base_name.
Return:
Files with given base_name.
"""
results = list(os.walk(root))
files = []
for tup in results:
root = tup[0]
if base_name in tup[2]:
files.append(osp.join(root, base_name))
return files
def highlighted_error(msg: Union[str, Exception]) -> str:
return click.style(msg, fg='red', bold=True) # type: ignore
def color_echo(msg: str, color: str) -> None:
click.echo(click.style(msg, fg=color)) # type: ignore
def echo_error(msg: Union[str, Exception]) -> None:
color_echo(msg=msg, color='red') # type: ignore
def echo_warning(msg: Union[str, Exception]) -> None:
color_echo(msg=msg, color='yellow') # type: ignore
def echo_success(msg: str) -> None:
color_echo(msg=msg, color='green')
def exit_with_error(msg: Union[str, Exception]) -> None:
echo_error(msg)
exit(1)
def call_command(cmd: list) -> None:
try:
subprocess.check_call(cmd)
except Exception as e:
raise highlighted_error(e) # type: ignore
def string2args(text: str) -> dict:
"""Parse string to arguments.
Args:
text (str): The string to be parsed, which should be of the format:
"--arg1 value1 value2 --arg2 value1 ... --argn value1".
Using '=' is also OK, like "--argn=value1". It also support flag
args like "--arg1".
Return:
A dictionary that contains parsed args. Note that the type of values
will all be strings.
Example:
>>> text = '--arg1 value1 value2 --arg2 value3 --arg3 value4'
>>> string2args(text)
args = {
'arg1': [value1, value2],
'arg2': [value3],
'arg3': [value4]
}
"""
ret: dict = defaultdict(list)
name = None
items = text.split()
for item in items:
if name is None:
assert item.startswith('--')
if item.startswith('--'):
if name is not None and ret[name] == []:
ret[name] = bool
if '=' in item:
name, value = item[2:].split('=')
ret[name] = [value]
name = None
else:
name = item[2:]
else:
ret[name].append(item)
if name is not None and ret[name] == []:
ret[name] = bool
return ret
def args2string(args: dict) -> str:
"""Convert args dictionary to a string.
Args:
args (dict): A dictionary that contains parsed args.
Return:
A converted string.
Example:
>>> args = {
'arg1': [value1, value2],
'arg2': [value3],
'arg3': [value4]
}
>>> args2string(args)
'--arg1 value1 value2 --arg2 value3 --arg3 value4'
"""
text = []
for k in args:
text.append(f'--{k}')
if args[k] is not bool:
text.extend([str(x) for x in args[k]])
return ' '.join(text)
def get_config(cfg, name):
"""Given | |
from pxr import *
import os, os.path
import numpy
import re
import usdUtils
import math
import imp
usdStageWithFbxLoaded = True
try:
imp.find_module('fbx')
import fbx
except ImportError:
usdUtils.printError("Failed to import fbx module. Please install FBX Python bindings from http://www.autodesk.com/fbx and add path to FBX Python SDK to your PYTHONPATH")
usdStageWithFbxLoaded = False
class ConvertError(Exception):
pass
def printErrorAndExit(message):
usdUtils.printError(message)
raise ConvertError()
def GfMatrix4dWithFbxMatrix(m):
return Gf.Matrix4d(
m[0][0], m[0][1], m[0][2], m[0][3],
m[1][0], m[1][1], m[1][2], m[1][3],
m[2][0], m[2][1], m[2][2], m[2][3],
m[3][0], m[3][1], m[3][2], m[3][3])
def getFbxNodeTransforms(fbxNode):
return GfMatrix4dWithFbxMatrix(fbxNode.EvaluateLocalTransform())
def getFbxNodeGeometricTransform(fbxNode):
# geometry transform is an additional transform for geometry
# it is relative to the node transform
# this transform is not distributing to the children nodes in scene graph
translation = fbxNode.GetGeometricTranslation(fbx.FbxNode.eSourcePivot)
rotation = fbxNode.GetGeometricRotation(fbx.FbxNode.eSourcePivot)
scale = fbxNode.GetGeometricScaling(fbx.FbxNode.eSourcePivot)
return fbx.FbxAMatrix(translation, rotation, scale)
def convertUVTransformFromFBX(translation, scale, rotation):
# from FBX to Blender
scale[0] = 1.0 / scale[0]
scale[1] = 1.0 / scale[1]
rotation = -rotation
# Blender: Tuv = T * R * S
# USD: Tuv = S * R * T
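    # To re-express the translation in the S * R * T convention, the translate matrix is
    # conjugated by the rotation and scale below (a sketch of the reasoning, not authoritative):
    # T' = S * R * T * R^-1 * S^-1, from which only the translation component is extracted.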
scaleMatrix = Gf.Matrix4d(Gf.Vec4d(scale[0], scale[1], 1, 1))
inverseScaleMatrix = Gf.Matrix4d(Gf.Vec4d(1.0 / scale[0], 1.0 / scale[1], 1, 1))
rotationMatrix = Gf.Matrix4d(
math.cos(rotation), math.sin(rotation), 0, 0,
-math.sin(rotation), math.cos(rotation), 0, 0,
0, 0, 1, 0,
0, 0, 0, 1)
inverseRotationMatrix = rotationMatrix.GetTranspose()
translateMatrix = Gf.Matrix4d(1)
translateMatrix.SetTranslate(Gf.Vec3d(translation[0], translation[1], 0))
# translate matrix from Blender to USD
transform = scaleMatrix * rotationMatrix * translateMatrix * inverseRotationMatrix * inverseScaleMatrix
translation3d = transform.ExtractTranslation()
translation[0] = translation3d[0]
translation[1] = translation3d[1]
return translation, scale, math.degrees(rotation)
class FbxNodeManager(usdUtils.NodeManager):
def __init__(self, value=None):
usdUtils.NodeManager.__init__(self)
def overrideGetName(self, fbxNode):
return usdUtils.makeValidIdentifier(fbxNode.GetName().split(":")[-1])
def overrideGetChildren(self, fbxNode):
children = []
for childIdx in xrange(fbxNode.GetChildCount()):
children.append(fbxNode.GetChild(childIdx))
return children
def overrideGetLocalTransformGfMatrix4d(self, fbxNode):
return GfMatrix4dWithFbxMatrix(fbxNode.EvaluateLocalTransform())
def overrideGetWorldTransformGfMatrix4d(self, fbxNode):
return GfMatrix4dWithFbxMatrix(fbxNode.EvaluateGlobalTransform())
def overrideGetParent(self, fbxNode):
return fbxNode.GetParent()
class AnimProperty:
def __init__(self, fbxAnimLayer, fbxProperty, timeSpans):
self.fbxAnimLayer = fbxAnimLayer
self.fbxProperty = fbxProperty
self.timeSpans = timeSpans
class FbxConverter:
def __init__(self, fbxPath, usdPath, legacyModifier, copyTextures, searchPaths, verbose):
self.verbose = verbose
self.legacyModifier = legacyModifier
self.copyTextures = copyTextures
self.searchPaths = searchPaths
self.asset = usdUtils.Asset(usdPath)
self.usdStage = None
self.usdMaterials = {}
self.nodeId = 0
self.nodePaths = {}
self.fbxSkinToSkin = {}
self.startAnimationTime = 0
self.stopAnimationTime = 0
self.skeletonByNode = {} # collect skinned mesh to construct later
self.blendShapeByNode = {} # collect blend shapes to construct later
self.copiedTextures = {} # avoid copying textures more then once
self.extent = [[], []]
self.fbxScene = None
filenameFull = fbxPath.split('/')[-1]
self.srcFolder = fbxPath[:len(fbxPath)-len(filenameFull)]
filenameFull = usdPath.split('/')[-1]
self.dstFolder = usdPath[:len(usdPath)-len(filenameFull)]
self.loadFbxScene(fbxPath)
self.fps = fbx.FbxTime.GetFrameRate(fbx.FbxTime.GetGlobalTimeMode())
self.asset.setFPS(self.fps)
self.nodeManager = FbxNodeManager()
self.skinning = usdUtils.Skinning(self.nodeManager)
self.shapeBlending = usdUtils.ShapeBlending()
def loadFbxScene(self, fbxPath):
fbxManager = fbx.FbxManager.Create()
if not fbxManager:
printErrorAndExit("failed to create FBX manager object")
self.fbxManager = fbxManager
fbxIOSettings = fbx.FbxIOSettings.Create(fbxManager, fbx.IOSROOT)
fbxManager.SetIOSettings(fbxIOSettings)
fbxImporter = fbx.FbxImporter.Create(fbxManager, "")
result = fbxImporter.Initialize(fbxPath, -1, fbxManager.GetIOSettings())
if not result:
printErrorAndExit("failed to initialize FbxImporter object")
if fbxImporter.IsFBX():
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_MATERIAL, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_TEXTURE, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_EMBEDDED, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_SHAPE, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_GOBO, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_ANIMATION, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_GLOBAL_SETTINGS, True)
self.fbxScene = fbx.FbxScene.Create(fbxManager, "")
result = fbxImporter.Import(self.fbxScene)
fbxImporter.Destroy()
if not result:
printErrorAndExit("failed to load FBX scene")
def getTextureProperties(self, materialProperty):
if materialProperty.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxFileTexture.ClassId)) > 0:
fbxFileTexture = materialProperty.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxFileTexture.ClassId), 0)
texCoordSet = 'st'
if fbxFileTexture.UVSet is not None:
texCoordSet = str(fbxFileTexture.UVSet.Get())
if texCoordSet == '' or texCoordSet == 'default':
texCoordSet = 'st'
else:
texCoordSet = usdUtils.makeValidIdentifier(texCoordSet)
wrapS = usdUtils.WrapMode.repeat
wrapT = usdUtils.WrapMode.repeat
if fbxFileTexture.GetWrapModeU() == fbx.FbxTexture.eClamp:
wrapS = usdUtils.WrapMode.clamp
if fbxFileTexture.GetWrapModeV() == fbx.FbxTexture.eClamp:
wrapT = usdUtils.WrapMode.clamp
# texture transform
mapTransform = None
translation = [fbxFileTexture.GetTranslationU(), fbxFileTexture.GetTranslationV()]
scale = [fbxFileTexture.GetScaleU(), fbxFileTexture.GetScaleV()]
rotation = fbxFileTexture.GetRotationW()
if (translation[0] != 0 or translation[1] != 0 or
scale[0] != 1 or scale[1] != 1 or
rotation != 0):
(translation, scale, rotation) = convertUVTransformFromFBX(translation, scale, rotation)
mapTransform = usdUtils.MapTransform(translation, scale, rotation)
return fbxFileTexture.GetFileName(), texCoordSet, wrapS, wrapT, mapTransform
elif materialProperty.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxLayeredTexture.ClassId)) > 0:
pass
return '', 'st', usdUtils.WrapMode.repeat, usdUtils.WrapMode.repeat, None
def processMaterialProperty(self, input, propertyName, property, factorProperty, channels, material, fbxMaterial):
value = None
factor = float(factorProperty.Get()) if factorProperty is not None else None
if property is not None:
if channels == 'rgb':
value = [property.Get()[0], property.Get()[1], property.Get()[2]]
else:
if input == usdUtils.InputName.opacity:
transparency = property.Get()[0]
if factor is not None:
transparency = transparency * factor
factor = None
value = 1.0 - transparency
else:
value = float(property.Get()[0])
srcTextureFilename = '' # source texture filename on drive
textureFilename = '' # valid for USD
materialProperty = fbxMaterial.FindProperty(propertyName)
if materialProperty.IsValid():
srcTextureFilename, texCoordSet, wrapS, wrapT, mapTransform = self.getTextureProperties(materialProperty)
srcTextureFilename = usdUtils.resolvePath(srcTextureFilename, self.srcFolder, self.searchPaths)
textureFilename = usdUtils.makeValidPath(srcTextureFilename)
if textureFilename != '' and (self.copyTextures or srcTextureFilename != textureFilename):
if srcTextureFilename in self.copiedTextures:
textureFilename = self.copiedTextures[srcTextureFilename]
else:
newTextureFilename = 'textures/' + os.path.basename(textureFilename)
# do not rewrite the texture with same basename
subfolderIdx = 0
while newTextureFilename in self.copiedTextures.values():
newTextureFilename = 'textures/' + str(subfolderIdx) + '/' + os.path.basename(textureFilename)
subfolderIdx += 1
usdUtils.copy(srcTextureFilename, self.dstFolder + newTextureFilename, self.verbose)
self.copiedTextures[srcTextureFilename] = newTextureFilename
textureFilename = newTextureFilename
if textureFilename != '':
scale = None
if factor is not None:
if channels == 'rgb':
scale = [factor, factor, factor]
else:
scale = factor
material.inputs[input] = usdUtils.Map(channels, textureFilename, value, texCoordSet, wrapS, wrapT, scale, mapTransform)
else:
if value is not None:
if factor is not None:
if channels == 'rgb':
material.inputs[input] = [value[0] * factor, value[1] * factor, value[2] * factor]
else:
material.inputs[input] = value * factor
else:
material.inputs[input] = value
def processMaterials(self):
for i in range(self.fbxScene.GetMaterialCount()):
fbxMaterial = self.fbxScene.GetMaterial(i)
material = usdUtils.Material(fbxMaterial.GetName().split(":")[-1])
normalMap = fbxMaterial.NormalMap if hasattr(fbxMaterial, 'NormalMap') else None
self.processMaterialProperty(usdUtils.InputName.normal, fbx.FbxSurfaceMaterial.sNormalMap, normalMap, None, 'rgb', material, fbxMaterial)
diffuse = fbxMaterial.Diffuse if hasattr(fbxMaterial, 'Diffuse') else None
diffuseFactor = fbxMaterial.DiffuseFactor if hasattr(fbxMaterial, 'DiffuseFactor') else None
self.processMaterialProperty(usdUtils.InputName.diffuseColor, fbx.FbxSurfaceMaterial.sDiffuse, diffuse, diffuseFactor, 'rgb', material, fbxMaterial)
transparentColor = fbxMaterial.TransparentColor if hasattr(fbxMaterial, 'TransparentColor') else None
transparencyFactor = fbxMaterial.TransparencyFactor if hasattr(fbxMaterial, 'TransparencyFactor') else None
self.processMaterialProperty(usdUtils.InputName.opacity, fbx.FbxSurfaceMaterial.sTransparentColor, transparentColor, transparencyFactor, 'a', material, fbxMaterial)
emissive = fbxMaterial.Emissive if hasattr(fbxMaterial, 'Emissive') else None
emissiveFactor = fbxMaterial.EmissiveFactor if hasattr(fbxMaterial, 'EmissiveFactor') else None
self.processMaterialProperty(usdUtils.InputName.emissiveColor, fbx.FbxSurfaceMaterial.sEmissive, emissive, emissiveFactor, 'rgb', material, fbxMaterial)
ambient = fbxMaterial.Ambient if hasattr(fbxMaterial, 'Ambient') else None
ambientFactor = fbxMaterial.AmbientFactor if hasattr(fbxMaterial, 'AmbientFactor') else None
self.processMaterialProperty(usdUtils.InputName.occlusion, fbx.FbxSurfaceMaterial.sAmbient, ambient, ambientFactor, 'r', material, fbxMaterial)
# 'metallic', 'roughness' ?
usdMaterial = material.makeUsdMaterial(self.asset)
if self.legacyModifier is not None:
self.legacyModifier.opacityAndDiffuseOneTexture(material)
self.usdMaterials[fbxMaterial.GetName()] = usdMaterial
def prepareAnimations(self):
animStacksCount = self.fbxScene.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId))
if animStacksCount < 1:
if self.verbose:
print 'No animation found'
return
fbxAnimStack = self.fbxScene.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId), 0)
timeSpan = fbxAnimStack.GetLocalTimeSpan()
self.startAnimationTime = timeSpan.GetStart().GetSecondDouble()
self.stopAnimationTime = timeSpan.GetStop().GetSecondDouble()
self.asset.extentTime(self.startAnimationTime)
self.asset.extentTime(self.stopAnimationTime)
def processControlPoints(self, fbxMesh, usdMesh):
points = [Gf.Vec3f(p[0], p[1], p[2]) for p in fbxMesh.GetControlPoints()]
extent = Gf.Range3f()
for point in points:
extent.UnionWith(point)
usdMesh.CreatePointsAttr(points)
usdMesh.CreateExtentAttr([Gf.Vec3f(extent.GetMin()), Gf.Vec3f(extent.GetMax())])
if not any(self.extent):
self.extent[0] = extent.GetMin()
self.extent[1] = extent.GetMax()
else:
for i in range(3):
self.extent[0][i] = min(self.extent[0][i], extent.GetMin()[i])
self.extent[1][i] = max(self.extent[1][i], extent.GetMax()[i])
def getVec3fArrayWithLayerElements(self, elements, fbxLayerElements):
elementsArray = fbxLayerElements.GetDirectArray()
for i in xrange(elementsArray.GetCount()):
element = elementsArray.GetAt(i)
elements.append(Gf.Vec3f(element[0], element[1], element[2]))
def getIndicesWithLayerElements(self, fbxMesh, fbxLayerElements):
mappingMode = fbxLayerElements.GetMappingMode()
referenceMode = fbxLayerElements.GetReferenceMode()
indexToDirect = (
referenceMode == fbx.FbxLayerElement.eIndexToDirect or
referenceMode == fbx.FbxLayerElement.eIndex)
indices = []
if mappingMode == fbx.FbxLayerElement.eByControlPoint:
if indexToDirect:
                for controlPointIdx in xrange(fbxMesh.GetControlPointsCount()):
                    indices.append(fbxLayerElements.GetIndexArray().GetAt(controlPointIdx))
elif mappingMode == fbx.FbxLayerElement.eByPolygonVertex:
pointIdx = 0
for polygonIdx in xrange(fbxMesh.GetPolygonCount()):
for vertexIdx in xrange(fbxMesh.GetPolygonSize(polygonIdx)):
if indexToDirect:
indices.append(fbxLayerElements.GetIndexArray().GetAt(pointIdx))
else:
indices.append(pointIdx)
pointIdx += 1
elif mappingMode == fbx.FbxLayerElement.eByPolygon:
for polygonIdx in xrange(fbxMesh.GetPolygonCount()):
if indexToDirect:
indices.append(fbxLayerElements.GetIndexArray().GetAt(polygonIdx))
else:
indices.append(polygonIdx)
return indices
def getInterpolationWithLayerElements(self, fbxLayerElements):
mappingMode = fbxLayerElements.GetMappingMode()
if mappingMode == fbx.FbxLayerElement.eByControlPoint:
return UsdGeom.Tokens.vertex
elif mappingMode == fbx.FbxLayerElement.eByPolygonVertex:
return UsdGeom.Tokens.faceVarying
elif mappingMode == fbx.FbxLayerElement.eByPolygon:
return UsdGeom.Tokens.uniform
elif mappingMode == fbx.FbxLayerElement.eAllSame:
return UsdGeom.Tokens.constant
elif mappingMode == fbx.FbxLayerElement.eByEdge:
usdUtils.printWarning("Mapping mode eByEdge for layer elements is not supported.")
return ''
def processNormals(self, fbxMesh, usdMesh, vertexIndices):
for layerIdx in xrange(fbxMesh.GetLayerCount()):
fbxLayerNormals = fbxMesh.GetLayer(layerIdx).GetNormals()
if fbxLayerNormals is None:
continue
normals = []
self.getVec3fArrayWithLayerElements(normals, fbxLayerNormals)
if not any(normals):
continue
indices = self.getIndicesWithLayerElements(fbxMesh, fbxLayerNormals)
interpolation = self.getInterpolationWithLayerElements(fbxLayerNormals)
normalPrimvar = usdMesh.CreatePrimvar('normals', Sdf.ValueTypeNames.Normal3fArray, interpolation)
normalPrimvar.Set(normals)
if len(indices) != 0:
normalPrimvar.SetIndices(Vt.IntArray(indices))
break # normals can be in one layer only
def processUVs(self, fbxMesh, usdMesh, vertexIndices):
for layerIdx in xrange(fbxMesh.GetLayerCount()):
fbxLayerUVs = fbxMesh.GetLayer(layerIdx).GetUVs() # get diffuse texture uv-s
if fbxLayerUVs is None:
continue
uvs = []
uvArray = fbxLayerUVs.GetDirectArray()
for i in xrange(uvArray.GetCount()):
uv = uvArray.GetAt(i)
uvs.append(Gf.Vec2f(uv[0], uv[1]))
if not any(uvs):
continue
indices = self.getIndicesWithLayerElements(fbxMesh, fbxLayerUVs)
interpolation = self.getInterpolationWithLayerElements(fbxLayerUVs)
texCoordSet = 'st'
uvSets = fbxMesh.GetLayer(layerIdx).GetUVSets()
if len(uvSets) > 0:
fbxLayerElementUV = fbxMesh.GetLayer(layerIdx).GetUVSets()[0]
num_layers,
dropout=0, **kwargs):
super(Seq2SeqEncoder, self).__init__(**kwargs)
self.embedding = nn.Embedding(vocab_size, embed_size)
self.rnn = rnn.LSTM(num_hiddens, num_layers, dropout=dropout)
def forward(self, X, *args):
X = self.embedding(X) # X shape: (batch_size, seq_len, embed_size)
X = X.swapaxes(0, 1) # RNN needs first axes to be time
state = self.rnn.begin_state(batch_size=X.shape[1], ctx=X.context)
out, state = self.rnn(X, state)
# The shape of out is (seq_len, batch_size, num_hiddens).
# state contains the hidden state and the memory cell
# of the last time step, the shape is (num_layers, batch_size, num_hiddens)
return out, state
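# Illustrative shape check (an assumption, not from the original source); the
# sizes below are arbitrary, shown to clarify the (seq_len, batch_size, ...) layout:
#   >>> encoder = Seq2SeqEncoder(vocab_size=10, embed_size=8,
#   ...                          num_hiddens=16, num_layers=2)
#   >>> encoder.initialize()
#   >>> X = nd.zeros((4, 7))          # (batch_size, seq_len)
#   >>> out, state = encoder(X)
#   >>> out.shape                     # (seq_len, batch_size, num_hiddens)
#   (7, 4, 16)
#   >>> len(state), state[0].shape    # hidden state and memory cell
#   (2, (2, 4, 16))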
# Defined in file: ./chapter_recurrent-neural-networks/seq2seq.md
class Seq2SeqDecoder(d2l.Decoder):
def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
dropout=0, **kwargs):
super(Seq2SeqDecoder, self).__init__(**kwargs)
self.embedding = nn.Embedding(vocab_size, embed_size)
self.rnn = rnn.LSTM(num_hiddens, num_layers, dropout=dropout)
self.dense = nn.Dense(vocab_size, flatten=False)
def init_state(self, enc_outputs, *args):
return enc_outputs[1]
def forward(self, X, state):
X = self.embedding(X).swapaxes(0, 1)
out, state = self.rnn(X, state)
        # Make the batch the first dimension to simplify loss computation.
out = self.dense(out).swapaxes(0, 1)
return out, state
# Defined in file: ./chapter_recurrent-neural-networks/seq2seq.md
class MaskedSoftmaxCELoss(gluon.loss.SoftmaxCELoss):
# pred shape: (batch_size, seq_len, vocab_size)
# label shape: (batch_size, seq_len)
# valid_length shape: (batch_size, )
def forward(self, pred, label, valid_length):
# the sample weights shape should be (batch_size, seq_len, 1)
weights = nd.ones_like(label).expand_dims(axis=-1)
weights = nd.SequenceMask(weights, valid_length, True, axis=1)
return super(MaskedSoftmaxCELoss, self).forward(pred, label, weights)
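# Hedged usage sketch (an assumption, not from the original source): valid_length
# masks the padded time steps, so the third example below contributes no loss:
#   >>> loss = MaskedSoftmaxCELoss()
#   >>> loss(nd.ones((3, 4, 10)), nd.ones((3, 4)), nd.array([4, 2, 0]))
#   # roughly [2.30, 1.15, 0.]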
# Defined in file: ./chapter_recurrent-neural-networks/seq2seq.md
def train_s2s_ch8(model, data_iter, lr, num_epochs, ctx):
model.initialize(init.Xavier(), force_reinit=True, ctx=ctx)
trainer = gluon.Trainer(model.collect_params(),
'adam', {'learning_rate': lr})
loss = MaskedSoftmaxCELoss()
#tic = time.time()
animator = d2l.Animator(xlabel='epoch', ylabel='loss',
xlim=[1, num_epochs], ylim=[0, 0.25])
for epoch in range(1, num_epochs+1):
timer = d2l.Timer()
metric = d2l.Accumulator(2) # loss_sum, num_tokens
for batch in data_iter:
X, X_vlen, Y, Y_vlen = [x.as_in_context(ctx) for x in batch]
Y_input, Y_label, Y_vlen = Y[:,:-1], Y[:,1:], Y_vlen-1
with autograd.record():
Y_hat, _ = model(X, Y_input, X_vlen, Y_vlen)
l = loss(Y_hat, Y_label, Y_vlen)
l.backward()
d2l.grad_clipping(model, 1)
num_tokens = Y_vlen.sum().asscalar()
trainer.step(num_tokens)
metric.add(l.sum().asscalar(), num_tokens)
if epoch % 10 == 0:
animator.add(epoch, metric[0]/metric[1])
print('loss %.3f, %d tokens/sec on %s ' % (
metric[0]/metric[1], metric[1]/timer.stop(), ctx))
# Defined in file: ./chapter_recurrent-neural-networks/seq2seq.md
def predict_s2s_ch8(model, src_sentence, src_vocab, tgt_vocab, num_steps, ctx):
src_tokens = src_vocab[src_sentence.lower().split(' ')]
enc_valid_length = nd.array([len(src_tokens)], ctx=ctx)
src_tokens = d2l.trim_pad(src_tokens, num_steps, src_vocab.pad)
enc_X = nd.array(src_tokens, ctx=ctx)
# add the batch_size dimension.
enc_outputs = model.encoder(enc_X.expand_dims(axis=0), enc_valid_length)
dec_state = model.decoder.init_state(enc_outputs, enc_valid_length)
dec_X = nd.array([tgt_vocab.bos], ctx=ctx).expand_dims(axis=0)
predict_tokens = []
for _ in range(num_steps):
Y, dec_state = model.decoder(dec_X, dec_state)
# The token with highest score is used as the next time step input.
dec_X = Y.argmax(axis=2)
py = dec_X.squeeze(axis=0).astype('int32').asscalar()
if py == tgt_vocab.eos:
break
predict_tokens.append(py)
return ' '.join(tgt_vocab.to_tokens(predict_tokens))
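# Hypothetical end-to-end sketch (not from the original source); src_vocab,
# tgt_vocab, train_iter and num_steps are assumed to come from the book's
# data-loading helpers:
#   >>> encoder = Seq2SeqEncoder(len(src_vocab), embed_size=32,
#   ...                          num_hiddens=32, num_layers=2)
#   >>> decoder = Seq2SeqDecoder(len(tgt_vocab), embed_size=32,
#   ...                          num_hiddens=32, num_layers=2)
#   >>> model = d2l.EncoderDecoder(encoder, decoder)
#   >>> train_s2s_ch8(model, train_iter, lr=0.005, num_epochs=300, ctx=d2l.try_gpu())
#   >>> predict_s2s_ch8(model, 'Go .', src_vocab, tgt_vocab, num_steps, d2l.try_gpu())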
# Defined in file: ./chapter_attention-mechanism/attention.md
def masked_softmax(X, valid_length):
# X: 3-D tensor, valid_length: 1-D or 2-D tensor
if valid_length is None:
return X.softmax()
else:
shape = X.shape
if valid_length.ndim == 1:
valid_length = valid_length.repeat(shape[1], axis=0)
else:
valid_length = valid_length.reshape((-1,))
# fill masked elements with a large negative, whose exp is 0
X = nd.SequenceMask(X.reshape((-1, shape[-1])), valid_length, True,
axis=1, value=-1e6)
return X.softmax().reshape(shape)
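# Minimal illustration (an assumption, not from the original source): only the
# first valid_length entries of each row keep non-zero weight:
#   >>> masked_softmax(nd.random.uniform(shape=(2, 2, 4)), nd.array([2, 3]))
#   # rows of the first batch sum to 1 over the first 2 columns,
#   # rows of the second batch over the first 3 columns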
# Defined in file: ./chapter_attention-mechanism/attention.md
class DotProductAttention(nn.Block):
def __init__(self, dropout, **kwargs):
super(DotProductAttention, self).__init__(**kwargs)
self.dropout = nn.Dropout(dropout)
# query: (batch_size, #queries, d)
# key: (batch_size, #kv_pairs, d)
# value: (batch_size, #kv_pairs, dim_v)
# valid_length: either (batch_size, ) or (batch_size, xx)
def forward(self, query, key, value, valid_length=None):
d = query.shape[-1]
# set transpose_b=True to swap the last two dimensions of key
scores = nd.batch_dot(query, key, transpose_b=True) / math.sqrt(d)
attention_weights = self.dropout(masked_softmax(scores, valid_length))
return nd.batch_dot(attention_weights, value)
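# Hedged usage sketch with toy tensors (sizes are assumptions, not from the
# original source):
#   >>> atten = DotProductAttention(dropout=0.5)
#   >>> atten.initialize()
#   >>> keys = nd.ones((2, 10, 2))
#   >>> values = nd.arange(40).reshape((1, 10, 4)).repeat(2, axis=0)
#   >>> atten(nd.ones((2, 1, 2)), keys, values, nd.array([2, 6]))
#   # output shape: (2, 1, 4) -- one attended value vector per query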
# Defined in file: ./chapter_attention-mechanism/attention.md
class MLPAttention(nn.Block):
def __init__(self, units, dropout, **kwargs):
super(MLPAttention, self).__init__(**kwargs)
        # Use flatten=False to keep query's and key's 3-D shapes.
self.W_k = nn.Dense(units, activation='tanh',
use_bias=False, flatten=False)
self.W_q = nn.Dense(units, activation='tanh',
use_bias=False, flatten=False)
self.v = nn.Dense(1, use_bias=False, flatten=False)
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, valid_length):
        query, key = self.W_q(query), self.W_k(key)
        # expand query to (batch_size, #queries, 1, units), and key to
        # (batch_size, 1, #kv_pairs, units), then add them via broadcasting
features = query.expand_dims(axis=2) + key.expand_dims(axis=1)
scores = self.v(features).squeeze(axis=-1)
attention_weights = self.dropout(masked_softmax(scores, valid_length))
return nd.batch_dot(attention_weights, value)
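# Same toy tensors as in the sketch above; MLPAttention projects queries and
# keys to a common `units`-dimensional space before scoring (sizes are assumptions):
#   >>> atten = MLPAttention(units=8, dropout=0.1)
#   >>> atten.initialize()
#   >>> atten(nd.ones((2, 1, 2)), keys, values, nd.array([2, 6]))
#   # output shape: (2, 1, 4)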
# Defined in file: ./chapter_optimization/optimization-intro.md
def annotate(text, xy, xytext):
d2l.plt.gca().annotate(text, xy=xy, xytext=xytext,
arrowprops=dict(arrowstyle='->'))
# Defined in file: ./chapter_optimization/gd.md
def train_2d(trainer):
"""Optimize a 2-dim objective function with a customized trainer."""
# s1 and s2 are internal state variables and will
# be used later in the chapter
x1, x2, s1, s2 = -5, -2, 0, 0
results = [(x1, x2)]
for i in range(20):
x1, x2, s1, s2 = trainer(x1, x2, s1, s2)
results.append((x1, x2))
print('epoch %d, x1 %f, x2 %f' % (i + 1, x1, x2))
return results
# Defined in file: ./chapter_optimization/gd.md
def show_trace_2d(f, results):
"""Show the trace of 2D variables during optimization."""
d2l.set_figsize((3.5, 2.5))
d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e')
x1, x2 = np.meshgrid(np.arange(-5.5, 1.0, 0.1), np.arange(-3.0, 1.0, 0.1))
d2l.plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')
d2l.plt.xlabel('x1')
d2l.plt.ylabel('x2')
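# Illustrative use (an assumption, not from the original source): minimize
# f(x1, x2) = x1**2 + 2*x2**2 with plain gradient descent and plot the trace:
#   >>> f = lambda x1, x2: x1 ** 2 + 2 * x2 ** 2
#   >>> def gd(x1, x2, s1, s2):
#   ...     eta = 0.2
#   ...     return (x1 - eta * 2 * x1, x2 - eta * 4 * x2, 0, 0)
#   >>> show_trace_2d(f, train_2d(gd))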
# Defined in file: ./chapter_optimization/minibatch-sgd.md
def get_data_ch10(batch_size=10, n=1500):
data = np.genfromtxt('../data/airfoil_self_noise.dat', delimiter='\t')
data = nd.array((data - data.mean(axis=0)) / data.std(axis=0))
data_iter = d2l.load_array((data[:n, :-1], data[:n, -1]),
batch_size, is_train=True)
return data_iter, data.shape[1]-1
# Defined in file: ./chapter_optimization/minibatch-sgd.md
def train_ch10(trainer_fn, states, hyperparams, data_iter,
feature_dim, num_epochs=2):
# Initialization
w = nd.random.normal(scale=0.01, shape=(feature_dim, 1))
b = nd.zeros(1)
w.attach_grad()
b.attach_grad()
net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
# Train
animator = d2l.Animator(xlabel='epoch', ylabel='loss',
xlim=[0, num_epochs], ylim=[0.22, 0.35])
n, timer = 0, d2l.Timer()
for _ in range(num_epochs):
for X, y in data_iter:
with autograd.record():
l = loss(net(X), y).mean()
l.backward()
trainer_fn([w, b], states, hyperparams)
n += X.shape[0]
if n % 200 == 0:
timer.stop()
animator.add(n/X.shape[0]/len(data_iter),
d2l.evaluate_loss(net, data_iter, loss))
timer.start()
print('loss: %.3f, %.3f sec/epoch'%(animator.Y[0][-1], timer.avg()))
return timer.cumsum(), animator.Y[0]
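# Hypothetical usage with a hand-written SGD update rule (the `sgd` helper and
# the learning rate are assumptions, and the airfoil data file must exist):
#   >>> def sgd(params, states, hyperparams):
#   ...     for p in params:
#   ...         p[:] = p - hyperparams['lr'] * p.grad
#   >>> data_iter, feature_dim = get_data_ch10(batch_size=10)
#   >>> train_ch10(sgd, None, {'lr': 0.05}, data_iter, feature_dim)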
# Defined in file: ./chapter_optimization/minibatch-sgd.md
def train_gluon_ch10(trainer_name, trainer_hyperparams,
data_iter, num_epochs=2):
# Initialization
net = nn.Sequential()
net.add(nn.Dense(1))
net.initialize(init.Normal(sigma=0.01))
trainer = gluon.Trainer(
net.collect_params(), trainer_name, trainer_hyperparams)
loss = gluon.loss.L2Loss()
animator = d2l.Animator(xlabel='epoch', ylabel='loss',
xlim=[0, num_epochs], ylim=[0.22, 0.35])
n, timer = 0, d2l.Timer()
for _ in range(num_epochs):
for X, y in data_iter:
with autograd.record():
l = loss(net(X), y)
l.backward()
trainer.step(X.shape[0])
n += X.shape[0]
if n % 200 == 0:
timer.stop()
animator.add(n/X.shape[0]/len(data_iter),
d2l.evaluate_loss(net, data_iter, loss))
timer.start()
print('loss: %.3f, %.3f sec/epoch'%(animator.Y[0][-1], timer.avg()))
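# The Gluon counterpart of the sketch above (trainer name and learning rate
# are assumptions):
#   >>> data_iter, _ = get_data_ch10(batch_size=10)
#   >>> train_gluon_ch10('sgd', {'learning_rate': 0.05}, data_iter)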
# Defined in file: ./chapter_computational-performance/multiple-gpus.md
def split_batch(X, y, ctx_list):
"""Split X and y into multiple devices specified by ctx"""
assert X.shape[0] == y.shape[0]
return (gluon.utils.split_and_load(X, ctx_list),
gluon.utils.split_and_load(y, ctx_list))
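# Quick sketch (sizes and device list are assumptions): each returned list has
# one shard per device in ctx_list:
#   >>> ctx_list = d2l.try_all_gpus()
#   >>> Xs, ys = split_batch(nd.zeros((256, 1, 28, 28)), nd.zeros((256,)), ctx_list)
#   >>> [x.shape[0] for x in Xs]   # e.g. [128, 128] on two GPUs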
# Defined in file: ./chapter_computational-performance/multiple-gpus-gluon.md
def resnet18(num_classes):
"""A slightly modified ResNet-18 model"""
def resnet_block(num_channels, num_residuals, first_block=False):
blk = nn.Sequential()
for i in range(num_residuals):
if i == 0 and not first_block:
blk.add(d2l.Residual(
num_channels, use_1x1conv=True, strides=2))
else:
blk.add(d2l.Residual(num_channels))
return blk
net = nn.Sequential()
# This model uses a smaller convolution kernel, stride, and padding and
# removes the maximum pooling layer
net.add(nn.Conv2D(64, kernel_size=3, strides=1, padding=1),
nn.BatchNorm(), nn.Activation('relu'))
net.add(resnet_block(64, 2, first_block=True),
resnet_block(128, 2),
resnet_block(256, 2),
resnet_block(512, 2))
net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
return net
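# Hedged usage sketch (class count and initializer are assumptions):
#   >>> net = resnet18(num_classes=10)
#   >>> net.initialize(init.Normal(sigma=0.01), ctx=d2l.try_all_gpus())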
# Defined in file: ./chapter_computational-performance/multiple-gpus-gluon.md
def evaluate_accuracy_gpus(net, data_iter):
# Query the list of devices.
ctx_list = list(net.collect_params().values())[0].list_ctx()
metric = d2l.Accumulator(2) # num_corrected_examples, num_examples
for features, labels in data_iter:
Xs, ys = d2l.split_batch(features, labels, ctx_list)
pys = [net(X) for X in Xs] # run in parallel
metric.add(sum(d2l.accuracy(py, y) for py, y in zip(pys, ys)),
labels.size)
return metric[0]/metric[1]
# Defined in file: ./chapter_computer-vision/image-augmentation.md
def train_batch_ch12(net, features, labels, loss, trainer, ctx_list):
Xs, ys = d2l.split_batch(features, labels, ctx_list)
with autograd.record():
pys = [net(X) for X in Xs]
ls = [loss(py, y) for py, y in zip(pys, ys)]
for l in ls:
l.backward()
trainer.step(features.shape[0])
train_loss_sum = sum([l.sum().asscalar() for l in ls])
train_acc_sum = sum(d2l.accuracy(py, y) for py, y in zip(pys, ys))
return train_loss_sum, train_acc_sum
# Defined in file: ./chapter_computer-vision/image-augmentation.md
def train_ch12(net, train_iter, test_iter, loss, trainer, num_epochs,
ctx_list=d2l.try_all_gpus()):
num_batches, timer = len(train_iter), d2l.Timer()
animator = d2l.Animator(xlabel='epoch', xlim=[0,num_epochs], ylim=[0,2],
legend=['train loss','train acc','test acc'])
for epoch in range(num_epochs):
# store training_loss, training_accuracy, num_examples, num_features
metric = d2l.Accumulator(4)
for i, (features, labels) in enumerate(train_iter):
timer.start()
l, acc = train_batch_ch12(
net, features, labels, loss, trainer, ctx_list)
metric.add(l, acc, labels.shape[0], labels.size)
timer.stop()
if (i+1) % (num_batches // 5) == 0:
animator.add(epoch+i/num_batches,
(metric[0]/metric[2], metric[1]/metric[3], None))
test_acc = d2l.evaluate_accuracy_gpus(net, test_iter)
animator.add(epoch+1, (None, None, test_acc))
print('loss %.3f, train acc %.3f, test acc %.3f' % (
metric[0]/metric[2], metric[1]/metric[3], test_acc))
    print('%.1f examples/sec on %s' % (
metric[2]*num_epochs/timer.sum(), ctx_list))
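# Hypothetical training call (net, train_iter and test_iter are assumptions):
#   >>> trainer = gluon.Trainer(net.collect_params(), 'adam',
#   ...                         {'learning_rate': 0.001})
#   >>> train_ch12(net, train_iter, test_iter,
#   ...            gluon.loss.SoftmaxCrossEntropyLoss(), trainer, num_epochs=10)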
# Defined in file: ./chapter_computer-vision/bounding-box.md
def bbox_to_rect(bbox, color):
"""Convert bounding box to matplotlib format."""
# Convert the bounding box (top-left x, top-left y, bottom-right x,
# bottom-right y) format to matplotlib format: ((upper-left x,
    # upper-left y), width, height)
    return d2l.plt.Rectangle(
        xy=(bbox[0], bbox[1]), width=bbox[2]-bbox[0], height=bbox[3]-bbox[1],
        fill=False, edgecolor=color, linewidth=2)
<reponame>dirtybrain/DAnTE_V2
#!usr/bin/env python3
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2020 Westwood Robotics"
__date__ = "Jan 8, 2020"
__version__ = "0.1.0"
__status__ = "Beta"
import time
import os
import sys
from pathlib import Path
from Play.motor_controller import MotorController
from Play.dynamixel_controller import DynamixelController
from Settings.Robot import *
from Settings.Constants_DAnTE import *
import math
from collections import deque
if EXTERNAL_ENC:
# Only import the following when EXTERNAL_ENC is True as wiringpi and spidev are required.
import Forward_Kinematics.forward_kin as FK
from Play.MPS import MPS_Encoder_Cluster
import pdb
# PI0 is in Settings.Robot
if PI0:
print("Using Pi-ZERO, will not plot.")
PLOTTING_OVERRIDE = False
else:
import matplotlib.pyplot as plt
import matplotlib
matplotlib.interactive(True)
PLOTTING_OVERRIDE = True
class RobotController(object):
def __init__(self, robot=None, bypass_DXL=False, bypass_ext_enc=False):
if robot is None:
print("Robot set to DAnTE by default")
robot = DAnTE
self.robot = robot
self.MC = MotorController(self.robot.BEAR_baudrate, self.robot.BEAR_port)
# When debug, you might want to bypass Dynamixel
self.bypass_DXL = bypass_DXL
if self.bypass_DXL:
self.DC = None
else:
self.DC = DynamixelController(self.robot.palm.motor_id, self.robot.DXL_port, self.robot.DXL_baudrate)
if not EXTERNAL_ENC:
# Force to bypass external encoders when EXTERNAL_ENC=None
bypass_ext_enc = True
# When debug, you might want to bypass external encoders
self.bypass_ext_enc = bypass_ext_enc
if self.bypass_ext_enc:
self.ext_enc = None
else:
self.ext_enc = MPS_Encoder_Cluster("MA310", BUS, robot.encoders, MAX_SPI_SPEED, SPI_MODE)
# self.robot.DXL_port)
# self.gesture = None
self.mode = None
self.approach_speed = default_approach_speed
self.approach_stiffness = None
self.detect_current = None
self.final_stiffness = default_final_stiffness
self.preload = None
self.max_iq = None
self.contact_position = [0, 0, 0]
# self.contact_iq = [0, 0, 0]
self.balance_factor = [1, 1, 1] # Factor for force balance between fingers, update when change gesture
self.welcome_msg()
# self.start_robot()
def welcome_msg(self):
print("=========== DAnTE version 2.0.0 -- Last Updated 2020.06.24 ===")
print("==============================================================")
# ------------------------
# INITIALIZATION / CALIBRATION
# ------------------------
# Functions for initialization of finger(s)
# Read initials.txt and check settings
# Move finger(s) through range of motion according to initials.txt and check for mobility and interference
def start_robot(self):
# Ping all actuators
        error = self.ping()  # 4 bits, one each for INDEX, INDEX_M, THUMB and Dynamixel; 0b10000 for overall error.
# Read initials, fact check and populate robot object
init_data = self.read_initials() # init_data = [['FINGER', motor_id, homing_offset, travel, encoder_offset]...]
if not init_data or (not self.bypass_DXL and len(init_data) < 4) or (self.bypass_DXL and len(init_data) < 3):
print("Initialization data seems to be corrupted. Please run calibration_geometry() first.")
error = 0b10000
return error
for idx, f in enumerate(self.robot.fingerlist):
if f.name != init_data[idx][0]:
print("init_data.name does not match for %s." % f.name)
error = error | (1 << idx)
elif f.motor_id != init_data[idx][1]:
print("init_data.motor_id does not match for %s." % f.name)
error = error | (1 << idx)
else:
f.homing_offset = init_data[idx][2]
f.travel = init_data[idx][3]
if not self.bypass_ext_enc:
f.encoder_offset = init_data[idx][4]
if not self.bypass_DXL:
if self.robot.palm.name != init_data[3][0]:
print("init_data.name does not match for %s." % self.robot.palm.name)
error = error | (1 << 3)
elif self.robot.palm.motor_id != init_data[3][1]:
print("init_data.motor_id does not match for %s." % self.robot.palm.name)
error = error | (1 << 3)
else:
self.robot.palm.home = init_data[3][2]
self.robot.palm.travel = init_data[3][3]
if error:
print("Failed to start robot.")
sys.exit()
else:
# Set Current, Velocity and Position PID as well as safe iq_max and velocity_max,
# and clear Direct Force PID.
self.MC.init_driver_all()
self.robot.booted = True
print("Welcome aboard, Captain.")
# return error
def initialization(self):
"""Full hand initialization."""
# Check if booted
if self.robot.booted:
pass
else:
print("Run start_robot first.")
return False
abnormal = [0, 0, 0, 0]
print("Starting initialization sequence...")
# [Fingers, Palm] abnormal code:
# 10000 External encoder offset discrepancy
# 01000 Failed to travel to home
# 00100 Failed to fully close
# 00010 Position out of range
# 00001 home_offset abnormal
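        # Illustrative example (not from the original source): abnormal[i] == 0b00110
        # would mean finger i both failed to fully close (0b00100) and had its
        # position out of range (0b00010).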
# 0. Check PALM first
if not self.bypass_DXL:
# Compare DXL homing_offset
if self.DC.get_homing_offset() != 0:
print("Palm actuator needs calibration.\nCalibrate it first, or run with bypass_DXL option.")
abnormal[3] = 0b00001
return False
else:
# Check if position in range, home ~ home+pi/2
palm_pos = self.DC.get_present_position()
if (self.robot.palm.home - 0.2) < palm_pos < (self.robot.palm.home + 1.77):
pass
else:
print("Palm actuator needs calibration.\nCalibrate it first, or run with bypass_DXL option.")
abnormal[3] = 0b00010
return False
# bypass_DXL or PALM checked
if self.bypass_DXL:
input("Turn index fingers to parallel gesture then press enter.")
else:
print("Changing to Parallel gesture...")
self.DC.torque_enable(1)
self.DC.set_goal_position(self.robot.palm.home)
time.sleep(0.5)
self.robot.palm.angle = 0
self.MC.init_driver_all()
# 1. Compare home_offset
for i in range(3):
if round(self.robot.fingerlist[i].homing_offset, 2) != round(
self.MC.pbm.get_homing_offset(self.robot.finger_ids[i])[0][0][0], 2):
abnormal[i] = abnormal[i] | 0b0001
print("%s home_offset abnormal." % self.robot.fingerlist[i].name)
# 2. Current position with in range
present_pos = self.MC.pbm.get_present_position(BEAR_INDEX, BEAR_INDEX_M, BEAR_THUMB)
for i, pos in enumerate(present_pos):
if self.robot.fingerlist[i].mirrored:
if pos[0][0] < -0.1 or pos[0][0] > self.robot.fingerlist[i].travel + 0.1:
abnormal[i] = abnormal[i] | 0b0010
print("%s present_pos out of range." % self.robot.fingerlist[i].name)
print(pos[0])
else:
if pos[0][0] > 0.1 or pos[0][0] < self.robot.fingerlist[i].travel - 0.1:
abnormal[i] = abnormal[i] | 0b0010
print("%s present_pos out of range." % self.robot.fingerlist[i].name)
print(pos[0])
# # 3. Check for position Limit
# limit_min = self.MC.pbm.get_limit_position_min(m_id)[0]
# limit_max = self.MC.pbm.get_limit_position_max(m_id)[0]
# if limit_min != end_pos or limit_max != 0:
# abnormal = True
# print("Position limit abnoraml")
# Ask user if abnormal
if sum(abnormal):
usr = input("Fingers seem to need calibration. Do you want to continue anyway?(y/n)")
if usr == "n" or usr == "N":
return False
else:
pass
# Set motor mode and PID
# Set mode and limits
self.MC.set_mode_all('position')
# 4. Move to End -> complete
running = [True, True, True]
self.MC.torque_enable_all(1)
self.MC.pbm.set_goal_position((BEAR_INDEX, self.robot.fingerlist[0].travel),
(BEAR_INDEX_M, self.robot.fingerlist[1].travel),
                                      (BEAR_THUMB, self.robot.fingerlist[2].travel))
start_time = time.time()
while sum(running):
try:
status = self.MC.pbm.get_bulk_status((INDEX.motor_id, 'present_position', 'present_velocity'),
(INDEX_M.motor_id, 'present_position', 'present_velocity'),
(THUMB.motor_id, 'present_position', 'present_velocity'))
err = [data[1] for data in status]
position = [data[0][0] for data in status]
# velocity = [data[0][1] for data in status]
elapsed_time = time.time() - start_time
if elapsed_time < TIMEOUT_INIT:
for i in range(3):
if running[i] and abs(position[i] - self.robot.fingerlist[i].travel) < 0.1:
running[i] = False
self.MC.damping_mode(self.robot.finger_ids[i])
print("%s end travel complete." % self.robot.fingerlist[i].name)
else:
self.MC.pbm.set_goal_position((self.robot.finger_ids[i], self.robot.fingerlist[i].travel))
if err[i] != 128 and err[i] != 144:
print("%s error, code:" % self.robot.fingerlist[i].name, bin(err[i]))
else:
print("Timeout while moving to end. Is there something blocking the finger(s)?")
print("Abnormal:")
for i in range(3):
if running[i]:
abnormal[i] = abnormal[i] | 0b0100
print(self.robot.fingerlist[i].name)
self.MC.damping_mode_all()
running = [False, False, False]
except KeyboardInterrupt:
running = [0]
print("User interrupted.")
time.sleep(0.5)
# 5. Move to Home -> complete
print("Fingers resetting...")
running = [True, True, True]
# Enable torque and go to Home
self.MC.damping_release_all()
self.MC.torque_enable_all(1)
self.MC.pbm.set_goal_position((THUMB.motor_id, 0),
(INDEX.motor_id, 0),
(INDEX_M.motor_id, 0))
time.sleep(2)
start_time = time.time()
while sum(running):
try:
status = self.MC.pbm.get_bulk_status((INDEX.motor_id, 'present_position', 'present_velocity'),
(INDEX_M.motor_id, 'present_position', 'present_velocity'),
(THUMB.motor_id, 'present_position', 'present_velocity'))
err = [data[1] for data in status]
position = [data[0][0] for data in status]
# velocity = [data[0][1] for data in status]
elapsed_time = time.time() - start_time
if elapsed_time < TIMEOUT_INIT:
for i in range(3):
if abs(position[i]) < 0.1:
running[i] = False
self.MC.torque_enable(self.robot.finger_ids[i], 0)
else:
self.MC.pbm.set_goal_position((self.robot.finger_ids[i], 0))
if err[i] != 128:
print("%s error, code:" % self.robot.fingerlist[i].name, bin(err[i]))
else:
print("Timeout while resetting. Is there something blocking the finger(s)?")
print("Abnormal:")
for i in range(3):
if running[i]:
abnormal[i] = abnormal[i] | 0b1000
print(self.robot.fingerlist[i].name)
self.MC.torque_enable(self.robot.finger_ids[i], 0)
running = [False, False, False]
except KeyboardInterrupt:
running = [0]
print("User interrupted.")
# 6. Check external encoder reading and offset
if not self.bypass_ext_enc:
self.ext_enc.connect()
time.sleep(0.2)
ext_reading = self.ext_enc.get_angle()
self.ext_enc.release()
for idx, finger in enumerate(self.robot.fingerlist):
if 0.18 < abs(ext_reading[idx] - finger.encoder_offset) < 6.1:
# External encoder reading differs from the record for too much
abnormal[idx] = abnormal[idx] | 0b10000
print("%s external encoder abnormal." % finger.name)
print("ext_reading: %f" % ext_reading[idx])
print("encoder_offset: %f" % finger.encoder_offset)
# 7. Finger initialization complete
# Disable and finish
self.set_robot_enable(0)
if sum(abnormal):
print("Initialization failed.")
for idx, code in enumerate(abnormal):
if code:
print("%s abnormal, error code: %d" % (self.robot.fingerlist[idx].name, code))
return False
else:
print("Initialization Complete.")
for f in self.robot.fingerlist:
f.initialized = True
self.robot.palm.initialized = True
self.robot.initialized = True
# Get iq_compensation data
iq_compensation = self.read_iq_compensation()
if iq_compensation is None:
# There is no iq compensation data
print("WARNING: No iq_compensation found. Contact detection performance may be reduced.")
usr = input("Do you want to run a quick calibration? (Y/n)")
            if usr in ('Y', 'y'):
# Run iq_compensation calibration
if self.calibration_iq_compensation():
self.robot.iq_compensation = self.read_iq_compensation()
else:
print("Iq compensation calibration failed.")
else:
leads to a whole load of wrongdoings.",
"vote_count": 7,
"first_air_date": "2013-09-24",
"backdrop_path": "/yVsEJ9Y4c50I5ZY5Fo7odcde2cd.jpg",
"poster_path": "/wGpeMDbep7sn4m4OQbu3WQ1Bdwy.jpg",
"genre_ids": [35],
"name": "London Irish",
"original_language": "en",
"original_name": "London Irish",
"popularity": 2.643,
"character": "Steph",
"credit_id": "6215e9180b5fd60069117146",
"episode_count": 1,
"media_type": "tv",
},
],
"crew": [
{
"vote_average": 8.0,
"overview": "Fleabag may seem oversexed, emotionally unfiltered and self-obsessed, but that's just the tip of the iceberg. With family and friendships under strain and a guinea pig café struggling to keep afloat, Fleabag suddenly finds herself with nothing to lose.",
"id": 620350,
"adult": False,
"backdrop_path": "/avdCATeTca285BpMhLxSARIiFLG.jpg",
"vote_count": 23,
"genre_ids": [35],
"release_date": "2019-09-12",
"original_language": "en",
"original_title": "National Theatre Live: Fleabag",
"poster_path": "/wBmmX79NxG6LOztS3PI7uvc5ZBh.jpg",
"title": "National Theatre Live: Fleabag",
"video": True,
"popularity": 4.532,
"credit_id": "5d4565332d1e40219bbdb1fb",
"department": "Writing",
"job": "Writer",
"media_type": "movie",
},
{
"adult": False,
"backdrop_path": "/r2GAjd4rNOHJh6i6Y0FntmYuPQW.jpg",
"genre_ids": [12, 28, 53],
"vote_count": 4212,
"original_language": "en",
"original_title": "No Time to Die",
"poster_path": "/iUgygt3fscRoKWCV1d0C7FbM9TP.jpg",
"title": "No Time to Die",
"video": False,
"vote_average": 7.5,
"id": 370172,
"overview": "Bond has left active service and is enjoying a tranquil life in Jamaica. His peace is short-lived when his old friend <NAME> from the CIA turns up asking for help. The mission to rescue a kidnapped scientist turns out to be far more treacherous than expected, leading Bond onto the trail of a mysterious villain armed with dangerous new technology.",
"release_date": "2021-09-29",
"popularity": 285.445,
"credit_id": "5e56e686f48b34001577b646",
"department": "Writing",
"job": "Screenplay",
"media_type": "movie",
},
{
"backdrop_path": "/ddkbWeVyqvLyJa3QdNRalj8kMQJ.jpg",
"first_air_date": "2016-07-21",
"genre_ids": [35, 18],
"id": 67070,
"name": "Fleabag",
"origin_country": ["GB"],
"original_language": "en",
"original_name": "Fleabag",
"overview": "A comedy series adapted from the award-winning play about a young woman trying to cope with life in London whilst coming to terms with a recent tragedy.",
"poster_path": "/27vEYsRKa3eAniwmoccOoluEXQ1.jpg",
"vote_average": 8.2,
"vote_count": 660,
"popularity": 23.924,
"credit_id": "5783be1c9251417df60022ae",
"department": "Writing",
"episode_count": 12,
"job": "Writer",
"media_type": "tv",
},
{
"backdrop_path": "/2eR0H27Us1tvbjd95oW7WhiKioC.jpg",
"first_air_date": "2018-04-08",
"genre_ids": [80, 18],
"id": 72750,
"original_language": "en",
"poster_path": "/4wKhTVw8aGq5AZMa0Q1spERdi7n.jpg",
"vote_average": 8.1,
"original_name": "<NAME>",
"origin_country": ["US"],
"vote_count": 888,
"overview": "A security consultant hunts for a ruthless assassin. Equally obsessed with each other, they go head to head in an epic game of cat-and-mouse.",
"name": "<NAME>",
"popularity": 62.865,
"credit_id": "5<PASSWORD>",
"department": "Writing",
"episode_count": 8,
"job": "Writer",
"media_type": "tv",
},
{
"original_language": "en",
"id": 65251,
"overview": "A comedy drama that crashes straight into the lives and loves of six twenty-something adults living together as Property Guardians in a disused hospital.",
"vote_average": 7.3,
"vote_count": 103,
"original_name": "Crashing",
"origin_country": ["GB"],
"poster_path": "/7X09F4FWJa2OB62KVpYaFrX12ea.jpg",
"first_air_date": "2016-01-11",
"backdrop_path": "/5P6S91EXQMZJK5bHsm8RSQnkPMF.jpg",
"genre_ids": [35],
"name": "Crashing",
"popularity": 10.402,
"credit_id": "5b09d7bb9251414715007c32",
"department": "Writing",
"episode_count": 6,
"job": "Writer",
"media_type": "tv",
},
{
"backdrop_path": "/ddkbWeVyqvLyJa3QdNRalj8kMQJ.jpg",
"first_air_date": "2016-07-21",
"genre_ids": [35, 18],
"id": 67070,
"name": "Fleabag",
"origin_country": ["GB"],
"original_language": "en",
"original_name": "Fleabag",
"overview": "A comedy series adapted from the award-winning play about a young woman trying to cope with life in London whilst coming to terms with a recent tragedy.",
"poster_path": "/27vEYsRKa3eAniwmoccOoluEXQ1.jpg",
"vote_average": 8.2,
"vote_count": 660,
"popularity": 23.924,
"credit_id": "5b2e61bd9251416e00005249",
"department": "Production",
"episode_count": 6,
"job": "Executive Producer",
"media_type": "tv",
},
{
"original_language": "en",
"id": 65251,
"overview": "A comedy drama that crashes straight into the lives and loves of six twenty-something adults living together as Property Guardians in a disused hospital.",
"vote_average": 7.3,
"vote_count": 103,
"original_name": "Crashing",
"origin_country": ["GB"],
"poster_path": "/7X09F4FWJa2OB62KVpYaFrX12ea.jpg",
"first_air_date": "2016-01-11",
"backdrop_path": "/5P6S91EXQMZJK5bHsm8RSQnkPMF.jpg",
"genre_ids": [35],
"name": "Crashing",
"popularity": 10.402,
"credit_id": "5b2e61350e0a2657bf005fde",
"department": "Production",
"episode_count": 6,
"job": "Associate Producer",
"media_type": "tv",
},
{
"backdrop_path": "/2eR0H27Us1tvbjd95oW7WhiKioC.jpg",
"first_air_date": "2018-04-08",
"genre_ids": [80, 18],
"id": 72750,
"original_language": "en",
"poster_path": "/4wKhTVw8aGq5AZMa0Q1spERdi7n.jpg",
"vote_average": 8.1,
"original_name": "<NAME>",
"origin_country": ["US"],
"vote_count": 888,
"overview": "A security consultant hunts for a ruthless assassin. Equally obsessed with each other, they go head to head in an epic game of cat-and-mouse.",
"name": "<NAME>",
"popularity": 62.865,
"credit_id": "5b723c080e0a267ef41a61e4",
"department": "Production",
"episode_count": 16,
"job": "Executive Producer",
"media_type": "tv",
},
{
"original_language": "en",
"poster_path": "/rbYfDspa6vUTAxif5SZJABgB8pr.jpg",
"id": 87393,
"first_air_date": "2020-04-12",
"name": "RUN",
"vote_count": 138,
"vote_average": 6.8,
"overview": "Ruby is living a humdrum existence when one day she gets a text inviting her to fulfill a youthful pact, promising true love and self-reinvention, by stepping out of her life to take a journey with her oldest flame.",
"backdrop_path": "/7aMoV1v6A8uwTRaGV8LHIBcVBuN.jpg",
"original_name": "RUN",
"origin_country": ["US"],
"genre_ids": [35, 18],
"popularity": 9.495,
"credit_id": "5c80550a0e0a2643035d2944",
"department": "Production",
"episode_count": 7,
"job": "Executive Producer",
"media_type": "tv",
},
{
"backdrop_path": "/2eR0H27Us1tvbjd95oW7WhiKioC.jpg",
"first_air_date": "2018-04-08",
"genre_ids": [80, 18],
"id": 72750,
"original_language": "en",
"poster_path": "/4wKhTVw8aGq5AZMa0Q1spERdi7n.jpg",
"vote_average": 8.1,
"original_name": "<NAME>",
"origin_country": ["US"],
"vote_count": 888,
"overview": "A security consultant hunts for a ruthless assassin. Equally obsessed with each other, they go head to head in an epic game of cat-and-mouse.",
"name": "<NAME>",
"popularity": 62.865,
"credit_id": "623961d1d29bdd00786ca6cf",
"department": "Crew",
"episode_count": 32,
"job": "Creator",
"media_type": "tv",
},
{
"backdrop_path": "/ddkbWeVyqvLyJa3QdNRalj8kMQJ.jpg",
"first_air_date": "2016-07-21",
"genre_ids": [35, 18],
"id": 67070,
"name": "Fleabag",
"origin_country": ["GB"],
"original_language": "en",
"original_name": "Fleabag",
"overview": "A comedy series adapted from the award-winning play about a young woman trying to cope with life in London whilst coming to terms with a recent tragedy.",
"poster_path": "/27vEYsRKa3eAniwmoccOoluEXQ1.jpg",
"vote_average": 8.2,
"vote_count": 660,
"popularity": 23.924,
"credit_id": "5783be01c3a368424c00249b",
"department": "Creator",
"job": "Creator",
"media_type": "tv",
},
{
"original_language": "en",
"id": 65251,
"overview": "A comedy drama that crashes straight into the lives and loves of six twenty-something adults living together as Property Guardians in a disused hospital.",
"vote_average": 7.3,
"vote_count": 103,
"original_name": "Crashing",
"origin_country": ["GB"],
"poster_path": "/7X09F4FWJa2OB62KVpYaFrX12ea.jpg",
"first_air_date": "2016-01-11",
"backdrop_path": "/5P6S91EXQMZJK5bHsm8RSQnkPMF.jpg",
"genre_ids": [35],
"name": "Crashing",
"popularity": 10.402,
"credit_id": "5b09d82e0e0a267c27007e5e",
"department": "Creator",
"job": "Creator",
"media_type": "tv",
},
{
"poster_path": None,
"vote_average": 0.0,
"overview": "Based on the 2005 film Mr. & Mrs. Smith.",
"origin_country": ["US"],
"id": 118642,
"name": "Mr. & <NAME>",
"backdrop_path": None,
"vote_count": 0,
"genre_ids": [],
"original_name": "Mr. & <NAME>",
"original_language": "en",
"popularity": 1.4,
"credit_id": "602734a2df857c003fde9e65",
"department": "Creator",
"job": "Creator",
"media_type": "tv",
},
],
"id": 1023483,
}
series = {
"adult": False,
"backdrop_path": "/t15KHp3iNfHVQBNIaqUGW12xQA4.jpg",
"created_by": [
{
"id": 24951,
"credit_id": "52599cb3760ee34661b60d9a",
"name": "<NAME>",
"gender": 2,
"profile_path": "/il6NSy05UnO7SV9SgzEqKHXioNl.jpg",
},
{
"id": 66633,
"credit_id": "52599cb3760ee34661b60d94",
"name": "<NAME>",
"gender": 2,
"profile_path": "/uFh3OrBvkwKSU3N5y0XnXOhqBJz.jpg",
},
],
"episode_run_time": [45],
"first_air_date": "2015-02-08",
"genres": [{"id": 80, "name": "Crime"}, {"id": 18, "name": "Drama"}],
"homepage": "https://www.amc.com/shows/better-call-saul--1002228",
"id": 60059,
"in_production": True,
"languages": ["en"],
"last_air_date": "2022-05-23",
"last_episode_to_air": {
"air_date": "2022-05-23",
"episode_number": 7,
"id": 3641946,
"name": "<NAME>",
"overview": "Jimmy and Kim deal with a last-minute snag. Howard's investigation finally yields results.",
"production_code": "",
"runtime": 50,
"season_number": 6,
"still_path": "/flVUI98wSTtLmYDCx81gowleQTe.jpg",
"vote_average": 10.0,
"vote_count": 5,
},
"name": "<NAME>",
"next_episode_to_air": {
"air_date": "2022-07-11",
"episode_number": 8,
"id": 3641948,
"name": "",
"overview": "",
"production_code": "",
"runtime": None,
"season_number": 6,
"still_path": None,
"vote_average": 0.0,
"vote_count": 0,
},
"networks": [
{
"name": "AMC",
"id": 174,
"logo_path": "/pmvRmATOCaDykE6JrVoeYxlFHw3.png",
"origin_country": "US",
}
],
"number_of_episodes": 63,
"number_of_seasons": 6,
"origin_country": ["US"],
"original_language": "en",
"original_name": "<NAME>",
"overview": 'Six years before <NAME> meets Walter White. We meet him when the man who will become <NAME>man is known as <NAME>, a small-time lawyer searching for his destiny, and, more immediately, hustling to make ends meet. Working alongside, and, often, against Jimmy, is “fixer” <NAME>. The series tracks Jimmy’s transformation into Saul Goodman, the man who puts “criminal” in “criminal lawyer".',
"popularity": 237.057,
"poster_path": "/fC2HDm5t0kHl7mTm7jxMR31b7by.jpg",
"production_companies": [
{
"id": 11073,
"logo_path": "/wHs44fktdoj6c378ZbSWfzKsM2Z.png",
"name": "<NAME> Television Studios",
"origin_country": "US",
},
{
"id": 23242,
"logo_path": "/fOALFvgnO1ZdIaA9PNIAAuaDKWd.png",
"name": "AMC Networks",
"origin_country": "US",
},
],
"production_countries": [
{"iso_3166_1": "US", "name": "United States of America"}
],
"seasons": [
{
"air_date": "2014-12-17",
"episode_count": 5,
"id": 64929,
"name": "Specials",
"overview": "",
"poster_path": "/oaF4Fj0FafTEK2CX9aiIW42oABe.jpg",
"season_number": 0,
},
{
"air_date": "2015-02-08",
"episode_count": 10,
"id": 60223,
"name": "Season 1",
"overview": "When we meet him, the man who will become <NAME> is known as <NAME>, a small-time lawyer searching for his destiny, and, more immediately, hustling to make ends meet. Working alongside, and often against, Jimmy is “fixer” <NAME>.",
"poster_path": "/iwwHYFrVJF3mkHHMp77120gnYUq.jpg",
"season_number": 1,
},
{
"air_date": "2016-02-15",
"episode_count": 10,
"id": 72040,
"name": "Season 2",
"overview": "Season one left Jimmy at the center of a large class-action lawsuit that led to a promising job opportunity at a prestigious firm in Santa Fe. Having arrived at a fork in the road, will Jimmy | |
fields for fc. If none specified, all fields are returned.
Supports fields in list [] or comma separated string "field1,field2,.."
where -- optional where clause
params -- dictionary of parameters for query
"""
lyr = self.layer(layer_name)
lyr.layer_to_kmz(flds, where, params, kmz=out_kmz)
def createReplica(self, layers, replicaName, geometry='', geometryType='', inSR='', replicaSR='', dataFormat='json', returnReplicaObject=True, **kwargs):
"""query attachments, returns a JSON object
Required:
layers -- list of layers to create replicas for (valid inputs below)
replicaName -- name of replica
Optional:
geometry -- optional geometry to query features, if none supplied, will grab all features
geometryType -- type of geometry
inSR -- input spatial reference for geometry
replicaSR -- output spatial reference for replica data
dataFormat -- output format for replica (sqlite|json)
**kwargs -- optional keyword arguments for createReplica request
Special Optional Args:
returnReplicaObject -- option to return replica as an object (restapi.SQLiteReplica|restapi.JsonReplica)
based on the dataFormat of the replica. If the data format is sqlite and this parameter
is False, the data will need to be fetched quickly because the server will automatically clean
out the directory. The default cleanup for a sqlite file is 10 minutes. This option is set to True
by default. It is recommended to set this option to True if the output dataFormat is "sqlite".
Documentation on Server Directory Cleaning:
http://server.arcgis.com/en/server/latest/administer/linux/about-server-directories.htm
"""
if hasattr(self, SYNC_ENABLED) and not self.syncEnabled:
raise NotImplementedError('FeatureService "{}" does not support Sync!'.format(self.url))
# validate layers
if isinstance(layers, basestring):
layers = [l.strip() for l in layers.split(',')]
elif not isinstance(layers, (list, tuple)):
layers = [layers]
if all(map(lambda x: isinstance(x, int), layers)):
layers = ','.join(map(str, layers))
elif all(map(lambda x: isinstance(x, basestring), layers)):
layers = ','.join(map(str, filter(lambda x: x is not None,
[s.id for s in self.layers if s.name.lower()
in [l.lower() for l in layers]])))
if not geometry and not geometryType:
ext = self.initialExtent
inSR = self.initialExtent.spatialReference
geometry= ','.join(map(str, [ext.xmin,ext.ymin,ext.xmax,ext.ymax]))
geometryType = ESRI_ENVELOPE
inSR = self.spatialReference
useGeometry = False
else:
useGeometry = True
geometry = Geometry(geometry)
inSR = geometry.getSR()
geometryType = geometry.geometryType
if not replicaSR:
replicaSR = self.spatialReference
validated = layers.split(',')
options = {REPLICA_NAME: replicaName,
LAYERS: layers,
LAYER_QUERIES: '',
GEOMETRY: geometry,
GEOMETRY_TYPE: geometryType,
IN_SR: inSR,
REPLICA_SR: replicaSR,
TRANSPORT_TYPE: TRANSPORT_TYPE_URL,
RETURN_ATTACHMENTS: TRUE,
RETURN_ATTACHMENTS_DATA_BY_URL: TRUE,
ASYNC: FALSE,
F: PJSON,
DATA_FORMAT: dataFormat,
REPLICA_OPTIONS: '',
}
for k,v in kwargs.iteritems():
if k != SYNC_MODEL:
if k == LAYER_QUERIES:
if options[k]:
if isinstance(options[k], basestring):
options[k] = json.loads(options[k])
for key in options[k].keys():
options[k][key][USE_GEOMETRY] = useGeometry
options[k] = json.dumps(options[k], ensure_ascii=False)
else:
options[k] = v
if self.syncCapabilities.supportsPerReplicaSync:
options[SYNC_MODEL] = PER_REPLICA
else:
options[SYNC_MODEL] = PER_LAYER
if options[ASYNC] in (TRUE, True) and self.syncCapabilities.supportsAsync:
st = self.request(self.url + '/createReplica', options, )
while STATUS_URL not in st:
time.sleep(1)
else:
options[ASYNC] = 'false'
st = self.request(self.url + '/createReplica', options)
if returnReplicaObject:
return self.fetchReplica(st)
else:
return st
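    # A hypothetical usage sketch (not part of the original source); the service
    # URL, layer name, replica name and output path are assumptions:
    #   >>> fs = FeatureService('http://someserver.com/arcgis/rest/services/Test/FeatureServer')
    #   >>> replica = fs.createReplica('Sites', 'SitesReplica', dataFormat='sqlite')
    #   >>> with replica as db:          # restapi.SQLiteReplica object
    #   ...     db.exportToGDB(r'C:\Temp\replica.gdb')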
@staticmethod
def fetchReplica(rep_url):
"""fetches a replica from a server resource. This can be a url or a
dictionary/JSON object with a "URL" key. Based on the file name of the
replica, this will return either a restapi.SQLiteReplica() or
restapi.JsonReplica() object. The two valid file name extensions are ".json"
(restapi.JsonReplica) or ".geodatabase" (restapi.SQLiteReplica).
Required:
rep_url -- url or JSON object that contains url to replica file on server
If the file is sqlite, it is highly recommended to use a with statement to
work with the restapi.SQLiteReplica object so the connection is automatically
closed and the file is cleaned from disk. Example:
>>> url = 'http://someserver.com/arcgis/rest/directories/TEST/SomeService_MapServer/_ags_data{B7893BA273C164D96B7BEE588627B3EBC}.geodatabase'
>>> with FeatureService.fetchReplica(url) as replica:
>>> # this is a restapi.SQLiteReplica() object
>>> # list tables in database
>>> print(replica.list_tables())
>>> # export to file geodatabase <- requires arcpy access
        >>> replica.exportToGDB(r'C:\Temp\replica.gdb')
"""
if isinstance(rep_url, dict):
            rep_url = rep_url.get(URL_UPPER)
if rep_url.endswith('.geodatabase'):
resp = requests.get(rep_url, stream=True, verify=False)
fileName = rep_url.split('/')[-1]
db = os.path.join(TEMP_DIR, fileName)
with open(db, 'wb') as f:
for chunk in resp.iter_content(1024 * 16):
if chunk:
f.write(chunk)
return SQLiteReplica(db)
elif rep_url.endswith('.json'):
            return JsonReplica(requests.get(rep_url, verify=False).json())
return None
def replicaInfo(self, replicaID):
"""get replica information
Required:
replicaID -- ID of replica
"""
query_url = self.url + '/replicas/{}'.format(replicaID)
return namedTuple('ReplicaInfo', self.request(query_url))
def syncReplica(self, replicaID, **kwargs):
"""synchronize a replica. Must be called to sync edits before a fresh replica
can be obtained next time createReplica is called. Replicas are snapshots in
time of the first time the user creates a replica, and will not be reloaded
until synchronization has occured. A new version is created for each subsequent
replica, but it is cached data.
It is also recommended to unregister a replica
AFTER sync has occured. Alternatively, setting the "closeReplica" keyword
argument to True will unregister the replica after sync.
More info can be found here:
http://server.arcgis.com/en/server/latest/publish-services/windows/prepare-data-for-offline-use.htm
and here for key word argument parameters:
http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/Synchronize_Replica/02r3000000vv000000/
Required:
replicaID -- ID of replica
"""
query_url = self.url + '/synchronizeReplica'
params = {REPLICA_ID: replicaID}
for k,v in kwargs.iteritems():
params[k] = v
return self.request(query_url, params)
def unRegisterReplica(self, replicaID):
"""unregisters a replica on the feature service
Required:
replicaID -- the ID of the replica registered with the service
"""
query_url = self.url + '/unRegisterReplica'
params = {REPLICA_ID: replicaID}
return self.request(query_url, params)
class FeatureLayer(MapServiceLayer):
def __init__(self, url='', usr='', pw='', token='', proxy=None, referer=None):
"""class to handle Feature Service Layer
Required:
url -- feature service layer url
Optional (below params only required if security is enabled):
usr -- username credentials for ArcGIS Server
pw -- password credentials for ArcGIS Server
token -- token to handle security (alternative to usr and pw)
proxy -- option to use proxy page to handle security, need to provide
full path to proxy url.
referer -- option to add Referer Header if required by proxy, this parameter
is ignored if no proxy is specified.
"""
super(FeatureLayer, self).__init__(url, usr, pw, token, proxy, referer)
# store list of EditResult() objects to track changes
self.editResults = []
def updateCursor(self, fields='*', where='1=1', add_params={}, records=None, exceed_limit=False, auto_save=True, useGlobalIds=False, **kwargs):
"""updates features in layer using a cursor, the applyEdits() method is automatically
called when used in a "with" statement and auto_save is True.
Optional:
fields -- fields to return. Default is "*" to return all fields
where -- where clause
add_params -- extra parameters to add to query string passed as dict
records -- number of records to return. Default is None to return all
records within bounds of max record count unless exceed_limit is True
exceed_limit -- option to get all records in layer. This option may be time consuming
because the ArcGIS REST API uses default maxRecordCount of 1000, so queries
must be performed in chunks to get all records.
auto_save -- automatically apply edits when using with statement,
if True, will apply edits on the __exit__ method.
useGlobalIds -- (added at 10.4) Optional parameter which is false by default. Requires
the layer's supportsApplyEditsWithGlobalIds property to be true. When set to true, the
features and attachments in the adds, updates, deletes, and attachments parameters are
identified by their globalIds. When true, the service adds the new features and attachments
while preserving the globalIds submitted in the payload. If the globalId of a feature
(or an attachment) collides with a pre-existing feature (or an attachment), that feature
and/or attachment add fails. Other adds, updates, or deletes are attempted if rollbackOnFailure
is false. If rollbackOnFailure is true, the whole operation fails and rolls back on any failure
including a globalId collision.
When useGlobalIds is true, updates and deletes are identified by each feature or attachment
globalId rather than their objectId or attachmentId.
kwargs -- any additional keyword arguments supported by the applyEdits method of the REST API, see
http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/Apply_Edits_Feature_Service_Layer/02r3000000r6000000/
"""
layer = self
class UpdateCursor(Cursor):
def __init__(self, feature_set, fieldOrder=[], auto_save=auto_save, useGlobalIds=useGlobalIds, **kwargs):
super(UpdateCursor, self).__init__(feature_set, fieldOrder)
self.useGlobalIds = useGlobalIds
self._deletes = []
self._updates = []
self._attachments = {
ADDS: [],
UPDATES: [],
DELETES: []
}
self._kwargs = {}
for k,v in kwargs.iteritems():
if k not in('feature_set', 'fieldOrder', 'auto_save'):
self._kwargs[k] = v
for i, f in enumerate(self.features):
ft = Feature(f)
oid = self._get_oid(ft)
self.features[i] = ft
@property
def has_oid(self):
try:
                    return hasattr(self,
np.cos(locs1[..., 1] - locs2[..., 1])
return 6367000 * np.arccos(
cos_lat_d - cos_lat1 * cos_lat2 * (1 - cos_lon_d))
def isopoly_to_gdf(collec_poly, levels, field_name="levels"):
"""
Convert a collection of matplotlib.contour.QuadContourSet to a GeoDataFrame
Set an attribute `field_name` on each feature, according to `levels` values
(`levels` must have the same number of features as the collection of contours)
Parameters
----------
    collec_poly : matplotlib.contour.QuadContourSet
        The result of a grid interpolation from matplotlib.
    levels : array-like
        The values to use as attributes for the constructed GeoDataFrame.
    field_name : str
        The name of the field to be filled with the values contained in
        the `levels` variable (default: "levels").
Returns
-------
gdf_contours : GeoDataFrame
The result as a GeoDataFrame.
"""
polygons, data = [], []
for i, polygon in enumerate(collec_poly.collections):
mpoly = []
for path in polygon.get_paths():
path.should_simplify = False
poly = path.to_polygons()
exterior, holes = [], []
if len(poly) > 0 and len(poly[0]) > 3:
exterior = poly[0]
if len(poly) > 1:
holes = [h for h in poly[1:] if len(h) > 3]
mpoly.append(Polygon(exterior, holes))
if len(mpoly) > 1:
mpoly = MultiPolygon(mpoly)
polygons.append(mpoly)
data.append(levels[i])
elif len(mpoly) == 1:
polygons.append(mpoly[0])
data.append(levels[i])
return GeoDataFrame(geometry=polygons,
data=data,
columns=[field_name])
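# Hedged usage sketch (the grid variables and break values are assumptions,
# not from the original source):
#   >>> collec_poly = contourf(XI, YI, zi.reshape(shape).T, levels)
#   >>> gdf_contours = isopoly_to_gdf(collec_poly, levels=levels[1:], field_name="max")
#   >>> gdf_contours.head()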
class BaseSmooth:
def __repr__(self):
return "\n".join([self.info, self.info2, self.info3])
def __str__(self):
return "\n".join([self.info, self.info2, self.info3])
@property
def properties(self):
print("\n".join([self.info, self.info2, self.info3]))
def open_mask(self, mask, input_layer):
# Read the mask according to its format:
if isinstance(mask, GeoDataFrame):
self.mask = mask
elif isinstance(mask, str) and isinstance(input_layer, str) \
and mask == input_layer:
self.mask = self.gdf.copy()
else:
self.mask = GeoDataFrame.from_file(mask)
self.check_mask()
def check_mask(self):
# Ensure the mask is made of Polygon/MultiPolygon:
if len(set(self.mask.type)
.intersection({"Polygon", "MultiPolygon"})) > 0:
# Use the same projection for the mask as for the input layer:
if self.mask.crs and self.mask.crs is not self.proj_to_use:
self.use_mask = True
self.mask.to_crs(self.proj_to_use, inplace=True)
else:
self.use_mask = True
self.mask.crs = self.proj_to_use
else:
self.mask = None
self.use_mask = False
def filter_missing_values(self, variable_name, variable_name2):
# Convert the first value field to a numeric field if not already,
# and dont take into account features with no value / NaN value
if not self.gdf[variable_name].dtype in (float, int):
self.gdf.loc[:, variable_name] = \
self.gdf[variable_name].replace('', np.NaN)
self.gdf.loc[:, variable_name] = self.gdf[variable_name].astype(float)
self.gdf = self.gdf[self.gdf[variable_name].notnull()]
# Convert the second value field to a numeric field if not already,
# and dont take into account features with no value / NaN value
if variable_name2:
if not self.gdf[variable_name2].dtype in (float, int):
self.gdf.loc[:, variable_name2] = \
self.gdf[variable_name2].replace('', np.NaN)
self.gdf.loc[:, variable_name2] = \
self.gdf[variable_name2].astype(float)
self.gdf = self.gdf[self.gdf[variable_name2].notnull()]
# Provide a new index if entries have been removed :
self.gdf.index = range(len(self.gdf))
def define_levels(self, nb_class, disc_func):
zi = self.zi
_min = np.nanmin(zi)
if not nb_class:
# nb_class = int(get_opt_nb_class(len(zi)) - 2)
nb_class = 8
if not disc_func or "prog_geom" in disc_func:
levels = [_min] + [
np.nanmax(zi) / i for i in range(1, nb_class + 1)][::-1]
elif "equal_interval" in disc_func:
_bin = np.nanmax(zi) / nb_class
levels = [_min] + [_bin * i for i in range(1, nb_class+1)]
elif "percentiles" in disc_func:
levels = np.percentile(
np.concatenate((zi[zi.nonzero()], np.array([_min]))),
np.linspace(0.0, 100.0, nb_class+1))
elif "jenks" in disc_func:
levels = list(jenks_breaks(np.concatenate(
([_min], zi[zi.nonzero()])), nb_class))
levels[0] = levels[0] - _min * 0.01
elif "head_tail" in disc_func:
levels = head_tail_breaks(np.concatenate(
([_min], zi[zi.nonzero()])))
elif "maximal_breaks" in disc_func:
levels = maximal_breaks(np.concatenate(
([_min], zi[zi.nonzero()])), nb_class)
else:
raise ValueError
return levels
def render(self, nb_class=8, disc_func=None, user_defined_breaks=None,
output="GeoJSON", new_mask=False):
"""
Parameters
----------
        nb_class : int, optional
            The number of classes (default: 8).
        disc_func : str, optional
            The kind of data classification to be used (to be chosen from
            "equal_interval", "jenks", "percentiles", "head_tail_breaks",
            "maximal_breaks" and "prog_geom"), default: None.
        user_defined_breaks : list or tuple, optional
            A list of ordered breaks to use to construct the contours
            (overrides `nb_class` and `disc_func` values if any)
            (default: None).
        output : string, optional
            The type of output expected (not case-sensitive)
            in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON").
        new_mask : str, optional
            Use a new mask by giving the path to the file (Polygons only)
            to use as clipping mask, can also be directly a GeoDataFrame
            (default: False).
Returns
-------
smoothed_result : bytes or GeoDataFrame
The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame.
"""
if disc_func and 'jenks' in disc_func and not jenks_breaks:
raise ValueError(
"Missing jenkspy package - could not use jenks breaks")
zi = self.zi
if isinstance(new_mask, (type(False), type(None))):
if not self.use_mask:
self.use_mask = False
self.mask = None
else:
self.open_mask(new_mask, None)
# We want levels with the first break value as the minimum of the
# interpolated values and the last break value as the maximum of these
# values:
if user_defined_breaks:
levels = user_defined_breaks
if levels[len(levels) - 1] < np.nanmax(zi):
levels = levels + [np.nanmax(zi)]
if levels[0] > np.nanmin(zi):
levels = [np.nanmin(zi)] + levels
else:
levels = self.define_levels(nb_class, disc_func)
# Ensure that the levels are unique/increasing
# to avoid error from `contourf` :
s_levels = set(levels)
if len(s_levels) != len(levels):
levels = list(s_levels)
levels.sort()
try:
collec_poly = contourf(
self.XI, self.YI,
zi.reshape(tuple(reversed(self.shape))).T,
levels,
vmax=abs(np.nanmax(zi)), vmin=-abs(np.nanmin(zi)))
# Retry without setting the levels :
except ValueError:
collec_poly = contourf(
self.XI, self.YI,
zi.reshape(tuple(reversed(self.shape))).T,
vmax=abs(np.nanmax(zi)), vmin=-abs(np.nanmin(zi)))
# Fetch the levels returned by contourf:
levels = collec_poly.levels
# Set the maximum value at the maximum value of the interpolated values:
levels[-1] = np.nanmax(zi)
# Transform contourf contours into a GeoDataFrame of (Multi)Polygons:
res = isopoly_to_gdf(collec_poly, levels=levels[1:], field_name="max")
if self.longlat:
def f(x, y, z=None):
return (x / 0.017453292519943295,
y / 0.017453292519943295)
res.geometry = [transform(f, g) for g in res.geometry]
res.crs = self.proj_to_use
# Set the min/max/center values of each class as properties
# of this contour layer:
res["min"] = [np.nanmin(zi)] + res["max"][0:len(res)-1].tolist()
res["center"] = (res["min"] + res["max"]) / 2
# Compute the intersection between the contour layer and the mask layer:
ix_max_ft = len(res) - 1
if self.use_mask:
res.loc[0:ix_max_ft, "geometry"] = res.geometry.buffer(
0).intersection(unary_union(self.mask.geometry.buffer(0)))
# res.loc[0:ix_max_ft, "geometry"] = res.geometry.buffer(
# 0).intersection(self.poly_max_extend.buffer(-0.1))
# Repair geometries if necessary :
if not all(t in ("MultiPolygon", "Polygon") for t in res.geom_type):
res.loc[0:ix_max_ft, "geometry"] = \
[geom if geom.type in ("Polygon", "MultiPolygon")
else MultiPolygon(
[j for j in geom if j.type in ('Polygon', 'MultiPolygon')]
)
for geom in res.geometry]
if "geojson" in output.lower():
return res.to_crs({"init": "epsg:4326"}).to_json().encode()
else:
return res
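# Standalone sketch (illustration only, not part of the classes here): the
# "equal_interval" branch of `define_levels` above builds its breaks as
# multiples of `max(zi) / nb_class`, prefixed with the minimum of the
# interpolated values. The demo array and class count below are hypothetical.
def _equal_interval_levels_demo():
    import numpy as np
    zi_demo = np.array([0.0, 2.5, 7.5, 10.0, 40.0])
    nb_class_demo = 4
    _min = np.nanmin(zi_demo)
    _bin = np.nanmax(zi_demo) / nb_class_demo
    # -> [0.0, 10.0, 20.0, 30.0, 40.0]
    return [_min] + [_bin * i for i in range(1, nb_class_demo + 1)]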
class SmoothStewart(BaseSmooth):
"""
Main object, allowing the creation of an instance with some required parameters
(span, beta, etc.) and the rendering of the contour polygons according to various
parameters (data classification, number of bins, output format, etc.)
Parameters
----------
input_layer : str
Path to file to use as input (Points/Polygons) or GeoDataFrame object,
must contain a relevant numerical field.
variable_name : str
The name of the variable to use (numerical field only).
span : int
The span!
beta : float
The beta!
typefct : str, optional
The type of function in {"exponential", "pareto"} (default: "exponential").
resolution_pts: int, optional
The resolution to use (in number of points). Can be overridden by the
'resolution' parameter if set.
resolution : int, optional
The resolution to use (in units of the input file).
mask : str, optional
Path to the file (Polygons only) to use as a clipping mask (default: None).
variable_name2 : str, optional
The name of the 2nd variable to use (numerical field only); values
computed from this variable will be used to divide
values computed from the first variable (default: None)
Attributes
----------
zi : numpy.ndarray
The computed potential values for each `unknownpts`.
Methods
-------
render(nb_class=8, disc_func=None, user_defined_breaks=None,
output="GeoJSON", new_mask=False)
Render the contour polygons according to the chosen number of classes and
the chosen classification method (or according to
`user_defined_breaks`, which overrides these parameters)
"""
def __init__(self, input_layer, variable_name, span, beta,
typefct='exponential', nb_pts=10000,
resolution=None, variable_name2=None, mask=None, **kwargs):
self.sizelimit = kwargs.get('sizelimit', float('infinity'))
self.longlat = kwargs.get("distGeo", kwargs.get("longlat", True))
self.proj_to_use = {'init': 'epsg:4326'} if self.longlat \
else kwargs.get("projDistance", None) \
or ("""+proj=robin +lon_0=0 +x_0=0 +y_0=0 """
"""+ellps=WGS84 +datum=WGS84 +units=m +no_defs""")
self.gdf = input_layer.copy() if isinstance(input_layer, GeoDataFrame) \
else GeoDataFrame.from_file(input_layer)
if self.gdf.crs and self.gdf.crs is not self.proj_to_use:
self.gdf.to_crs(self.proj_to_use, inplace=True)
else:
self.gdf.crs = self.proj_to_use
self.info = (
'SmoothStewart - variable : {}{} ({} | |
if content_type is None:
raise ValueError(
"Missing the required parameter `content_type` "
"when calling `get_account_attachment_by_file_name`"
)
collection_formats = {}
path_params = {
"AccountID": account_id,
"FileName": file_name,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
"contentType": content_type,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/octet-stream"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Accounts/{AccountID}/Attachments/{FileName}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="file",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "get_account_attachment_by_file_name"
)
def get_account_attachment_by_id(
self,
xero_tenant_id,
account_id,
attachment_id,
content_type,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a specific attachment from a specific account using a unique attachment Id # noqa: E501
OAuth2 scope: accounting.attachments, accounting.attachments.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str account_id: Unique identifier for Account object (required)
:param str attachment_id: Unique identifier for Attachment object (required)
:param str content_type: The mime type of the attachment file you are retrieving i.e image/jpg, application/pdf (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: file
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_account_attachment_by_id`"
)
# verify the required parameter 'account_id' is set
if account_id is None:
raise ValueError(
"Missing the required parameter `account_id` "
"when calling `get_account_attachment_by_id`"
)
# verify the required parameter 'attachment_id' is set
if attachment_id is None:
raise ValueError(
"Missing the required parameter `attachment_id` "
"when calling `get_account_attachment_by_id`"
)
# verify the required parameter 'content_type' is set
if content_type is None:
raise ValueError(
"Missing the required parameter `content_type` "
"when calling `get_account_attachment_by_id`"
)
collection_formats = {}
path_params = {
"AccountID": account_id,
"AttachmentID": attachment_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
"contentType": content_type,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/octet-stream"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Accounts/{AccountID}/Attachments/{AttachmentID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="file",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "get_account_attachment_by_id"
)
def get_account_attachments(
self,
xero_tenant_id,
account_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves attachments for a specific accounts by using a unique account Id # noqa: E501
OAuth2 scope: accounting.attachments, accounting.attachments.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str account_id: Unique identifier for Account object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Attachments
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_account_attachments`"
)
# verify the required parameter 'account_id' is set
if account_id is None:
raise ValueError(
"Missing the required parameter `account_id` "
"when calling `get_account_attachments`"
)
collection_formats = {}
path_params = {
"AccountID": account_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Accounts/{AccountID}/Attachments")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Attachments",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_account_attachments")
def get_accounts(
self,
xero_tenant_id,
if_modified_since=empty,
where=empty,
order=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves the full chart of accounts # noqa: E501
OAuth2 scope: accounting.settings, accounting.settings.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param datetime if_modified_since: Only records created or modified since this timestamp will be returned
:param str where: Filter by any element
:param str order: Order by any element
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Accounts
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_accounts`"
)
collection_formats = {}
path_params = {}
query_params = []
if where is not empty:
query_params.append(("where", where))
if order is not empty:
query_params.append(("order", order))
header_params = {
"xero-tenant-id": xero_tenant_id,
}
if if_modified_since is not empty:
header_params["If-Modified-Since"] = if_modified_since
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Accounts")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Accounts",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_accounts")
def get_bank_transaction(
self,
xero_tenant_id,
bank_transaction_id,
unitdp=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a single spent or received money transaction by using a unique bank transaction Id # noqa: E501
OAuth2 scope: accounting.transactions, accounting.transactions.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str bank_transaction_id: Xero generated unique identifier for a bank transaction (required)
:param int unitdp: e.g. unitdp=4 – (Unit Decimal Places) You can opt in to use four decimal places for unit amounts
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: BankTransactions
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_bank_transaction`"
)
# verify the required parameter 'bank_transaction_id' is set
if bank_transaction_id is None:
raise ValueError(
"Missing the required parameter `bank_transaction_id` "
"when calling `get_bank_transaction`"
)
collection_formats = {}
path_params = {
"BankTransactionID": bank_transaction_id,
}
query_params = []
if unitdp is not empty:
query_params.append(("unitdp", unitdp))
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/BankTransactions/{BankTransactionID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="BankTransactions",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_bank_transaction")
def get_bank_transaction_attachment_by_file_name(
self,
xero_tenant_id,
bank_transaction_id,
file_name,
content_type,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a specific attachment from a specific bank transaction by filename # noqa: E501
OAuth2 scope: accounting.attachments, accounting.attachments.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str bank_transaction_id: Xero generated unique identifier for a bank transaction (required)
:param str file_name: Name of the attachment (required)
:param str content_type: The mime type of the attachment file you are retrieving i.e image/jpg, application/pdf (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: file
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_bank_transaction_attachment_by_file_name`"
)
# verify the required parameter 'bank_transaction_id' is set
if bank_transaction_id is None:
raise ValueError(
"Missing the required parameter `bank_transaction_id` "
"when calling `get_bank_transaction_attachment_by_file_name`"
)
# verify the required parameter 'file_name' is set
if file_name is None:
raise ValueError(
"Missing the required parameter `file_name` "
"when calling `get_bank_transaction_attachment_by_file_name`"
)
# verify the required parameter 'content_type' is set
if content_type is None:
raise ValueError(
"Missing the required parameter `content_type` "
"when calling `get_bank_transaction_attachment_by_file_name`"
)
collection_formats = {}
path_params = {
"BankTransactionID": bank_transaction_id,
"FileName": file_name,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
"contentType": content_type,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/octet-stream"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url(
"/BankTransactions/{BankTransactionID}/Attachments/{FileName}"
)
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="file",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "get_bank_transaction_attachment_by_file_name"
)
def get_bank_transaction_attachment_by_id(
self,
xero_tenant_id,
bank_transaction_id,
attachment_id,
content_type,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves specific attachments from a | |
** 4
)
# calc kinematic viscosity from celsius temperature AND RETURN IT, VDI
# Wärmeatlas 2013 table D2.1:
@njit(nogil=GLOB_NOGIL, cache=True)
def ny_water(T):
# 4th degree:
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
return (
1.7764473380494155e-06
- 5.5640275781265404e-08 * T
+ 1.0243072887494426e-09 * T ** 2
- 9.7954460136981165e-12 * T ** 3
+ 3.6460468745062724e-14 * T ** 4
)
# calc Prandtl number from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_Pr_water(T, Pr):
# 4th degree:
# Pr[:] = (12.909891117064289 - 0.4207372206483363*T
# + 7.4860282126284405e-03*T**2 - 6.854571430021334e-05*T**3
# + 2.4685760188512201e-07*T**4)
# 3rd degree:
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
Pr[:] = (
12.5780108199379058
- 0.35124680571767508 * T
+ 4.3225480444706085e-03 * T ** 2
- 1.9174193923188898e-05 * T ** 3
)
# calc Prandtl number from celsius temperature AND RETURN IT
# (a lot faster for single values):
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def Pr_water_return(T):
# 4th degree:
# Pr[:] = (12.909891117064289 - 0.4207372206483363*T
# + 7.4860282126284405e-03*T**2 - 6.854571430021334e-05*T**3
# + 2.4685760188512201e-07*T**4)
# 3rd degree:
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
return (
12.5780108199379058
- 0.35124680571767508 * T
+ 4.3225480444706085e-03 * T ** 2
- 1.9174193923188898e-05 * T ** 3
)
# calc isobaric expansion coefficient in [1/K] from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_beta_water(T, beta):
# 3rd degree:
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
beta[:] = (
-5.87985364766666e-05
+ 1.5641955219950547e-05 * T
- 1.3587684743777981e-07 * T ** 2
+ 6.1220503308149086e-10 * T ** 3
)
# calc isobaric expansion coefficient in [1/K] from celsius temperature
# AND RETURN IT:
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def beta_water_return(T):
# 3rd degree:
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
return (
-5.87985364766666e-05
+ 1.5641955219950547e-05 * T
- 1.3587684743777981e-07 * T ** 2
+ 6.1220503308149086e-10 * T ** 3
)
# calc Reynolds number
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_Re_water(v, L, ny, Re):
Re[:] = np.abs(v) * L / ny
# calc Reynolds number and RETURN the result
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def Re_water_return(v, L, ny):
return np.abs(v) * L / ny
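# Minimal usage sketch for the two Reynolds helpers above (Re = |v| * L / ny).
# The velocity, diameter and viscosity values are hypothetical demo numbers,
# not taken from any correlation in this module.
def _re_water_demo():
    v = np.full(3, 0.8)        # flow velocity in m/s
    d_i = 0.025                # characteristic length (inner diameter) in m
    ny = np.full(3, 1.0e-6)    # kinematic viscosity in m**2/s
    Re = np.zeros(3)
    get_Re_water(v, d_i, ny, Re)               # in-place variant
    Re_returned = Re_water_return(v, d_i, ny)  # returning variant
    return Re, Re_returned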
# ---> dry air:
# calc density from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_rho_dryair(T, rho):
# 2nd degree
rho[:] = (
1.2767987012987012
- 0.0046968614718614701 * T
+ 1.4296536796536256e-05 * T ** 2
)
# calc heat conductivity from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_lam_dryair(T, lam):
# 2nd degree
lam[:] = (
0.024358670995670989
+ 7.6533982683982561e-05 * T
- 4.2099567099572201e-08 * T ** 2
)
# calc heat conductivity from celsius temperature and return it:
@njit(nogil=GLOB_NOGIL, cache=True)
def lam_dryair_return(T):
# 2nd degree
return (
0.024358670995670989
+ 7.6533982683982561e-05 * T
- 4.2099567099572201e-08 * T ** 2
)
# calc kinematic viscosity from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_ny_dryair(T, ny):
# 2nd degree
ny[:] = (
1.3500069264069257e-05
+ 8.8810389610389459e-08 * T
+ 1.0974025974025443e-10 * T ** 2
)
# calc kinematic viscosity from celsius temperature and return it:
@njit(nogil=GLOB_NOGIL, cache=True)
def ny_dryair_return(T):
# 2nd degree
return (
1.3500069264069257e-05
+ 8.8810389610389459e-08 * T
+ 1.0974025974025443e-10 * T ** 2
)
# ---> humid air:
# saturation pressure in [Pa] of humid air for total pressures < 2MPa
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def humid_air_saturation_pressure(T):
# 6th degree
return (
+1.56927617e-09 * T ** 6
+ 2.32760367e-06 * T ** 5
+ 3.19028425e-04 * T ** 4
+ 2.51824584e-02 * T ** 3
+ 1.42489091e00 * T ** 2
+ 4.55277840e01 * T ** 1
+ 5.99770272e02
)
# 10th degree
# return (- 1.30339138e-16*T**10 + 7.49527386e-14*T**9 - 1.59881730e-11*T**8
# + 1.54764869e-09*T**7 - 5.56609536e-08*T**6 + 1.46597641e-06*T**5
# + 4.21883898e-04*T**4 + 2.43290034e-02*T**3 + 1.38204573e+00*T**2
# + 4.58581434e+01*T + 6.02909924e+02)
# mass of water in fully saturated air in [kg H2O / kg Air] for a pressure of
# 0.1 MPa, only valid for -30 <= T <= 80 !
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def humid_air_sat_water_mass(T):
r"""
Calculate the mass of water in fully saturated air (at 100% relative
humidity) in :math:`[f]= \mathtt{kg_{H_2O}}/\mathtt{kg_{Luft}}`,
valid for a pressure of :math:`0.1\mathtt{\,MPa}` and a temperature range
of :math:`-30\mathtt{\,°C}\leq T \leq 80\mathtt{\,°C}`.
"""
# assert np.all(-30 <= T) and np.all(T <= 80)
# 6th degree
# return (1.56927617e-09*T**6 + 2.32760367e-06*T**5 + 3.19028425e-04*T**4
# + 2.51824584e-02*T**3 + 1.42489091e+00*T**2 + 4.55277840e+01*T
# + 5.99770272e+02)
# 10th degree
return (
+3.47491188e-19 * T ** 10
- 6.50956001e-17 * T ** 9
+ 3.68271647e-15 * T ** 8
+ 2.06252891e-14 * T ** 7
- 7.11474217e-12 * T ** 6
+ 1.29052920e-10 * T ** 5
+ 6.62755505e-09 * T ** 4
+ 8.79652019e-08 * T ** 3
+ 8.16034548e-06 * T ** 2
+ 2.98380899e-04 * T
+ 3.79413965e-03
)
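# Usage sketch for the two humid-air helpers above; both are plain polynomial
# fits evaluated element-wise, so they accept scalars as well as numpy arrays.
# The temperatures are hypothetical demo values inside the documented
# -30..80 degC validity range of `humid_air_sat_water_mass`.
def _humid_air_demo():
    T = np.array([0.0, 20.0, 40.0])
    p_sat = humid_air_saturation_pressure(T)  # saturation pressure in [Pa]
    x_sat = humid_air_sat_water_mass(T)       # [kg H2O / kg air] at 0.1 MPa
    return p_sat, x_sat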
# %% part shape specific calculations:
def calc_flow_length(*, part_shape, vertical, **kwargs):
"""
Calculate the shape specific flow length of a part for the calculation
of heat-transfer specific numbers, like the Rayleigh number.
"""
err_str = (
'`part_shape=' + str(part_shape) + '` was passed to '
'`calc_flow_length()`. The following shapes are supported:\n'
'\'plane\', \'cylinder\', \'sphere\'.'
)
assert part_shape in ['plane', 'cylinder', 'sphere'], err_str
err_str = (
'`vertical=' + str(vertical) + '` was passed to '
'`calc_flow_length()`. `vertical` must be a bool value, '
'depicting the orientation of the surface of which the flow '
'length shall be calculated. For a sphere this argument will '
'be ignored.'
)
assert type(vertical) == bool, err_str
err_str_len = (
'The part shape specific length parameters to be passed to '
'`calc_flow_length()` depend on the part\'s shape and '
'orientation. The following parameters are needed to calculate '
'the flow length for each shape:\n'
' plane, vertical=True: `height=X`\n'
' plane, vertical=False (horizontal): `width=X`, `depth=Y`. '
'Pass the diameter as value for width and depth for a circular '
'disk.\n'
' cylinder, vertical=True: `height=X`\n'
' cylinder, vertical=False (horizontal): `diameter=X`\n'
' sphere: `diameter=X`'
)
if part_shape in ('plane', 'cylinder') and vertical:
assert 'height' in kwargs and isinstance(
kwargs['height'], (int, float)
), err_str_len
return kwargs['height'] # VDI Wärmeatlas 2013, F2.1
elif part_shape == 'plane' and not vertical:
# VDI Wärmeatlas 2013, F2.3
assert 'width' in kwargs and isinstance(
kwargs['width'], (int, float)
), err_str_len
assert 'depth' in kwargs and isinstance(
kwargs['depth'], (int, float)
), err_str_len
return (kwargs['width'] * kwargs['depth']) / (
2 * (kwargs['width'] + kwargs['depth'])
)
elif part_shape == 'cylinder' and not vertical:
assert 'diameter' in kwargs and isinstance(
kwargs['diameter'], (int, float)
), err_str_len
return kwargs['diameter'] * np.pi / 2 # VDI Wärmeatlas 2013, F2.4.1
else:
assert 'diameter' in kwargs and isinstance(
kwargs['diameter'], (int, float)
), err_str_len
return kwargs['diameter'] # VDI Wärmeatlas 2013, F2.4.2
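# Usage sketch for `calc_flow_length`, covering the documented shape cases;
# all geometry values are hypothetical demo numbers in [m].
def _flow_length_demo():
    l_vert_plane = calc_flow_length(part_shape='plane', vertical=True, height=1.2)
    l_horiz_plane = calc_flow_length(part_shape='plane', vertical=False, width=0.5, depth=0.3)
    l_horiz_cyl = calc_flow_length(part_shape='cylinder', vertical=False, diameter=0.05)
    l_sphere = calc_flow_length(part_shape='sphere', vertical=False, diameter=0.2)
    return l_vert_plane, l_horiz_plane, l_horiz_cyl, l_sphere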
# caller to calculate Reynolds number for a round pipe/TES:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def pipe_get_Re(dm, rho, ny, A, d_i, Re):
get_Re_water(dm / (rho * A), d_i, ny, Re)
# manual inlining function to calculate Reynolds number for a round pipe/TES:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def pipe_get_Re2(dm, rho, ny, A, d_i, Re):
Re[:] = dm * d_i / (rho * A * ny)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def pipe_alpha_i(dm, T, rho, ny, lam_fld, A, d_i, x, alpha):
"""
Calculates the inner alpha value in [W/(m**2K)] between the fluid inside a
pipe and the pipe wall for each cell of a round pipe or thermal energy
storage of diameter `d_i` and length `Len`.
In this case, the wall is considered in the same row of the temperature
array as the fluid and thus can't have temperatures different from the
fluid temperature.
Parameters:
-----------
dm : np.ndarray, float, integer
Massflow in the pipe/TES in [kg/s].
rho : np.ndarray
Fluid density in [kg/m**3].
ny : np.ndarray
Fluid kinematic viscosity in [m**2/s].
lam_fld : np.ndarray
Fluid heat conductivity in [W/(mK)].
A : float, integer
Inner pipe cross section in [m**2] for round pipes or hydraulic cross
section for pipes of other shapes.
d_i : float, integer
Inner pipe diameter in [m] for round pipes or hydraulic diameter for
pipes of other shapes.
x : float, integer
Distance of cell from start of the pipe [m]. If the massflow `dm` is
negative, the inverse (the distance from the other end of the pipe) is
taken.
alpha : np.ndarray
Array to save the resulting alpha value in [W/(m**2K)] for all cells
of the pipe/TES.
"""
# save shape:
shape = rho.shape
# shape = (100,)
# preallocate arrays:
Re = np.zeros(shape)
# Pr | |
self.window,
self.theme.text_state[state],
False,
None,
self,
None,
dst_x,
dst_y,
layout)
cr.restore()
return
def __draw_part_bg(self, cr, part, w, h, state, shape, style, r, aw, shapes):
# outer slight bevel or focal highlight
shapes[shape](cr, 0, 0, w, h, r, aw)
cr.set_source_rgba(0, 0, 0, 0.055)
cr.fill()
# colour scheme dicts
bg = self.theme.bg_colors
outer = self.theme.dark_line_colors
inner = self.theme.light_line_colors
# bg linear vertical gradient
if state != gtk.STATE_PRELIGHT:
color1, color2 = bg[state]
else:
if part != self.get_active():
color1, color2 = bg[self.theme.PRELIT_NORMAL]
else:
color1, color2 = bg[self.theme.PRELIT_ACTIVE]
shapes[shape](cr, 1, 1, w-1, h-1, r, aw)
lin = cairo.LinearGradient(0, 0, 0, h-1)
lin.add_color_stop_rgb(0.0, *color1)
lin.add_color_stop_rgb(1.0, *color2)
cr.set_source(lin)
cr.fill()
cr.set_line_width(1.0)
# strong outline
shapes[shape](cr, 1.5, 1.5, w-1.5, h-1.5, r, aw)
cr.set_source_rgb(*outer[state])
cr.stroke()
# inner bevel/highlight
if self.theme.light_line_colors[state]:
shapes[shape](cr, 2.5, 2.5, w-2.5, h-2.5, r, aw)
r, g, b = inner[state]
cr.set_source_rgba(r, g, b, 0.6)
cr.stroke()
return
def __shape_rect(self, cr, x, y, w, h, r, aw):
global M_PI, PI_OVER_180
cr.new_sub_path()
cr.arc(r+x, r+y, r, M_PI, 270*PI_OVER_180)
cr.arc(w-r, r+y, r, 270*PI_OVER_180, 0)
cr.arc(w-r, h-r, r, 0, 90*PI_OVER_180)
cr.arc(r+x, h-r, r, 90*PI_OVER_180, M_PI)
cr.close_path()
return
def __shape_start_arrow_ltr(self, cr, x, y, w, h, r, aw):
global M_PI, PI_OVER_180
cr.new_sub_path()
cr.arc(r+x, r+y, r, M_PI, 270*PI_OVER_180)
# arrow head
cr.line_to(w-aw+1, y)
cr.line_to(w, (h+y)*0.5)
cr.line_to(w-aw+1, h)
cr.arc(r+x, h-r, r, 90*PI_OVER_180, M_PI)
cr.close_path()
return
def __shape_mid_arrow_ltr(self, cr, x, y, w, h, r, aw):
cr.move_to(-1, y)
# arrow head
cr.line_to(w-aw+1, y)
cr.line_to(w, (h+y)*0.5)
cr.line_to(w-aw+1, h)
cr.line_to(-1, h)
cr.close_path()
return
def __shape_end_cap_ltr(self, cr, x, y, w, h, r, aw):
global M_PI, PI_OVER_180
cr.move_to(-1, y)
cr.arc(w-r, r+y, r, 270*PI_OVER_180, 0)
cr.arc(w-r, h-r, r, 0, 90*PI_OVER_180)
cr.line_to(-1, h)
cr.close_path()
return
def __shape_start_arrow_rtl(self, cr, x, y, w, h, r, aw):
global M_PI, PI_OVER_180
cr.new_sub_path()
cr.move_to(x, (h+y)*0.5)
cr.line_to(aw-1, y)
cr.arc(w-r, r+y, r, 270*PI_OVER_180, 0)
cr.arc(w-r, h-r, r, 0, 90*PI_OVER_180)
cr.line_to(aw-1, h)
cr.close_path()
return
def __shape_mid_arrow_rtl(self, cr, x, y, w, h, r, aw):
cr.move_to(x, (h+y)*0.5)
cr.line_to(aw-1, y)
cr.line_to(w+1, y)
cr.line_to(w+1, h)
cr.line_to(aw-1, h)
cr.close_path()
return
def __shape_end_cap_rtl(self, cr, x, y, w, h, r, aw):
global M_PI, PI_OVER_180
cr.arc(r+x, r+y, r, M_PI, 270*PI_OVER_180)
cr.line_to(w+1, y)
cr.line_to(w+1, h)
cr.arc(r+x, h-r, r, 90*PI_OVER_180, M_PI)
cr.close_path()
return
def __state(self, part):
# returns the idle state of the part depending on
# whether part is active or not.
if part == self.__active_part:
return gtk.STATE_ACTIVE
return gtk.STATE_NORMAL
def __tooltip_check(self, part):
# only show a tooltip if part is truncated, i.e. not all label text is
# visible.
if part.is_truncated():
self.set_has_tooltip(False)
gobject.timeout_add(50, self.__set_tooltip_cb, part.label)
else:
self.set_has_tooltip(False)
return
def __set_tooltip_cb(self, text):
# callback allows the tooltip position to be updated as pointer moves
# across different parts
self.set_has_tooltip(True)
self.set_tooltip_markup(text)
return False
def __pick_theme(self, name=None):
name = name or gtk.settings_get_default().get_property("gtk-theme-name")
themes = PathBarThemes.DICT
if themes.has_key(name):
return themes[name]()
#print "No styling hints for %s are available" % name
return PathBarThemeHuman()
def __init_drawing(self):
if self.get_direction() != gtk.TEXT_DIR_RTL:
self.__draw_part = self.__draw_part_ltr
self.__shapes = {
self.SHAPE_RECTANGLE : self.__shape_rect,
self.SHAPE_START_ARROW : self.__shape_start_arrow_ltr,
self.SHAPE_MID_ARROW : self.__shape_mid_arrow_ltr,
self.SHAPE_END_CAP : self.__shape_end_cap_ltr}
else:
self.__draw_part = self.__draw_part_rtl
self.__shapes = {
self.SHAPE_RECTANGLE : self.__shape_rect,
self.SHAPE_START_ARROW : self.__shape_start_arrow_rtl,
self.SHAPE_MID_ARROW : self.__shape_mid_arrow_rtl,
self.SHAPE_END_CAP : self.__shape_end_cap_rtl}
return
def __motion_notify_cb(self, widget, event):
if self.__scroll_xO > 0:
return
part = self.__part_at_xy(event.x, event.y)
prev_focal = self.__focal_part
if self.__button_down:
if prev_focal and part != prev_focal:
prev_focal.set_state(self.__state(prev_focal))
self.queue_draw_area(*prev_focal.get_allocation_tuple())
return
self.__button_down = False
if part and part.state != gtk.STATE_PRELIGHT:
self.__tooltip_check(part)
part.set_state(gtk.STATE_PRELIGHT)
if prev_focal:
prev_focal.set_state(self.__state(prev_focal))
self.queue_draw_area(*prev_focal.get_allocation_tuple())
self.__focal_part = part
self.queue_draw_area(*part.get_allocation_tuple())
elif not part and prev_focal != None:
prev_focal.set_state(self.__state(prev_focal))
self.queue_draw_area(*prev_focal.get_allocation_tuple())
self.__focal_part = None
return
def __leave_notify_cb(self, widget, event):
self.__button_down = False
prev_focal = self.__focal_part
if prev_focal:
prev_focal.set_state(self.__state(prev_focal))
self.queue_draw_area(*prev_focal.get_allocation_tuple())
self.__focal_part = None
return
def __button_press_cb(self, widget, event):
self.__button_down = True
part = self.__part_at_xy(event.x, event.y)
if part:
part.set_state(gtk.STATE_SELECTED)
self.queue_draw_area(*part.get_allocation_tuple())
return
def __button_release_cb(self, widget, event):
part = self.__part_at_xy(event.x, event.y)
if self.__focal_part and self.__focal_part != part:
pass
elif part and self.__button_down:
self.grab_focus()
prev_active, redraw = self.__set_active(part)
part.set_state(gtk.STATE_PRELIGHT)
self.queue_draw_area(*part.get_allocation_tuple())
if redraw:
self.queue_draw_area(*prev_active.get_allocation_tuple())
self.__button_down = False
return
# def __key_release_cb(self, widget, event):
# part = None
# # left key pressed
# if event.keyval == 65363:
# part = self.get_left_part()
# # right key pressed
# elif event.keyval == 65361:
# part = self.get_right_part()
# if not part: return
# prev_active = self.set_active(part)
# self.queue_draw_area(*part.allocation)
# if prev_active:
# self.queue_draw_area(*prev_active.allocation)
# part.emit("clicked", event.copy())
# return
def __realize_cb(self, widget):
self.theme.load(widget.style)
return
def __expose_cb(self, widget, event):
cr = widget.window.cairo_create()
if self.theme.base_hack:
cr.set_source_rgb(*self.theme.base_hack)
cr.paint()
if self.__scroll_xO:
self.__draw_hscroll(cr)
else:
self.__draw_all(cr, event.area)
del cr
return
def __style_change_cb(self, widget, old_style):
# when alloc.width == 1, this is typical of an unallocated widget,
# let's not break a sweat for nothing...
if self.allocation.width == 1:
return
self.theme = self.__pick_theme()
self.theme.load(widget.style)
# reset the height request so that, if the part height has been reduced, the
# widget will shrink to an appropriate new height based on the new font size
self.set_size_request(-1, 28)
parts = self.__parts
self.__parts = []
# recalc best fits, re-append then draw all
for part in parts:
if part.icon.pixbuf:
part.icon.load_pixbuf()
part.calc_size_requisition()
self.__append(part)
self.queue_draw()
return
def __allocation_change_cb(self, widget, allocation):
if allocation.width == 1:
return
path_w = self.__draw_width()
if path_w == allocation.width:
return
elif path_w > allocation.width:
self.__shrink_check(allocation)
else:
self.__grow_check(allocation.width, allocation)
self.queue_draw()
return
class PathPart:
def __init__(self, id, label=None, callback=None, obj=None):
self.__requisition = (0,0)
self.__layout = None
self.__pbar = None
self.id = id
self.allocation = [0, 0, 0, 0]
self.state = gtk.STATE_NORMAL
self.shape = PathBar.SHAPE_RECTANGLE
self.callback = callback
self.obj = obj
self.set_label(label or "")
self.icon = PathBarIcon()
return
def set_callback(self, cb):
self.callback = cb
return
def set_label(self, label):
# escape special characters
label = gobject.markup_escape_text(label.strip())
# some hackery to preserve italics markup: undo the escaping of <i> tags
label = label.replace('&lt;i&gt;', '<i>').replace('&lt;/i&gt;', '</i>')
self.label = label
return
def set_icon(self, stock_icon, size=gtk.ICON_SIZE_BUTTON):
self.icon.specify(stock_icon, size)
self.icon.load_pixbuf()
return
def set_state(self, gtk_state):
self.state = gtk_state
return
def set_shape(self, shape):
self.shape = shape
return
def set_x(self, x):
self.allocation[0] = int(x)
return
def set_size(self, w, h):
if w != -1: self.allocation[2] = int(w)
if h != -1: self.allocation[3] = int(h)
self.__calc_layout_width(self.__layout, self.shape, self.__pbar)
return
def set_pathbar(self, path_bar):
self.__pbar = path_bar
return
def get_x(self):
return self.allocation[0]
def get_width(self):
return self.allocation[2]
def get_height(self):
return self.allocation[3]
def get_label(self):
return self.label
def get_allocation(self):
return gtk.gdk.Rectangle(*self.get_allocation_tuple())
def get_allocation_tuple(self):
if self.__pbar.get_direction() != gtk.TEXT_DIR_RTL:
return self.allocation
x, y, w, h = self.allocation
x = self.__pbar.allocation[2]-x-w
return x, y, w, h
def get_size_requisition(self):
return self.__requisition
def get_layout(self):
return self.__layout
def activate(self):
self.__pbar.set_active(self)
return
def calc_size_requisition(self):
pbar = self.__pbar
# determine widget size based on label width
self.__layout = self.__layout_text(self.label, pbar.get_pango_context())
extents = self.__layout.get_pixel_extents()
# calc text width + 2 * padding, text height + 2 * ypadding
w = extents[1][2] + 2*pbar.theme.xpadding
h = max(extents[1][3] + 2*pbar.theme.ypadding, pbar.get_size_request()[1])
# if has icon add some more pixels on
if self.icon.pixbuf:
w += self.icon.pixbuf.get_width() + pbar.theme.spacing
h = max(self.icon.pixbuf.get_height() + 2*pbar.theme.ypadding, h)
# extend width depending on part shape ...
if self.shape == PathBar.SHAPE_START_ARROW or \
self.shape == PathBar.SHAPE_END_CAP:
w += pbar.theme.arrow_width
elif self.shape == PathBar.SHAPE_MID_ARROW:
w += 2*pbar.theme.arrow_width
# if height greater than current height request,
# reset height request to higher value
# i get the feeling this should be in set_size_request(), but meh
if h > pbar.get_size_request()[1]:
pbar.set_size_request(-1, h)
self.__requisition = (w,h)
return w, h
def is_truncated(self):
return self.__requisition[0] != self.allocation[2]
def __layout_text(self, text, pango_context):
layout = pango.Layout(pango_context)
layout.set_markup('%s' % text)
layout.set_ellipsize(pango.ELLIPSIZE_END)
return layout
def __calc_layout_width(self, layout, shape, pbar):
# set layout width
if self.icon.pixbuf:
icon_w = self.icon.pixbuf.get_width() + pbar.theme.spacing
else:
icon_w = 0
w = self.allocation[2]
if shape == PathBar.SHAPE_MID_ARROW:
layout.set_width((w - 2*pbar.theme.arrow_width -
2*pbar.theme.xpadding - icon_w)*pango.SCALE)
elif shape == PathBar.SHAPE_START_ARROW or \
shape == PathBar.SHAPE_END_CAP:
layout.set_width((w - pbar.theme.arrow_width - 2*pbar.theme.xpadding -
icon_w)*pango.SCALE)
else:
layout.set_width((w - 2*pbar.theme.xpadding - icon_w)*pango.SCALE)
return
class PathBarIcon:
def __init__(self, name=None, size=None):
self.name = name
self.size = size
self.pixbuf = None
return
def specify(self, name, size):
self.name = name
self.size = size
return
def load_pixbuf(self):
if not self.name:
print 'Error: No icon specified.'
return
if not self.size:
print 'Note: No icon size specified.'
def render_icon(icon_set, name, size):
self.pixbuf = icon_set.render_icon(
style,
gtk.TEXT_DIR_NONE,
gtk.STATE_NORMAL,
self.size or gtk.ICON_SIZE_BUTTON,
gtk.Image(),
None)
return
style = gtk.Style()
icon_set = style.lookup_icon_set(self.name)
if not icon_set:
t = gtk.icon_theme_get_default()
self.pixbuf = t.lookup_icon(self.name, self.size, 0).load_icon()
else:
icon_set = style.lookup_icon_set(self.name)
render_icon(icon_set, self.name, self.size)
| |
"""
Cadastre - Export method class
This plugins helps users to import the french land registry ('cadastre')
into a database. It is meant to ease the use of the data in QGIs
by providing search tools and appropriate layer symbology.
begin : 2013-06-11
copyright : (C) 2013, 2019 by 3liz
email : <EMAIL>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import re
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Callable, Generator
from qgis.core import (
QgsExpression,
QgsFeatureRequest,
QgsFillSymbol,
QgsLayoutExporter,
QgsLayoutGridSettings,
QgsLayoutItemLabel,
QgsLayoutItemMap,
QgsLayoutItemPage,
QgsLayoutMeasurement,
QgsLayoutPoint,
QgsLayoutSize,
QgsMapLayer,
QgsMapSettings,
QgsPrintLayout,
QgsProject,
QgsUnitTypes,
QgsVectorLayer,
)
from qgis.PyQt.QtCore import QRectF, QSettings, Qt
from qgis.PyQt.QtGui import QFont
import cadastre.cadastre_common_base as cadastre_common
@contextmanager
def _printProgress(self, nb: int) -> Generator[Callable[[int], None], None, None]:
""" Define a dummy defaultprint progress
"""
yield (lambda step: None)
class cadastreExport:
def __init__(self, project: QgsProject, layer: QgsMapLayer, etype: str, comptecommunal: str,
geo_parcelle: str = None, target_dir: str = None) -> None:
self.plugin_dir = str(Path(__file__).resolve().parent)
self.print_parcelle_page = False
if not hasattr(self, 'mProgress'):
self.mProgress = _printProgress
# Store project from context
self.mProject = project
# Store an instance of QgsComposition
self.currentComposition = None
# Get instance needed for QgsComposition
self.mInstance = self.getMapInstance()
# type of export : proprietaire or parcelle
self.etype = etype
# id of the parcelle
self.geo_parcelle = geo_parcelle
# memory layer for redlining
self.redlineLayer = None
self.ccFilter = None
if isinstance(comptecommunal, list):
self.isMulti = True
comptecommunal = list(set(comptecommunal))
if len(comptecommunal) == 1:
self.isMulti = False
comptecommunal = comptecommunal[0].strip(" '")
else:
self.isMulti = False
self.comptecommunal = comptecommunal
self.maxLineNumber = 15 # max number of lines per main table
self.numPages = 1
self.pageHeight = 210
self.pageWidth = 297
self.printResolution = 300
# target directory for saving
s = QSettings()
tempDir = s.value("cadastre/tempDir", '%s' % tempfile.gettempdir(), type=str)
if not target_dir or not os.path.exists(target_dir):
self.targetDir = tempfile.mkdtemp('', 'cad_export_', tempDir)
else:
self.targetDir = target_dir
# label for header2
if self.etype == 'proprietaire':
self.typeLabel = u'DE PROPRIÉTÉ'
else:
self.typeLabel = u'PARCELLAIRE'
self.layer = layer
self.connectionParams = cadastre_common.getConnectionParameterFromDbLayer(self.layer)
self.connector = cadastre_common.getConnectorFromUri(self.connectionParams)
self.dbType = self.connectionParams['dbType']
def getMapInstance(self) -> QgsMapSettings:
"""
Get instance of object needed to instantiate QgsComposition
QgsMapRenderer or QgsMapSettings
Different if context is server
"""
return QgsMapSettings()
def setComposerTemplates(self, comptecommunal):
"""
Set parameters for given comptecommunal
"""
# List of templates
comptecommunalAbrev = comptecommunal[9:]
self.composerTemplates = {
'header1': {
'names': ['annee', 'ccodep', 'ccodir', 'ccocom', 'libcom'],
'position': [3.5, 2.5, 145, 7.5], 'align': [128, 4],
'keepContent': True,
'type': 'sql',
'filter': 'comptecommunal',
'and': {
'proprietaire': u" AND comptecommunal = '%s'" % comptecommunal,
'parcelle': u" AND comptecommunal = '%s'" % comptecommunal
},
'sticky': True
},
'header2': {
'names': ['type'],
'position': [153.5, 2.5, 60, 7.5], 'align': [128, 4],
'keepContent': True,
'type': 'properties',
'source': [self.typeLabel],
'sticky': True
},
'header3': {
'names': ['comptecommunal'],
'position': [218.5, 2.5, 75, 7.5], 'align': [128, 2],
'keepContent': True,
'type': 'properties',
'source': [comptecommunalAbrev],
'sticky': True
},
'proprietaires': {
'names': ['lines'],
'position': [3.5, 10, 290, 40], 'align': [32, 1],
'keepContent': False,
'type': 'parent',
'source': 'proprietaires_line'
},
'proprietes_baties': {
'names': ['lines'],
'position': [3.5, 50, 290, 65], 'align': [32, 1],
'keepContent': False,
'type': 'parent',
'source': 'proprietes_baties_line'
},
'proprietes_baties_sum': {
'names': ['revenucadastral', 'co_vlbaia', 'co_bipevla', 'gp_vlbaia', 'gp_bipevla', 'de_vlbaia',
'de_bipevla', 're_vlbaia', 're_bipevla'],
'position': [3.5, 115, 290, 15], 'align': [32, 1],
'type': 'sql',
'keepContent': True,
'filter': 'comptecommunal',
'and': {
'proprietaire': u" AND l10.comptecommunal = '%s'" % comptecommunal,
'parcelle': u" AND p.parcelle = '%s'" % self.geo_parcelle
}
},
'proprietes_non_baties': {
'names': ['lines'],
'position': [3.5, 130, 290, 65], 'align': [32, 1],
'keepContent': False,
'type': 'parent',
'source': 'proprietes_non_baties_line'
},
'proprietes_non_baties_sum': {
'names': ['sum_ha_contenance', 'sum_a_contenance', 'sum_ca_contenance', 'sum_drcsuba'],
'position': [3.5, 195, 290, 13], 'align': [32, 1],
'type': 'sql',
'keepContent': True,
'filter': 'comptecommunal',
'and': {
'proprietaire': u" AND p.comptecommunal = '%s'" % comptecommunal,
'parcelle': u" AND p.parcelle = '%s'" % self.geo_parcelle
},
'bgcolor': Qt.transparent
},
'footer': {
'names': ['foot'],
'position': [3.5, 208, 288, 4], 'align': [128, 4],
'keepContent': True,
'type': 'properties',
'source': [u"Ce document est donné à titre indicatif - Il n'a pas de valeur légale"],
'bgcolor': Qt.white,
'htmlState': 0,
'font': QFont('sans-serif', 4, 1, True),
'sticky': True
}
}
self.mainTables = {
'proprietaires_line': {
'names': ['mainprop', 'epousede', 'adrprop', 'nele'],
'type': 'sql',
'keepContent': True,
'filter': 'comptecommunal',
'and': {
'proprietaire': u" AND comptecommunal = '%s'" % comptecommunal,
'parcelle': u" AND comptecommunal = '%s'" % comptecommunal
}
},
'proprietes_baties_line': {
'names': ['section', 'ndeplan', 'ndevoirie', 'adresse', 'coderivoli', 'bat', 'ent', 'niv', 'ndeporte',
'numeroinvar', 'star', 'meval', 'af', 'natloc', 'cat', 'revenucadastral', 'coll', 'natexo',
'anret', 'andeb', 'fractionrcexo', 'pourcentageexo', 'txom', 'coefreduc'],
'type': 'sql',
'filter': 'comptecommunal',
'and': {
'proprietaire': u" AND l10.comptecommunal = '%s'" % comptecommunal,
'parcelle': u" AND p.parcelle = '%s'" % self.geo_parcelle
}
},
'proprietes_non_baties_line': {
'names': ['section', 'ndeplan', 'ndevoirie', 'adresse', 'coderivoli', 'nparcprim', 'fpdp', 'star',
'suf', 'grssgr', 'cl', 'natcult', 'ha_contenance', 'a_contenance', 'ca_contenance',
'revenucadastral', 'coll', 'natexo', 'anret', 'fractionrcexo', 'pourcentageexo', 'tc', 'lff'],
'type': 'sql',
'and': {
'proprietaire': u" AND p.comptecommunal = '%s'" % comptecommunal,
'parcelle': u" AND p.parcelle = '%s'" % self.geo_parcelle
}
}
}
# items for which to count number of lines
self.lineCount = {
'proprietes_baties_line': {'count': 0, 'data': None},
'proprietes_non_baties_line': {'count': 0, 'data': None}
}
# items for which we do not run a query for each page
# but only once, keeping the content for the next pages
self.contentKeeped = {}
for key, item in list(self.composerTemplates.items()):
if 'keepContent' in item and item['keepContent']:
self.contentKeeped[key] = ''
for key, item in list(self.mainTables.items()):
if 'keepContent' in item and item['keepContent']:
self.contentKeeped[key] = ''
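# Illustration only: each `names` entry in the templates above becomes a
# `$name` placeholder in the matching .tpl file and is filled from a replace
# dict (see `getContentForGivenItem` below). The plugin's own
# `getHtmlFromTemplate` helper is not shown in this excerpt, so the function
# below is only a hypothetical stand-in for that substitution step.
def _apply_replace_dict_demo(template_text, replace_dict):
    # e.g. template_text = "<td>$ndeplan</td>", replace_dict = {"$ndeplan": "0042"}
    for placeholder, value in replace_dict.items():
        template_text = template_text.replace(placeholder, value)
    return template_text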
def getContentForGivenItem(self, key, item, page=None):
"""
Take content from template file
corresponding to the key
and assign data from item
"""
# First check previous stored content
if 'keepContent' in item and item['keepContent'] \
and self.contentKeeped[key]:
return self.contentKeeped[key]
content = ''
replaceDict = ''
# Build template file path
tplPath = os.path.join(
self.plugin_dir,
"templates",
"%s.tpl" % key
)
# Build replace dict depending on source type
if item['type'] == 'sql':
data = None
# Load SQL query and get data
# Get sql file
sqlFile = tplPath + '.sql'
fin = open(sqlFile, 'rt', encoding='utf-8')
sql = fin.read()
fin.close()
# Add schema to search_path if postgis
if self.dbType == 'postgis':
sql = sql.replace('$schema', '"{}".'.format(self.connectionParams['schema']))
else:
sql = sql.replace('$schema', '')
# Add where clause depending on etype
sql = sql.replace('$and', item['and'][self.etype])
# Limit results if asked
if page and key in list(self.mainTables.keys()) \
and key in list(self.lineCount.keys()):
offset = int((page - 1) * self.maxLineNumber)
# ~ sql+= " LIMIT %s" % self.maxLineNumber
# ~ sql+= " OFFSET %s" % offset
# Get data from the previously fetched full data
data = self.lineCount[key]['data'][offset:self.maxLineNumber + offset]
# Run SQL
if self.dbType == 'spatialite':
sql = cadastre_common.postgisToSpatialite(sql)
# Run SQL only if data has not already been defined
if data is None:
# print(sql)
[header, data, rowCount, ok] = cadastre_common.fetchDataFromSqlQuery(self.connector, sql)
# No page defined means the query is only here to
# get the line count and the whole data for proprietes_baties & proprietes_non_baties
if not page:
if key in list(self.lineCount.keys()):
# line count
self.lineCount[key]['count'] = rowCount
# keep data
self.lineCount[key]['data'] = data
if page:
# Get content for each line of data
for line in data:
replaceDict = {}
for i in range(len(item['names'])):
replaceDict['$%s' % item['names'][i]] = u'%s' % line[i]
content += self.getHtmlFromTemplate(tplPath, replaceDict)
# fill empty data to have full size table
if key in list(self.mainTables.keys()) \
and key not in list(self.contentKeeped.keys()) \
and len(data) < self.maxLineNumber:
for _ in range(self.maxLineNumber - len(data)):
replaceDict = {}
for i in range(len(item['names'])):
replaceDict['$%s' % item['names'][i]] = u' '
content += self.getHtmlFromTemplate(tplPath, replaceDict)
elif item['type'] == 'properties':
# build replace dict from properties
replaceDict = {}
for i in range(len(item['names'])):
replaceDict['$' + item['names'][i]] = item['source'][i]
content = self.getHtmlFromTemplate(tplPath, replaceDict)
elif item['type'] == 'parent':
replaceDict = {}
for i in range(len(item['names'])):
replaceDict['$' + item['names'][i]] = self.mainTables[item['source']]['content']
content = self.getHtmlFromTemplate(tplPath, replaceDict)
# Keep some content globally
if 'keepContent' in item and item['keepContent']:
self.contentKeeped[key] = content
# replace some unwanted content
content = content.replace('None', '')
return | |
"""
FUNÇÕES UTEIS PARA PRÉ PROCESSAMENTO DA IMAGEM, ANTES DA APLICAÇÃO DO OCR.
1. A IMAGEM É LIDA EM ESCALA DE CINZA;
2. GAUSSIAN BLUR É EXECUTADO PARA REMOVER QUALQUER RUÍDO DISPONÍVEL;
3. O LIMIAR ADAPTATIVO É APLICADO À IMAGEM BORRADA;
4. APLICA-SE TRANSFORMAÇÕES MORFOLÓGICAS PARA DILATAÇÃO DA IMAGEM.
5. ENCONTRAMOS OS CONTORNOS CUJA ÁREA SÃO MAIORES QUE UMA MÍNIMA ÁREA DEFINIDA.
6. COM O CONTORNO ENCONTRADO NA ÚLTIMA ETAPA, CRIAMOS UMA MÁSCARA COM A ÁREA REPRESENTADA PELA MOLDURA;
7. USANDO ESTA MÁSCARA, PODEMOS ENCONTRAR OS QUATRO CANTOS DO DOCUMENTO DE IDENTIFICAÇÃO NA IMAGEM ORIGINAL;
# Arguments
object - Required : Imagem para aplicação do OCR (Base64 | Path | Numpy Array)
# Returns
output_table - Required : Textos dos campos da tabela após aplicação das
técnicas de pré processamento,
OCR e pós processamento (String)
"""
__version__ = "1.0"
__author__ = """<NAME> (EMERVIN)"""
__data_atualizacao__ = "16/10/2021"
from inspect import stack
import cv2
from dynaconf import settings
from UTILS.generic_functions import get_date_time_now
from UTILS.image_view import image_view_functions
import execute_log
class Image_Pre_Processing(object):
def __init__(self,
blur_ksize=settings.BLUR_KSIZE,
std_dev_x_direction=settings.STD_DEV_X_DIRECTION,
std_dev_y_direction=settings.STD_DEV_Y_DIRECTION,
max_color_val=settings.MAX_COLOR_VAL,
threshold_ksize=settings.THRESHOLD_KSIZE,
subtract_from_mean=settings.SUBTRACT_FROM_MEAN,
scale=settings.SCALE,
min_table_area=settings.MIN_TABLE_AREA):
# 1 - DEFINING THE BLUR PROPERTY
# (BLURRING THE IMAGE WITH THE GOAL OF REMOVING NOISE FROM THE IMAGE)
self.blur_ksize = blur_ksize
# 2 - STANDARD DEVIATION OF THE KERNEL ALONG THE X AXIS (HORIZONTAL DIRECTION)
self.std_dev_x_direction = std_dev_x_direction
# 3 - STANDARD DEVIATION OF THE KERNEL ALONG THE Y AXIS (VERTICAL DIRECTION)
self.std_dev_y_direction = std_dev_y_direction
# 4 - DEFINING THE THRESHOLD PROPERTY
# PIXEL VALUE THAT WILL BE ASSIGNED IF THE PIXEL EXCEEDS THE THRESHOLD
self.max_color_val = max_color_val
# 5 - KERNEL SIZE FOR THE THRESHOLD
self.threshold_ksize = threshold_ksize
# 6 - VARIABLE REPRESENTING THE CONSTANT USED IN THE METHODS (SUBTRACTED FROM THE MEAN OR WEIGHTED MEAN)
self.subtract_from_mean = subtract_from_mean
# 7 - SETTING THE SCALE PARAMETER
self.scale = scale
# 8 - DEFINING THE MINIMUM TABLE AREA SIZE
self.min_table_area = min_table_area
def smoothing_blurring(self, img):
"""
O DESFOQUE GAUSSIANO É SEMELHANTE AO DESFOQUE MÉDIO,
MAS EM VEZ DE USAR UMA MÉDIA SIMPLES,
ESTAMOS USANDO UMA MÉDIA PONDERADA,
ONDE OS PIXELS DA VIZINHANÇA QUE ESTÃO MAIS PRÓXIMOS DO PIXEL CENTRAL
CONTRIBUEM COM MAIS “PESO” PARA A MÉDIA.
A SUAVIZAÇÃO GAUSSIANA É USADA PARA REMOVER O RUÍDO QUE
SEGUE APROXIMADAMENTE UMA DISTRIBUIÇÃO GAUSSIANA.
O RESULTADO FINAL É QUE NOSSA IMAGEM FICA MENOS DESFOCADA,
PORÉM MAIS "DESFOCADA NATURALMENTE".. ALÉM DISSO, COM BASE NESSA PONDERAÇÃO,
SEREMOS CAPAZES DE PRESERVAR MAIS AS BORDAS EM NOSSA
IMAGEM EM COMPARAÇÃO COM A SUAVIZAÇÃO MÉDIA.
# Arguments
img - Required : Imagem para processamento (Array)
# Returns
validator - Required : Validador de execução da função (Boolean)
blur - Required : Imagem após processamento do desfoque (Array)
"""
# INICIANDO O VALIDADOR DA FUNÇÃO
validator = False
# INICIANDO A VARIÁVEL DE RETORNO
blur = None
try:
# APPLYING THE GAUSSIAN BLUR TECHNIQUE
blur = cv2.GaussianBlur(img, self.blur_ksize,
self.std_dev_x_direction,
self.std_dev_y_direction)
execute_log.info("OCR TABLES - TÉCNICA DE DESFOQUE GAUSSIANO APLICADO COM SUCESSO - {}".format(get_date_time_now("%d/%m/%Y %H:%M:%S")))
validator = True
except Exception as ex:
execute_log.error("ERRO NA FUNÇÃO: {} - {}".format(stack()[0][3], ex))
return validator, blur
def threshold_image(self, img):
"""
O LIMIAR ADAPTATIVO CONSIDERA UM PEQUENO CONJUNTO
DE PIXELS VIZINHOS POR VEZ, CALCULA T PARA
AQUELA REGIÃO LOCAL ESPECÍFICA E, EM SEGUIDA, REALIZA A SEGMENTAÇÃO.
O SEGUNDO PARÂMETRO DA FUNÇÃO É O VALOR DO LIMITE DE SAÍDA, OU SEJA,
PIXEL <= T TORNARA-SE ESSE VALOR DE PIXEL.
EX: SE PIXEL <= T, o PIXEL TORNA-SE BRANCO (255)
O TERCEIRO ARGUMENTO É O MÉTODO DE LIMIAR ADAPTATIVO. AQUI NÓS
FORNECEMOS UM VALOR DE CV2.ADAPTIVE_THRESH_GAUSSIAN_C
PARA INDICAR QUE ESTAMOS USANDO A MÉDIA GAUSSIANA DA VIZINHANÇA
LOCAL DO PIXEL PARA CALCULAR NOSSO VALOR LIMITE DE T.
O QUARTO VALOR É O MÉTODO DE LIMIAR, AQUI PASSAMOS UM VALOR
DE CV2.THRESH_BINARY_INV PARA INDICAR QUE QUALQUER VALOR DE PIXEL QUE PASSE NO
TESTE DE LIMITE TERÁ UM VALOR DE SAÍDA DE 0. CASO CONTRÁRIO, TERÁ UM VALOR DE 255.
O QUINTO PARÂMETRO É O TAMANHO DE NOSSA VIZINHANÇA DE PIXEL,
AQUI VOCÊ PODE VER QUE IREMOS CALCULAR O VALOR MÉDIO DA INTENSIDADE
DO PIXEL EM TONS DE CINZA DE CADA SUB-REGIÃO 11 × 11 NA IMAGEM PARA CALCULAR NOSSO VALOR LIMITE.
O ARGUMENTO FINAL PARA CV2.ADAPTIVETHRESHOLD É A CONSTANTE C
QUE PERMITE SIMPLESMENTE AJUSTAR O VALOR LIMITE.
# Arguments
img - Required : Imagem para processamento (Array)
# Returns
validator - Required : Validador de execução da função (Boolean)
thresh - Required : Imagem após processamento do limiar (Array)
"""
# INICIANDO O VALIDADOR DA FUNÇÃO
validator = False
# INICIANDO A VARIÁVEL DE RETORNO
thresh = None
try:
thresh = cv2.adaptiveThreshold(img,
self.max_color_val,
cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY,
self.threshold_ksize,
self.subtract_from_mean)
execute_log.info("OCR TABLES - TÉCNICA DE LIMIAR ADAPTATIVO APLICADO COM SUCESSO - {}".format(get_date_time_now("%d/%m/%Y %H:%M:%S")))
validator = True
except Exception as ex:
execute_log.error("ERRO NA FUNÇÃO: {} - {}".format(stack()[0][3], ex))
return validator, thresh
def preprocess_blur_threshold_img(self, img):
"""
REALIZA A ORQUESTRAÇÃO DE DUAS TÉCNICAS DE PRÉ PROCESSAMENTO DA IMAGEM.
1) APLICA AS TÉCNICAS DE DESFOQUE (GAUSSIANBLUR)
2) APLICA LIMIAR DOS PLANOS DA IMAGEM (ADAPTIVETHRESHOLD)
# Arguments
img - Required : Imagem para processamento (Array)
# Returns
validator - Required : Validador de execução da função (Boolean)
thresh - Required : Imagem após ambos processamentos (Array)
"""
# INICIANDO O VALIDADOR DA FUNÇÃO
validator = False
# INICIANDO A VARIÁVEL DE RETORNO
thresh = None
execute_log.info("OCR TABLES - INICIANDO O PRÉ PROCESSAMENTO DA IMAGEM - {}".format(get_date_time_now("%d/%m/%Y %H:%M:%S")))
try:
# REALIZANDO O DESFOQUE GAUSSIANO
validator, blur = Image_Pre_Processing.smoothing_blurring(self, img)
if validator:
# APLICANDO O LIMIAR PARA MELHOR SEPARAÇÃO DE PLANO PRINCIPAL E FUNDO
validator, thresh = Image_Pre_Processing.threshold_image(self, blur)
except Exception as ex:
execute_log.error("ERRO NA FUNÇÃO: {} - {}".format(stack()[0][3], ex))
return validator, thresh
def preprocess_morfo_transformations(self, image):
"""
APPLIES MORPHOLOGICAL TRANSFORMATIONS TO THE IMAGE.
THE GOAL IS TO EMPHASIZE THE TABLE AREA,
ALLOWING A BETTER SEPARATION BETWEEN THE TABLE AND THE REST OF THE IMAGE.
# Arguments
image - Required : Image to be processed (Array)
# Returns
validator - Required : Function execution flag (Boolean)
horizontally_dilated - Required : Horizontally dilated image (Array)
vertically_dilated - Required : Vertically dilated image (Array)
"""
# INITIALIZING THE FUNCTION VALIDATOR
validator = False
# INITIALIZING THE RETURN VARIABLE
horizontally_dilated = vertically_dilated = None
execute_log.info("OCR TABLES - MORPHOLOGICAL TRANSFORMATIONS TECHNIQUE - {}".format(get_date_time_now("%d/%m/%Y %H:%M:%S")))
try:
# GETTING THE HEIGHT AND WIDTH OF THE IMAGE (image.shape RETURNS ROWS, COLUMNS)
image_height, image_width = image.shape
# PERFORMING THE OPERATIONS ON THE HORIZONTAL STRUCTURE OF THE IMAGE
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (int(image_width / self.scale), 1))
horizontally_opened = cv2.morphologyEx(image, cv2.MORPH_OPEN, horizontal_kernel)
# PERFORMING THE OPERATIONS ON THE VERTICAL STRUCTURE OF THE IMAGE
vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, int(image_height / self.scale)))
vertically_opened = cv2.morphologyEx(image, cv2.MORPH_OPEN, vertical_kernel)
# PERFORMING THE DILATION OPERATION
horizontally_dilated = cv2.dilate(horizontally_opened, cv2.getStructuringElement(cv2.MORPH_RECT, (40, 1)))
vertically_dilated = cv2.dilate(vertically_opened, cv2.getStructuringElement(cv2.MORPH_RECT, (1, 60)))
validator = True
except Exception as ex:
execute_log.error("ERROR IN FUNCTION: {} - {}".format(stack()[0][3], ex))
return validator, horizontally_dilated, vertically_dilated
def __get_max_contour(self, horizontally_dilated, vertifically_dilated):
"""
GETS THE CONTOUR WITH THE LARGEST AREA IN THE FIGURE.
THE GOAL IS TO FIND THE MAXIMUM CONTOUR IN ORDER TO KEEP ONLY THE IDENTIFICATION DOCUMENT,
REMOVING POSSIBLE OTHER OBJECTS OR
HANDLING CASES IN WHICH THE DOCUMENT WAS SCANNED ON A SHEET OF PLAIN PAPER.
1) GETS ALL CONTOURS
2) GETS THE CONTOUR WITH THE MAXIMUM AREA.
# Arguments
horizontally_dilated - Required : Horizontally dilated image (Array)
vertically_dilated - Required : Vertically dilated image (Array)
# Returns
validator - Required : Function execution flag (Boolean)
bounding_rects - Required : Contours obtained (Array)
"""
# INITIALIZING THE VARIABLE THAT WILL STORE THE LARGEST-AREA CONTOURS
bounding_rects = []
# INITIALIZING THE FUNCTION VALIDATOR
validator = False
execute_log.info("OCR TABLES - SEARCHING FOR THE DOCUMENT IN THE IMAGE - {}".format(get_date_time_now("%d/%m/%Y %H:%M:%S")))
try:
# BUILDING THE MASK AND GETTING THE CONTOURS
mask = horizontally_dilated + vertifically_dilated
contours, heirarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# KEEPING THE CONTOURS LARGER THAN THE EXPECTED MINIMUM AREA
contours = [c for c in contours if cv2.contourArea(c) > self.min_table_area]
# FROM THESE CONTOURS, OBTAINING
"""
Unit cell stacker
=================
This code can be used for stacking the unit cells of different content and
dimension to a single cell. This is intended to be helpful for constructing
complex super cell structures.
The unit cells should be given in Gaussian format, where the lattice vectors
are input in the same way as atomic coordinates, just the element symbol is
set to 'Tv'. The way the cells are going to be stacked should be given in a
YAML file, which is later termed the stacking file. Note that the current code
only works for cubic unit cells.
The stacking YAML file contains a large nested list. The first level are
entries corresponding to each layer in the Z direction of the super cell,
given in the order from ground up. For each layer, a list of rows are given
from small Y coordinate value to larger. And within each row list, a list of
building blocks should be given from small x value to larger. So in summary,
the nested structure goes as
1. Layer (Z direction)
2. Row (Y direction)
3. Block (X direction).
And for all three levels, the order is consistently from smaller to larger
coordinate values.
Each block is going to be given as a dictionary, with the following keys,
unit
The base name of the file containing the unit cell of this building block.
The actual file name should end with ``.gjf``. If the ``prefix`` parameter is
set in the parameters, the file name is prepended with it; otherwise the file
is looked for in the current working directory.
repetition
A list giving the repetition of the unit cell. It can be omitted for no
repetition, i.e. ``[1, 1, 1]``. The entries can be integers or strings. If a
string is given, the actual integer is resolved from the second document of
the YAML file. If no symbols are used, the second document can simply be
omitted.
When invoking the code, running parameters like the location of the stacking
file should be given in a YAML file passed as a command line argument, where
the mandatory ``stacking`` parameter gives the name of the stacking file.
Also a ``prefix`` parameter can be given to give the location of the stacking
and the unit cell files when they are not in the current working directory. Its
content is also going to be combined with the second document of the stacking
file to give values of the parameters for the stacking.
If just the plain atomic coordinates are desired, the file name of the output
file can be given in the command line argument ``-o``. Alternatively, the
command line parameter ``-t`` can be used to give a list of `mustache
<mustache.github.io>`_ template files to be instantiated by the code. During
the instantiation, the ``atoms`` tag will be set, with fields ``symbol``,
``x``, ``y``, and ``z`` for the atomic symbol and the Cartesian coordinates.
Also set is the ``lattice`` tag, with ``x``, ``y``, and ``z`` fields for the
Cartesian components of the lattice vectors. Also in the dictionary are all
the fields that are set in the parameter file. Note that multiple templates
can be given, and templates can also be given in the ``templates`` field of
the parameter file. The output is written to the current working directory
with any directory prefix and the possible ``.mustache`` suffix removed.
"""
from __future__ import print_function
import re
import collections
import itertools
import argparse
import sys
import numpy as np
import yaml
#
# The Gaussian file reader
# ------------------------
#
def read_gaussian(file_name):
"""Reads a Gaussian input file
Returns a list of atoms and the three lattice vectors in a pair.
"""
with open(file_name, 'r') as inp:
# form the patterns
symbol_pattern = r'^\s*(?P<symbol>\d{1,2}|[A-Z][a-z]?\d*)'
float_pattern = r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
coord_names = ['coord%d' % i for i in xrange(0, 3)]
coord_patterns = ['(?P<%s>%s)' % (i, float_pattern)
for i in coord_names]
atm_pattern = re.compile(
'\\s+'.join([symbol_pattern, ] + coord_patterns)
)
# read the coordinates
coords = []
for line in inp:
match_res = atm_pattern.match(line)
if match_res is not None:
coords.append(
(
match_res.group('symbol'),
np.array(match_res.group(*coord_names),
dtype=np.float64)
)
)
continue
# filter the atoms and the lattice vectors
atms = [i for i in coords if i[0] != 'Tv']
latt_vecs = [i[1] for i in coords if i[0] == 'Tv']
if len(latt_vecs) != 3:
raise ValueError('Wrong number of lattice vectors')
return atms, latt_vecs
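# For reference, the coordinate block of a unit-cell ``.gjf`` file is expected to look
# roughly like the (made-up) snippet below, with three 'Tv' lines giving the lattice
# vectors:
#
#   C    0.000000   0.000000   0.000000
#   C    0.892500   0.892500   0.892500
#   Tv   3.570000   0.000000   0.000000
#   Tv   0.000000   3.570000   0.000000
#   Tv   0.000000   0.000000   3.570000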
#
# Generate the stacking data structure
# ------------------------------------
#
# The YAML input file is tried to be read and parsed. In this process, the unit
# cells are also read, and the symbolic repetition numbers are also resolved
# based on the values that are given in the input file. Then the processed
# blocks are put in the same nested list data structure as in the input file.
# And this nested list structure is going to be termed a ``stacking``.
#
Block = collections.namedtuple('Block',
[
'atms',
'latt_dims',
'repetition'
])
# Note that since the code is just designed to work for square unit cells, the
# lattice vectors, the lattice dimensions of the x, y, and z are stored instead
# of the full lattice vectors here.
def gen_stacking(main_inp, additional_params=None):
"""Generates a stacking based on the YAML input file name
It will just return the same data structure as the input, with the unit
cell and the symbolic repetition number resolved. Also the parameters used
for resolving the symbols are also returned in a dictionary.
:param main_inp: The primary input file.
:param additional_params: The additional parameter dictionary, None for no
additional parameters
"""
yaml_docs = list(yaml.load_all(main_inp))
raw_stacking = yaml_docs[0]
if len(yaml_docs) < 2:
params = {}
else:
params = yaml_docs[1]
if additional_params is not None:
params.update(additional_params)
unit_cells = {}
def process_raw_dict(raw_dict):
"""The closure for process a raw dictionary read for a block"""
# resolve the unit cell
unit_base_name = raw_dict['unit']
unit_prefix = params.get('prefix', '')
unit_file_name = unit_prefix + unit_base_name + '.gjf'
if unit_base_name in unit_cells:
atms, latt_vecs = unit_cells[unit_base_name]
else:
atms, latt_vecs = read_gaussian(unit_file_name)
unit_cells[unit_base_name] = (atms, latt_vecs)
latt_dims = [latt_vecs[i][i] for i in xrange(0, 3)]
# resolve the repetition
dummy_glob = {}
if 'repetition' in raw_dict:
raw_repetition = raw_dict['repetition']
try:
repetition = [
i if type(i) == int else eval(i, dummy_glob, params)
for i in raw_repetition
]
except NameError:
print('Symbolic repetition number cannot be resolved!',
file=sys.stderr)
raise
else:
repetition = [1, 1, 1]
return Block(atms=atms, latt_dims=latt_dims, repetition=repetition)
# return the three level nested list
return ([
[
[
process_raw_dict(block_i)
for block_i in row_i
]
for row_i in layer_i
]
for layer_i in raw_stacking
], params)
#
# Perform the actual stacking
# ---------------------------
#
# After the stacking data structure has been generated, all the information
# that is needed for obtaining the stacked super cell is ready. This is
# achieved in the function :py:func`do_stacking`, which returns the atoms and
# the lattice vectors of the stacked structure.
#
def translate(atms, vec):
"""Translate a list of atoms
:param atms: The list of atoms, given as a pair of the symbol and the
coordinate in numpy array.
:param vec: A numpy array giving the translation vector.
"""
return [
(i[0], i[1] + vec)
for i in atms
]
def do_stacking(stacking):
"""Does the actual stacking
The pair of atom list and the lattice vectors are going to be returned.
"""
# This function is going to be written in a pure imperative style.
cur_begin = [0.0 for i in xrange(0, 3)]
cur_end = [0.0 for i in xrange(0, 3)]
atms = []
for layer_i in stacking:
for row_i in layer_i:
for block_i in row_i:
# Add the atoms
block_transl = np.array(cur_begin)
for rep_i in itertools.product(
*[xrange(0, i) for i in block_i.repetition]
):
rep_transl = np.array(
[i * j
for i, j in itertools.izip(block_i.latt_dims, rep_i)]
)
transl = block_transl + rep_transl
new_atms = translate(block_i.atms, transl)
atms.extend(new_atms)
# Compute the end point and update the cur_end
end_point = cur_begin + np.array(
[i * j for i, j in itertools.izip(block_i.repetition,
block_i.latt_dims)]
)
cur_end = [
max(i, j) for i, j in itertools.izip(cur_end, end_point)
]
# Update the beginning point for the
overwrite == True) and output.find('toaBr') == -1:
print(image)
#raw_input()
try:
convert(image, output, Format = out_format)
except:
print('Could not convert', image)
else:
print(os.path.basename(output), 'already exists')
########################################################################################################################
def make_vct_header(image, header_offset = '0', file_type = 'ENVI Standard', dt_dict = {'Byte': '1', 'UInt16': '12','Int16': '12'}, interleave = 'bsq', byte_order = '0', proj = 'UTM'):
datum_dict = {'WGS84' : 'WGS-84', 'NAD83' : 'NAD-83'}
info = raster_info(image)
hl = ['ENVI\n', 'description = {\n', image + '}\n']
samples = str(info['width'])
lines = str(info['height'])
bands = str(info['band_count'])
dt = str(dt_dict[info['dt']])
hl.append('samples = ' + samples + '\n')
hl.append('lines = ' + lines + '\n')
hl.append('bands = ' + bands + '\n')
hl.append('header offset = ' + header_offset + '\n')
hl.append('file type = ' + file_type + '\n')
hl.append('data type = ' + dt + '\n')
hl.append('interleave = ' + interleave + '\n')
hl.append('byte order = ' + byte_order + '\n')
coord1 = str(info['coords'][0])
coord2 = str(info['coords'][-1])
res = str(int(info['res']))
zone = str(int(info['zone']))
hemisphere = info['hemisphere']
datum = datum_dict[info['datum']]
units = info['units']
map_info = 'map info = {' + proj + ', 1, 1, ' + coord1 + ', ' + coord2 + ', ' + res + ', ' + res + ', ' + zone + ', ' + hemisphere + ', ' + datum + ', units=' + units + '}\n'
hl.append(map_info)
hl.append('band names = {\n')
band_count = info['band_count']
for band in range(band_count):
band = str(band + 1)
hl.append('Band ' + band + ', ')
hl[-1] = hl[-1][:-2] + '}\n'
header = os.path.splitext(image)[0] + '.hdr'
print('Writing header:', header)
open_file = open(header, 'w')
open_file.writelines(hl)
open_file.close()
########################################################################################################################
#Will reproject a shapefile using ogr2ogr
#Can specify the crs manually as a string, only define the necessary parts of a UTM coordinate system, or define the proj = 'Albers' for albers
def reproject_shapefile(shapefile = '', output = '', crs = '', proj = 'utm', zone = '', datum = 'nad83', gdal_dir = gdal_dir, guiable = True):
if shapefile == '':
shapefile = str(askopenfilename(title = 'Select shapefile to reproject',filetypes=[("Shapefile","*.shp")]))
output = str(asksaveasfilename(title = 'Select output shapefile name',initialdir = cwd,filetypes=[("Shapefile","*.shp")]))
proj = askstring('Projection', 'Please enter projection name (e.g. utm, or albers)')
#if proj != 'albers' or proj == 'Albers' or proj == 'albers_conical'
if proj == 'albers' or proj == 'Albers' or proj == 'albers_conical':
#Use for USA Contiguous Albers Equal Area Conic USGS.prj
#Same as in MTBS .prj file:
#PROJCS["USA_Contiguous_Albers_Equal_Area_Conic_USGS_version",GEOGCS["GCS_North_American_1983",
#DATUM["D_North_American_1983",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],
#UNIT["Degree",0.0174532925199433]],PROJECTION["Albers"],PARAMETER["False_Easting",0.0],
#PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",-96.0],PARAMETER["Standard_Parallel_1",29.5],
#PARAMETER["Standard_Parallel_2",45.5],PARAMETER["Latitude_Of_Origin",23.0],UNIT["Meter",1.0]]
crs = '"+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs +towgs84=0,0,0"'
elif proj.upper() == 'GEOG':
crs = '"'+web_mercator_crs+ '"'
elif crs == '':
crs = '"+proj=' + proj + ' +zone=' + str(zone) + ' +datum=' + datum + '"'
elif crs != '':
crs = '"' + crs + '"'
statement = gdal_dir + 'ogr2ogr -f "ESRI Shapefile" -t_srs ' + crs + ' "' + output + '" "' + shapefile + '"'
print(statement)
call = subprocess.Popen(statement)
call.wait()
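#Hedged usage sketch of reproject_shapefile (file names below are hypothetical):
#   reproject_shapefile('plots.shp', 'plots_utm10.shp', zone=10, datum='nad83')  # UTM zone 10, NAD83
#   reproject_shapefile('plots.shp', 'plots_albers.shp', proj='albers')          # USA Contiguous Albers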
#####################################################################################################
def is_Geog(Input):
if os.path.splitext(Input)[1] == '.shp':
info = shape_info(Input)
else:
info = raster_info(Input)
crs = info['proj4']
if crs in geog_crss:
return True
else:
return False
def get_geog(shp):
if is_Geog(shp):
print('It already is geographic')
return shp
else:
print('Not geographic. Reprojecting',base(shp))
out_shp = os.path.splitext(shp)[0] + '_geog.shp'
if os.path.exists(out_shp) == False:
reproject_shapefile(shp,out_shp,'','geog')
return out_shp
#####################################################################
#Function to convert to geojson
def to_geojson(shp):
json_filename = os.path.splitext(shp)[0] + '.json'
statement = gdal_dir + 'ogr2ogr -f "GeoJSON" -t_srs EPSG:4326 "' + json_filename+ '" "'+ shp + '"'
if os.path.exists(json_filename) == False:
print(statement)
call = subprocess.Popen(statement)
call.wait()
############################################################################
def select_by_location(shp_to_clip, output, clip_extent, gdal_dir = gdal_dir, overwrite = True):
if overwrite or os.path.exists(output) == False:
if os.path.exists(output):
try:
delete_shapefile(output)
except:
print('Could not delete', output)
statement = gdal_dir + 'ogr2ogr -f "ESRI Shapefile" ' + output + ' ' + shp_to_clip
if type(clip_extent) == list:
statement += ' -clipsrc ' + coords_to_gdal(clip_extent)
elif os.path.splitext(clip_extent)[1] == '.shp':
statement += ' -clipsrc "' + clip_extent + '"'
elif os.path.splitext(clip_extent)[1] in list(format_dict.keys()):
print('Extracting clip coords from raster')
cs =raster_info(clip_extent)['gdal_coords']
print(cs)
statement += ' -clipsrc ' + cs
print(statement)
call = subprocess.Popen(statement)
call.wait()
else:
print('Output already exists')
#####################################################################################################
def utm_maker(shapefile, output, zone, datum):
temp_folder = os.path.dirname(output) + '/temp/'
if os.path.exists(temp_folder) == False:
os.makedirs(temp_folder)
proj_shp = temp_folder + os.path.basename(os.path.splitext(shapefile)[0]) + '_zone_' + str(zone) + '.shp'
if os.path.exists(proj_shp) == False:
reproject_shapefile(shapefile, proj_shp, zone = zone, datum = datum)
proj_info = shape_info(proj_shp, False)
shp_info = shape_info(shapefile, False)
pc = proj_info['coords']
sc = shp_info['coords']
return proj_shp, sc, pc
########################################################################################################################
def xy_utm_zone_conversion(shapefile, zone, extension = '_convert', datum = 'WGS84', cleanup = False):
zone = str(zone)
output = os.path.splitext(shapefile)[0] + '_zone_' + zone + extension + '.shp'
if os.path.exists(output) == False:
reproject_shapefile(shapefile, output, datum = 'WGS84', zone = str(zone))
out = xy_coords(output, False)
if cleanup == True:
try:
delete_shapefile(output)
except:
print('Could not delete', output)
return out
########################################################################################################################
#Reprojects a raster using gdalwarp
#Will produce lines with zero values if RMSE is high
#Can specify the crs manually as a string, only define the necessary parts of a UTM coordinate system, or define the proj = 'Albers' for albers
#resampling_method: near (default), bilinear, cubic, cubicspline, lanczos
#clip_extent: 'xmin ymin xmax ymax'
def reproject(Input, output, crs = '', proj = 'utm', zone = '', datum = 'nad83' , res = '',resampling_method = 'cubic', clip_extent = '', no_data = 'NODATA', src_no_data = '', dst_no_data = '', cutline = '',Format = 'HFA', source_crs = '',dt = '', wm = 40, gdal_dir = gdal_dir):
Format = format_dict[os.path.splitext(output)[1]]
if type(clip_extent) == list:
clip_extent = coords_to_gdal(clip_extent)
if type(Input) == list:
temp = Input
Input = '"'
for t in temp:
Input += t + '" "'
Input = Input[:-2]
if source_crs != '' and source_crs != None:
s_crs = ' -s_srs "' + source_crs + '" '
else:
s_crs = ''
if crs == '':
if proj == 'albers' or proj == 'Albers' or proj == 'albers_conical':
#Use for USA Contiguous Albers Equal Area Conic USGS.prj
#Same as in MTBS .prj file:
#PROJCS["USA_Contiguous_Albers_Equal_Area_Conic_USGS_version",GEOGCS["GCS_North_American_1983",
#DATUM["D_North_American_1983",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],
#UNIT["Degree",0.0174532925199433]],PROJECTION["Albers"],PARAMETER["False_Easting",0.0],
#PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",-96.0],PARAMETER["Standard_Parallel_1",29.5],
#PARAMETER["Standard_Parallel_2",45.5],PARAMETER["Latitude_Of_Origin",23.0],UNIT["Meter",1.0]]
crs = '"+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs +towgs84=0,0,0"'
#lambert_list =
elif proj in ['laea', 'lamberts', 'lambert']:
crs = '"+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +datum=NAD83 +ellps=GRS80 +units=m +no_defs"'
if crs == '' and zone != '':
crs = '"+proj=' + proj + ' +zone=' + str(zone) + ' +datum=' + str(datum) + '"'
elif crs[0] != '"':
crs = '"' + crs + '"'
if crs != '':
crs = ' -t_srs ' + crs
if res != '':
res = ' -tr ' + str(res) + ' ' + str(res) + ' '
if cutline != '':
cutline = ' -cutline ' + cutline
if src_no_data != '' and dst_no_data != '':
no_data = ' -srcnodata ' + str(src_no_data) + ' -dstnodata '+ str(dst_no_data)
elif src_no_data != '':
no_data = ' -srcnodata ' + str(src_no_data)
elif dst_no_data != '':
no_data = ' -dstnodata ' + str(dst_no_data)
elif no_data != '':
no_data = ' -srcnodata ' + str(no_data) + ' -dstnodata '+ str(no_data)
if clip_extent != '':
clip_extent = ' -te ' + clip_extent
if dt != '':
dt = ' -ot ' + str(dt) + ' '
print('Reprojecting:' ,Input.split('/')[-1])
print()
if Format != '':
ot = ' -of ' + Format
gdal_call = gdal_dir + 'gdalwarp -wm '+str(wm)+' -multi' + ot +s_crs + dt+ no_data +cutline + crs + res + clip_extent +' -r ' + resampling_method + ' "' + Input + '" "' + output + '"'
print(gdal_call)
print()
call = subprocess.Popen(gdal_call)
call.wait()
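#Hedged usage sketch of reproject (paths and values are hypothetical; the output format
#is inferred from the output extension via format_dict):
#   reproject('scene_toa.img', 'scene_toa_utm10.img', zone=10, datum='wgs84', res=30,
#             resampling_method='cubic', src_no_data=0, dst_no_data=0)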
def gdal_clip(Input, output,clip_file, gdal_dir = gdal_dir):
bounds = shape_info(clip_file)['coords']
print(bounds)
clip_extent = coords_to_gdal(bounds)
print(clip_extent)
Format = format_dict[os.path.splitext(output)[1]]
## gdal_call = gdal_dir + 'gdal_translate -projwin ' + str(bounds[0]) + ' ' + str(bounds[1]) + ' ' + str(bounds[2]) + ' ' + str(bounds[3]) + ' -of ' + Format + ' "' +Input + '" "' + output + '"'
gdal_call = gdal_dir + 'gdalwarp -cutline ' + clip_file + ' -of ' + Format + ' "' +Input + '" "' + output + '"'
print(gdal_call)
<reponame>djconly85/PPA2_0_code
# --------------------------------
# Name: utils.py
# Purpose: Provides general PPA functions that are used throughout various PPA scripts and are not specific to any one PPA script
# NOTE:
# This version, in the __init__ method of the Publish class, hard-codes in variables
# that normally come from the params parameter script. This is because, for some reason
# # when pulled from the params file, the values "stuck" between runs and the wrong output tabs
# in the output Excel spreadsheet would be colored.
# Author: <NAME>
# Last Updated: 8/6/2020
# Updated by: <name>
# Copyright: (c) SACOG
# Python Version: 3.x
# --------------------------------
import os
# import pdb
import sys
import datetime as dt
import time
import gc
import csv
import math
import shutil
import openpyxl
from openpyxl.drawing.image import Image
import xlwings as xw
import pandas as pd
import arcpy
import ppa_input_params as params
def trace():
import traceback, inspect
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# script name + line number
line = tbinfo.split(", ")[1]
filename = inspect.getfile(inspect.currentframe())
# Get Python syntax error
synerror = traceback.format_exc().splitlines()[-1]
return line, filename, synerror
def remove_forbidden_chars(in_str):
'''Replaces forbidden characters with acceptable characters'''
repldict = {"&":'And','%':'pct','/':'-'}
for old, new in repldict.items():
if old in in_str:
out_str = in_str.replace(old, new)
else:
out_str = in_str
return out_str
def esri_field_exists(in_tbl, field_name):
fields = [f.name for f in arcpy.ListFields(in_tbl)]
if field_name in fields:
return True
else:
return False
def esri_object_to_df(in_esri_obj, esri_obj_fields, index_field=None):
'''converts esri gdb table, feature class, feature layer, or SHP to pandas dataframe'''
data_rows = []
with arcpy.da.SearchCursor(in_esri_obj, esri_obj_fields) as cur:
for row in cur:
out_row = list(row)
data_rows.append(out_row)
out_df = pd.DataFrame(data_rows, index=index_field, columns=esri_obj_fields)
return out_df
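# Hedged usage sketch (the geodatabase path and field names are hypothetical):
#   df = esri_object_to_df(r'C:\projects\ppa.gdb\project_lines', ['proj_name', 'juris'])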
def return_perf_outcomes_options(project_type):
xlsx = params.type_template_dict[project_type]
xlsx_path = os.path.join(params.template_dir, xlsx)
wb = openpyxl.load_workbook(xlsx_path) # openpyxl.load_workbook(xlsx_path, read_only=False, keep_vba=True)
sheets = wb.sheetnames
# values in this list will be the potential performance outcomes from which users can choose
perf_outcomes = [s for s in sheets if s not in params.sheets_all_reports]
return perf_outcomes
def rename_dict_keys(dict_in, new_key_dict):
'''if dict in = {0:1} and dict out supposed to be {'zero':1}, this function renames the key accordingly per
the new_key_dict (which for this example would be {0:'zero'}'''
dict_out = {}
for k, v in new_key_dict.items():
if k in list(dict_in.keys()):
dict_out[v] = dict_in[k]
else:
dict_out[v] = 0
return dict_out
def join_xl_import_template(template_xlsx, template_sheet, in_df, joincolidx=0):
'''takes in import tab of destination Excel sheet, then left joins to desired output dataframe to ensure that
output CSV has same rows every time, even if data frame that you're joining doesn't
have all records'''
df_template = pd.read_excel(template_xlsx, template_sheet)
df_template = pd.DataFrame(df_template[df_template.columns[joincolidx]]) # get rid of all columns except for data items column
df_template = df_template.set_index(df_template.columns[joincolidx]) # set data items column to be the index
df_out = df_template.join(in_df) # left join the df from import sheet template to the df with data based on index values
return df_out
def append_proj_to_master_fc(project_fc, proj_attributes_dict, master_fc):
'''Takes project line and appends it to master line feature class with all lines users have entered'''
arcpy.AddMessage("Archiving project line geometry...")
#get geometry of user-drawn input line
try:
fld_shape = "SHAPE@"
geoms = []
with arcpy.da.SearchCursor(project_fc, fld_shape) as cur:
for row in cur:
geoms.append(row[0])
#make row that will be inserted into master fc
new_row = geoms + [v for k, v in proj_attributes_dict.items()]
# use insert cursor to add in appropriate project name, etc.
fields = [fld_shape] + list(proj_attributes_dict.keys())
inscur = arcpy.da.InsertCursor(master_fc, fields)
inscur.insertRow(new_row)
del inscur
t_returns = (params.msg_ok,)
except:
msg = trace()
t_returns = (msg,)
return t_returns
class Publish(object):
def __init__(self, in_df, xl_template, import_tab, xl_out, project_fc, ptype, selecd_po_sheets=None,
proj_name='UnnamedProject'):
# params from input arguments
self.in_df = in_df
self.xl_template = xl_template
self.import_tab = import_tab
self.xl_out = xl_out
self.selecd_po_sheets = selecd_po_sheets # 3 performance outcomes selected by user
self.proj_name = proj_name
self.project_fc = project_fc #remember, this is a feature set!
#=====WORKAROUND - these are normally supposed to be imported from params.py file======
'''Root issue replication:
1 - run the tool, selecting any number of outcomes
2 - re-run the tool, but with different outcomes selected.
Expected result = output Excel file has the tabs colored and moved for the outcomes selected for each run
Actual result = output Excel for second run has tabs colored for the outcomes selected for
both runs, e.g., tabs selected for run two have outcome tabs colored for outcomes selected in
run 1 as well.'''
ptype_fwy = 'Freeway'
ptype_arterial = 'Arterial or Transit Expansion'
ptype_sgr = 'Complete Street or State of Good Repair'
ptype_commdesign = "Community Design"
xlsx_disclaimer_sheet = '0BUsingThisReport'
xlsx_titlepg_sheet = '0ATitlePg'
xlsx_socequity_sheet = '8SocioEconEquity'
# regardless of which perf outcomes user selects, these tabs will be printed to
# every PDF report for the selected project type.
sheets_all_reports_workarnd = {ptype_arterial: [xlsx_titlepg_sheet, xlsx_disclaimer_sheet, xlsx_socequity_sheet],
ptype_sgr: [xlsx_titlepg_sheet, xlsx_disclaimer_sheet, xlsx_socequity_sheet],
ptype_commdesign: [xlsx_titlepg_sheet, xlsx_disclaimer_sheet],
ptype_fwy: [xlsx_titlepg_sheet, xlsx_disclaimer_sheet]}
#===============END WORKAROUND PORTION================
# params that are derived or imported from ppa_input_params.py
self.sheets_all_rpts = sheets_all_reports_workarnd[ptype] # params.sheets_all_reports[ptype]
arcpy.AddMessage("This is a {} project".format(ptype))
arcpy.AddMessage("Sheets that always go in report for this proj type: {}" \
.format(self.sheets_all_rpts))
arcpy.AddMessage("Dict of sheets in all reports of this proj type, from params py file: {}" \
.format(params.sheets_all_reports[ptype]))
self.mapimg_configs_csv = params.mapimg_configs_csv
self.img_format = params.map_img_format # jpg, png, etc.
self.map_placement_csv = params.map_placement_csv
self.aprx_path = params.aprx_path
self.proj_line_template_fc = os.path.join(params.fgdb, params.proj_line_template_fc)
# other pre-defined class vars to use
self.time_sufx = str(dt.datetime.now().strftime('%m%d%Y%H%M'))
self.out_folder = arcpy.env.scratchFolder
#xlsx related params
self.xl_out_path = os.path.join(self.out_folder, self.xl_out)
shutil.copyfile(xl_template, self.xl_out_path)
self.xl_workbook = openpyxl.load_workbook(self.xl_out_path) #work off of a copy of the template, so template remains free. Important for multi-user reliability.
def overwrite_df_to_xlsx(self, unused=0, start_row=0, start_col=0): # why does there need to be an argument?
'''Writes pandas dataframe <in_df_ to <tab_name> sheet of <xlsx_template> excel workbook.'''
in_df = self.in_df.reset_index()
df_records = in_df.to_records(index=False)
# get header row for output
out_header_list = [list(in_df.columns)] # get header row for output
out_data_list = [list(i) for i in df_records] # get output data rows
comb_out_list = out_header_list + out_data_list
ws = self.xl_workbook[self.import_tab]
for i, row in enumerate(comb_out_list):
for j, val in enumerate(row):
cell = ws.cell(row=(start_row + (i + 1)), column=(start_col + (j + 1)))
if (cell):
cell.value = val
def build_configs(self):
in_csv = self.mapimg_configs_csv
p_map = "MapName" # map that layout and image are derived from
p_layout = "MapLayout" # layout that will be made into image
p_where = "SQL" # background data layer (e.g. collision heat layer)
p_projline = "ProjLineLayer"
out_config_list = []
with open(in_csv, 'r') as f_in:
reader = csv.DictReader(f_in)
for row in reader:
v_map = row[p_map]
v_layout = row[p_layout]
v_projline = row[p_projline]
v_where = row[p_where]
out_config_row = [v_map, v_layout, v_projline, v_where]
out_config_list.append(out_config_row)
return out_config_list
class PrintConfig(object):
'''each PrintConfig object has attributes: map frame, layer name, where clause'''
def __init__(self, l_print_config, imgtyp):
self.MapFrame = l_print_config[0] # map/mapframe name
self.Layout = l_print_config[1] # layout name
n_elements = len(l_print_config)
if(n_elements>1):
self.Layer = l_print_config[2] #..layerName used for zoomto (controls extent)
else:
self.Layer = ""
if(n_elements>2):
self.Where = l_print_config[3] #..where to get features in the layer.
else:
self.Where = ""
self.OutputImageName = "{}.{}".format(self.MapFrame, imgtyp)
def expandExtent2D(self, ext, ratio):
'''Adjust zoom extent for map of project segment
ext = input extent object
ratio = how you want to change extent. Ratio > 1 zooms away from project line; <1 zooms in to project line
'''
try:
# spref = ext.spatialReference
xmin = ext.XMin
xmax = ext.XMax
ymin = ext.YMin
ymax = ext.YMax
width = ext.width
height = ext.height
dx = (ratio-1.0)*width/2.0 # divided by two so that the diff is split evenly between opposite sides, so the feature stays at the center of the extent
dy = (ratio-1.0)*height/2.0
xxmin = xmin - dx
xxmax = xmax + dx
yymin = ymin - dy
yymax = ymax + dy
new_ext = arcpy.Extent(xxmin, yymin, xxmax, yymax)
except:
new_ext = None
return new_ext
# generates image files from maps
def exportMap(self):
arcpy.AddMessage('Generating maps for report...')
arcpy.env.overwriteOutput = True
try:
# create temporary copy of APRX to not
from collections import defaultdict
import datetime
from operator import itemgetter
import re
from uuid import getnode as getmac
from gmusicapi import session
from gmusicapi.clients.shared import _Base
from gmusicapi.protocol import mobileclient
from gmusicapi.utils import utils
class Mobileclient(_Base):
"""Allows library management and streaming by posing as the
googleapis.com mobile clients.
Uploading is not supported by this client (use the :class:`Musicmanager`
to upload).
"""
_session_class = session.Mobileclient
FROM_MAC_ADDRESS = object()
def __init__(self, debug_logging=True, validate=True, verify_ssl=True):
super(Mobileclient, self).__init__(self.__class__.__name__,
debug_logging,
validate,
verify_ssl)
def login(self, email, password, android_id):
"""Authenticates the Mobileclient.
Returns ``True`` on success, ``False`` on failure.
:param email: eg ``'<EMAIL>'`` or just ``'test'``.
:param password: password or app-specific password for 2-factor users.
This is not stored locally, and is sent securely over SSL.
:param android_id: 16 hex digits, eg ``'1234567890abcdef'``.
Pass Mobileclient.FROM_MAC_ADDRESS instead to attempt to use
this machine's MAC address as an android id.
**Use this at your own risk**:
the id will be a non-standard 12 characters,
but appears to work fine in testing.
If a valid MAC address cannot be determined on this machine
(which is often the case when running on a VPS), an OSError is raised.
#TODO 2fa
"""
if android_id is None:
raise ValueError("android_id cannot be None.")
if android_id is self.FROM_MAC_ADDRESS:
mac_int = getmac()
if (mac_int >> 40) % 2:
raise OSError("a valid MAC could not be determined."
" Provide an android_id (and be"
" sure to provide the same one on future runs).")
android_id = utils.create_mac_string(mac_int)
android_id = android_id.replace(':', '')
if not self.session.login(email, password, android_id):
self.logger.info("failed to authenticate")
return False
self.android_id = android_id
self.logger.info("authenticated")
return True
# TODO expose max/page-results, updated_after, etc for list operations
def set_authtoken(self, authtoken):
return self.session.set_authtoken(authtoken)
def get_all_songs(self, incremental=False, include_deleted=False):
"""Returns a list of dictionaries that each represent a song.
:param incremental: if True, return a generator that yields lists
of at most 1000 tracks
as they are retrieved from the server. This can be useful for
presenting a loading bar to a user.
:param include_deleted: if True, include tracks that have been deleted
in the past.
Here is an example song dictionary::
{
'comment':'',
'rating':'0',
'albumArtRef':[
{
'url': 'http://lh6.ggpht.com/...'
}
],
'artistId':[
'Aod62yyj3u3xsjtooghh2glwsdi'
],
'composer':'',
'year':2011,
'creationTimestamp':'1330879409467830',
'id':'5924d75a-931c-30ed-8790-f7fce8943c85',
'album':'Heritage ',
'totalDiscCount':0,
'title':'Haxprocess',
'recentTimestamp':'1372040508935000',
'albumArtist':'',
'trackNumber':6,
'discNumber':0,
'deleted':False,
'storeId':'Txsffypukmmeg3iwl3w5a5s3vzy',
'nid':'Txsffypukmmeg3iwl3w5a5s3vzy',
'totalTrackCount':10,
'estimatedSize':'17229205',
'albumId':'Bdkf6ywxmrhflvtasnayxlkgpcm',
'beatsPerMinute':0,
'genre':'Progressive Metal',
'playCount':7,
'artistArtRef':[
{
'url': 'http://lh3.ggpht.com/...'
}
],
'kind':'sj#track',
'artist':'Opeth',
'lastModifiedTimestamp':'1330881158830924',
'clientId':'+eGFGTbiyMktbPuvB5MfsA',
'durationMillis':'418000'
}
"""
tracks = self._get_all_items(mobileclient.ListTracks, incremental, include_deleted)
return tracks
@utils.accept_singleton(dict)
@utils.empty_arg_shortcircuit
def change_song_metadata(self, songs):
"""Changes the metadata of tracks.
Returns a list of the song ids changed.
:param songs: a list of song dictionaries
or a single song dictionary.
Currently, only the ``rating`` key can be changed.
Set it to ``'0'`` (no thumb), ``'1'`` (down thumb), or ``'5'`` (up thumb)
unless you're using the 5-star ratings lab.
You can also use this to rate All Access tracks
that aren't in your library, eg::
song = mc.get_track_info('<some store track id>')
song['rating'] = '5'
mc.change_song_metadata(song)
"""
mutate_call = mobileclient.BatchMutateTracks
mutations = [{'update': s} for s in songs]
self._make_call(mutate_call, mutations)
# TODO
# store tracks don't send back their id, so we're
# forced to spoof this
return [utils.id_or_nid(d) for d in songs]
def increment_song_playcount(self, song_id, plays=1, playtime=None):
"""Increments a song's playcount and returns its song id.
:param song_id: a song id. Providing the id of an AA track
that has been added to the library will *not* increment the
corresponding library song's playcount. To do this, use the
'id' field (which looks like a uuid and doesn't begin with 'T'),
not the 'nid' field.
:param plays: (optional) positive number of plays to increment by.
The default is 1.
:param playtime: (optional) a datetime.datetime of the
time the song was played.
It will default to the time of the call.
"""
if playtime is None:
playtime = datetime.datetime.now()
self._make_call(mobileclient.IncrementPlayCount, song_id, plays, playtime)
return song_id
@utils.enforce_id_param
def add_aa_track(self, aa_song_id):
"""Adds an All Access track to the library,
returning the library track id.
:param aa_song_id: All Access song id
"""
# TODO is there a way to do this on multiple tracks at once?
# problem is with gathering aa track info
aa_track_info = self.get_track_info(aa_song_id)
mutate_call = mobileclient.BatchMutateTracks
add_mutation = mutate_call.build_track_add(aa_track_info)
res = self._make_call(mutate_call, [add_mutation])
return res['mutate_response'][0]['id']
@utils.accept_singleton(basestring)
@utils.enforce_ids_param
@utils.empty_arg_shortcircuit
def delete_songs(self, library_song_ids):
"""Deletes songs from the library.
Returns a list of deleted song ids.
:param song_ids: a list of song ids, or a single song id.
"""
mutate_call = mobileclient.BatchMutateTracks
del_mutations = mutate_call.build_track_deletes(library_song_ids)
res = self._make_call(mutate_call, del_mutations)
return [d['id'] for d in res['mutate_response']]
@utils.enforce_id_param
def get_stream_url(self, song_id, device_id=None, quality='hi'):
"""Returns a url that will point to an mp3 file.
:param song_id: a single song id
:param device_id: (optional) defaults to ``android_id`` from login.
Otherwise, provide a mobile device id as a string.
Android device ids are 16 characters, while iOS ids
are uuids with 'ios:' prepended.
If you have already used Google Music on a mobile device,
:func:`Mobileclient.get_registered_devices
<gmusicapi.clients.Mobileclient.get_registered_devices>` will provide
at least one working id. Omit ``'0x'`` from the start of the string if present.
Registered computer ids (a MAC address) will not be accepted and will 403.
Providing an unregistered mobile device id will register it to your account,
subject to Google's `device limits
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=1230356>`__.
**Registering a device id that you do not own is likely a violation of the TOS.**
:param quality: (optional) stream bits per second quality
One of three possible values, hi: 320kbps, med: 160kbps, low: 128kbps.
The default is hi
When handling the resulting url, keep in mind that:
* you will likely need to handle redirects
* the url expires after a minute
* only one IP can be streaming music at once.
This can result in an http 403 with
``X-Rejected-Reason: ANOTHER_STREAM_BEING_PLAYED``.
The file will not contain metadata.
Use :func:`Webclient.get_song_download_info
<gmusicapi.clients.Webclient.get_song_download_info>`
or :func:`Musicmanager.download_song
<gmusicapi.clients.Musicmanager.download_song>`
to download files with metadata.
"""
if device_id is None:
device_id = self.android_id
if len(device_id) == 16 and re.match('^[a-z0-9]*$', device_id):
# android device ids are now sent in base 10
device_id = str(int(device_id, 16))
return self._make_call(mobileclient.GetStreamUrl, song_id, device_id, quality)
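# A hedged usage sketch (credentials and the song id below are placeholders, not real
# values):
#
#   mc = Mobileclient()
#   mc.login('user@example.com', 'app-specific-password', Mobileclient.FROM_MAC_ADDRESS)
#   url = mc.get_stream_url(some_song_id, quality='med')
#   # follow redirects and fetch the mp3 within about a minute, e.g. with requests.get(url)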
def get_all_playlists(self, incremental=False, include_deleted=False):
"""Returns a list of dictionaries that each represent a playlist.
:param incremental: if True, return a generator that yields lists
of at most 1000 playlists
as they are retrieved from the server. This can be useful for
presenting a loading bar to a user.
:param include_deleted: if True, include playlists that have been deleted
in the past.
Here is an example playlist dictionary::
{
# can also be SHARED (public/subscribed to), MAGIC or omitted
'type': 'USER_GENERATED',
'kind': 'sj#playlist',
'name': 'Something Mix',
'deleted': False,
'lastModifiedTimestamp': '1325458766483033',
'recentTimestamp': '1325458766479000',
'shareToken': '<long string>',
'ownerProfilePhotoUrl': 'http://lh3.googleusercontent.com/...',
'ownerName': '<NAME>',
'accessControlled': False, # has to do with shared playlists
'creationTimestamp': '1325285553626172',
'id': '3d72c9b5-baad-4ff7-815d-cdef717e5d61'
}
"""
playlists = self._get_all_items(mobileclient.ListPlaylists, incremental, include_deleted)
return playlists
# these could trivially support multiple creation/edits/deletion, but
# I chose to match the old webclient interface (at least for now).
def create_playlist(self, name, description=None, public=False):
"""Creates a new empty playlist and returns its id.
:param name: the desired title.
Creating multiple playlists with the same name is allowed.
:param description: (optional) the desired description
:param public: if True, create a public All Access playlist.
"""
share_state = 'PUBLIC' if public else 'PRIVATE'
mutate_call = mobileclient.BatchMutatePlaylists
add_mutations = mutate_call.build_playlist_adds([{'name': name,
'description': description,
'public': share_state}])
res = self._make_call(mutate_call, add_mutations)
return res['mutate_response'][0]['id']
@utils.enforce_id_param
def edit_playlist(self, playlist_id, new_name=None, new_description=None, public=None):
"""Changes the name of a playlist and returns its id.
:param playlist_id: the id of the playlist
:param new_name: (optional) desired title
:param new_description: (optional) desired description
:param public: (optional) if True and the user has All Access, share playlist.
"""
if all(value is None for value in (new_name, new_description, public)):
raise ValueError('new_name, new_description, or public must be provided')
if public is None:
share_state = public
else:
share_state = 'PUBLIC' if public else 'PRIVATE'
mutate_call = mobileclient.BatchMutatePlaylists
update_mutations = mutate_call.build_playlist_updates([
{'id': playlist_id, 'name': new_name,
'description': new_description, 'public': share_state}
])
res = self._make_call(mutate_call,
<filename>ludwig/combiners/combiners.py<gh_stars>1-10
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
from typing import List
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import concatenate
from ludwig.encoders.sequence_encoders import ParallelCNN
from ludwig.encoders.sequence_encoders import StackedCNN
from ludwig.encoders.sequence_encoders import StackedCNNRNN
from ludwig.encoders.sequence_encoders import StackedParallelCNN
from ludwig.encoders.sequence_encoders import StackedRNN
from ludwig.modules.attention_modules import TransformerStack
from ludwig.modules.fully_connected_modules import FCStack
from ludwig.modules.reduction_modules import SequenceReducer
from ludwig.modules.tabnet_modules import TabNet
from ludwig.utils.misc_utils import get_from_registry
from ludwig.utils.tf_utils import sequence_length_3D
logger = logging.getLogger(__name__)
class ConcatCombiner(tf.keras.Model):
def __init__(
self,
input_features=None,
fc_layers=None,
num_fc_layers=None,
fc_size=256,
use_bias=True,
weights_initializer='glorot_uniform',
bias_initializer='zeros',
weights_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
# weights_constraint=None,
# bias_constraint=None,
norm=None,
norm_params=None,
activation='relu',
dropout=0,
flatten_inputs=False,
residual=False,
**kwargs
):
super().__init__()
logger.debug(' {}'.format(self.name))
self.flatten_inputs = flatten_inputs
self.fc_stack = None
# todo future: this may be redundant, check
if fc_layers is None and \
num_fc_layers is not None:
fc_layers = []
for i in range(num_fc_layers):
fc_layers.append({'fc_size': fc_size})
if fc_layers is not None:
logger.debug(' FCStack')
self.fc_stack = FCStack(
layers=fc_layers,
num_layers=num_fc_layers,
default_fc_size=fc_size,
default_use_bias=use_bias,
default_weights_initializer=weights_initializer,
default_bias_initializer=bias_initializer,
default_weights_regularizer=weights_regularizer,
default_bias_regularizer=bias_regularizer,
default_activity_regularizer=activity_regularizer,
# default_weights_constraint=weights_constraint,
# default_bias_constraint=bias_constraint,
default_norm=norm,
default_norm_params=norm_params,
default_activation=activation,
default_dropout=dropout,
residual=residual,
)
if input_features and len(input_features) == 1 and fc_layers is None:
self.supports_masking = True
def call(
self,
inputs, # encoder outputs
training=None,
mask=None,
**kwargs
):
encoder_outputs = [inputs[k]['encoder_output'] for k in inputs]
# ================ Flatten ================
if self.flatten_inputs:
batch_size = tf.shape(encoder_outputs[0])[0]
encoder_outputs = [
tf.reshape(eo, [batch_size, -1]) for eo in encoder_outputs
]
# ================ Concat ================
if len(encoder_outputs) > 1:
hidden = concatenate(encoder_outputs, -1)
else:
hidden = list(encoder_outputs)[0]
# ================ Fully Connected ================
if self.fc_stack is not None:
hidden = self.fc_stack(
hidden,
training=training,
mask=mask
)
return_data = {'combiner_output': hidden}
if len(inputs) == 1:
for key, value in [d for d in inputs.values()][0].items():
if key != 'encoder_output':
return_data[key] = value
return return_data
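# A minimal usage sketch of ConcatCombiner (the batch size and feature widths below
# are illustrative assumptions):
#
#   combiner = ConcatCombiner(num_fc_layers=1, fc_size=64)
#   encoder_outputs = {
#       'num_feature': {'encoder_output': tf.random.normal([32, 16])},
#       'cat_feature': {'encoder_output': tf.random.normal([32, 8])},
#   }
#   hidden = combiner(encoder_outputs)['combiner_output']  # shape (32, 64)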
class SequenceConcatCombiner(tf.keras.Model):
def __init__(
self,
reduce_output=None,
main_sequence_feature=None,
**kwargs
):
super().__init__()
logger.debug(' {}'.format(self.name))
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if self.reduce_output is None:
self.supports_masking = True
self.main_sequence_feature = main_sequence_feature
def __call__(
self,
inputs, # encoder outputs
training=None,
mask=None,
**kwargs
):
if (self.main_sequence_feature is None or
self.main_sequence_feature not in inputs):
for if_name, if_outputs in inputs.items():
# todo: when https://github.com/ludwig-ai/ludwig/issues/810 is closed
# convert following test from using shape to use explicit
# if_outputs[TYPE] values for sequence features
if len(if_outputs['encoder_output'].shape) == 3:
self.main_sequence_feature = if_name
break
if self.main_sequence_feature is None:
raise Exception(
'No sequence feature available for sequence combiner'
)
main_sequence_feature_encoding = inputs[self.main_sequence_feature]
representation = main_sequence_feature_encoding['encoder_output']
representations = [representation]
sequence_max_length = representation.shape[1]
sequence_length = sequence_length_3D(representation)
# ================ Concat ================
for if_name, if_outputs in inputs.items():
if if_name != self.main_sequence_feature:
if_representation = if_outputs['encoder_output']
if len(if_representation.shape) == 3:
# The following check makes sense when
# both representations have a specified
# sequence length dimension. If they do not,
# then this check is simply checking if None == None
# and will not catch discrepancies in the different
# feature length dimension. Those errors will show up
# at training time. Possible solutions to this is
# to enforce a length second dimension in
# sequential feature placeholders, but that
# does not work with BucketedBatcher that requires
# the second dimension to be undefined in order to be
# able to trim the data points and speed up computation.
# So for now we are keeping things like this, make sure
# to write in the documentation that training time
# dimensions mismatch may occur if the sequential
# features have different lengths for some data points.
if if_representation.shape[1] != representation.shape[1]:
raise ValueError(
'The sequence length of the input feature {} '
'is {} and is different from the sequence '
'length of the main sequence feature {} which '
'is {}.\n Shape of {}: {}, shape of {}: {}.\n'
'Sequence lengths of all sequential features '
'must be the same in order to be concatenated '
'by the sequence concat combiner. '
'Try to impose the same max sequence length '
'as a preprocessing parameter to both features '
'or to reduce the output of {}.'.format(
if_name,
if_representation.shape[1],
self.main_sequence_feature,
representation.shape[1],
if_name,
if_representation.shape,
if_name,
representation.shape,
if_name
)
)
# this assumes all sequence representations have the
# same sequence length, 2nd dimension
representations.append(if_representation)
elif len(if_representation.shape) == 2:
multipliers = tf.constant([1, sequence_max_length, 1])
tiled_representation = tf.tile(
tf.expand_dims(if_representation, 1),
multipliers
)
representations.append(tiled_representation)
else:
raise ValueError(
'The representation of {} has rank {} and cannot be'
' concatenated by a sequence concat combiner. '
'Only rank 2 and rank 3 tensors are supported.'.format(
if_outputs['name'],
len(if_representation.shape)
)
)
hidden = tf.concat(representations, 2)
logger.debug(' concat_hidden: {0}'.format(hidden))
# ================ Mask ================
# todo future: maybe modify this with TF2 mask mechanics
sequence_mask = tf.sequence_mask(
sequence_length,
sequence_max_length
)
hidden = tf.multiply(
hidden,
tf.cast(tf.expand_dims(sequence_mask, -1), dtype=tf.float32)
)
# ================ Reduce ================
hidden = self.reduce_sequence(hidden)
return_data = {'combiner_output': hidden}
if len(inputs) == 1:
for key, value in [d for d in inputs.values()][0].items():
if key != 'encoder_output':
return_data[key] = value
return return_data
class SequenceCombiner(tf.keras.Model):
def __init__(
self,
reduce_output=None,
main_sequence_feature=None,
encoder=None,
**kwargs
):
super().__init__()
logger.debug(' {}'.format(self.name))
self.combiner = SequenceConcatCombiner(
reduce_output=None,
main_sequence_feature=main_sequence_feature
)
self.encoder_obj = get_from_registry(
encoder, sequence_encoder_registry)(
should_embed=False,
reduce_output=reduce_output,
**kwargs
)
if (hasattr(self.encoder_obj, 'supports_masking') and
self.encoder_obj.supports_masking):
self.supports_masking = True
def __call__(
self,
inputs, # encoder outputs
training=None,
mask=None,
**kwargs
):
# ================ Concat ================
hidden = self.combiner(
inputs, # encoder outputs
training=training,
**kwargs
)
# ================ Sequence encoding ================
hidden = self.encoder_obj(
hidden['combiner_output'],
training=training,
**kwargs
)
return_data = {'combiner_output': hidden['encoder_output']}
for key, value in hidden.items():
if key != 'encoder_output':
return_data[key] = value
return return_data
class TabNetCombiner(tf.keras.Model):
def __init__(
self,
size: int, # N_a in the paper
output_size: int, # N_d in the paper
num_steps: int = 1, # N_steps in the paper
num_total_blocks: int = 4,
num_shared_blocks: int = 2,
relaxation_factor: float = 1.5, # gamma in the paper
bn_epsilon: float = 1e-3,
bn_momentum: float = 0.7, # m_B in the paper
bn_virtual_bs: int = None, # B_v from the paper
sparsity: float = 1e-5, # lambda_sparse in the paper
dropout=0,
**kwargs
):
super().__init__()
logger.debug(' {}'.format(self.name))
self.tabnet = TabNet(
size=size,
output_size=output_size,
num_steps=num_steps,
num_total_blocks=num_total_blocks,
num_shared_blocks=num_shared_blocks,
relaxation_factor=relaxation_factor,
bn_epsilon=bn_epsilon,
bn_momentum=bn_momentum,
bn_virtual_bs=bn_virtual_bs,
sparsity=sparsity
)
if dropout > 0:
self.dropout = tf.keras.layers.Dropout(dropout)
else:
self.dropout = None
def call(
self,
inputs, # encoder outputs
training=None,
mask=None,
**kwargs
):
encoder_outputs = [inputs[k]['encoder_output'] for k in inputs]
# ================ Flatten ================
batch_size = tf.shape(encoder_outputs[0])[0]
encoder_outputs = [
tf.reshape(eo, [batch_size, -1]) for eo in encoder_outputs
]
# ================ Concat ================
if len(encoder_outputs) > 1:
hidden = concatenate(encoder_outputs, 1)
else:
hidden = list(encoder_outputs)[0]
# ================ TabNet ================
hidden, aggregated_mask, masks = self.tabnet(
hidden,
training=training,
)
if self.dropout:
hidden = self.dropout(hidden, training=training)
return_data = {'combiner_output': hidden,
'aggregated_attention_masks': aggregated_mask,
'attention_masks': masks}
if len(inputs) == 1:
for key, value in [d for d in inputs.values()][0].items():
if key != 'encoder_output':
return_data[key] = value
return return_data
class TransformerCombiner(tf.keras.Model):
def __init__(
self,
input_features=None,
num_layers=1,
hidden_size=256,
num_heads=8,
transformer_fc_size=256,
dropout=0.1,
fc_layers=None,
num_fc_layers=0,
fc_size=256,
use_bias=True,
weights_initializer='glorot_uniform',
bias_initializer='zeros',
weights_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
# weights_constraint=None,
# bias_constraint=None,
norm=None,
norm_params=None,
fc_activation='relu',
fc_dropout=0,
fc_residual=False,
reduce_output='mean',
**kwargs
):
super().__init__()
logger.debug(' {}'.format(self.name))
self.reduce_output = reduce_output
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
if self.reduce_output is None:
self.supports_masking = True
logger.debug(' Projectors')
self.projectors = [Dense(hidden_size) for _ in input_features]
logger.debug(' TransformerStack')
self.transformer_stack = TransformerStack(
hidden_size=hidden_size,
num_heads=num_heads,
fc_size=transformer_fc_size,
num_layers=num_layers,
dropout=dropout
)
if self.reduce_output is not None:
logger.debug(' FCStack')
self.fc_stack = FCStack(
layers=fc_layers,
num_layers=num_fc_layers,
default_fc_size=fc_size,
default_use_bias=use_bias,
default_weights_initializer=weights_initializer,
default_bias_initializer=bias_initializer,
default_weights_regularizer=weights_regularizer,
default_bias_regularizer=bias_regularizer,
default_activity_regularizer=activity_regularizer,
# default_weights_constraint=weights_constraint,
# default_bias_constraint=bias_constraint,
default_norm=norm,
default_norm_params=norm_params,
default_activation=fc_activation,
default_dropout=fc_dropout,
fc_residual=fc_residual,
)
def call(
self,
inputs, # encoder outputs
training=None,
mask=None,
**kwargs
):
encoder_outputs = [inputs[k]['encoder_output'] for k in inputs]
# ================ Flatten ================
batch_size = tf.shape(encoder_outputs[0])[0]
encoder_outputs = [
tf.reshape(eo, [batch_size, -1]) for eo in encoder_outputs
]
# ================ Project & Concat ================
projected
<filename>examples/fixed_axis_tsne.py
"""
Copyright 2020 heucoder
@ https://github.com/heucoder/dimensionality_reduction_alo_codes/blob/master/codes/T-SNE/TSNE.py
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Access date: Aug 2021
Changes made from the original code:
1. Translated the Chinese comments into English for development convenience.
2. Added parameters and related features (mainly around lines 150 - 200) in tsne() to support
the fixed-axis technique.
3. Added functions used by the fixed-axis technique (mainly around lines 208 - 290).
4. Changed the running script into a fixed-axis technique example (mainly around lines 290 - 360).
"""
# coding:utf-8
import math
from datetime import timedelta
from timeit import default_timer as timer
from typing import Optional, List, Tuple
import numba
import numpy as np
import sklearn.datasets as datasets
from sklearn.preprocessing import MinMaxScaler
def cal_pairwise_dist(x):
    '''Calculate pairwise squared distances; x is a data matrix (one row per point)
(a-b)^2 = a^2 + b^2 - 2*a*b
'''
sum_x = np.sum(np.square(x), 1)
dist = np.add(np.add(-2 * np.dot(x, x.T), sum_x).T, sum_x)
return dist
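# Illustrative check (added, not in the original file): cal_pairwise_dist returns
# *squared* Euclidean distances via the identity above. For a = (0, 0) and
# b = (3, 4) the off-diagonal entries are 25 (i.e. 3**2 + 4**2), not 5:
#
#   >>> cal_pairwise_dist(np.array([[0.0, 0.0], [3.0, 4.0]]))
#   array([[ 0., 25.],
#          [25.,  0.]])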
def cal_perplexity(dist, idx=0, beta=1.0):
    '''Calculate perplexity-related quantities. dist is the distance vector for one point,
    idx is the index of the point itself (its self-probability is zeroed out),
    beta is the precision parameter of the Gaussian kernel
'''
prob = np.exp(-dist * beta)
prob[idx] = 0
sum_prob = np.sum(prob)
if sum_prob < 1e-12:
prob = np.maximum(prob, 1e-12)
perp = -12
else:
perp = np.log(sum_prob) + beta * np.sum(dist * prob) / sum_prob
prob /= sum_prob
return perp, prob
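# Note (added for clarity): the 'perp' returned above is the Shannon entropy
# H_i (in nats) of the conditional distribution p_{j|i} built with precision
# beta, i.e. H_i = log(sum_j exp(-beta * d_ij)) + beta * sum_j d_ij * p_{j|i}.
# search_prob below tunes beta for each point so that H_i matches
# log(perplexity), which is why it compares against np.log(perplexity).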
def search_prob(x, tol=1e-5, perplexity=30.0):
    '''Use binary search to find beta for each point,
    then calculate the pairwise conditional probabilities
'''
# initialize parameters
print("Computing pairwise distances...")
(n, d) = x.shape
dist = cal_pairwise_dist(x)
dist[dist < 0] = 0
pair_prob = np.zeros((n, n))
beta = np.ones((n, 1))
# Here use the log value to make the later calculation easier
base_perp = np.log(perplexity)
for i in range(n):
if i % 500 == 0:
print("Computing pair_prob for point %s of %s ..." % (i, n))
betamin = -np.inf
betamax = np.inf
perp, this_prob = cal_perplexity(dist[i], i, beta[i])
        # Use binary search to find the prob under the best sigma
perp_diff = perp - base_perp
tries = 0
while np.abs(perp_diff) > tol and tries < 50:
if perp_diff > 0:
betamin = beta[i].copy()
if betamax == np.inf or betamax == -np.inf:
beta[i] = beta[i] * 2
else:
beta[i] = (beta[i] + betamax) / 2
else:
betamax = beta[i].copy()
if betamin == np.inf or betamin == -np.inf:
beta[i] = beta[i] / 2
else:
beta[i] = (beta[i] + betamin) / 2
# update the value for perb and prob
perp, this_prob = cal_perplexity(dist[i], i, beta[i])
perp_diff = perp - base_perp
tries = tries + 1
# record the value for prob
pair_prob[i,] = this_prob
print("Mean value of sigma: ", np.mean(np.sqrt(1 / beta)))
return pair_prob
def tsne(x, no_dims=2, perplexity=30.0, max_iter=1000,
fix_column_to_z_projection_axis: Optional[int] = None,
drop_columns_from_dataset: Optional[List[int]] = None,
scaler: Optional[Tuple[int, int]] = (0, 1)):
"""Runs t-SNE on the dataset in the NxD array x
to reduce its dimensionality to no_dims dimensions.
    The syntax of the function is Y = tsne.tsne(x, no_dims, perplexity),
where x is an NxD NumPy array.
"""
start = timer()
# Check inputs
if isinstance(no_dims, float):
print("Error: array x should have type float.")
return -1
if scaler is not None:
x = scale_dataset(x, scaler)
z_axis_fixed = x[:, fix_column_to_z_projection_axis]
x = preprocess_data(data=x, drop_columns_from_dataset=drop_columns_from_dataset)
(n, d) = x.shape
# Momentum
initial_momentum = 0.5
final_momentum = 0.8
eta = 500
min_gain = 0.01
# Randomly initiate y
y = np.random.randn(n, no_dims)
dy = np.zeros((n, no_dims))
iy = np.zeros((n, no_dims))
gains = np.ones((n, no_dims))
# Symmetrize
    P = search_prob(x, 1e-5, perplexity)
P = P + np.transpose(P)
P = P / np.sum(P) # pij
# early exaggeration
P = P * 4
P = np.maximum(P, 1e-12)
if fix_column_to_z_projection_axis is not None:
force_projection_dimensions = np.arange(no_dims - 1)
y[:, no_dims - 1] = z_axis_fixed
else:
force_projection_dimensions = np.arange(no_dims)
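    # Note (added for clarity): when fix_column_to_z_projection_axis is set, the
    # last embedding coordinate is pinned to the (scaled) values of that column
    # and only the remaining no_dims - 1 coordinates are updated by the gradient
    # loop below; force_projection_dimensions lists the coordinates that move.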
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_y = np.sum(np.square(y), 1)
num = 1 / (1 + np.add(np.add(-2 * np.dot(y, y.T), sum_y).T, sum_y))
num[range(n), range(n)] = 0
Q = num / np.sum(num) # qij
Q = np.maximum(Q, 1e-12)
# Compute gradient
# pij-qij
PQ = P - Q
for i in range(n):
dy[i, :] = np.sum(np.tile(PQ[:, i] * num[:, i], (no_dims, 1)).T * (y[i, :] - y), 0)
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dy > 0) != (iy > 0)) + (gains * 0.8) * ((dy > 0) == (iy > 0))
gains[gains < min_gain] = min_gain
# iteration
iy = momentum * iy - eta * (gains * dy)
for inst in range(len(y)):
for index in force_projection_dimensions:
y[inst][index] = y[inst][index] + iy[inst][index]
y[inst][index] = y[inst][index] - np.tile(np.mean(y, 0), (n, 1))[inst][index]
        # Compute the current value of the cost function
if (iter + 1) % 100 == 0:
C = np.sum(P * np.log(P / Q))
print("Iteration ", (iter + 1), ": error is ", C)
if (iter+1) != 100:
ratio = C/oldC
print("ratio ", ratio)
if ratio >= 0.95:
break
oldC = C
# Stop lying about P-values
if iter == 100:
P = P / 4
end = timer()
print(f'Time elapsed: {timedelta(seconds=end - start)}')
return y
# Below are the functions used during tsne processing
def create_triangular_distance_matrix(
data):
distance_matrix = np.zeros(int((data.shape[0] + 1) * data.shape[0] / 2))
size = len(data)
k = 0
for i in range(size):
for j in range(i, size):
distance_matrix[k] = euclidean(data[i], data[j])
k = k + 1
return distance_matrix
def euclidean(x, y):
"""Standard euclidean distance.
..math::
D(x, y) = \sqrt{\sum_i (x_i - y_i)^2}
"""
result = 0.0
for i in range(x.shape[0]):
result += (x[i] - y[i]) ** 2
return np.sqrt(result)
def kruskal_stress(distance_matrix, projection):
size = len(projection)
total = len(distance_matrix)
den = 0
num = 0
for i in numba.prange(size):
for j in numba.prange(size):
dr2 = euclidean(projection[i], projection[j])
r = (i + j - math.fabs(i - j)) / 2 # min(i,j)
s = (i + j + math.fabs(i - j)) / 2 # max(i,j)
drn = distance_matrix[int(total - ((size - r) * (size - r + 1) / 2) + (s - r))]
num += (drn - dr2) * (drn - dr2)
den += drn * drn
return math.sqrt(num / den)
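# Illustrative note (added, not in the original file): create_triangular_distance_matrix
# stores the upper triangle (including the diagonal) row by row, so with
# total = size * (size + 1) / 2 the pair (r, s) with r <= s lives at index
# total - (size - r) * (size - r + 1) / 2 + (s - r), which is the lookup
# kruskal_stress performs above. For size = 3 the layout is
#   (0,0)->0, (0,1)->1, (0,2)->2, (1,1)->3, (1,2)->4, (2,2)->5
# and, for example, (1,2) -> 6 - 3 + 1 = 4.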
def scale_dataset(data, feature_range):
"""
Helper function for scaling/normalizing
"""
scaler = MinMaxScaler(feature_range=feature_range)
data = scaler.fit_transform(data)
"""
data_scalared = data
scaler = StandardScaler()
data = scaler.fit_transform(data, data_scalared)
"""
return data
def preprocess_data(data,
drop_columns_from_dataset=None):
"""
    Helper function for preprocessing the data.
    :param data: input data as an ndarray of shape (n_samples, n_features)
    :param drop_columns_from_dataset: indices of columns to remove before projection
    :return: ndarray with the requested columns dropped (the data unchanged if None)
"""
X = data
if drop_columns_from_dataset is not None:
X = np.delete(data, drop_columns_from_dataset, axis=1)
return X
if __name__ == "__main__":
projection_n_dimensions = 3
# perplexity=85 works best for iris
perplexity = 45.0
max_iter = 300
drop_columns_from_dataset = [-1]
scaler = (0, 1)
fix_column_to_z_projection_axis = -1
plot = True
# data = np.loadtxt('./datasets/mammals.data', delimiter=",")
# data = pd.read_csv('./datasets/whr2019.csv', delimiter=",").values
# data = np.concatenate((datasets.load_iris().data.T, [datasets.load_iris().target.T])).T
data = np.concatenate((datasets.load_breast_cancer().data.T,[datasets.load_breast_cancer().target.T])).T
# data = np.concatenate((datasets.load_boston().data.T,[datasets.load_boston().target.T])).T
# data = np.tile(data, (100, 1)) # use this make the dataset 100x larger for performance test
################
x = preprocess_data(data=data, drop_columns_from_dataset=drop_columns_from_dataset)
dist_for_stress = create_triangular_distance_matrix(x)
    # normally perplexity should be between 5 and 50,
    # but for iris a perplexity of 85 led to the smallest Kruskal stress
projection = tsne(data, no_dims=projection_n_dimensions, perplexity=perplexity, max_iter=max_iter,
fix_column_to_z_projection_axis=fix_column_to_z_projection_axis,
drop_columns_from_dataset=drop_columns_from_dataset,
scaler=scaler)
stress = kruskal_stress(dist_for_stress, projection)
print("kruskal stress is: ", stress)
if plot:
if projection_n_dimensions == 2:
# show projection
import plotly.graph_objects as go
            import numpy
<gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
#import os
import sys
sys.path.insert(0,'/home/joshi/fixed_point_edits')
import os
import absl
from tensorflow.python.ops import parallel_for as pfor
from FixedPointStore import *
import tensorflow as tf
import horovod.tensorflow as hvd
#import cProfile
# %tensorflow_version 1.x magic
#import matplotlib.pyplot as plt
import numpy.random as nrand
np.random.seed(0)
# import numpy as np
import time
from AdaptiveGradNormClip import AdaptiveGradNormClip
from AdaptiveLearningRate import AdaptiveLearningRate
class FixedPointSearch:
def __init__(self,
ctype,
states,
savepath,
cell=None,
sess=None,
max_iters = 5000,
max_n_unique = np.inf,
tol_q = 1e-12,
tol_dq = 1e-20,
adaptive_learning_rate_hps = {},
grad_norm_clip_hps = {},
adam_optimizer_hps = {'epsilon': 0.01},
exclude_dis_outliers = True,
outlier_distance_scale = 10.0,
rerun_q_outliers = True,
run_additional_iterations_on_outliers = True,
outlier_q_scale = 10.0
):
self.max_iters = max_iters
self.ctype = ctype
self.dtype = np.float32
self.tol_q = tol_q
self.savepath = savepath
self.tol_dq = tol_dq
self.adaptive_learning_rate_hps = adaptive_learning_rate_hps
self.grad_norm_clip_hps =grad_norm_clip_hps
self.adam_optimizer_hps = adam_optimizer_hps
self.outlier_q_scale = outlier_q_scale
self.outlier_distance_scale = outlier_distance_scale
self.states = states
self.bits = 3
self.max_n_unique = max_n_unique
self.rerun_q_outliers = rerun_q_outliers
self.sampled_states = 0
self.cell = cell
self.is_root = False
self.uniq_tol = 1e-3
self.decompose_jacobians = True
self.compute_jacobians = True
self.sess = sess
self.exclude_dis_outliers = exclude_dis_outliers
self.run_additional_iterations_on_outliers = run_additional_iterations_on_outliers
def convert_from_lstm_tuples(self, lstm):
c = lstm.c
h = lstm.h
# print(c.shape)
rank = len(lstm.c.shape)
axis = rank -1
if(tf.is_numeric_tensor(c)):
return tf.concat((c,h),axis=axis)
else:
return np.concatenate((c,h),axis=axis)
def convert_to_lstm_tuples(self, lstm):
array = lstm
rank = len(array.shape)
dim = array.shape[rank-1]
if dim%2 ==0:
conc_dim = dim//2
else:
raise ValueError("Dimentions are not even")
if rank == 3:
c = array[:,:,:conc_dim]
h = array[:,:,conc_dim:]
elif rank == 2:
c = array[:,:conc_dim]
h = array[:,conc_dim:]
return tf.nn.rnn_cell.LSTMStateTuple(c=c,h=h)
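    # Note (added for clarity): the two helpers above are inverses of each other.
    # convert_from_lstm_tuples turns an LSTMStateTuple(c, h) with c and h of
    # shape (..., n_units) into a single array of shape (..., 2 * n_units)
    # (c first, then h), and convert_to_lstm_tuples splits that last axis back
    # into equal halves, raising if the size is odd.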
def build_vars(self, init_states):
if self.ctype == 'LSTM':
c_h_init = self.convert_from_lstm_tuples(init_states)
x = tf.Variable(c_h_init,dtype=tf.float32)
x_rnn_cell = self.convert_to_lstm_tuples(x)
else:
x = tf.Variable(init_states,dtype=tf.float32)
x_rnn_cell = x
return x,x_rnn_cell
def maybe_convert(self, x_init):
if self.ctype=='LSTM':
return self.convert_from_lstm_tuples(x_init)
else:
return x_init
def get_rnn(self, init_states, inputs):
# print('inside get rnn')
x, x_rnn = self.build_vars(init_states)
inputs = tf.constant(inputs,dtype=tf.float32)
# print('before cell')
output, F_rnn = self.cell(inputs,x_rnn)
# print('before cell')
if self.ctype == 'LSTM':
F = self.convert_from_lstm_tuples(F_rnn)
else:
F = F_rnn
init = tf.variables_initializer(var_list=[x])
self.sess.run(init)
return x, F
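    # Note (added for clarity): get_rnn wraps the candidate states in a trainable
    # tf.Variable x and returns F, the state obtained after one RNN step from x
    # with the given inputs. A fixed point satisfies F(x*) ~= x*, so the
    # optimization in run_iteration_loops below minimizes q = 0.5 * ||F(x) - x||^2
    # with respect to x.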
def compute_input_jacobians(self, fps):
def grab_RNN_for_dFdu(initial_states, inputs):
x, x_rnn = self.build_vars(initial_states)
inputs = tf.Variable(inputs,dtype=tf.float32)
output, F_rnn = self.cell(inputs,x_rnn)
if self.ctype == 'LSTM':
F = self.convert_from_lstm_tuples(F_rnn)
else:
F = F_rnn
init = tf.variables_initializer(var_list = [x, inputs])
self.sess.run(init)
return inputs, F
inputs_np = fps.inputs
if self.ctype == 'LSTM':
states_np = self.convert_to_lstm_tuples(fps.xstar)
else:
states_np = fps.xstar
inputs, F_tf = grab_RNN_for_dFdu(states_np, inputs_np)
try:
J_tf = pfor.batch_jacobian(F_tf, inputs)
except absl.flags._exceptions.UnparsedFlagAccessError:
            J_tf = pfor.batch_jacobian(F_tf, inputs, use_pfor=False)
J_np = self.sess.run(J_tf)
return J_np, J_tf
def compute_recurrent_jacobians(self, fps):
inputs = fps.inputs
if self.ctype == 'LSTM':
# print('line2')
states_np = self.convert_to_lstm_tuples(fps.xstar)
# print('line3')
else:
# print('line4')
states_np = fps.xstar
# print('line6')
x_tf,F_tf = self.get_rnn(states_np,inputs)
# print('line5')
try:
if self.is_root:
print('batch jacobians')
J_tf = pfor.batch_jacobian(F_tf,x_tf)
except absl.flags._exceptions.UnparsedFlagAccessError:
J_tf = pfor.batch_jacobian(F_tf, x_tf, use_pfor=False)
if self.is_root:
print('running cells')
J_np = self.sess.run(J_tf)
if self.is_root:
print('out of batch jacobians')
return J_np, J_tf
def sample_states(self, init_size, state_matrix,c_type, noise):
if c_type =='LSTM':
matrix = self.convert_from_lstm_tuples(state_matrix)
else:
matrix = state_matrix
[n_batch, n_time, n_states] = matrix.shape
valid_idx = np.ones((n_batch, n_time), dtype=np.bool)
(trial_idx, time_idx) = np.nonzero(valid_idx)
min_index = min(len(trial_idx),len(time_idx))
sample_indices = nrand.RandomState(200).randint(0, high = min_index, size = [init_size])
trial_idx = trial_idx[sample_indices]
time_idx = time_idx[sample_indices]
states = np.zeros([init_size, n_states])
for i in range(init_size):
init_idx = trial_idx[i]
t_idx = time_idx[i]
states[i,:] = matrix[init_idx,t_idx,:]
if noise>0.0:
states = states + noise*np.random.randn(*states.shape)
if c_type == 'LSTM':
# print('this')
self.sampled_states = self.convert_to_lstm_tuples(states)
else:
self.sampled_states = states
def identify_distance_non_outliers(self, fps, initial_states, dist_thresh):
if self.ctype == 'LSTM':
initial_states = self.convert_from_lstm_tuples(initial_states)
num_inits = initial_states.shape[0]
n_fps = fps.num_inits
# Centroid of initial_states, shape (n_states,)
centroid = np.mean(initial_states, axis=0)
# Distance of each initial state from the centroid, shape (n,)
init_dists = np.linalg.norm(initial_states - centroid, axis=1)
avg_init_dist = np.mean(init_dists)
# Normalized distances of initial states to the centroid, shape: (n,)
scaled_init_dists = np.true_divide(init_dists, avg_init_dist)
# Distance of each FP from the initial_states centroid
fps_dists = np.linalg.norm(fps.xstar - centroid, axis=1)
# Normalized
scaled_fps_dists = np.true_divide(fps_dists, avg_init_dist)
init_non_outlier_idx = np.where(scaled_init_dists < dist_thresh)[0]
n_init_non_outliers = init_non_outlier_idx.size
if self.is_root:
print('\t\tinitial_states: %d outliers detected (of %d).'
% (num_inits - n_init_non_outliers, num_inits))
fps_non_outlier_idx = np.where(scaled_fps_dists < dist_thresh)[0]
n_fps_non_outliers = fps_non_outlier_idx.size
if self.is_root:
print('\t\tfixed points: %d outliers detected (of %d).'
% (n_fps - n_fps_non_outliers, n_fps))
return fps_non_outlier_idx
def exclude_dis_outliers_(self, fps, initial_states):
idx_keep = self.identify_distance_non_outliers(fps, initial_states, self.outlier_distance_scale)
return fps[idx_keep]
def identify_q_outliers(self, fps, q_thresh):
return np.where(fps.qstar > q_thresh)[0]
def _get_rnncell_compatible_states(self, states):
if self.ctype == 'LSTM':
return self.convert_to_lstm_tuples(states)
else:
return states
def run_additional_iterations_on_outliers_(self, fps):
def perform_outlier_optimization(fps, method):
idx_outliers = self.identify_q_outliers(fps, outlier_min_q)
n_outliers = len(idx_outliers)
outlier_fps = fps[idx_outliers]
n_prev_iters = outlier_fps.n_iters
inputs = outlier_fps.inputs
initial_states = self._get_rnncell_compatible_states(
outlier_fps.xstar)
if method == 'sequential':
updated_outlier_fps = self.run_sequential_optimization(
initial_states, inputs, q_prior=outlier_fps.qstar)
elif method == 'joint':
updated_outlier_fps = self.run_joint_optimization(initial_states, inputs)
else:
raise ValueError('Unsupported method: %s.' % method)
updated_outlier_fps.n_iters += n_prev_iters
fps[idx_outliers] = updated_outlier_fps
return fps
def outlier_update(fps):
idx_outliers = self.identify_q_outliers(fps, outlier_min_q)
n_outliers = len(idx_outliers)
# self._print_if_verbose('\n\tDetected %d putative outliers '
# '(q>%.2e).' % (n_outliers, outlier_min_q))
return idx_outliers
outlier_min_q = np.median(fps.qstar)*self.outlier_q_scale
idx_outliers = outlier_update(fps)
if len(idx_outliers) == 0:
return fps
fps = perform_outlier_optimization(fps, 'sequential')
outlier_update(fps) # For print output only
return fps
def run_iteration_loops(self, states, inputs, init_array):
def print_update(iter_count, q, dq, lr, is_final=False):
t = time.time()
t_elapsed = t - t_start
avg_iter_time = t_elapsed / iter_count
if is_final:
delimiter = '\n\t\t'
print('\t\t%d iters%s' % (iter_count, delimiter), end='')
else:
delimiter = ', '
print('\tIter: %d%s' % (iter_count, delimiter), end='')
if q.size == 1:
print('q = %.2e%sdq = %.2e%s' %
(q, delimiter, dq, delimiter), end='')
else:
mean_q = np.mean(q)
std_q = np.std(q)
mean_dq = np.mean(dq)
std_dq = np.std(dq)
print('q = %.2e +/- %.2e%s'
'dq = %.2e +/- %.2e%s' %
(mean_q, std_q, delimiter, mean_dq, std_dq, delimiter),
end='')
print('learning rate = %.2e%s' % (lr, delimiter), end='')
print('avg iter time = %.2e sec' % avg_iter_time, end='')
if is_final:
print('') # Just for the endline
else:
print('.')
x, F_cell = self.get_rnn(states, inputs)
q = 0.5 * tf.reduce_sum(tf.square(F_cell - x ))
q_scalar = tf.reduce_mean(q)
grads = tf.gradients(q_scalar, [x])
q_prev_tf = tf.placeholder(tf.float32, shape=list(q.shape), name='q_prev')
# when (q-q_prev) is negative, optimization is making progress
dq = tf.abs(q - q_prev_tf)
hps={}
# Optimizer
adaptive_learning_rate = AdaptiveLearningRate(**self.adaptive_learning_rate_hps)
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
adaptive_grad_norm_clip = AdaptiveGradNormClip(**self.grad_norm_clip_hps)
grad_norm_clip_val = tf.placeholder(tf.float32, name='grad_norm_clip_val')
# Gradient clipping
clipped_grads, grad_global_norm = tf.clip_by_global_norm(grads, grad_norm_clip_val)
clipped_grad_global_norm = tf.global_norm(clipped_grads)
clipped_grad_norm_diff = grad_global_norm - clipped_grad_global_norm
grads_to_apply = clipped_grads
# adam_hps = {'epsilon': 0.01}
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, **self.adam_optimizer_hps)
optimizer = hvd.DistributedOptimizer(optimizer)
train = optimizer.apply_gradients(zip(grads_to_apply, [x]))
# Initialize x and AdamOptimizer's auxiliary variables
uninitialized_vars = optimizer.variables()
init = tf.variables_initializer(var_list=uninitialized_vars)
self.sess.run(init)
ops_to_eval = [train,x, F_cell, q_scalar, q, dq, grad_global_norm]
iter_count = 1
t_start = time.time()
q_prev = np.tile(np.nan, q.shape.as_list())
rnn_cell_feed_dict = {}
while True:
# print('inside run iter loops')
iter_learning_rate = adaptive_learning_rate()
iter_clip_val = adaptive_grad_norm_clip()
feed_dict = {learning_rate: iter_learning_rate,
grad_norm_clip_val: iter_clip_val,
q_prev_tf: q_prev}
feed_dict.update(rnn_cell_feed_dict)
(ev_train,
ev_x,
ev_F,
ev_q_scalar,
ev_q,
ev_dq,
ev_grad_norm) = self.sess.run(ops_to_eval, feed_dict)
# print('doing iter count')
if iter_count > 1 and \
np.all(np.logical_or(
ev_dq < self.tol_dq*iter_learning_rate,
ev_q < self.tol_q)):
if self.is_root:
print('\tOptimization complete to desired tolerance.')
break
            if iter_count + 1 > self.max_iters:
if self.is_root:
print('\tMaximum iteration count reached. '
'Terminating.')
break
q_prev = ev_q
adaptive_learning_rate.update(ev_q_scalar)
adaptive_grad_norm_clip.update(ev_grad_norm)
iter_count += 1
# print_update(iter_count,
# ev_q, ev_dq,
# iter_learning_rate,
# is_final=True)
iter_count = np.tile(iter_count, ev_q.shape)
fixed_point = FixedPointStore(xstar = ev_x,
inputs = inputs,
dtype = self.dtype,
alloc_zeros = False,
x_init = self.maybe_convert(states),
F_xstar=ev_F,
qstar= ev_q,
dq=ev_dq,
n_iters = iter_count
)
return fixed_point
def find_shape(self, states):
if self.ctype == 'LSTM':
return (states.c.shape[0], states.c.shape[1]*2)
else:
return states.shape[0],states.shape[1]
def return_index(self, states, index):
if self.ctype=='LSTM':
c= states.c[index]
h = states.h[index]
return tf.nn.rnn_cell.LSTMStateTuple(c=c,h=h)
else:
return states[index]
def run_joint_optimization(self, initial_states, inputs):
def print_update(iter_count, q, dq, lr, is_final=False):
t = time.time()
t_elapsed = t - t_start
avg_iter_time = t_elapsed / iter_count
if is_final:
delimiter = '\n\t\t'
print('\t\t%d iters%s' % (iter_count, delimiter), end='')
else:
delimiter = ', '
print('\tIter: %d%s' % (iter_count, delimiter), end='')
if q.size == 1:
print('q = %.2e%sdq = %.2e%s' %
(q, delimiter, dq, delimiter), end='')
<gh_stars>0
# Copyright (c) 2017-2019, <NAME>
# Copyright (c) 2014-2018, iocage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""iocage release module."""
import typing
import hashlib
import os
import urllib.request
import urllib.error
import urllib.parse
import re
import libzfs
import libioc.ZFS
import libioc.errors
import libioc.helpers
import libioc.helpers_object
import libioc.events
import libioc.LaunchableResource
import libioc.ResourceSelector
import libioc.Jail
import libioc.SecureTarfile
# MyPy
import libioc.Resource
import libioc.Host
import libioc.Logger
import libioc.Config.Host
import libioc.Config.Jail.File.RCConf
import libioc.Config.Jail.File.SysctlConf
class ReleaseResource(libioc.LaunchableResource.LaunchableResource):
"""Resource that represents an iocage release."""
_release: typing.Optional['ReleaseGenerator']
_hashes: typing.Optional[typing.Dict[str, str]]
host: libioc.Host.HostGenerator
root_datasets_name: typing.Optional[str]
def __init__(
self,
dataset: typing.Optional[libzfs.ZFSDataset]=None,
dataset_name: typing.Optional[str]=None,
config_type: str="auto",
config_file: typing.Optional[str]=None,
logger: typing.Optional['libioc.Logger.Logger']=None,
zfs: typing.Optional[libioc.ZFS.ZFS]=None,
host: typing.Optional['libioc.Host.HostGenerator']=None,
release: typing.Optional['ReleaseGenerator']=None,
root_datasets_name: typing.Optional[str]=None,
) -> None:
self.host = libioc.helpers_object.init_host(self, host)
self.root_datasets_name = root_datasets_name
libioc.LaunchableResource.LaunchableResource.__init__(
self,
dataset=dataset,
dataset_name=dataset_name,
config_type=config_type,
config_file=config_file,
logger=logger,
zfs=zfs
)
self._release = release
self._hashes = None
@property
def release(self) -> 'ReleaseGenerator':
"""
Return the release instance that belongs to the resource.
Usually the resource becomes inherited from the Release itself.
It can still be used linked to a foreign ReleaseGenerator by passing
release as named attribute to the __init__ function
"""
if self._release is not None:
return self._release
elif isinstance(self, ReleaseGenerator):
return self
raise Exception(
"Resource is not a valid release itself and has no linked release"
)
@property
def full_name(self) -> str:
"""
Return the full identifier of a jail.
When more than one root dataset is managed by iocage, the full source
and name are returned. Otherwise just the name.
For example `mydataset/jailname` or just `jailname`.
"""
if len(self.host.datasets) > 1:
return f"{self.source}/{self.name}"
else:
return str(self.name)
@property
def dataset_name(self) -> str:
"""
Return the name of the releases ZFS dataset.
If the resource has no dataset or dataset_name assigned yet,
the release id is used to find name the dataset
"""
try:
return str(self._assigned_dataset_name)
except AttributeError:
pass
return self._dataset_name_from_release_name
@dataset_name.setter
def dataset_name(self, value: str) -> None:
"""Set the releases dataset name."""
self._dataset_name = value
@property
def base_dataset(self) -> libzfs.ZFSDataset:
"""
Return the ZFS basejail dataset belonging to the release.
base datasets are created from releases. They are required to start
zfs-basejails.
"""
ds: libzfs.ZFSDataset = self.zfs.get_dataset(self.base_dataset_name)
return ds
@property
def base_dataset_name(self) -> str:
"""Return the ZFS basejail datasets name belonging to the release."""
return f"{self._dataset_name_from_base_name}/root"
@property
def _dataset_name_from_release_name(self) -> str:
return f"{self.source_dataset.releases.name}/{self.name}"
@property
def _dataset_name_from_base_name(self) -> str:
return f"{self.source_dataset.base.name}/{self.name}"
@property
def source_dataset(self) -> 'libioc.Datasets.RootDatasets':
"""
Return the releases source root dataset.
iocage can manage multiple source datasets (on different ZFS pools
for instance). This property returns the RootDatasets instance that
belongs to the release.
"""
try:
assigned_name = str(self._assigned_dataset_name)
return self.host.datasets.find_root_datasets(assigned_name)
except AttributeError:
pass
if self.root_datasets_name is None:
return self.host.datasets.main
else:
try:
return self.host.datasets.__getitem__(self.root_datasets_name)
except KeyError:
raise libioc.errors.SourceNotFound(logger=self.logger)
@property
def source(self) -> str:
"""Return the name of the releases source root datasets."""
try:
assigned_name = str(self._assigned_dataset_name)
return str(
self.host.datasets.find_root_datasets_name(assigned_name)
)
except AttributeError:
pass
if self.root_datasets_name is None:
return str(self.host.datasets.main_datasets_name)
else:
return str(self.root_datasets_name)
class ReleaseGenerator(ReleaseResource):
"""Release with generator interfaces."""
DEFAULT_RC_CONF: typing.Dict[str, typing.Union[str, bool]] = {
"netif_enable": False,
"sendmail_enable": False,
"sendmail_submit_enable": False,
"sendmail_msp_queue_enable": False,
"sendmail_outbound_enable": False,
"daily_clean_hoststat_enable": False,
"daily_status_mail_rejects_enable": False,
"daily_status_include_submit_mailq": False,
"daily_submit_queuerun": False,
"cron_flags": "-m ''",
"syslogd_flags": "-ss"
}
DEFAULT_SYSCTL_CONF: typing.Dict[str, int] = {
"net.inet.ip.fw.enable": 0
}
_name: str
patchlevel: typing.Optional[int]
check_eol: bool
logger: 'libioc.Logger.Logger'
zfs: libioc.ZFS.ZFS
host: 'libioc.Host.HostGenerator'
_resource: libioc.Resource.Resource
_assets: typing.List[str]
_mirror_url: typing.Optional[str]
def __init__(
self,
name: str,
dataset: typing.Optional[libzfs.ZFSDataset]=None,
dataset_name: typing.Optional[str]=None,
config_type: str="auto",
config_file: typing.Optional[str]=None,
root_datasets_name: typing.Optional[str]=None,
host: typing.Optional['libioc.Host.HostGenerator']=None,
zfs: typing.Optional[libioc.ZFS.ZFS]=None,
logger: typing.Optional['libioc.Logger.Logger']=None,
check_hashes: bool=True,
check_eol: bool=True,
) -> None:
self.logger = libioc.helpers_object.init_logger(self, logger)
self.zfs = libioc.helpers_object.init_zfs(self, zfs)
self.host = libioc.helpers_object.init_host(self, host)
resource_selector = libioc.ResourceSelector.ResourceSelector(
name,
logger=self.logger
)
if resource_selector.source_name is not None:
is_different = resource_selector.source_name != root_datasets_name
if (root_datasets_name is not None) and (is_different is True):
# ToDo: omit root_datasets_name at all ?
raise libioc.errors.ConflictingResourceSelection(
source_a=resource_selector.source_name,
source_b=root_datasets_name,
logger=self.logger
)
else:
root_datasets_name = resource_selector.source_name
if libioc.helpers.validate_name(resource_selector.name) is False:
raise NameError(f"Invalid 'name' for Release: '{name}'")
self.name = name
self._hbsd_release_branch = None
self._mirror_url = None
self._hashes = None
self.check_hashes = check_hashes is True
self.check_eol = check_eol is True
ReleaseResource.__init__(
self,
host=self.host,
logger=self.logger,
zfs=self.zfs,
root_datasets_name=root_datasets_name,
dataset_name=dataset_name,
config_type=config_type,
config_file=config_file,
release=self
)
self._assets = ["base"]
if self.host.distribution.name != "HardenedBSD":
self._assets.append("lib32")
@property
def name(self) -> str:
"""Return the releases identifier."""
return self._name
@name.setter
def name(self, value: str) -> None:
"""Set the releases identifier (optionally including patchlevel)."""
match = re.match((
r"^(?:(?P<source_dataset_name>.*)/)?"
r"(?P<release_name>.*?)"
r"(?:-p(?P<patchlevel>[0-9]+))?$"
), value)
if match is None:
raise libioc.errors.InvalidReleaseName(
name=value,
logger=self.logger
)
self._name = match.group("release_name")
patchlevel = match.group("patchlevel")
self.patchlevel = None if (patchlevel is None) else int(patchlevel)
if match.group("source_dataset_name") is not None:
self.root_datasets_name = match.group("source_dataset_name")
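    # Illustrative example (added, not part of the original module), using a
    # hypothetical root dataset name: setting the name to
    #   "zroot/mydatasets/12.1-RELEASE-p4"
    # parses into root_datasets_name "zroot/mydatasets", release name
    # "12.1-RELEASE" and patchlevel 4, while a plain "12.1-RELEASE" leaves
    # patchlevel as None.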
@property
def full_name(self) -> str:
"""Return the release name including the patchlevel."""
if self.patchlevel is None:
return self._name
return f"{self._name}-p{self.current_snapshot_patchlevel}"
@property
def resource(self) -> 'libioc.Resource.Resource':
"""Return the releases resource."""
return self._resource
@resource.setter
def resource(self, value: 'libioc.Resource.Resource') -> None:
"""Set the releases resource."""
if value is None:
self._resource = ReleaseResource(
release=self,
host=self.host,
logger=self.logger,
zfs=self.zfs
)
else:
self._resource = value
@property
def releases_folder(self) -> str:
"""Return the mountpoint of the iocage/releases dataset."""
return str(self.source_dataset.releases.mountpoint)
@property
def download_directory(self) -> str:
"""Return the download directory."""
return str(self.dataset.mountpoint)
@property
def root_dir(self) -> str:
"""Return the main directory of the release."""
try:
if self.root_dataset.mountpoint:
return str(self.root_dataset.mountpoint)
except AttributeError:
pass
return f"{self.releases_folder}/{self.name}/root"
@property
def assets(self) -> typing.List[str]:
"""Return a list of release assets."""
return self._assets
@assets.setter
def assets(self, value: typing.Union[typing.List[str], str]) -> None:
"""Set the list of release assets."""
value = [value] if isinstance(value, str) else value
self._assets = list(map(
lambda x: x if not x.endswith(".txz") else x[:-4],
value
))
@property
def real_name(self) -> str:
"""Map the release name on HardenedBSD."""
if self.host.distribution.name == "HardenedBSD":
return f"HardenedBSD-{self.name}-{self.host.processor}-LATEST"
return self.name
@property
def annotated_name(self) -> str:
"""
Return the release name with annotations.
Annotations inform whether a release is newer then the host or EOL.
"""
annotations = set()
if self.eol is True:
annotations.add("EOL")
if self.newer_than_host is True:
annotations.add("Newer than Host")
if len(annotations) > 0:
return f"{self.name} ({', '.join(annotations)})"
return f"{self.name}"
@property
def eol(self) -> typing.Optional[bool]:
"""
Return whether the release is EOL or checks are disabled.
When check_eol is disabled, None is returned, True when the release
name was found in the distributions eol_list.
"""
if not self.check_eol:
return None
if self.host.distribution.name == "FreeBSD":
return (self.name in self.host.distribution.eol_list) is True
elif self.host.distribution.name == "HardenedBSD":
if "STABLE" in self.name:
# stable releases are explicitly in the EOL list or supported
return (self.name in self.host.distribution.eol_list) is True
return (self.version_number in map(
lambda x: self._parse_release_version(x),
self.host.distribution.eol_list
)) is True
return False
@property
def current_snapshot_patchlevel(self) -> int:
"""Return the currently chosen patchlevel number or the latest."""
current_snapshot_name = self.current_snapshot.snapshot_name
return int(current_snapshot_name.lstrip('p'))
@property
def current_snapshot(self) -> libzfs.ZFSSnapshot:
"""Return the manually configured or the latest release snapshot."""
if self.patchlevel is not None:
for snapshot in self.version_snapshots:
if snapshot.snapshot_name == f"p{self.patchlevel}":
return snapshot
return self.latest_snapshot
@property
def latest_snapshot(self) -> libzfs.ZFSSnapshot:
"""
Return or create the latest version snapshot.
When no snapshot was taken before `p0` is automatically created.
"""
version_snapshots = self.version_snapshots
if len(version_snapshots) == 0:
self.logger.verbose("No release snapshot found - using @p0")
return self.snapshot("p0")
else:
return version_snapshots[0]
@property
import os
import fnmatch
import logging
import hashlib
import json
from collections import defaultdict, namedtuple
from flask import url_for as flask_url_for
from flask import current_app
from boto.s3.connection import S3Connection
from boto.s3 import connect_to_region
from boto.exception import S3CreateError, S3ResponseError
from boto.s3.key import Key
logger = logging.getLogger('flask_s3')
def get_path_components(path):
"""
http://stackoverflow.com/questions/3167154/how-to-split-a-dos-path-into-its-components-in-python
"""
folders = []
while True:
path, folder = os.path.split(path)
if folder != "":
folders.append(folder)
else:
if path != "":
folders.append(path)
break
folders.reverse()
return folders
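# Illustrative check (added, not in the original file), for a POSIX-style path:
#
#   >>> get_path_components('static/css/app.css')
#   ['static', 'css', 'app.css']
#
# i.e. the path is split into its folder components with the filename last.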
def hash_file(filename):
"""
Generate a hash for the contents of a file
"""
hasher = hashlib.sha1()
with open(filename, 'rb') as f:
buf = f.read(65536)
while len(buf) > 0:
hasher.update(buf)
buf = f.read(65536)
return hasher.hexdigest()
def url_for(folders, endpoint, **values):
"""
Generates a URL to the given endpoint.
If the endpoint is for a static resource then an Amazon S3 URL is
generated, otherwise the call is passed on to `flask.url_for`.
Because this function is set as a jinja environment variable when
`FlaskS3.init_app` is invoked, this function replaces
`flask.url_for` in templates automatically. It is unlikely that this
function will need to be directly called from within your
application code, unless you need to refer to static assets outside
of your templates.
"""
app = current_app
if 'S3_BUCKET_NAME' not in app.config:
raise ValueError("S3_BUCKET_NAME not found in app configuration.")
my_endpoints = [f.endpoint for f in folders]
ending_endpoint = endpoint.split('.')[-1]
use_s3 = app.config.get('USE_S3')
if app.debug:
use_s3 = app.config.get('USE_S3_DEBUG') and app.config.get('USE_S3')
    if (endpoint in my_endpoints or ending_endpoint == 'static') and use_s3:
scheme = 'http'
if app.config['S3_USE_HTTPS']:
scheme = 'https'
if app.config['S3_URL_STYLE'] == 'host':
url_format = '%(bucket_name)s.%(bucket_domain)s'
elif app.config['S3_URL_STYLE'] == 'path':
url_format = '%(bucket_domain)s/%(bucket_name)s'
else:
raise ValueError('Invalid S3 URL style: "%s"'
% app.config['S3_URL_STYLE'])
bucket_path = url_format % {
'bucket_name': app.config['S3_BUCKET_NAME'],
'bucket_domain': app.config['S3_BUCKET_DOMAIN'],
}
if app.config['S3_CDN_DOMAIN']:
bucket_path = '%s' % app.config['S3_CDN_DOMAIN']
urls = app.url_map.bind(bucket_path, url_scheme=scheme)
return urls.build(endpoint, values=values, force_external=True)
return flask_url_for(endpoint, **values)
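# Note (added for clarity, not part of the original module): with the 'host'
# URL style the generated root is '<bucket_name>.<bucket_domain>' and with the
# 'path' style it is '<bucket_domain>/<bucket_name>'; S3_CDN_DOMAIN, when set,
# replaces that root entirely, and S3_USE_HTTPS selects the scheme. As a
# sketch, assuming hypothetical config values S3_BUCKET_NAME='assets' and
# S3_BUCKET_DOMAIN='s3.amazonaws.com', a static file could resolve to
# 'https://assets.s3.amazonaws.com/static/css/app.css' in host style.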
def _bp_static_url(app, blueprint):
""" builds the absolute url path for a blueprint's static folder """
urls = app.url_map.bind('')
u = urls.build("{}.static".format(blueprint.name), values={"filename":""})
print(u)
return u
def _get_static_folders(app):
""" Gets static folders and returns in list of (folder, url) pairs"""
dirs = [(unicode(app.static_folder), app.static_url_path)]
if hasattr(app, 'blueprints'):
blueprints = app.blueprints.values()
bp_details = lambda x: (x.static_folder, _bp_static_url(app, x))
dirs.extend([bp_details(x) for x in blueprints if x.static_folder])
return dirs
def _gather_files(folders, hidden):
valid_files = defaultdict(list)
for static_folder, static_url_loc in folders:
if not os.path.isdir(static_folder):
logger.warning("WARNING - [%s does not exist]" % static_folder)
else:
logger.debug("Checking static folder: %s" % static_folder)
for root, _, files in os.walk(static_folder):
files = [os.path.join(root, x) \
for x in files if hidden or x[0] != '.']
if files:
valid_files[(static_folder, static_url_loc)].extend(files)
return valid_files
def _path_to_relative_url(path):
""" Converts a folder and filename into a ralative url path """
return os.path.splitdrive(path)[1].replace('\\', '/')
def _static_folder_path(static_url, static_folder, static_asset):
"""
Returns a path to a file based on the static folder, and not on the
filesystem holding the file.
Returns a path relative to static_url for static_asset
"""
# first get the asset path relative to the static folder.
# static_asset is not simply a filename because it could be
# sub-directory then file etc.
if not static_asset.startswith(static_folder):
raise ValueError("%s static asset must be under %s static folder" %
(static_asset, static_folder))
rel_asset = static_asset[len(static_folder):]
# Now bolt the static url path and the relative asset location together
return u'%s/%s' % (static_url.rstrip('/'), rel_asset.lstrip('/'))
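# Illustrative check (added, not in the original file), using made-up paths:
#
#   >>> _static_folder_path('/static', '/srv/app/static',
#   ...                     '/srv/app/static/css/app.css')
#   '/static/css/app.css'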
def _write_files(app, static_url_loc, static_folder, files, bucket,
ex_keys=None, hashes=None):
""" Writes all the files inside a static folder to S3. """
new_hashes = []
static_folder_rel = _path_to_relative_url(static_folder)
ignore_patterns = app.config.get('S3_IGNORE', [])
for file_path in files:
matches = [fnmatch.fnmatch(file_path, ignore_pattern) for ignore_pattern in ignore_patterns]
if any(matches):
print("ignoring file {}".format(file_path))
continue
asset_loc = _path_to_relative_url(file_path)
key_name = _static_folder_path(static_url_loc, static_folder_rel,
asset_loc).strip('/')
msg = "Uploading %s to %s as %s" % (file_path, bucket, key_name)
logger.debug(msg)
exclude = False
if app.config.get('S3_ONLY_MODIFIED', False):
file_hash = hash_file(file_path)
new_hashes.append((key_name, file_hash))
if hashes and hashes.get(key_name, None) == file_hash:
exclude = True
if ex_keys and key_name in ex_keys or exclude:
logger.debug("%s excluded from upload" % key_name)
else:
k = Key(bucket=bucket, name=key_name)
# Set custom headers
for header, value in app.config['S3_HEADERS'].iteritems():
k.set_metadata(header, value)
k.set_contents_from_filename(file_path)
k.make_public()
print("pushing new file {}".format(key_name))
return new_hashes
def _upload_files(app, files_, bucket, hashes=None):
new_hashes = []
for (static_folder, static_url), names in files_.iteritems():
new_hashes.extend(_write_files(app, static_url, static_folder, names,
bucket, hashes=hashes))
return new_hashes
def get_bucket(app, user=None, password=None, bucket_name=None,
location=None):
user = user or app.config.get('AWS_ACCESS_KEY_ID')
password = password or app.config.get('AWS_SECRET_ACCESS_KEY')
bucket_name = bucket_name or app.config.get('S3_BUCKET_NAME')
if not bucket_name:
raise ValueError("No bucket name provided.")
location = location or app.config.get('S3_REGION')
# connect to s3
if not location:
conn = S3Connection(user, password) # (default region)
else:
conn = connect_to_region(location,
aws_access_key_id=user,
aws_secret_access_key=password)
# get_or_create bucket
try:
try:
bucket = conn.create_bucket(bucket_name)
except S3CreateError as e:
if e.error_code == u'BucketAlreadyOwnedByYou':
bucket = conn.get_bucket(bucket_name)
else:
raise e
bucket.make_public(recursive=False)
except S3CreateError as e:
raise e
return bucket
def create_all(folders, app, include_hidden=False, **kwargs):
"""
Uploads of the static assets associated with a Flask application to
Amazon S3.
All static assets are identified on the local filesystem, including
any static assets associated with *registered* blueprints. In turn,
each asset is uploaded to the bucket described by `bucket_name`. If
the bucket does not exist then it is created.
Flask-S3 creates the same relative static asset folder structure on
S3 as can be found within your Flask application.
Many of the optional arguments to `create_all` can be specified
instead in your application's configuration using the Flask-S3
`configuration`_ variables.
:param app: a :class:`flask.Flask` application object.
:param user: an AWS Access Key ID. You can find this key in the
Security Credentials section of your AWS account.
:type user: `basestring` or None
:param password: an AWS Secret Access Key. You can find this key in
the Security Credentials section of your AWS
account.
:type password: `basestring` or None
:param bucket_name: the name of the bucket you wish to server your
static assets from. **Note**: while a valid
character, it is recommended that you do not
include periods in bucket_name if you wish to
serve over HTTPS. See Amazon's `bucket
restrictions`_ for more details.
:type bucket_name: `basestring` or None
:param location: the AWS region to host the bucket in; an empty
string indicates the default region should be used,
which is the US Standard region. Possible location
values include: `'DEFAULT'`, `'EU'`, `'USWest'`,
`'APSoutheast'`
:type location: `basestring` or None
:param include_hidden: by default Flask-S3 will not upload hidden
files. Set this to true to force the upload of hidden files.
:type include_hidden: `bool`
.. _bucket restrictions: http://docs.amazonwebservices.com/AmazonS3\
/latest/dev/BucketRestrictions.html
"""
bucket = get_bucket(app=app, **kwargs)
# build list of files
my_folders = [(f.folder, f.url) for f in folders]
static_folders = _get_static_folders(app)
all_folders = my_folders + static_folders
all_files = _gather_files(all_folders, include_hidden)
logger.debug("All valid files: %s" % all_files)
if app.config['S3_ONLY_MODIFIED']:
hashes = get_web_hashes(bucket)
new_hashes = _upload_files(app, all_files, bucket, hashes=hashes)
try:
k = Key(bucket=bucket, name=".file-hashes")
k.set_contents_from_string(json.dumps(dict(new_hashes)))
except S3ResponseError as e:
logger.warn("Unable to upload file hashes: %s" % e)
else:
_upload_files(app, all_files, bucket)
def get_web_hashes(bucket):
try:
hashes = json.loads(
Key(bucket=bucket,
name=".file-hashes").get_contents_as_string())
return hashes
except S3ResponseError as e:
logger.warn("No file hashes found: %s" % e)
def clean(app, **kwargs):
bucket = get_bucket(app=app, **kwargs)
hashes = get_web_hashes(bucket)
if hashes is None:
print("no hashes available. Bucket not cleaned")
keys = set(hashes.keys())
bucket_list = bucket.list()
for l in bucket_list:
keyString = str(l.key)
if keyString == '.file-hashes':
continue
if keyString not in keys:
print("deleting {}".format(keyString))
l.delete()
def clone(folders, app, **kwargs):
bucket = get_bucket(app=app, **kwargs)
hashes = get_web_hashes(bucket)
my_folders = [(f.folder, f.url) for f in folders]
static_folders = _get_static_folders(app)
all_folders = my_folders + static_folders
# TODO: use hash to see what needs to be updated
bucket_list = bucket.list()
for l in bucket_list:
keyString = str(l.key)
if keyString == '.file-hashes':
continue
if hashes is not None and keyString not in hashes:
print("file {} has not been pushed (probably needs to be cleaned up) so won't be downloaded".format(keyString))
continue
# find out which local folder to map to
for folder_local, folder_url in all_folders:
folder_comps = get_path_components(folder_url.strip('/'))
key_comps = get_path_components(keyString.strip('/'))
# make sure all components match
for fc, kc in zip(folder_comps, key_comps):
if fc != kc:
break # some component does not match, continue to next
else:
                # all components match
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Identity service."""
import functools
import os
from oslo.config import cfg
from keystone import clean
from keystone.common import controller
from keystone.common import dependency
from keystone.common import manager
from keystone import config
from keystone import exception
from keystone import notifications
from keystone.openstack.common import importutils
from keystone.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
def filter_user(user_ref):
"""Filter out private items in a user dict.
'password', 'tenants' and 'groups' are never returned.
:returns: user_ref
"""
if user_ref:
user_ref = user_ref.copy()
user_ref.pop('password', None)
user_ref.pop('tenants', None)
user_ref.pop('groups', None)
user_ref.pop('domains', None)
try:
user_ref['extra'].pop('password', None)
user_ref['extra'].pop('tenants', None)
except KeyError:
pass
return user_ref
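# Illustrative example (added, not part of the original module): calling
#   filter_user({'id': 'u1', 'name': 'alice', 'password': 's3cret',
#                'tenants': ['t1'], 'extra': {}})
# returns a copy along the lines of {'id': 'u1', 'name': 'alice', 'extra': {}};
# the private fields are popped from the copy and the caller's dict is left
# untouched.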
class DomainConfigs(dict):
"""Discover, store and provide access to domain specifc configs.
The setup_domain_drives() call will be made via the wrapper from
the first call to any driver function handled by this manager. This
setup call it will scan the domain config directory for files of the form
keystone.<domain_name>.conf
For each file, the domain_name will be turned into a domain_id and then
this class will:
- Create a new config structure, adding in the specific additional options
defined in this config file
- Initialise a new instance of the required driver with this new config.
"""
configured = False
driver = None
def _load_driver(self, assignment_api, domain_id):
domain_config = self[domain_id]
domain_config['driver'] = (
importutils.import_object(
domain_config['cfg'].identity.driver, domain_config['cfg']))
domain_config['driver'].assignment_api = assignment_api
def _load_config(self, assignment_api, file_list, domain_name):
try:
domain_ref = assignment_api.get_domain_by_name(domain_name)
except exception.DomainNotFound:
msg = (_('Invalid domain name (%s) found in config file name')
% domain_name)
            LOG.warning(msg)
            return
if domain_ref:
# Create a new entry in the domain config dict, which contains
# a new instance of both the conf environment and driver using
# options defined in this set of config files. Later, when we
# service calls via this Manager, we'll index via this domain
# config dict to make sure we call the right driver
domain = domain_ref['id']
self[domain] = {}
self[domain]['cfg'] = cfg.ConfigOpts()
config.configure(conf=self[domain]['cfg'])
self[domain]['cfg'](args=[], project='keystone',
default_config_files=file_list)
self._load_driver(assignment_api, domain)
def setup_domain_drivers(self, standard_driver, assignment_api):
# This is called by the api call wrapper
self.configured = True
self.driver = standard_driver
conf_dir = CONF.identity.domain_config_dir
if not os.path.exists(conf_dir):
msg = _('Unable to locate domain config directory: %s') % conf_dir
LOG.warning(msg)
return
for r, d, f in os.walk(conf_dir):
for file in f:
if file.startswith('keystone.') and file.endswith('.conf'):
names = file.split('.')
if len(names) == 3:
self._load_config(assignment_api,
[os.path.join(r, file)],
names[1])
else:
msg = (_('Ignoring file (%s) while scanning domain '
'config directory') % file)
LOG.debug(msg)
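    # Illustrative example (added for clarity): with a hypothetical
    # CONF.identity.domain_config_dir containing a file named
    #   keystone.example_domain.conf
    # the loop above derives the domain name 'example_domain' from the middle
    # component of the file name, while a file such as keystone.my.domain.conf
    # is skipped because it does not split into exactly three dot-separated
    # parts.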
def get_domain_driver(self, domain_id):
if domain_id in self:
return self[domain_id]['driver']
def get_domain_conf(self, domain_id):
if domain_id in self:
return self[domain_id]['cfg']
def reload_domain_driver(self, assignment_api, domain_id):
# Only used to support unit tests that want to set
# new config values. This should only be called once
# the domains have been configured, since it relies on
# the fact that the configuration files have already been
# read.
if self.configured:
if domain_id in self:
self._load_driver(assignment_api, domain_id)
else:
# The standard driver
self.driver = self.driver()
self.driver.assignment_api = assignment_api
def domains_configured(f):
"""Wraps API calls to lazy load domain configs after init.
This is required since the assignment manager needs to be initialized
before this manager, and yet this manager's init wants to be
able to make assignment calls (to build the domain configs). So
instead, we check if the domains have been initialized on entry
    to each call and, if required, load them.
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if (not self.domain_configs.configured and
CONF.identity.domain_specific_drivers_enabled):
LOG.warning(_(
'Running an experimental and unsupported configuration '
'(domain_specific_drivers_enabled = True); '
'this will result in known issues.'))
self.domain_configs.setup_domain_drivers(
self.driver, self.assignment_api)
return f(self, *args, **kwargs)
return wrapper
@dependency.provider('identity_api')
@dependency.requires('assignment_api')
class Manager(manager.Manager):
"""Default pivot point for the Identity backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
This class also handles the support of domain specific backends, by using
the DomainConfigs class. The setup call for DomainConfigs is called
    from within the @domains_configured wrapper in a lazy loading fashion
    to get around the fact that we can't satisfy the assignment api it needs
    from within our __init__() function, since the assignment driver is not
    itself yet initialized.
Each of the identity calls are pre-processed here to choose, based on
domain, which of the drivers should be called. The non-domain-specific
driver is still in place, and is used if there is no specific driver for
the domain in question.
"""
def __init__(self):
super(Manager, self).__init__(CONF.identity.driver)
self.domain_configs = DomainConfigs()
@staticmethod
def v3_to_v2_user(ref):
"""Convert a user_ref from v3 to v2 compatible.
* v2.0 users are not domain aware, and should have domain_id removed
* v2.0 users expect the use of tenantId instead of default_project_id
This method should only be applied to user_refs being returned from the
v2.0 controller(s).
If ref is a list type, we will iterate through each element and do the
conversion.
"""
def _format_default_project_id(ref):
"""Convert default_project_id to tenantId for v2 calls."""
default_project_id = ref.pop('default_project_id', None)
if default_project_id is not None:
ref['tenantId'] = default_project_id
elif 'tenantId' in ref:
# NOTE(morganfainberg): To avoid v2.0 confusion if somehow a
# tenantId property sneaks its way into the extra blob on the
# user, we remove it here. If default_project_id is set, we
# would override it in either case.
del ref['tenantId']
def _normalize_and_filter_user_properties(ref):
"""Run through the various filter/normalization methods."""
_format_default_project_id(ref)
controller.V2Controller.filter_domain_id(ref)
return ref
if isinstance(ref, dict):
return _normalize_and_filter_user_properties(ref)
elif isinstance(ref, list):
return [_normalize_and_filter_user_properties(x) for x in ref]
else:
raise ValueError(_('Expected dict or list: %s') % type(ref))
# Domain ID normalization methods
def _set_domain_id(self, ref, domain_id):
if isinstance(ref, dict):
ref = ref.copy()
ref['domain_id'] = domain_id
return ref
elif isinstance(ref, list):
return [self._set_domain_id(x, domain_id) for x in ref]
else:
raise ValueError(_('Expected dict or list: %s') % type(ref))
def _clear_domain_id(self, ref):
# Clear the domain_id, and then check to ensure that if this
# was not the default domain, it is being handled by its own
# backend driver.
ref = ref.copy()
domain_id = ref.pop('domain_id', CONF.identity.default_domain_id)
if (domain_id != CONF.identity.default_domain_id and
domain_id not in self.domain_configs):
raise exception.DomainNotFound(domain_id=domain_id)
return ref
def _normalize_scope(self, domain_scope):
if domain_scope is None:
return CONF.identity.default_domain_id
else:
return domain_scope
def _select_identity_driver(self, domain_id):
driver = self.domain_configs.get_domain_driver(domain_id)
if driver:
return driver
else:
self.get_domain(domain_id)
return self.driver
def _get_domain_conf(self, domain_id):
conf = self.domain_configs.get_domain_conf(domain_id)
if conf:
return conf
else:
return CONF
def _get_domain_id_and_driver(self, domain_scope):
domain_id = self._normalize_scope(domain_scope)
driver = self._select_identity_driver(domain_id)
return (domain_id, driver)
# The actual driver calls - these are pre/post processed here as
# part of the Manager layer to make sure we:
#
# - select the right driver for this domain
# - clear/set domain_ids for drivers that do not support domains
@domains_configured
def authenticate(self, user_id, password, domain_scope=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
ref = driver.authenticate(user_id, password)
if not driver.is_domain_aware():
ref = self._set_domain_id(ref, domain_id)
return ref
@notifications.created('user')
@domains_configured
def create_user(self, user_id, user_ref):
user = user_ref.copy()
user['name'] = clean.user_name(user['name'])
user.setdefault('enabled', True)
user['enabled'] = clean.user_enabled(user['enabled'])
# For creating a user, the domain is in the object itself
domain_id = user_ref['domain_id']
driver = self._select_identity_driver(domain_id)
if not driver.is_domain_aware():
user = self._clear_domain_id(user)
ref = driver.create_user(user_id, user)
if not driver.is_domain_aware():
ref = self._set_domain_id(ref, domain_id)
return ref
@domains_configured
def get_user(self, user_id, domain_scope=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
ref = driver.get_user(user_id)
if not driver.is_domain_aware():
ref = self._set_domain_id(ref, domain_id)
return ref
@domains_configured
def get_user_by_name(self, user_name, domain_id):
driver = self._select_identity_driver(domain_id)
ref = driver.get_user_by_name(user_name, domain_id)
if not driver.is_domain_aware():
ref = self._set_domain_id(ref, domain_id)
return ref
@domains_configured
def list_users(self, domain_scope=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
user_list = driver.list_users()
if not driver.is_domain_aware():
user_list = self._set_domain_id(user_list, domain_id)
return user_list
@notifications.updated('user')
@domains_configured
def update_user(self, user_id, user_ref, domain_scope=None):
user = user_ref.copy()
``name`` property) does not
affect equality.
Parameters
----------
other : GeneticCode
Genetic code to test for inequality against.
Returns
-------
bool
Indicates whether the genetic code is not equal to `other`.
"""
return not (self == other)
@stable(as_of="0.4.0")
def translate(self, sequence, reading_frame=1, start='ignore',
stop='ignore'):
"""Translate RNA sequence into protein sequence.
Parameters
----------
sequence : RNA
RNA sequence to translate.
reading_frame : {1, 2, 3, -1, -2, -3}
Reading frame to use in translation. 1, 2, and 3 are forward frames
and -1, -2, and -3 are reverse frames. If reverse (negative), will
reverse complement the sequence before translation.
start : {'ignore', 'require', 'optional'}
How to handle start codons:
* "ignore": translation will start from the beginning of the
reading frame, regardless of the presence of a start codon.
* "require": translation will start at the first start codon in
the reading frame, ignoring all prior positions. The first amino
acid in the translated sequence will *always* be methionine
(M character), even if an alternative start codon was used in
translation. This behavior most closely matches the underlying
biology since fMet doesn't have a corresponding IUPAC character.
If a start codon does not exist, a ``ValueError`` is raised.
* "optional": if a start codon exists in the reading frame, matches
the behavior of "require". If a start codon does not exist,
matches the behavior of "ignore".
stop : {'ignore', 'require', 'optional'}
How to handle stop codons:
* "ignore": translation will ignore the presence of stop codons and
translate to the end of the reading frame.
* "require": translation will terminate at the first stop codon.
The stop codon will not be included in the translated sequence.
If a stop codon does not exist, a ``ValueError`` is raised.
* "optional": if a stop codon exists in the reading frame, matches
the behavior of "require". If a stop codon does not exist,
matches the behavior of "ignore".
Returns
-------
Protein
Translated sequence.
See Also
--------
translate_six_frames
Notes
-----
Input RNA sequence metadata are included in the translated protein
sequence. Positional metadata are not included.
Examples
--------
Translate RNA into protein using NCBI's standard genetic code (table ID
1, the default genetic code in scikit-bio):
>>> from skbio import RNA, GeneticCode
>>> rna = RNA('AGUAUUCUGCCACUGUAAGAA')
>>> sgc = GeneticCode.from_ncbi()
>>> sgc.translate(rna)
Protein
--------------------------
Stats:
length: 7
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 SILPL*E
In this command, we used the default ``start`` behavior, which starts
translation at the beginning of the reading frame, regardless of the
presence of a start codon. If we specify "require", translation will
start at the first start codon in the reading frame (in this example,
CUG), ignoring all prior positions:
>>> sgc.translate(rna, start='require')
Protein
--------------------------
Stats:
length: 5
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 MPL*E
Note that the codon coding for L (CUG) is an alternative start codon in
this genetic code. Since we specified "require" mode, methionine (M)
was used in place of the alternative start codon (L). This behavior
most closely matches the underlying biology since fMet doesn't have a
corresponding IUPAC character.
Translate the same RNA sequence, also specifying that translation
terminate at the first stop codon in the reading frame:
>>> sgc.translate(rna, start='require', stop='require')
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 MPL
Passing "require" to both ``start`` and ``stop`` trims the translation
to the CDS (and in fact requires that one is present in the reading
frame). Changing the reading frame to 2 causes an exception to be
raised because a start codon doesn't exist in the reading frame:
>>> sgc.translate(rna, start='require', stop='require',
... reading_frame=2) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: ...
"""
self._validate_translate_inputs(sequence, reading_frame, start, stop)
offset = abs(reading_frame) - 1
if reading_frame < 0:
sequence = sequence.reverse_complement()
# Translation strategy:
#
# 1. Obtain view of underlying sequence bytes from the beginning of
# the reading frame.
# 2. Convert bytes to offsets (0-3, base 4 since there are only 4
# characters allowed: UCAG).
# 3. Reshape byte vector into (N, 3), where N is the number of codons
# in the reading frame. Each row represents a codon in the
# sequence.
# 4. (Optional) Find start codon in the reading frame and trim to
# this position. Replace start codon with M codon.
# 5. Convert each codon (encoded as offsets) into an index
# corresponding to an amino acid (0-63).
# 6. Obtain translated sequence by indexing into the amino acids
# vector (`amino_acids`) using the indices defined in step 5.
# 7. (Optional) Find first stop codon and trim to this position.
data = sequence.values[offset:].view(np.uint8)
# since advanced indexing is used with an integer ndarray, a copy is
# always returned. thus, the in-place modification made below
# (replacing the start codon) is safe.
data = self._offset_table[data]
data = data[:data.size // 3 * 3].reshape((-1, 3))
if start in {'require', 'optional'}:
start_codon_index = data.shape[0]
for start_codon in self._start_codons:
indices = np.all(data == start_codon, axis=1).nonzero()[0]
if indices.size > 0:
first_index = indices[0]
if first_index < start_codon_index:
start_codon_index = first_index
if start_codon_index != data.shape[0]:
data = data[start_codon_index:]
data[0] = self._m_character_codon
elif start == 'require':
self._raise_require_error('start', reading_frame)
indices = (data * self._radix_multiplier).sum(axis=1)
translated = self._amino_acids.values[indices]
if stop in {'require', 'optional'}:
stop_codon_indices = (translated == b'*').nonzero()[0]
if stop_codon_indices.size > 0:
translated = translated[:stop_codon_indices[0]]
elif stop == 'require':
self._raise_require_error('stop', reading_frame)
metadata = None
if sequence.has_metadata():
metadata = sequence.metadata
# turn off validation because `translated` is guaranteed to be valid
return Protein(translated, metadata=metadata, validate=False)
def _validate_translate_inputs(self, sequence, reading_frame, start, stop):
if not isinstance(sequence, RNA):
raise TypeError("Sequence to translate must be RNA, not %s" %
type(sequence).__name__)
if reading_frame not in self.reading_frames:
raise ValueError("`reading_frame` must be one of %r, not %r" %
(self.reading_frames, reading_frame))
for name, value in ('start', start), ('stop', stop):
if value not in self._start_stop_options:
raise ValueError("`%s` must be one of %r, not %r" %
(name, self._start_stop_options, value))
if sequence.has_gaps():
raise ValueError("scikit-bio does not support translation of "
"gapped sequences.")
if sequence.has_degenerates():
raise NotImplementedError("scikit-bio does not currently support "
"translation of degenerate sequences."
"`RNA.expand_degenerates` can be used "
"to obtain all definite versions "
"of a degenerate sequence.")
def _raise_require_error(self, name, reading_frame):
raise ValueError(
"Sequence does not contain a %s codon in the "
"current reading frame (`reading_frame=%d`). Presence "
"of a %s codon is required with `%s='require'`"
% (name, reading_frame, name, name))
@stable(as_of="0.4.0")
def translate_six_frames(self, sequence, start='ignore', stop='ignore'):
"""Translate RNA into protein using six possible reading frames.
The six possible reading frames are:
* 1 (forward)
* 2 (forward)
* 3 (forward)
* -1 (reverse)
* -2 (reverse)
* -3 (reverse)
Translated sequences are yielded in this order.
Parameters
----------
sequence : RNA
RNA sequence to translate.
start : {'ignore', 'require', 'optional'}
How to handle start codons. See ``GeneticCode.translate`` for
details.
stop : {'ignore', 'require', 'optional'}
How to handle stop codons. See ``GeneticCode.translate`` for
details.
Yields
------
Protein
Translated sequence in the current reading frame.
See Also
--------
translate
Notes
-----
This method is faster than (and equivalent to) performing six
independent translations using, for example:
``(gc.translate(seq, reading_frame=rf)
for rf in GeneticCode.reading_frames)``
Input RNA sequence metadata are included in each translated protein
sequence. Positional metadata are not included.
Examples
--------
Translate RNA into protein using the six possible reading frames and
NCBI's standard genetic code (table ID 1, the default genetic code in
scikit-bio):
>>> from skbio import RNA, GeneticCode
>>> rna = RNA('AUGCCACUUUAA')
>>> sgc = GeneticCode.from_ncbi()
>>> for protein in sgc.translate_six_frames(rna):
... protein
... print('')
Protein
--------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 MPL*
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
num_trees,
feature_subset_strategy,
seed,
subsampling_rate, train, test):
code = self.session_assertion() + textwrap.dedent("""
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
import io, pandas as pd
str_io = io.StringIO()
(train_set, test_set) = """+table_name+""".randomSplit(["""+ train +""","""+ test +"""])
randomforest_""" + table_name + """ = RandomForestClassifier(featuresCol='"""+features_col+"""', \
labelCol='"""+label_col+"""', \
predictionCol='"""+prediction_col+"""', \
probabilityCol='"""+probability_col+"""', \
rawPredictionCol='"""+raw_prediction_col+"""', \
maxBins="""+max_bins+""",\
impurity='"""+impurity+"""', \
numTrees="""+num_trees+""")
append_stage('randomforest_""" + table_name + """')
model = randomforest_""" + table_name + """.fit(train_set)
prediction = model.transform(test_set)
evaluator = MulticlassClassificationEvaluator(predictionCol='"""+prediction_col+"""', labelCol='"""+label_col+"""')
accuracy = evaluator.evaluate(prediction, {evaluator.metricName: "accuracy"})*100
recall = evaluator.evaluate(prediction, {evaluator.metricName: "weightedRecall"})*100
precision = evaluator.evaluate(prediction, {evaluator.metricName: "weightedPrecision"})*100
f1 = evaluator.evaluate(prediction, {evaluator.metricName: "f1"})*100
# prediction.select('"""+label_col+"""', '"""+prediction_col+"""').show()
prediction.name = 'randomforest_"""+table_name+"""'
print("<b>Dataframe name : "), prediction.name+"</b><br>"
sdf_randomforest_"""+ table_name +""" = prediction
sdf_randomforest_"""+ table_name +""".createOrReplaceTempView('sdf_randomforest_"""+ table_name +"""')
print('<br><b>Number of rows: </b>')
print('<b>'+str(sdf_randomforest_"""+table_name+""".count())+'</b><br>')
print("<b>accuracy ="+str('%.3f'%(accuracy))+"</b><br>")
print("<b>recall ="+str('%.3f'%(recall))+"</b><br>")
print("<b>precision ="+str('%.3f'%(precision))+"</b><br>")
print("<b>f1 ="+str('%.3f'%(f1))+"</b><br>")
df_ = prediction.select('*').limit(100).toPandas()
df_.to_html(buf=str_io, classes='table dataframe', index=False)
html_str = str_io.getvalue()
print(html_str)
""")
return code
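# Hedged note (not in the original source): the string returned above is a
# complete PySpark snippet; the caller is expected to execute it against a
# live Spark session, and every numeric argument arrives as a string because
# it is spliced verbatim into the template.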
def fp_growth(self, table_name, splits, min_support, min_confidence, items_col, prediction_col, num_partitions):
code = self.session_assertion() + textwrap.dedent("""
from pyspark.sql.functions import udf
from pyspark.sql.types import ArrayType, StringType
from pyspark.ml.fpm import FPGrowth
import io, pandas as pd
str_io = io.StringIO()
def list_string(x):
out_vec = []
y = x.split('"""+ splits +"""')
for t in y:
out_vec.append(t)
return out_vec
list_df = udf(lambda x: list(set(list_string(x))), ArrayType(StringType()))
dataframe = """+table_name+""".withColumn('"""+items_col+"""', list_df('"""+items_col+"""'))
fpgrowth = FPGrowth(minConfidence="""+min_confidence+""", minSupport="""+min_support+""", itemsCol='"""+items_col+"""')
append_stage('fpgrowth')
model = fpgrowth.fit(dataframe)
association_rule = model.associationRules
# association_rule.show()
association_rule.name = 'sdf_fpgrowth_"""+table_name+"""'
print("<b>Dataframe name : "), association_rule.name+"</b><br>"
sdf_fpgrowth_"""+ table_name +""" = association_rule
association_rule.createOrReplaceTempView('sdf_fpgrowth_"""+ table_name +"""')
print('<br><b>Number of rows: </b>')
print('<b>'+str(sdf_fpgrowth_"""+table_name+""".count())+'</b>')
df_ = association_rule.limit(100).toPandas()
df_.to_html(buf=str_io, classes='table dataframe', index=False)
html_str = str_io.getvalue()
print(html_str)
""")
return code
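# Hedged usage sketch (assumption: `generator` is an instance of this
# code-generator class and the returned string is later executed against a
# live Spark session):
#   code = generator.fp_growth('transactions', ',', '0.1', '0.6',
#                              'items', 'prediction', '4')
# All numeric arguments are passed as strings because they are spliced
# directly into the generated PySpark source.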
def linear_svc(self, table_name, features_col, label_col, prediction_col, max_iter, reg_param, tol, raw_prediction_col, fit_intercept, standardization, threshold, weight_col, aggregation_depth, train, test):
code = self.session_assertion() + textwrap.dedent("""
from pyspark.ml.classification import LinearSVC
from pyspark.ml.evaluation import BinaryClassificationEvaluator
import io, pandas as pd
str_io = io.StringIO()
(train_set, test_set) = """+table_name+""".randomSplit(["""+train+""","""+test+"""])
linearsvc_""" + table_name + """ = LinearSVC(featuresCol='"""+features_col+"""', labelCol='"""+label_col+"""', predictionCol='"""+prediction_col+"""',\
maxIter="""+max_iter+""", regParam="""+reg_param+""", tol="""+tol+""", rawPredictionCol='"""+raw_prediction_col+"""')
append_stage('linearsvc_""" + table_name + """')
svmmodel = linearsvc_""" + table_name + """.fit(train_set)
prediction = svmmodel.transform(test_set)
evaluator = BinaryClassificationEvaluator(rawPredictionCol='"""+raw_prediction_col+"""', labelCol='"""+label_col+"""')
areaUnderROC = evaluator.evaluate(prediction, {evaluator.metricName: "areaUnderROC"})*100
areaUnderPR = evaluator.evaluate(prediction, {evaluator.metricName: "areaUnderPR"})*100
prediction.name = 'sdf_linearsvc_"""+table_name+"""'
print("<b>Dataframe name : "), prediction.name+"</b><br>"
sdf_linearsvc_"""+ table_name +""" = prediction
prediction.createOrReplaceTempView('sdf_linearsvc_"""+ table_name +"""')
print('<br><b>Number of rows: </b>')
print('<b>'+str(sdf_linearsvc_"""+table_name+""".count())+'</b><br>')
print("<b>areaUnderROC = "+str('%.3f'%(areaUnderROC))+"</b><br>")
print("<b>areaUnderPR = "+str('%.3f'%(areaUnderPR))+"</b><br>")
df_ = prediction.select('*').limit(100).toPandas()
df_.to_html(buf=str_io, classes='table dataframe', index=False)
html_str = str_io.getvalue()
print(html_str)
""")
return code
def arima(self, table_name, splits, numberofpredict, date_col, timeseries_col, start_p, d, start_q, max_p, max_d, max_q, start_P, D, start_Q, max_P, max_D, max_Q, max_order, m, seasonal, stationary, stepwise, solver, suppress_warnings, error_action, trace, scoring, business, by):
code = self.session_assertion() + textwrap.dedent("""
import datetime
from datetime import date
from pyramid.arima import auto_arima
from pandas.tseries.offsets import DateOffset
from pandas.tseries.offsets import BDay
import numpy as np
import warnings
import io, pandas as pd
str_io = io.StringIO()
warnings.filterwarnings('ignore')
# def ARIMA("""+table_name+""", splits='"""+splits+"""', numberofpredict="""+numberofpredict+""", dateCol='"""+date_col+"""', timeseriesCol='"""+timeseries_col+"""'):
# def to_isoformat(current_format):
# year, month, day = current_format.split('"""+splits+"""')
# # year, month, day = current_format.split('-')
# dt = datetime.date(int(year), int(month), int(day))
# return dt.isoformat()
# def clean(row):
# dt, cols = row.split(",")
# isodate = to_isoformat(dt.replace("''", ''))
# return([isodate, float(cols)])
# spark_df = """+table_name+""".rdd.map(lambda x: x)
# cols1 = ['"""+date_col +"""', '"""+ timeseries_col +"""']
# cols2 = [x for x in """+ table_name +""".columns if x in cols1]
# rdd_airpass = spark_df.map(lambda x: x[cols2[0]]+","+str((x[cols2[1]])))
# data = rdd_airpass.map(clean)
# dataFrame = spark.createDataFrame(data, ['"""+date_col+"""', '"""+timeseries_col+"""'])
# dataFrame_pandas = dataFrame.toPandas()
# dataFrame_pandas['"""+date_col+"""'] = pd.to_datetime(dataFrame_pandas['"""+date_col+"""'])
# dataFrame_pandas = dataFrame_pandas.set_index('"""+date_col+"""')
# future_dates = [dataFrame_pandas.index[-1] + DateOffset(days=x) for x in range(1, """+numberofpredict+""")]
# future_df = pd.DataFrame(index=future_dates, columns=dataFrame_pandas.columns)
# final_df = pd.concat([dataFrame_pandas, future_df])
# stepwise_model = auto_arima(dataFrame_pandas, start_p="""+start_p+""", start_q="""+start_q+""",
# test = "adf",
# trend = "ct",
# max_p="""+max_p+""", max_q="""+max_q+""", max_d="""+max_d+""",
# m="""+m+""",
# d="""+d+""",
# seasonal="""+seasonal+""",
# start_P="""+start_P+""",
# D="""+D+""",
# start_Q="""+start_Q+""",
# max_P="""+max_P+""",
# max_D="""+max_D+""",
# max_Q="""+max_Q+""",
# max_order="""+max_order+""",
# stationary="""+stationary+""",
# solver='"""+solver+"""',
# trace="""+trace+""",
# error_action='"""+error_action+"""',
# suppress_warnings="""+suppress_warnings+""",
# scoring='"""+scoring+"""',
# stepwise="""+stepwise+""")
# stepwise_model.fit(dataFrame_pandas)
# final_df["forecast"] = stepwise_model.predict_in_sample(start=1, end=len(final_df))
# final_df = final_df.reset_index().rename(columns = {"index":'"""+date_col+"""'})
# final_df.fillna(np.nan, inplace=True)
# final_df['date']=final_df['date'].astype(str)
# arima_"""+ table_name +""" = spark.createDataFrame(final_df)
# arima_"""+ table_name +""".createOrReplaceTempView('arima_"""+ table_name +"""')
# final_df.name = 'arima_"""+table_name+"""'
# print("<b>Dataframe name : "), final_df.name+"</b><br>"
# # arima_"""+ table_name +""".show()
# df_ = arima_"""+ table_name +""".limit(100).toPandas()
# df_.to_html(buf=str_io, classes='table dataframe', index=False)
# html_str = str_io.getvalue()
business = """+business+"""
def to_isoformat(current_format):
year, month, day = current_format.split('"""+splits+"""')
dt = datetime.date(int(year), int(month), int(day))
return dt.isoformat()
#separate which columns will be used, in the format (date (date series), column to forecast)
def clean(row):
dt, passenger = row.split(",")
isodate = to_isoformat(dt.replace("''", ''))
return [isodate, float(passenger)]
#build the RDD using the two functions above
spark_df = """+table_name+""".rdd.map(lambda x: x)
cols1 = ['"""+date_col +"""', '"""+ timeseries_col +"""']
cols2 = [x for x in """+ table_name +""".columns if x in cols1]
rdd_airpass = spark_df.map(lambda x: x[cols2[0]]+","+str(x[cols2[1]]))
data = rdd_airpass.map(clean)
#convert the RDD into a Spark DataFrame
dataFrame = spark.createDataFrame(data, ['"""+date_col+"""', '"""+timeseries_col+"""'])
#convert the DataFrame into a pandas DataFrame
dataFrame_pandas = dataFrame.toPandas()
xlen = len(dataFrame_pandas)
#convert the dateCol column to a timestamp
dataFrame_pandas['"""+date_col+"""'] = pd.to_datetime(dataFrame_pandas['"""+date_col+"""'])
#set the dateCol column as the index of the pandas DataFrame
dataFrame_pandas = dataFrame_pandas.set_index('"""+date_col+"""')
#build the DataFrame to be forecast, with the time range determined by the parameters above
future_dates = [dataFrame_pandas.index[-1] + DateOffset(**{'"""+by+"""':x}) for x in range(1, """+numberofpredict+""")]
future_df = pd.DataFrame(index=future_dates, columns=dataFrame_pandas.columns)
#If the time series analysis should only use weekdays and ignore weekends
if business:
#map which dates are weekdays, and drop duplicated dates
future_df.index = future_df.index.map(lambda x: x + BDay())
future_df = future_df.loc[~future_df.index.duplicated(keep='first')]
#concatenate the forecast DataFrame with the pandas DataFrame
final_df = pd.concat([dataFrame_pandas, future_df])
#split into train and test for model prediction
train, test = final_df[:xlen], final_df[xlen:]
#Set the parameters for the auto_arima model
stepwise_model = auto_arima(dataFrame_pandas, start_p="""+start_p+""", start_q="""+start_q+""",
test = "adf",
trend = "ct",
max_p="""+max_p+""", max_q="""+max_q+""", max_d="""+max_d+""",
m="""+m+""",
d="""+d+""",
seasonal="""+seasonal+""",
start_P="""+start_P+""",
D="""+D+""",
start_Q="""+start_Q+""",
max_P="""+max_P+""",
max_D="""+max_D+""",
max_Q="""+max_Q+""",
max_order="""+max_order+""",
stationary="""+stationary+""",
solver='"""+solver+"""',
trace="""+trace+""",
error_action='"""+error_action+"""',
suppress_warnings="""+suppress_warnings+""",
scoring='"""+scoring+"""',
stepwise="""+stepwise+""")
#fit the ARIMA model using the train DataFrame
stepwise_model.fit(train)
#extract the predicted values and confidence intervals from the fitted model
preds, conf_int = stepwise_model.predict(n_periods=test.shape[0], return_conf_int=True)
#insert the forecast results into the final_df DataFrame
final_df["forecast"] = stepwise_model.predict_in_sample(start=1, end=len(final_df))
#build a DataFrame for the confidence intervals of the ARIMA predictions
max_min = pd.DataFrame(conf_int, columns=["std-", 'std+'], index=future_df.index)
#concatenate the final_df DataFrame with the max_min DataFrame
final_df = pd.concat([final_df, max_min], axis=1)
final_df = final_df.reset_index().rename(columns = {"index":'"""+date_col+"""'})
final_df['"""+date_col+"""'] = final_df['"""+date_col+"""'].astype('str')
final_df.fillna(np.nan, inplace=True)
# df_sparkfinal = spark.createDataFrame(final_df)
# return df_sparkfinal
#If the business parameter of the time series analysis is False
else:
#concatenate the forecast DataFrame with the pandas DataFrame
final_df = pd.concat([dataFrame_pandas, future_df])
#split into train and test for model prediction
train, test = final_df[:xlen], final_df[xlen:]
#Set the parameters for the auto_arima model
stepwise_model = auto_arima(dataFrame_pandas, start_p="""+start_p+""", start_q="""+start_q+""",
test = "adf",
trend = "ct",
max_p="""+max_p+""", max_q="""+max_q+""", max_d="""+max_d+""",
m="""+m+""",
d="""+d+""",
seasonal="""+seasonal+""",
start_P="""+start_P+""",
D="""+D+""",
start_Q="""+start_Q+""",
max_P="""+max_P+""",
max_D="""+max_D+""",
max_Q="""+max_Q+""",
max_order="""+max_order+""",
stationary="""+stationary+""",
solver='"""+solver+"""',
trace="""+trace+""",
error_action='"""+error_action+"""',
suppress_warnings="""+suppress_warnings+""",
scoring='"""+scoring+"""',
stepwise="""+stepwise+""")
#fit the ARIMA model using the train DataFrame
stepwise_model.fit(train)
#extract the predicted values and confidence intervals from the fitted model
preds, conf_int = stepwise_model.predict(n_periods=test.shape[0], return_conf_int=True)
#insert the forecast results into the final_df DataFrame
final_df["forecast"] = stepwise_model.predict_in_sample(start=1, end=len(final_df))
#build a DataFrame for the confidence intervals of the ARIMA predictions
max_min = pd.DataFrame(conf_int, columns=["std-", 'std+'], index=future_df.index)
#concatenate the final_df DataFrame with the max_min DataFrame
final_df = pd.concat([final_df, max_min], axis=1)
final_df = final_df.reset_index().rename(columns = {"index":'"""+date_col+"""'})
final_df['"""+date_col+"""'] = final_df['"""+date_col+"""'].astype('str')
final_df.fillna(np.nan, inplace=True)
# df_sparkfinal = spark.createDataFrame(final_df)
# return df_sparkfinal
arima_"""+ table_name +""" = spark.createDataFrame(final_df)
arima_"""+ table_name +""".createOrReplaceTempView('arima_"""+ table_name +"""')
final_df.name = 'arima_"""+table_name+"""'
print("<b>Dataframe name : "), final_df.name+"</b><br>"
print('<br><b>Number of rows: </b>')
print('<b>'+str(arima_"""+table_name+""".count())+'</b>')
# arima_"""+ table_name +""".show()
df_ = arima_"""+ table_name +""".limit(100).toPandas()
df_.to_html(buf=str_io, classes='table dataframe', index=False)
html_str = str_io.getvalue()
print(html_str)
# import sys
# print(sys.version)
# dt = datetime.date(int(2010), int(5), int(25))
# print(dt.isoformat())
# data.take(5)
# df_sparkfinal.show()
""")
return code
def line_chart_visual(self, table_name, x, y):
y_str = ",".join(y)
code = textwrap.dedent("""
import json
import io, pandas as pd
str_io = io.StringIO()
dataFrame_pandas = """+table_name+""".toPandas()
dfx = dataFrame_pandas['"""+x+"""'].tolist()
# # dfx = dfx.index()
# # new_dfx
# Source file: overwatch_api/core.py (repo: anthok/overwatch-api)
import asyncio
import aiohttp
import async_timeout
from overwatch_api.constants import *
from overwatch_api.exceptions import *
"""The async interface to the OWAPI (https://github.com/SunDwarf/OWAPI) api."""
# TODO Handle being ratelimited, asyncio.sleep?
class AsyncOWAPI(object):
"""The async client objeWct to use when you want to use the OWAPI api.
All requests throw ConnectionError if they can't connect or similar problem.
Other exceptions should be reported as bugs if they're raised."""
def __init__(self, default_platform: str = PC, server_url: str = "https://owapi.net", *,
handle_ratelimit: bool = True, max_tries: int = 3, request_timeout: float = 5):
"""Creates and sets up the client object.
default_platform is one of PC, PLAYSTATION, or XBOX, from constants.py.
It specifies what platform the api should default to search on if no platform parameter is supplied to a method.
server_url is the url (or aiohttp compatible address) to the OWAPI server.
handle_ratelimit specifies whether the api should retry when a request gets ratelimited.
max_tries is the maximum number of tries to retry when a request gets ratelimited, only applicable if handle_ratelimit is True.
request_timeout is the timeout to use for each individual request to the API in seconds.
"""
# Stuff the user should have control over
self.server_url = server_url
self.default_platform = default_platform
# Optional client parameters
# If ratelimiting should be handled
self.default_handle_ratelimit = handle_ratelimit
# The max number of tries to do until a Ratelimit exception is raised
self.default_max_tries = max_tries
# The timeout to use on each request
self.default_request_timeout = request_timeout
# If you're an advanced user you maybe, sometime, idk, probably, might want to control these
self._api_version = 3
self._api_urlpath = "/api/v{0}/u/".format(self._api_version)
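# Hedged illustration (not in the original file): constructing a client that
# defaults to PlayStation lookups and retries ratelimited requests could look
# like
#   client = AsyncOWAPI(default_platform=PLAYSTATION, handle_ratelimit=True,
#                       max_tries=5, request_timeout=10)
# where PLAYSTATION comes from constants.py.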
def _uses_aiohttp_session(func):
"""This is a decorator that creates an async with statement around a function, and makes sure that a _session argument is always passed.
Only usable on async functions of course.
The _session argument is (supposed to be) an aiohttp.ClientSession instance in all functions that this decorator has been used on.
This is used to make sure that all session objects are properly entered and exited, or that they are passed into a function properly.
This adds a session keyword argument to the method signature, and that session will be used as _session if it is not None."""
# The function the decorator returns
async def decorated_func(*args, session=None, **kwargs):
if session is not None:
# There is a session passed
return await func(*args, _session=session, **kwargs)
else:
# The session argument wasn't passed, so we create our own
async with aiohttp.ClientSession() as new_session:
return await func(*args, _session=new_session, **kwargs)
# We return the decorated func
return decorated_func
def _add_request_parameters(func):
"""Adds the ratelimit and request timeout parameters to a function."""
# The function the decorator returns
async def decorated_func(*args, handle_ratelimit=None, max_tries=None, request_timeout=None, **kwargs):
return await func(*args, handle_ratelimit=handle_ratelimit, max_tries=max_tries,
request_timeout=request_timeout, **kwargs)
# We return the decorated func
return decorated_func
@_uses_aiohttp_session
@_add_request_parameters
async def get_profile(self, battletag: str, regions=(EUROPE, KOREA, AMERICAS, CHINA, JAPAN, ANY),
platform=None, _session=None, handle_ratelimit=None, max_tries=None, request_timeout=None):
"""Returns a dictionary where the keys are the regions that there exists an account for, with corresponding values (stats, achievement, heroes).
The regions argument is an iterable of the regions (see constants.py) that the user wants results for (default all regions). If no matching accounts are found for the platform and regions, this returns an empty dict.
The platform argument is one of the three platforms in constants.py, and only results from that platform will be returned; the default is the default of the API instance (see __init__)."""
if platform is None:
platform = self.default_platform
try:
blob_dict = await self._base_request(battletag, "blob", _session, platform=platform,
handle_ratelimit=handle_ratelimit, max_tries=max_tries,
request_timeout=request_timeout)
except ProfileNotFoundError as e:
# The battletag doesn't exist
blob_dict = {}
existing_regions = {key: val for key, val in blob_dict.items() if ((val is not None) and (key != "_request"))}
return {key: val for key, val in existing_regions.items() if key in regions}
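# Hedged usage sketch (not in the original file): the client is asynchronous,
# so a profile lookup is awaited inside an event loop, e.g.
#   async def main():
#       client = AsyncOWAPI()
#       profiles = await client.get_profile("SomePlayer#1234", platform=PC)
#       print(profiles.keys())  # regions with a matching account
#   asyncio.run(main())
# "SomePlayer#1234" is a placeholder battletag; PC comes from constants.py.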
@_uses_aiohttp_session
@_add_request_parameters
async def get_stats(self, battletag: str, regions=(EUROPE, KOREA, AMERICAS, CHINA, JAPAN, ANY),
platform=None, _session=None, handle_ratelimit=None, max_tries=None, request_timeout=None):
"""Returns the stats for the profiles on the specified regions and platform. The format for regions without a matching user, the format is the same as get_profile.
The stats are returned in a dictionary with a similar format to what https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats specifies."""
if platform is None:
platform = self.default_platform
try:
blob_dict = await self._base_request(battletag, "stats", _session, platform=platform,
handle_ratelimit=handle_ratelimit, max_tries=max_tries,
request_timeout=request_timeout)
except ProfileNotFoundError as e:
# The battletag doesn't exist
blob_dict = {}
existing_regions = {key: val for key, val in blob_dict.items() if ((val is not None) and (key != "_request"))}
return {key: [inner_val for inner_key, inner_val in val.items() if inner_key == "stats"][0] for key, val in
existing_regions.items() if key in regions}
@_uses_aiohttp_session
@_add_request_parameters
async def get_achievements(self, battletag: str, regions=(EUROPE, KOREA, AMERICAS, CHINA, JAPAN, ANY),
platform=None, _session=None, handle_ratelimit=None, max_tries=None,
request_timeout=None):
"""Returns the achievements for the profiles on the specified regions and platform. Does not return keys for regions that don't have a matching user, the format is the same as get_profile.
The achievements are returned in a dictionary with a similar format to what https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagachievements specifies."""
if platform is None:
platform = self.default_platform
try:
blob_dict = await self._base_request(battletag, "achievements", _session, platform=platform,
handle_ratelimit=handle_ratelimit, max_tries=max_tries,
request_timeout=request_timeout)
except ProfileNotFoundError as e:
# The battletag doesn't exist
blob_dict = {}
existing_regions = {key: val for key, val in blob_dict.items() if ((val is not None) and (key != "_request"))}
return {key: [inner_val for inner_key, inner_val in val.items() if inner_key == "achievements"][0] for key, val
in existing_regions.items() if key in regions}
@_uses_aiohttp_session
@_add_request_parameters
async def get_hero_stats(self, battletag: str, regions=(EUROPE, KOREA, AMERICAS, CHINA, JAPAN, ANY),
platform=None, _session=None, handle_ratelimit=None, max_tries=None, request_timeout=None):
"""Returns the hero stats for the profiles on the specified regions and platform. Does not return keys for regions that don't have a matching user, the format is the same as get_profile.
The hero stats are returned in a dictionary with a similar format to what https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagheroes specifies."""
if platform is None:
platform = self.default_platform
try:
blob_dict = await self._base_request(battletag, "heroes", _session, platform=platform,
handle_ratelimit=handle_ratelimit, max_tries=max_tries,
request_timeout=request_timeout)
except ProfileNotFoundError as e:
# The battletag doesn't exist
blob_dict = {}
existing_regions = {key: val for key, val in blob_dict.items() if ((val is not None) and (key != "_request"))}
return {key: [inner_val for inner_key, inner_val in val.items() if inner_key == "heroes"][0] for key, val in
existing_regions.items() if key in regions}
@staticmethod
def sanitize_battletag(battle_tag: str) -> str:
"""In the api, battletags' #:s are replaced with dashes, this method does that."""
if "#" in battle_tag:
battle_tag = battle_tag.replace("#", "-")
return battle_tag
async def _base_request(self, battle_tag: str, endpoint_name: str, session: aiohttp.ClientSession, *, platform=None,
handle_ratelimit=None, max_tries=None, request_timeout=None):
"""Does a request to some endpoint. This is also where ratelimit logic is handled."""
# We check the different optional arguments, and if they're not passed (are none) we set them to the default for the client object
if platform is None:
platform = self.default_platform
if handle_ratelimit is None:
handle_ratelimit = self.default_handle_ratelimit
if max_tries is None:
max_tries = self.default_max_tries
if request_timeout is None:
request_timeout = self.default_request_timeout
# The battletag with #s removed
san_battle_tag = self.sanitize_battletag(battle_tag)
# The ratelimit logic
for _ in range(max_tries):
# We execute a request
try:
resp_json, status = await self._async_get(
session,
self.server_url + self._api_urlpath + "{battle_tag}/{endpoint}".format(
battle_tag=san_battle_tag,
endpoint=endpoint_name
),
params={"platform": platform},
# Passed to _async_get and indicates what platform we're searching on
headers={"User-Agent": "overwatch_python_api"},
# According to https://github.com/SunDwarf/OWAPI/blob/master/owapi/v3/v3_util.py#L18 we have to customise our User-Agent, so we do
_async_timeout_seconds=request_timeout
)
if status == 429 and resp_json["msg"] == "you are being ratelimited":
raise RatelimitError
except RatelimitError as e:
# This excepts both RatelimitErrors and TimeoutErrors: RatelimitError when the server reports a ratelimit, TimeoutError when the connection is not completed within the timeout
# We are ratelimited, so we check if we handle ratelimiting logic
# If so, we wait and then execute the next iteration of the loop
if handle_ratelimit:
# We wait to remedy ratelimiting, and we wait a bit more than the response says we should
await
import cv2
import numpy as np
from numpy import linalg as npla
import scipy as sp
def color_transfer_sot(src,trg, steps=10, batch_size=5, reg_sigmaXY=16.0, reg_sigmaV=5.0):
"""
Color Transform via Sliced Optimal Transfer
ported by @iperov from https://github.com/dcoeurjo/OTColorTransfer
src - any float range any channel image
trg - any float range any channel image, same shape as src
steps - number of solver steps
batch_size - solver batch size
reg_sigmaXY - apply regularization and sigmaXY of filter, otherwise set to 0.0
reg_sigmaV - sigmaV of filter
return value - clip it manually
"""
if not np.issubdtype(src.dtype, np.floating):
raise ValueError("src value must be float")
if not np.issubdtype(trg.dtype, np.floating):
raise ValueError("trg value must be float")
if len(src.shape) != 3:
raise ValueError("src shape must have rank 3 (h,w,c)")
if src.shape != trg.shape:
raise ValueError("src and trg shapes must be equal")
src_dtype = src.dtype
h,w,c = src.shape
new_src = src.copy()
advect = np.empty ( (h*w,c), dtype=src_dtype )
for step in range (steps):
advect.fill(0)
for batch in range (batch_size):
dir = np.random.normal(size=c).astype(src_dtype)
dir /= npla.norm(dir)
projsource = np.sum( new_src*dir, axis=-1).reshape ((h*w))
projtarget = np.sum( trg*dir, axis=-1).reshape ((h*w))
idSource = np.argsort (projsource)
idTarget = np.argsort (projtarget)
a = projtarget[idTarget]-projsource[idSource]
for i_c in range(c):
advect[idSource,i_c] += a * dir[i_c]
new_src += advect.reshape( (h,w,c) ) / batch_size
if reg_sigmaXY != 0.0:
src_diff = new_src-src
src_diff_filt = cv2.bilateralFilter (src_diff, 0, reg_sigmaV, reg_sigmaXY )
if len(src_diff_filt.shape) == 2:
src_diff_filt = src_diff_filt[...,None]
new_src = src + src_diff_filt
return new_src
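# Hedged usage sketch (added for illustration, not part of the original file):
# shows how color_transfer_sot might be called on two same-shape float images.
# The helper name and random inputs below are assumptions for demonstration.
def _example_color_transfer_sot():
    """Minimal sketch: push the colour statistics of trg onto src."""
    rng = np.random.RandomState(0)
    src = rng.rand(64, 64, 3).astype(np.float32)  # any float-range image
    trg = rng.rand(64, 64, 3).astype(np.float32)  # same shape as src
    out = color_transfer_sot(src, trg, steps=10, batch_size=5)
    # the docstring above says the return value should be clipped manually
    return np.clip(out, 0.0, 1.0)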
def color_transfer_mkl(x0, x1):
eps = np.finfo(float).eps
h,w,c = x0.shape
h1,w1,c1 = x1.shape
x0 = x0.reshape ( (h*w,c) )
x1 = x1.reshape ( (h1*w1,c1) )
a = np.cov(x0.T)
b = np.cov(x1.T)
Da2, Ua = np.linalg.eig(a)
Da = np.diag(np.sqrt(Da2.clip(eps, None)))
C = np.dot(np.dot(np.dot(np.dot(Da, Ua.T), b), Ua), Da)
Dc2, Uc = np.linalg.eig(C)
Dc = np.diag(np.sqrt(Dc2.clip(eps, None)))
Da_inv = np.diag(1./(np.diag(Da)))
t = np.dot(np.dot(np.dot(np.dot(np.dot(np.dot(Ua, Da_inv), Uc), Dc), Uc.T), Da_inv), Ua.T)
mx0 = np.mean(x0, axis=0)
mx1 = np.mean(x1, axis=0)
result = np.dot(x0-mx0, t) + mx1
return np.clip ( result.reshape ( (h,w,c) ).astype(x0.dtype), 0, 1)
def color_transfer_idt(i0, i1, bins=256, n_rot=20):
import scipy.stats
relaxation = 1 / n_rot
h,w,c = i0.shape
h1,w1,c1 = i1.shape
i0 = i0.reshape ( (h*w,c) )
i1 = i1.reshape ( (h1*w1,c1) )
n_dims = c
d0 = i0.T
d1 = i1.T
for i in range(n_rot):
r = sp.stats.special_ortho_group.rvs(n_dims).astype(np.float32)
d0r = np.dot(r, d0)
d1r = np.dot(r, d1)
d_r = np.empty_like(d0)
for j in range(n_dims):
lo = min(d0r[j].min(), d1r[j].min())
hi = max(d0r[j].max(), d1r[j].max())
p0r, edges = np.histogram(d0r[j], bins=bins, range=[lo, hi])
p1r, _ = np.histogram(d1r[j], bins=bins, range=[lo, hi])
cp0r = p0r.cumsum().astype(np.float32)
cp0r /= cp0r[-1]
cp1r = p1r.cumsum().astype(np.float32)
cp1r /= cp1r[-1]
f = np.interp(cp0r, cp1r, edges[1:])
d_r[j] = np.interp(d0r[j], edges[1:], f, left=0, right=bins)
d0 = relaxation * np.linalg.solve(r, (d_r - d0r)) + d0
return np.clip ( d0.T.reshape ( (h,w,c) ).astype(i0.dtype) , 0, 1)
def reinhard_color_transfer(target, source, clip=False, preserve_paper=False, source_mask=None, target_mask=None):
"""
Transfers the color distribution from the source to the target
image using the mean and standard deviations of the L*a*b*
color space.
This implementation is (loosely) based on the "Color Transfer
between Images" paper by Reinhard et al., 2001.
Parameters:
-------
source: NumPy array
OpenCV image in BGR color space (the source image)
target: NumPy array
OpenCV image in BGR color space (the target image)
clip: Should components of L*a*b* image be scaled by np.clip before
converting back to BGR color space?
If False then components will be min-max scaled appropriately.
Clipping will keep target image brightness truer to the input.
Scaling will adjust image brightness to avoid washed out portions
in the resulting color transfer that can be caused by clipping.
preserve_paper: Should color transfer strictly follow methodology
laid out in the original paper? The method does not always produce
aesthetically pleasing results.
If False then L*a*b* components will be scaled using the reciprocal of
the scaling factor proposed in the paper. This method seems to produce
more consistently aesthetically pleasing results
Returns:
-------
transfer: NumPy array
OpenCV image (w, h, 3) NumPy array (uint8)
"""
# convert the images from the RGB to L*ab* color space, being
# sure to utilizing the floating point data type (note: OpenCV
# expects floats to be 32-bit, so use that instead of 64-bit)
source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype(np.float32)
target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype(np.float32)
# compute color statistics for the source and target images
src_input = source if source_mask is None else source*source_mask
tgt_input = target if target_mask is None else target*target_mask
(lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = lab_image_stats(src_input)
(lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = lab_image_stats(tgt_input)
# subtract the means from the target image
(l, a, b) = cv2.split(target)
l -= lMeanTar
a -= aMeanTar
b -= bMeanTar
if preserve_paper:
# scale by the standard deviations using paper proposed factor
l = (lStdTar / lStdSrc) * l
a = (aStdTar / aStdSrc) * a
b = (bStdTar / bStdSrc) * b
else:
# scale by the standard deviations using reciprocal of paper proposed factor
l = (lStdSrc / lStdTar) * l
a = (aStdSrc / aStdTar) * a
b = (bStdSrc / bStdTar) * b
# add in the source mean
l += lMeanSrc
a += aMeanSrc
b += bMeanSrc
# clip/scale the pixel intensities to [0, 255] if they fall
# outside this range
l = _scale_array(l, clip=clip)
a = _scale_array(a, clip=clip)
b = _scale_array(b, clip=clip)
# merge the channels together and convert back to the RGB color
# space, being sure to utilize the 8-bit unsigned integer data
# type
transfer = cv2.merge([l, a, b])
transfer = cv2.cvtColor(transfer.astype(np.uint8), cv2.COLOR_LAB2BGR)
# return the color transferred image
return transfer
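# Hedged usage sketch (illustration only; the file paths are placeholders, not
# part of the original repository): reinhard_color_transfer expects uint8 BGR
# images such as those returned by cv2.imread.
def _example_reinhard_color_transfer(target_path="target.jpg",
                                     source_path="source.jpg"):
    """Minimal sketch: recolour `target` using the palette of `source`."""
    target = cv2.imread(target_path)  # BGR uint8
    source = cv2.imread(source_path)  # BGR uint8
    return reinhard_color_transfer(target, source, clip=True,
                                   preserve_paper=False)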
def linear_color_transfer(target_img, source_img, mode='pca', eps=1e-5):
'''
Matches the colour distribution of the target image to that of the source image
using a linear transform.
Images are expected to be of form (w,h,c) and float in [0,1].
Modes are chol, pca or sym for different choices of basis.
'''
mu_t = target_img.mean(0).mean(0)
t = target_img - mu_t
t = t.transpose(2,0,1).reshape( t.shape[-1],-1)
Ct = t.dot(t.T) / t.shape[1] + eps * np.eye(t.shape[0])
mu_s = source_img.mean(0).mean(0)
s = source_img - mu_s
s = s.transpose(2,0,1).reshape( s.shape[-1],-1)
Cs = s.dot(s.T) / s.shape[1] + eps * np.eye(s.shape[0])
if mode == 'chol':
chol_t = np.linalg.cholesky(Ct)
chol_s = np.linalg.cholesky(Cs)
ts = chol_s.dot(np.linalg.inv(chol_t)).dot(t)
if mode == 'pca':
eva_t, eve_t = np.linalg.eigh(Ct)
Qt = eve_t.dot(np.sqrt(np.diag(eva_t))).dot(eve_t.T)
eva_s, eve_s = np.linalg.eigh(Cs)
Qs = eve_s.dot(np.sqrt(np.diag(eva_s))).dot(eve_s.T)
ts = Qs.dot(np.linalg.inv(Qt)).dot(t)
if mode == 'sym':
eva_t, eve_t = np.linalg.eigh(Ct)
Qt = eve_t.dot(np.sqrt(np.diag(eva_t))).dot(eve_t.T)
Qt_Cs_Qt = Qt.dot(Cs).dot(Qt)
eva_QtCsQt, eve_QtCsQt = np.linalg.eigh(Qt_Cs_Qt)
QtCsQt = eve_QtCsQt.dot(np.sqrt(np.diag(eva_QtCsQt))).dot(eve_QtCsQt.T)
ts = np.linalg.inv(Qt).dot(QtCsQt).dot(np.linalg.inv(Qt)).dot(t)
matched_img = ts.reshape(*target_img.transpose(2,0,1).shape).transpose(1,2,0)
matched_img += mu_s
matched_img[matched_img>1] = 1
matched_img[matched_img<0] = 0
return np.clip(matched_img.astype(source_img.dtype), 0, 1)
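# Hedged usage sketch (illustration only): linear_color_transfer expects float
# images of shape (h, w, c) in [0, 1]; 'pca' is the default basis choice.
def _example_linear_color_transfer():
    rng = np.random.RandomState(1)
    target_img = rng.rand(32, 32, 3).astype(np.float32)
    source_img = rng.rand(32, 32, 3).astype(np.float32)
    return linear_color_transfer(target_img, source_img, mode='pca')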
def lab_image_stats(image):
# compute the mean and standard deviation of each channel
(l, a, b) = cv2.split(image)
(lMean, lStd) = (l.mean(), l.std())
(aMean, aStd) = (a.mean(), a.std())
(bMean, bStd) = (b.mean(), b.std())
# return the color statistics
return (lMean, lStd, aMean, aStd, bMean, bStd)
def _scale_array(arr, clip=True):
if clip:
return np.clip(arr, 0, 255)
mn = arr.min()
mx = arr.max()
scale_range = (max([mn, 0]), min([mx, 255]))
if mn < scale_range[0] or mx > scale_range[1]:
return (scale_range[1] - scale_range[0]) * (arr - mn) / (mx - mn) + scale_range[0]
return arr
def channel_hist_match(source, template, hist_match_threshold=255, mask=None):
# Code borrowed from:
# https://stackoverflow.com/questions/32655686/histogram-matching-of-two-images-in-python-2-x
masked_source = source
masked_template = template
if mask is not None:
masked_source = source * mask
masked_template = template * mask
oldshape = source.shape
source = source.ravel()
template = template.ravel()
masked_source = masked_source.ravel()
masked_template = masked_template.ravel()
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles = hist_match_threshold * s_quantiles / s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles = 255 * t_quantiles / t_quantiles[-1]
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
def color_hist_match(src_im, tar_im, hist_match_threshold=255):
h,w,c = src_im.shape
matched_R = channel_hist_match(src_im[:,:,0], tar_im[:,:,0], hist_match_threshold, None)
matched_G = channel_hist_match(src_im[:,:,1], tar_im[:,:,1], hist_match_threshold, None)
matched_B = channel_hist_match(src_im[:,:,2], tar_im[:,:,2], hist_match_threshold, None)
to_stack = (matched_R, matched_G, matched_B)
for i in range(3, c):
to_stack += ( src_im[:,:,i],)
matched = np.stack(to_stack, axis=-1).astype(src_im.dtype)
return matched
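# Hedged usage sketch (illustration only): histogram-match every channel of one
# float image to another image of identical shape.
def _example_color_hist_match():
    rng = np.random.RandomState(2)
    src_im = rng.rand(48, 48, 3).astype(np.float32)
    tar_im = rng.rand(48, 48, 3).astype(np.float32)
    return color_hist_match(src_im, tar_im, hist_match_threshold=255)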
def color_transfer_mix(img_src,img_trg):
img_src = np.clip(img_src*255.0, 0, 255).astype(np.uint8)
img_trg = np.clip(img_trg*255.0, 0, 255).astype(np.uint8)
img_src_lab = cv2.cvtColor(img_src, cv2.COLOR_BGR2LAB)
img_trg_lab = cv2.cvtColor(img_trg, cv2.COLOR_BGR2LAB)
rct_light = np.clip ( linear_color_transfer(img_src_lab[...,0:1].astype(np.float32)/255.0,
img_trg_lab[...,0:1].astype(np.float32)/255.0 )[...,0]*255.0,
0, 255).astype(np.uint8)
img_src_lab[...,0] = (np.ones_like (rct_light)*100).astype(np.uint8)
img_src_lab = cv2.cvtColor(img_src_lab, cv2.COLOR_LAB2BGR)
img_trg_lab[...,0] = (np.ones_like (rct_light)*100).astype(np.uint8)
img_trg_lab = cv2.cvtColor(img_trg_lab, cv2.COLOR_LAB2BGR)
img_rct = color_transfer_sot( img_src_lab.astype(np.float32), img_trg_lab.astype(np.float32) )
img_rct = np.clip(img_rct, 0, 255).astype(np.uint8)
img_rct = cv2.cvtColor(img_rct, cv2.COLOR_BGR2LAB)
img_rct[...,0] = rct_light
img_rct = cv2.cvtColor(img_rct, cv2.COLOR_LAB2BGR)
return (img_rct / 255.0).astype(np.float32)
def color_transfer(ct_mode, img_src, img_trg):
"""
color transfer
abc('st;dc2')
abc('w %s_temp_initial.aig'%f_name)
ni = n_pis() + n_latches()
res = 1
abc('st;if -a -K %d'%k) # to get plain direct map
if n_nodes() > n_initial:
abc('r %s_temp_initial.blif'%f_name)
res = 1
#plain
n_plain = n_nodes()
## print nl()
abc('write_blif %s_temp_plain.blif'%f_name)
## print 'wrote blif'
#clp
print nl()
n_clp_lut = n_clp = 1000000
## if n_nodes() < 250:
if True:
## run_command('st;clp -B 10000')
run_command('st;clp')
nn = n_nodes()
run_command('st')
if not nn == n_nodes(): #st did not work
run_command('if -a -K %d'%k)
print nl()
abc('write_blif %s_temp_clp.blif'%f_name)
n_clp = n_nodes()
abc('r %s_temp_initial.blif'%f_name)
## print 'read blif'
## if n_nodes() < 250:
if True:
## run_command('st;clp -B 10000')
run_command('st;clp')
nn = n_nodes()
run_command('st')
if not nn == n_nodes(): #st did not work
run_command('lutmin -K %d'%k)
## print nl()
abc('write_blif %s_temp_clp.blif'%f_name)
## n_clp = n_nodes()
print nl()
## print nl()
if n_plain <= min(n_clp,n_clp_lut):
abc('r %s_temp_plain.blif'%f_name)
res = 1
elif n_clp < n_clp_lut:
abc('r %s_temp_clp.blif'%f_name)
res = 1
else:
abc('r %s_temp_clp_lut.blif'%f_name)
res = 1
## print nl()
assert check_blif(),'inequivalence'
return res
def snap_bestk(k):
abc('write_blif %s_temp.blif'%f_name)
unsave_bestk(k)
snap()
abc('read_blif %s_temp.blif'%f_name)
def cec_it():
""" done because &r changes the names. Can't use -n because rfraig_store reorders pis and pos."""
abc('write_blif %s_temp.blif'%f_name)
abc('&r -s %s.aig;&put'%f_name)
run_command('cec %s_temp.blif'%f_name)
abc('r %s_temp.blif'%f_name)
def save_bestk(current_best,k):
## if os.access('%s_best%d.blif'%(f_name,k),os.R_OK):
## res = get_bestk_value(k)
## else:
""" saves the best, returns bestk and if not best, leaves blif unchanged"""
res = current_best
if n_nodes() < res:
res = n_nodes()
abc('write_blif %s_best%d.blif'%(f_name,k))
print '\n*** best%d for %s *** = %d\n'%(k,f_name,res)
assert check_blif(),'inequivalence'
return res
## unsave_bestk(k)
def unsave_bestk(k):
run_command('read_blif %s_best%d.blif'%(f_name,k))
return n_nodes()
def snap():
## abc('fraig;fraig_store')
run_command('&get;&st;&put;fraig_store')
def fxch_store(k=6):
run_command('fraig_store;sop;fxch;eliminate;fraig_store;eliminate -V 5;fraig_store;fraig_restore;if -a -K %d'%k)
## ps()
run_command('&get;&if -a -K %d;&satlut;&put'%k)
## ps()
def fx_iter(k=6):
best = n_nodes()
run_command('write_blif %s_best.blif'%f_name)
n=0
while True:
fxch_store(k)
## print 'done',
print n_nodes()
## abc('if -am -K %d'%k)
if n_nodes()< best:
assert check_blif(),'inequivalence'
best = n_nodes()
n = 0
abc('write_blif %s_best.blif'%f_name)
assert check_blif(),'inequivalence'
continue
else:
n = n+1
if n>2:
break
abc('r %s_best.blif'%f_name)
def unsnap(k=6):
## snap()
## abc('fraig_restore')
## map_lut_dch(k)
## assert check_blif(),'inequivalence-1'
print 'starting fraig_restore'
run_command('fraig_restore')
## run_command('ps')
abc('if -a -F 2 -C 11 -K %d'%k)
check_blif()
## run_command('ps')
## assert check_blif(),'inequivalence-1'
## print nl()
abc('mfs2 -a ')
print nl()
## assert check_blif(),'inequivalence-2'
abc('lutpack ')
print nl()
def map_until_conv(k=6,prep=1):
global pairs
## pairs = []
kk = 2*k
## kk = k + 1
# make sure that no residual results are left over.
if True:
if os.access('%s_best%d.blif'%(f_name,k),os.R_OK):
os.remove('%s_best%d.blif'%(f_name,k))
if os.access('%s_best%d.blif'%(f_name,kk),os.R_OK):
os.remove('%s_best%d.blif'%(f_name,kk))
pairs = []
tt = time.time()
if not prep:
map_lut_dch(k)
## ps()
if_a_iter(k)
## ps()
## map_lut_synch(k)
bestk = save_bestk(100000,k)
print 'first map_lut_dch yields: ',
print nl()
else:
print 'preprocess entered'
res = preprocess(k=k,ifprep = prep) #get best of initial, clp, and lutmin versions
print nl()
print 'preprocess done'
bestk = save_bestk(100000,k)
print 'starting mapping iteration with %d-LUTs'%k
## map_lut_dch_iter(k,1,0,1) #initialize with mapping with kk input LUTs
## map_lut_synch_iter(kk) # PUT IN QUICKER TERMINATOR
if_synch_iter(k)
bestk = save_bestk(bestk,k)
print nl(), k, kk
abc('r %s_best%d.blif'%(f_name,k))
print 'iterating map'
map_iter(k) #1
bestk = save_bestk(bestk,k)
bestkk = 1000000
while True:
print '\nPerturbing with %d-Lut'%kk
## map_lut_dch_iter(kk,1,0,1)
## if_synch_iter(k)
map_lut_synch_iter(kk)
bestkk_old = bestkk
bestkk = save_bestk(bestkk,kk)
if bestkk >= bestkk_old:
break
snap() #puts bestkk in fraig store
unsave_bestk(k)
snap()
unsnap(k) #fraig restore and maps into k-luts
bestk_old = bestk
map_iter(k)
bestk = save_bestk(bestk,k)
if bestk >= bestk_old:
break
continue
abc('fraig_restore') #dump what is left in fraig_store
unsave_bestk(k)
## if_a_iter(k)
if_synch_iter(k)
## run_command('&get;&satlut;&put')
## satlut(k)
bestk = save_bestk(bestk,k)
unsave_bestk(k)
print '\nFinal size = ',
print nl()
print 'time for %s = %.02f'%(f_name,(time.time()-tt))
## cec_it()
def get_bestk_value(k=6):
abc('write_blif %s_temp.blif'%f_name)
unsave_bestk(k)
res = n_nodes()
abc('read_blif %s_temp.blif'%f_name)
return res
def map_iter(k=6):
tt = time.time()
## bestk = get_bestk_value(k)
n=0
bestk = unsave_bestk(k)
## bestk = n_nodes()
while True:
print 'perturbing'
abc('if -a -K %d'%(k+2))
snap()
perturb(k) #
snap()
perturb(k)
snap()
old_bestk = bestk
unsnap(k)
abc('if -a -K %d'%k)
## map_lut(k)
bestk = save_bestk(bestk,k)
## print 'iterating map_lut_dch'
## if_a_iter(k)
## if_synch_iter(k)
## print '#nodes after if_synch_iter = %d'%n_nodes()
#### print '#nodes after if_a_iter = %d'%n_nodes()
#### if n_nodes() > 1.2*bestk: rkb-temp
if n_nodes() > 2*bestk:
print 'perturbation too big'
break
map_lut_dch_iter(k)
if n_nodes() > 1.5*bestk:
print 'not enough progress'
break
bestk = save_bestk(bestk,k)
## print bestk
if bestk < old_bestk:
n=0 # keep it up
continue
elif n == 2: #perturb
break
else:
print 'trying fx_iter'
fx_iter(k)
if n_nodes() > 1.5*bestk:
print 'not enough progress'
break
bestk = save_bestk(bestk,k)
## print bestk
if bestk < old_bestk:
n=0 # keep it up
continue
n = n+1
print '%d-perturb'%n
unsave_bestk(k)
def check_star(name='adder'):
run_command('read_blif %s_best_star.blif'%name)
run_command('st;&get')
run_command('&cec %s.aig'%(name))
def check_blif():
return True #disabling
## print 'Checking: ',
abc('write_blif %s_bb.blif'%f_name)
abc('read_blif %s_best_star.blif'%f_name)
abc('st;&get')
run_command('&cec -n %s.aig'%f_name)
## run_command('cec %s_bb.blif %s.aig'%(f_name,f_name))
res = True
if is_sat():
res = False
print '*** NOT OK ***'
## else:
## print'OK',
abc('read_blif %s_bb.blif'%f_name)
return res
def satlut(k=6):
if k <= 6:
run_command('&get;&st;&if -a -K %d;&satlut;&put'%k)
else:
run_command('&get;&st;&if -a -K %d;&put'%k)
## ps()
def map_star(k=6):
global pairs
pairs = []
tt = time.time()
print '\n**************Starting first map_until_conv**: \n'
map_until_conv(k,1) #1 means do preprocess
abc('write_blif %s_best_star.blif'%f_name)
assert check_blif(),'inequivalence'
best = n_nodes()
while True:
jog(2*k)
print '\n*************Starting next map_until_conv**: \n'
map_until_conv(k,0)
if n_nodes() >= best:
break
else:
best = n_nodes()
abc('write_blif %s_best_star.blif'%f_name)
assert check_blif(),'inequivalence'
abc('r %s_best_star.blif'%f_name)
print '\n\n*** SIZE = %d, TIME = %.2f for %s ***'%(n_nodes(),(time.time() - tt),f_name)
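## Hedged usage sketch (not in the original script): after loading a design,
## e.g. abc('r design.aig'), a typical LUT-mapping run is
##   map_star(6)   # iterate 6-LUT mapping until convergence
## which leaves the best mapping in <design>_best_star.blif.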
def decomp_444():
abc('st; dch; if -K 10 -S 444')
abc('write_blif -S 444 %s_temp.blif; r %s_temp.blif'%(f_name,f_name))
def dmitri(k=8):
## abc('w t.aig')
## dc2_iter()
## print 'first iter done: %d'%n_ands()
## abc('dc2rs')
#### dc2_iter()
## print 'second iter done: %d'%n_ands()
## sop_balance(k)
## abc('w t_before.aig')
## run_command('cec -n t.aig')
## speedup_iter(k)
## print 'n_levels after speedup = %d'%n_levels()
## abc('write_blif %s_save.blif'%f_name)
## nn=n_levels()
abc('st;dch; if -g -K %d'%(k))
## print 'n_levels after sop balance = %d'%n_levels()
## if n_levels() > nn:
## run_command('r %s_save.blif'%f_name)
## print 'n_levels = %d'%n_levels()
## print 'final n_levels = %d'%n_levels()
## print 'sop_balance done: ',
## print nl()
## run_command('st;w t_after.aig')
## run_command('cec -n t.aig')
abc('if -G %d '%k)
## print 'after if -G %d: '%k,
## print nl()
## run_command('cec -n t.aig')
abc('cubes')
## print 'after cubes: ',
## print nl()
## run_command('cec -n t.aig')
abc('addbuffs -v')
## print 'after addbuffs: ',
print nl(),
## run_command('cec -n t.aig')
def lut():
dc2_iter()
abc('extract -a')
print nl()
dc2_iter()
## ps()
sop_balance(6)
map_lut_dch()
map_lut()
print nl()
## run_command('ps')
################################## gate level abstraction
"""
Code for using gate-level abstraction (the ,abs and &vta commands below).
"""
def bip_abs(t=100):
""" t is ignored here"""
set_globals()
time = max(1,.1*G_T)
abc('&get;,bmc -vt=%f'%time)
set_max_bmc(get_bmc_depth(False))
c = 2*G_C
f = max(2*max_bmc,20)
b = min(max(10,max_bmc),200)
t1 = x_factor*max(1,2*G_T)
t = max(t1,t)
s = min(max(3,c/30000),10) # stability between 3 and 10
## cmd = '&get;,abs -bob=%d -stable=%d -timeout=%d -vt=%d -depth=%d -dwr=vabs'%(b,s,t,t,f)
cmd = '&get;,abs -timeout=%d -vt=%d -dwr=%s_vabs'%(t,t,f_name)
print 'Running %s'%cmd
## abc(cmd)
run_command(cmd)
## get_bmc_depth()
abc('&w %s_greg.aig'%f_name)
return max_bmc
def check_frames():
abc('read_status vta.status')
return n_bmc_frames()
def vta_abs(t):
""" Do gate-level abstraction for F frames """
r = 100 *(1 - abs_ratio)
## abc('orpos; &get;&vta -dv -A %s_vabs.aig -P 2 -T %d -R %d; &vta_gla;&w %s_gla.aig;&gla_derive; &put; w %s_gabs.aig'%(f_name,t,r,f_name,f_name))
abc('orpos; &get;&vta -dv -A %s_vabs.aig -P 2 -T %d -R %d; &vta_gla;&w %s_gla.aig'%(f_name,t,r,f_name))
## write_file('abs')
def sizeof():
return [n_pis(),n_pos(),n_latches(),n_ands()]
def abstract(ifb=2):
global abs_ratio, abs_time
## print 'ifb = %d'%ifb
if ifb == 0: #new way using vta_abs and no bip
add_trace('abstracta')
return abstracta(False)
elif ifb == 1: #old way using ,abs
assert ifb == ifbip, 'call to abstract has ifb not = global ifbip'
add_trace('abstractb')
return abstractb()
else:
#new way using ,abs -dwr -- (bip_abs)
add_trace('abstracta')
return abstracta(True)
def abstracta(if_bip=True):
"""
if_bip = 0 it uses a new abstraction based on &vta (gate level abstraction) and no bip operations
Right now, if we do not prove it with abstraction in the time allowed,
we abandon abstraction and go on with speculation
if_bip = 1, we use ,abs -dwr
"""
global G_C, G_T, latches_before_abs, x_factor, last_verify_time, x, win_list, j_last, sims
global latches_before_abs, ands_before_abs, pis_before_abs, abs_ratio, abs_time
## n_vabs = 0
latches_before_abs = n_latches()
ands_before_abs = n_ands()
pis_before_abs = n_real_inputs()
tt = time.time()
print 'using abstracta, ',
## print 'if_bip = %d'%if_bip
## latch_ratio = abs_ratio
## t = 100
t = 1000 #temporary
t = abs_time
if if_bip == 0:
t = 1000 #timeout on vta
t = abs_time
tt = time.time()
## if n_pos() > 1 and if_bip == 0:
## abc('orpos')
## print 'POs ORed together, ',
initial_size = sizeof()
abc('w %s_before_abs.aig'%f_name)
# 25 below means that it will quit if #FF+#ANDS > 75% of original
## funcs =
# Source file: arnheim_3/src/main.py (repo: piotrmirowski/arnheim)
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Arnheim 3 - Collage
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>
# DeepMind, 2021-2022
# Command-line version of the Google Colab code available at:
# https://github.com/deepmind/arnheim/blob/main/arnheim_3.ipynb
import configargparse
from datetime import datetime
import glob
import os
import pathlib
import subprocess
import sys
import yaml
import numpy as np
import torch
import clip
import collage
import patches
import video_utils
# Specify (and override) the config.
ap = configargparse.ArgumentParser(default_config_files=["configs/config.yaml"])
ap.add_argument("-c", "--config", required=True, is_config_file=True,
help="Config file")
# Use CUDA?
ap.add_argument("--cuda", dest="cuda", action="store_true")
ap.add_argument("--no-cuda", dest="cuda", action="store_false")
ap.set_defaults(cuda=True)
# Output directory.
ap.add_argument("--output_dir", type=str, default="",
help="Output directory")
# Clean-up?
ap.add_argument("--clean_up", dest='clean_up', help="Remove all working files",
action='store_true')
ap.add_argument("--no-clean_up", dest='clean_up',
help="Remove all working files", action='store_false')
ap.set_defaults(clean_up=False)
# GUI?
ap.add_argument('--gui', dest='gui', action='store_true')
ap.add_argument('--no-gui', dest='gui', action='store_false')
ap.set_defaults(gui=False)
# Video and tracing.
ap.add_argument("--video_steps", type=int, default=0,
help="Number of steps between two video frames")
ap.add_argument("--trace_every", type=int, default=50,
help="Number of steps between two logging traces")
ap.add_argument('--population_video', dest='population_video',
action='store_true', help='Write the video of population?')
ap.add_argument('--no_population_video', dest='population_video',
action='store_false', help='Write the video of population?')
ap.set_defaults(population_video=False)
# Canvas size.
ap.add_argument("--canvas_width", type=int, default=224,
help="Image width for CLIP optimization")
ap.add_argument("--canvas_height", type=int, default=224,
help="Image height for CLIP optimization")
# Render methods.
ap.add_argument("--render_method", type=str, default="transparency",
help="opacity patches overlay each other using combinations of "
"alpha and depth, transparency _adds_ patch colours (black "
"therefore appearing transparent), masked_transparency "
"blends patches using the alpha channel")
ap.add_argument("--num_patches", type=int, default=100,
help="Number of patches")
ap.add_argument("--colour_transformations", type=str, default="RGB space",
help="Can be none, RGB space or HHSV space")
ap.add_argument("--invert_colours", dest="invert_colours", action='store_true',
help="Invert image colours to have a white background?")
ap.add_argument("--no_invert_colours", dest="invert_colours",
action='store_false',
help="Invert image colours to have a white background?")
ap.set_defaults(invert_colours=False)
ap.add_argument("--high_res_multiplier", type=int, default=4,
help="Ratio between large canvas and CLIP-optimized canvas")
ap.add_argument('--save_all_arrays', dest='save_all_arrays',
action='store_true',
help='Save the optimised patch arrays as an npy file?')
ap.add_argument('--no_save_all_arrays', dest='save_all_arrays',
action='store_false',
help='Save the optimised patch arrays as an npy file?')
ap.set_defaults(save_all_arrays=False)
# Affine transform settings.
ap.add_argument("--min_trans", type=float, default=-1.,
help="Translation min for X and Y")
ap.add_argument("--max_trans", type=float, default=1.,
help="Translation max for X and Y")
ap.add_argument("--min_scale", type=float, default=1.,
help="Scale min (> 1 means zoom out and < 1 means zoom in)")
ap.add_argument("--max_scale", type=float, default=2.,
help="Scale max (> 1 means zoom out and < 1 means zoom in)")
ap.add_argument("--min_squeeze", type=float, default=0.5,
help="Min ratio between X and Y scale")
ap.add_argument("--max_squeeze", type=float, default=2.,
help="Max ratio between X and Y scale")
ap.add_argument("--min_shear", type=float, default=-0.2,
help="Min shear deformation")
ap.add_argument("--max_shear", type=float, default=0.2,
help="Max shear deformation")
ap.add_argument("--min_rot_deg", type=float, default=-180, help="Min rotation")
ap.add_argument("--max_rot_deg", type=float, default=180, help="Max rotation")
# Colour transform settings.
ap.add_argument("--min_rgb", type=float, default=-0.2,
help="Min RGB between -1 and 1")
ap.add_argument("--max_rgb", type=float, default=1.0,
help="Max RGB between -1 and 1")
ap.add_argument("--initial_min_rgb", type=float, default=0.5,
help="Initial min RGB between -1 and 1")
ap.add_argument("--initial_max_rgb", type=float, default=1.,
help="Initial max RGB between -1 and 1")
ap.add_argument("--min_hue_deg", type=float, default=0.,
help="Min hue between 0 and 360")
ap.add_argument("--max_hue_deg", type=float, default=360,
help="Max hue (in degrees) between 0 and 360")
ap.add_argument("--min_sat", type=float, default=0,
help="Min saturation between 0 and 1")
ap.add_argument("--max_sat", type=float, default=1,
help="Max saturation between 0 and 1")
ap.add_argument("--min_val", type=float, default=0,
help="Min value between 0 and 1")
ap.add_argument("--max_val", type=float, default=1,
help="Max value between 0 and 1")
# Training settings.
ap.add_argument("--clip_model", type=str, default="ViT-B/32", help="CLIP model")
ap.add_argument("--optim_steps", type=int, default=10000,
help="Number of training steps (between 0 and 20000)")
ap.add_argument("--learning_rate", type=float, default=0.1,
help="Learning rate, typically between 0.05 and 0.3")
ap.add_argument("--use_image_augmentations", dest="use_image_augmentations",
action='store_true',
help="User image augmentations for CLIP evaluation?")
ap.add_argument("--no_use_image_augmentations", dest="use_image_augmentations",
action='store_false',
help="User image augmentations for CLIP evaluation?")
ap.set_defaults(use_image_augmentations=True)
ap.add_argument("--num_augs", type=int, default=4,
help="Number of image augmentations to use in CLIP evaluation")
ap.add_argument("--use_normalized_clip", dest="use_normalized_clip",
action='store_true',
help="Normalize colours for CLIP, generally leave this as True")
ap.add_argument("--no_use_normalized_clip", dest="use_normalized_clip",
action='store_false',
help="Normalize colours for CLIP, generally leave this as True")
ap.set_defaults(use_normalized_clip=False)
ap.add_argument("--gradient_clipping", type=float, default=10.0,
help="Gradient clipping during optimisation")
ap.add_argument("--initial_search_size", type=int, default=1,
help="Initial random search size (1 means no search)")
# Evolution settings.
ap.add_argument("--pop_size", type=int, default=2,
help="For evolution set this to greater than 1")
ap.add_argument("--evolution_frequency", type=int, default= 100,
help="Number of gradient steps between two evolution mutations")
ap.add_argument("--ga_method", type=str, default="Microbial",
help="Microbial: loser of randomly selected pair is replaced "
"by mutated winner. A low selection pressure. Evolutionary "
"Strategies: mutantions of the best individual replace the "
"rest of the population. Much higher selection pressure than "
"Microbial GA")
# Mutation levels.
ap.add_argument("--pos_and_rot_mutation_scale", type=float, default=0.02,
help="Probability of position and rotation mutations")
ap.add_argument("--scale_mutation_scale", type=float, default=0.02,
help="Probability of scale mutations")
ap.add_argument("--distort_mutation_scale", type=float, default=0.02,
help="Probability of distortion mutations")
ap.add_argument("--colour_mutation_scale", type=float, default=0.02,
help="Probability of colour mutations")
ap.add_argument("--patch_mutation_probability", type=float, default=1,
help="Probability of patch mutations")
# Visualisation.
ap.add_argument("--max_multiple_visualizations", type=int, default=5,
help="Limit the number of individuals shown during training")
# Load segmented patches.
ap.add_argument("--patch_set", type=str, default="animals.npy",
help="Name of Numpy file with patches")
ap.add_argument("--patch_repo_root", type=str,
default="https://github.com/deepmind/arnheim/raw/main",
help="URL to patches")
ap.add_argument("--url_to_patch_file", type=str, default="",
help="URL to a patch file")
# Resize image patches to low- and high-res.
ap.add_argument("--fixed_scale_patches", dest="fixed_scale_patches",
action='store_true', help="Use fixed scale patches?")
ap.add_argument("--no_fixed_scale_patches", dest="fixed_scale_patches",
action='store_false', help="Use fixed scale patches?")
ap.set_defaults(fixed_scale_patches=True)
ap.add_argument("--fixed_scale_coeff", type=float, default=0.7,
help="Scale coeff for fixed scale patches")
ap.add_argument("--normalize_patch_brightness",
dest="normalize_patch_brightness", action='store_true',
help="Normalize the brightness of patches?")
ap.add_argument("--no_normalize_patch_brightness",
dest="normalize_patch_brightness", action='store_false',
help="Normalize the brightness of patches?")
ap.set_defaults(normalize_patch_brightness=False)
ap.add_argument("--patch_max_proportion", type=int, default= 5,
help="Max proportion of patches, between 2 and 8")
ap.add_argument("--patch_width_min", type=int, default=16,
help="Min width of patches")
ap.add_argument("--patch_height_min", type=int, default=16,
help="Min height of patches")
# Configure a background, e.g. uploaded picture or solid colour.
ap.add_argument("--background_use", type=str, default="Global",
help="Global: use image across whole image, "
"or Local: reuse same image for every tile")
ap.add_argument("--background_url", type=str, default="",
help="URL for background image")
ap.add_argument("--background_red", type=int, default=0,
help="Red solid colour background (0 to 255)")
ap.add_argument("--background_green", type=int, default=0,
help="Green solid colour background (0 to 255)")
ap.add_argument("--background_blue", type=int, default=0,
help="Blue solid colour background (0 to 255)")
# Configure image prompt and content.
ap.add_argument("--global_prompt", type=str,
default="Roman mosaic of an unswept floor",
help="Global description of the image")
# Tile prompts and tiling settings.
ap.add_argument("--tile_images", action='store_true', dest="tile_images",
help="Tile images?")
ap.add_argument("--no_tile_images", action='store_false', dest="tile_images",
help="Tile images?")
ap.set_defaults(tile_images=False)
ap.add_argument("--tiles_wide", type=int, default=1,
help="Number of width tiles")
ap.add_argument("--tiles_high", type=int, default=1,
help="Number of height tiles")
ap.add_argument("--global_tile_prompt", dest="global_tile_prompt",
action='store_true',
help="Global tile prompt uses global_prompt (previous cell) "
"for *all* tiles (e.g. Roman mosaic of an unswept floor)")
ap.add_argument("--no_global_tile_prompt", dest="global_tile_prompt",
action='store_false',
help="Global tile prompt uses global_prompt (previous cell) "
"for *all* tiles (e.g. Roman mosaic of an unswept floor)")
ap.set_defaults(global_tile_prompt=False)
ap.add_argument("--tile_prompt_string", type=str, default="",
help="Otherwise, specify multiple tile prompts with columns "
"separated by | and / to delineate new row. E.g. multiple "
"prompts for a 3x2 'landscape' image: "
"'sun | clouds | sky / fields | fields | trees'")
# Composition prompts.
ap.add_argument("--compositional_image", dest="compositional_image",
action="store_true",
help="Use additional prompts for different regions")
ap.add_argument("--no_compositional_image", dest="compositional_image",
action="store_false",
help="Do not use additional prompts for different regions")
ap.set_defaults(compositional_image=False)
# Single image (i.e. no tiling) composition prompts:
# specify 3x3 prompts for each composition region.
ap.add_argument("--prompt_x0_y0", type=str,
default="a photorealistic sky with sun", help="Top left prompt")
ap.add_argument("--prompt_x1_y0", type=str,
default="a photorealistic sky", help="Top centre prompt")
ap.add_argument("--prompt_x2_y0", type=str,
default="a photorealistic sky with moon", help="Top right prompt")
ap.add_argument("--prompt_x0_y1", type=str,
default="a photorealistic tree", help="Middle left prompt")
ap.add_argument("--prompt_x1_y1", type=str,
default="a photorealistic tree", help="Centre prompt")
ap.add_argument("--prompt_x2_y1", type=str,
default="a photorealistic tree", help="Middle right prompt")
ap.add_argument("--prompt_x0_y2", type=str,
default="a photorealistic field", help="Bottom left prompt")
ap.add_argument("--prompt_x1_y2", type=str,
default="a photorealistic field", help="Bottom centre prompt")
ap.add_argument("--prompt_x2_y2", type=str,
default="a photorealistic chicken", help="Bottom right prompt")
# Tile composition prompts.
ap.add_argument("--tile_prompt_formating", type=str, default="close-up of {}",
help="This string is formated to autogenerate region prompts "
"from tile prompt. e.g. close-up of {}")
# Get the config.
config = vars(ap.parse_args())
# Adjust config for compositional image.
if config["compositional_image"] == True:
print("Generating compositional image")
config['canvas_width'] *= 2
config['canvas_height'] *= 2
config['high_res_multiplier'] = int(config['high_res_multiplier'] / 2)
print("Using one image augmentations for compositional image creation.")
config["use_image_augmentations"] = True
config["num_augs"] = 1
# Turn off tiling if either boolean is set or width/height set to 1.
if (not config["tile_images"] or
(config["tiles_wide"] == 1 and config["tiles_high"] == 1)):
print("No tiling.")
config["tiles_wide"] = 1
config["tiles_high"] = 1
config["tile_images"] = False
# Default output dir.
if len(config["output_dir"]) == 0:
config["output_dir"] = "output_"
config["output_dir"] += datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S')
config["output_dir"] += '/'
# Print the config.
print("\n")
yaml.dump(config, sys.stdout, default_flow_style=False, allow_unicode=True)
print("\n\n")
# Configure CUDA.
print("Torch version:", torch.__version__)
if config["cuda"]:
cuda_version = [s for s in subprocess.check_output(
["nvcc", "--version"]).decode("UTF-8").split(", ")
if s.startswith("release")][0].split(" ")[-1]
print("CUDA version:", cuda_version)
torch_device = "cuda"
else:
torch_device = "cpu"
config["torch_device"] = torch_device
device = torch.device(torch_device)
# Configure ffmpeg.
os.environ["FFMPEG_BINARY"] = "ffmpeg"
# Initialise and load CLIP model.
print(f"Downloading CLIP model {config['clip_model']}...")
clip_model, _ = clip.load(config["clip_model"], device, jit=False)
# Make output dir.
output_dir = config["output_dir"]
print(f"Storing results in {output_dir}\n")
pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
# Save the config.
config_filename = config["output_dir"] + "config.yaml"
with open(config_filename, "w") as f:
yaml.dump(config, f, default_flow_style=False, allow_unicode=True)
# Images patches.
segmented_data, segmented_data_high_res = patches.get_segmented_data(config)
# Tiling.
if not config["tile_images"] or config["global_tile_prompt"]:
tile_prompts = (
[config["global_prompt"]] * config["tiles_high"] * config["tiles_wide"])
else:
tile_prompts = []
count_y = 0
count_x = 0
for row in config["tile_prompt_string"].split("/"):
# File: src/firebolt/async_db/cursor.py
from __future__ import annotations
import logging
import re
import time
from enum import Enum
from functools import wraps
from inspect import cleandoc
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
)
from aiorwlock import RWLock
from httpx import Response, codes
from firebolt.async_db._types import (
ColType,
Column,
ParameterType,
RawColType,
parse_type,
parse_value,
split_format_sql,
)
from firebolt.async_db.util import is_db_available, is_engine_running
from firebolt.client import AsyncClient
from firebolt.common.exception import (
CursorClosedError,
DataError,
EngineNotRunningError,
FireboltDatabaseError,
OperationalError,
ProgrammingError,
QueryNotRunError,
)
if TYPE_CHECKING:
from firebolt.async_db.connection import Connection
logger = logging.getLogger(__name__)
JSON_OUTPUT_FORMAT = "JSONCompact"
class CursorState(Enum):
NONE = 1
ERROR = 2
DONE = 3
CLOSED = 4
def check_not_closed(func: Callable) -> Callable:
"""(Decorator) ensure cursor is not closed before calling method."""
@wraps(func)
def inner(self: Cursor, *args: Any, **kwargs: Any) -> Any:
if self.closed:
raise CursorClosedError(method_name=func.__name__)
return func(self, *args, **kwargs)
return inner
def check_query_executed(func: Callable) -> Callable:
cleandoc(
"""
(Decorator) ensure that some query has been executed before
calling cursor method.
"""
)
@wraps(func)
def inner(self: Cursor, *args: Any, **kwargs: Any) -> Any:
if self._state == CursorState.NONE:
raise QueryNotRunError(method_name=func.__name__)
return func(self, *args, **kwargs)
return inner
class BaseCursor:
__slots__ = (
"connection",
"_arraysize",
"_client",
"_state",
"_descriptions",
"_rowcount",
"_rows",
"_idx",
"_idx_lock",
"_row_sets",
"_next_set_idx",
)
default_arraysize = 1
def __init__(self, client: AsyncClient, connection: Connection):
self.connection = connection
self._client = client
self._arraysize = self.default_arraysize
# These fields initialized here for type annotations purpose
self._rows: Optional[List[List[RawColType]]] = None
self._descriptions: Optional[List[Column]] = None
self._row_sets: List[
Tuple[int, Optional[List[Column]], Optional[List[List[RawColType]]]]
] = []
self._rowcount = -1
self._idx = 0
self._next_set_idx = 0
self._reset()
def __del__(self) -> None:
self.close()
@property # type: ignore
@check_not_closed
def description(self) -> Optional[List[Column]]:
"""
Provides information about a single result row of a query
Attributes:
* ``name``
* ``type_code``
* ``display_size``
* ``internal_size``
* ``precision``
* ``scale``
* ``null_ok``
"""
return self._descriptions
@property # type: ignore
@check_not_closed
def rowcount(self) -> int:
"""The number of rows produced by last query."""
return self._rowcount
@property
def arraysize(self) -> int:
"""Default number of rows returned by fetchmany."""
return self._arraysize
@arraysize.setter
def arraysize(self, value: int) -> None:
if not isinstance(value, int):
raise TypeError(
"Invalid arraysize value type, expected int,"
f" got {type(value).__name__}"
)
self._arraysize = value
@property
def closed(self) -> bool:
"""True if connection is closed, False otherwise."""
return self._state == CursorState.CLOSED
def close(self) -> None:
"""Terminate an ongoing query (if any) and mark connection as closed."""
self._state = CursorState.CLOSED
# remove typecheck skip after connection is implemented
self.connection._remove_cursor(self) # type: ignore
def _append_query_data(self, response: Response) -> None:
"""Store information about executed query from httpx response."""
row_set: Tuple[
int, Optional[List[Column]], Optional[List[List[RawColType]]]
] = (-1, None, None)
# Empty response is returned for insert query
if response.headers.get("content-length", "") != "0":
try:
query_data = response.json()
rowcount = int(query_data["rows"])
descriptions = [
Column(
d["name"], parse_type(d["type"]), None, None, None, None, None
)
for d in query_data["meta"]
]
# Parse data during fetch
rows = query_data["data"]
row_set = (rowcount, descriptions, rows)
except (KeyError, ValueError) as err:
raise DataError(f"Invalid query data format: {str(err)}")
self._row_sets.append(row_set)
if self._next_set_idx == 0:
# Populate values for first set
self._pop_next_set()
@check_not_closed
@check_query_executed
def nextset(self) -> Optional[bool]:
"""
Skip to the next available set, discarding any remaining rows
from the current set.
Returns True if operation was successful,
        None if there are no more sets to retrieve
"""
return self._pop_next_set()
def _pop_next_set(self) -> Optional[bool]:
"""
Same functionality as .nextset, but doesn't check that query has been executed.
"""
if self._next_set_idx >= len(self._row_sets):
return None
self._rowcount, self._descriptions, self._rows = self._row_sets[
self._next_set_idx
]
self._idx = 0
self._next_set_idx += 1
return True
async def _raise_if_error(self, resp: Response) -> None:
"""Raise a proper error if any"""
if resp.status_code == codes.INTERNAL_SERVER_ERROR:
raise OperationalError(
f"Error executing query:\n{resp.read().decode('utf-8')}"
)
if resp.status_code == codes.FORBIDDEN:
if not await is_db_available(self.connection, self.connection.database):
raise FireboltDatabaseError(
f"Database {self.connection.database} does not exist"
)
raise ProgrammingError(resp.read().decode("utf-8"))
if (
resp.status_code == codes.SERVICE_UNAVAILABLE
or resp.status_code == codes.NOT_FOUND
):
if not await is_engine_running(self.connection, self.connection.engine_url):
raise EngineNotRunningError(
f"Firebolt engine {self.connection.engine_url} "
"needs to be running to run queries against it."
)
resp.raise_for_status()
def _reset(self) -> None:
"""Clear all data stored from previous query."""
self._state = CursorState.NONE
self._rows = None
self._descriptions = None
self._rowcount = -1
self._idx = 0
self._row_sets = []
self._next_set_idx = 0
async def _do_execute_request(
self,
query: str,
parameters: Sequence[Sequence[ParameterType]],
set_parameters: Optional[Dict] = None,
) -> None:
self._reset()
try:
queries = split_format_sql(query, parameters)
for query in queries:
start_time = time.time()
# our CREATE EXTERNAL TABLE queries currently require credentials,
# so we will skip logging those queries.
# https://docs.firebolt.io/sql-reference/commands/ddl-commands#create-external-table
if not re.search("aws_key_id|credentials", query, flags=re.IGNORECASE):
logger.debug(f"Running query: {query}")
resp = await self._client.request(
url="/",
method="POST",
params={
"database": self.connection.database,
"output_format": JSON_OUTPUT_FORMAT,
**(set_parameters or dict()),
},
content=query,
)
await self._raise_if_error(resp)
self._append_query_data(resp)
logger.info(
f"Query fetched {self.rowcount} rows in"
f" {time.time() - start_time} seconds"
)
self._state = CursorState.DONE
except Exception:
self._state = CursorState.ERROR
raise
@check_not_closed
async def execute(
self,
query: str,
parameters: Optional[Sequence[ParameterType]] = None,
set_parameters: Optional[Dict] = None,
) -> int:
"""Prepare and execute a database query. Return row count."""
params_list = [parameters] if parameters else []
await self._do_execute_request(query, params_list, set_parameters)
return self.rowcount
@check_not_closed
async def executemany(
self, query: str, parameters_seq: Sequence[Sequence[ParameterType]]
) -> int:
"""
Prepare and execute a database query against all parameter
sequences provided. Return last query row count.
"""
await self._do_execute_request(query, parameters_seq)
return self.rowcount
def _parse_row(self, row: List[RawColType]) -> List[ColType]:
"""Parse a single data row based on query column types"""
assert len(row) == len(self.description)
return [
parse_value(col, self.description[i].type_code) for i, col in enumerate(row)
]
def _get_next_range(self, size: int) -> Tuple[int, int]:
cleandoc(
"""
Return range of next rows of size (if possible),
and update _idx to point to the end of this range
"""
)
if self._rows is None:
# No elements to take
raise DataError("no rows to fetch")
left = self._idx
right = min(self._idx + size, len(self._rows))
self._idx = right
return left, right
@check_not_closed
@check_query_executed
def fetchone(self) -> Optional[List[ColType]]:
"""Fetch the next row of a query result set."""
left, right = self._get_next_range(1)
if left == right:
# We are out of elements
return None
assert self._rows is not None
return self._parse_row(self._rows[left])
@check_not_closed
@check_query_executed
def fetchmany(self, size: Optional[int] = None) -> List[List[ColType]]:
"""
Fetch the next set of rows of a query result,
cursor.arraysize is default size.
"""
size = size if size is not None else self.arraysize
left, right = self._get_next_range(size)
assert self._rows is not None
rows = self._rows[left:right]
return [self._parse_row(row) for row in rows]
@check_not_closed
@check_query_executed
def fetchall(self) -> List[List[ColType]]:
"""Fetch all remaining rows of a query result."""
left, right = self._get_next_range(self.rowcount)
assert self._rows is not None
rows = self._rows[left:right]
return [self._parse_row(row) for row in rows]
@check_not_closed
def setinputsizes(self, sizes: List[int]) -> None:
"""Predefine memory areas for query parameters (does nothing)."""
@check_not_closed
def setoutputsize(self, size: int, column: Optional[int] = None) -> None:
"""Set a column buffer size for fetches of large columns (does nothing)."""
# Context manager support
@check_not_closed
def __enter__(self) -> BaseCursor:
return self
def __exit__(
self, exc_type: type, exc_val: Exception, exc_tb: TracebackType
) -> None:
self.close()
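# A minimal usage sketch (assumption, not part of this module): the intended call
# sequence on a BaseCursor -- execute a query, then consume rows. On the async
# Cursor subclass defined below, the fetch* methods must be awaited as well.
async def _example_base_cursor_usage(cursor: BaseCursor):
    await cursor.execute("SELECT 1")
    first = cursor.fetchone()     # next row, or None when exhausted
    batch = cursor.fetchmany(10)  # up to 10 rows
    rest = cursor.fetchall()      # all remaining rows
    return first, batch, rest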
class Cursor(BaseCursor):
"""
    Class responsible for executing asyncio queries against a Firebolt database.
Should not be created directly,
use :py:func:`connection.cursor <firebolt.async_db.connection.Connection>`
Args:
description: information about a single result row
rowcount: the number of rows produced by last query
closed: True if connection is closed, False otherwise
arraysize: Read/Write, specifies the number of rows to fetch at a time
with the :py:func:`fetchmany` method
"""
__slots__ = BaseCursor.__slots__ + ("_async_query_lock",)
def __init__(self, *args: Any, **kwargs: Any) -> None:
self._async_query_lock = RWLock()
super().__init__(*args, **kwargs)
@wraps(BaseCursor.execute)
async def execute(
self,
query: str,
parameters: Optional[Sequence[ParameterType]] = None,
set_parameters: Optional[Dict] = None,
) -> int:
        """Prepare and execute a database query."""
        async with self._async_query_lock.writer:
            return await super().execute(query, parameters, set_parameters)
@wraps(BaseCursor.executemany)
async def executemany(
self, query: str, parameters_seq: Sequence[Sequence[ParameterType]]
) -> int:
        """
        Prepare and execute a database query against all parameter
        sequences provided.
        """
        async with self._async_query_lock.writer:
            return await super().executemany(query, parameters_seq)
@wraps(BaseCursor.fetchone)
async def fetchone(self) -> Optional[List[ColType]]:
        """Fetch the next row of a query result set."""
        async with self._async_query_lock.reader:
            return super().fetchone()
@wraps(BaseCursor.fetchmany)
async def fetchmany(self, size: Optional[int] = None) -> List[List[ColType]]:
        async
# File: google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Callable, Dict
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1beta1.types import batch_prediction_job
from google.cloud.aiplatform_v1beta1.types import (
batch_prediction_job as gca_batch_prediction_job,
)
from google.cloud.aiplatform_v1beta1.types import custom_job
from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job
from google.cloud.aiplatform_v1beta1.types import data_labeling_job
from google.cloud.aiplatform_v1beta1.types import (
data_labeling_job as gca_data_labeling_job,
)
from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job
from google.cloud.aiplatform_v1beta1.types import (
hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
)
from google.cloud.aiplatform_v1beta1.types import job_service
from google.longrunning import operations_pb2 as operations # type: ignore
from google.protobuf import empty_pb2 as empty # type: ignore
from .base import JobServiceTransport
class JobServiceGrpcTransport(JobServiceTransport):
"""gRPC backend transport for JobService.
A service for creating and managing AI Platform's jobs.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: credentials.Credentials = None,
channel: grpc.Channel = None
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
"""
# Sanity check: Ensure that channel and credentials are not both
# provided.
if channel:
credentials = False
# Run the base constructor.
super().__init__(host=host, credentials=credentials)
self._stubs = {} # type: Dict[str, Callable]
# If a channel was explicitly provided, set it.
if channel:
self._grpc_channel = channel
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: credentials.Credentials = None,
**kwargs
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host, credentials=credentials, scopes=cls.AUTH_SCOPES, **kwargs
)
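    # A minimal sketch (assumption, not part of the generated client): one way a
    # transport could be built from an explicitly created channel. The method
    # name and the use of google.auth.default() are illustrative only.
    @classmethod
    def _example_from_default_credentials(cls) -> "JobServiceGrpcTransport":
        import google.auth  # assumed available alongside google-api-core
        creds, _ = google.auth.default()
        channel = cls.create_channel(
            "aiplatform.googleapis.com", credentials=creds
        )
        return cls(channel=channel)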
@property
def grpc_channel(self) -> grpc.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Sanity check: Only create a new channel if we do not already
# have one.
if not hasattr(self, "_grpc_channel"):
self._grpc_channel = self.create_channel(
self._host, credentials=self._credentials,
)
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if "operations_client" not in self.__dict__:
self.__dict__["operations_client"] = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self.__dict__["operations_client"]
@property
def create_custom_job(
self,
) -> Callable[[job_service.CreateCustomJobRequest], gca_custom_job.CustomJob]:
r"""Return a callable for the create custom job method over gRPC.
        Creates a CustomJob. A created CustomJob will immediately be
        attempted to run.
Returns:
Callable[[~.CreateCustomJobRequest],
~.CustomJob]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_custom_job" not in self._stubs:
self._stubs["create_custom_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.JobService/CreateCustomJob",
request_serializer=job_service.CreateCustomJobRequest.serialize,
response_deserializer=gca_custom_job.CustomJob.deserialize,
)
return self._stubs["create_custom_job"]
@property
def get_custom_job(
self,
) -> Callable[[job_service.GetCustomJobRequest], custom_job.CustomJob]:
r"""Return a callable for the get custom job method over gRPC.
Gets a CustomJob.
Returns:
Callable[[~.GetCustomJobRequest],
~.CustomJob]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_custom_job" not in self._stubs:
self._stubs["get_custom_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.JobService/GetCustomJob",
request_serializer=job_service.GetCustomJobRequest.serialize,
response_deserializer=custom_job.CustomJob.deserialize,
)
return self._stubs["get_custom_job"]
@property
def list_custom_jobs(
self,
) -> Callable[
[job_service.ListCustomJobsRequest], job_service.ListCustomJobsResponse
]:
r"""Return a callable for the list custom jobs method over gRPC.
Lists CustomJobs in a Location.
Returns:
Callable[[~.ListCustomJobsRequest],
~.ListCustomJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_custom_jobs" not in self._stubs:
self._stubs["list_custom_jobs"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.JobService/ListCustomJobs",
request_serializer=job_service.ListCustomJobsRequest.serialize,
response_deserializer=job_service.ListCustomJobsResponse.deserialize,
)
return self._stubs["list_custom_jobs"]
@property
def delete_custom_job(
self,
) -> Callable[[job_service.DeleteCustomJobRequest], operations.Operation]:
r"""Return a callable for the delete custom job method over gRPC.
Deletes a CustomJob.
Returns:
Callable[[~.DeleteCustomJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_custom_job" not in self._stubs:
self._stubs["delete_custom_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.JobService/DeleteCustomJob",
request_serializer=job_service.DeleteCustomJobRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["delete_custom_job"]
@property
def cancel_custom_job(
self,
) -> Callable[[job_service.CancelCustomJobRequest], empty.Empty]:
r"""Return a callable for the cancel custom job method over gRPC.
Cancels a CustomJob. Starts asynchronous cancellation on the
CustomJob. The server makes a best effort to cancel the job, but
success is not guaranteed. Clients can use
``JobService.GetCustomJob``
or other methods to check whether the cancellation succeeded or
whether the job completed despite cancellation. On successful
cancellation, the CustomJob is not deleted; instead it becomes a
job with a
``CustomJob.error``
value with a ``google.rpc.Status.code`` of
1, corresponding to ``Code.CANCELLED``, and
``CustomJob.state``
is set to ``CANCELLED``.
Returns:
Callable[[~.CancelCustomJobRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_custom_job" not in self._stubs:
self._stubs["cancel_custom_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.JobService/CancelCustomJob",
request_serializer=job_service.CancelCustomJobRequest.serialize,
response_deserializer=empty.Empty.FromString,
)
return self._stubs["cancel_custom_job"]
@property
def create_data_labeling_job(
self,
) -> Callable[
[job_service.CreateDataLabelingJobRequest],
gca_data_labeling_job.DataLabelingJob,
]:
r"""Return a callable for the create data labeling job method over gRPC.
Creates a DataLabelingJob.
Returns:
Callable[[~.CreateDataLabelingJobRequest],
~.DataLabelingJob]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_data_labeling_job" not in self._stubs:
self._stubs["create_data_labeling_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.JobService/CreateDataLabelingJob",
request_serializer=job_service.CreateDataLabelingJobRequest.serialize,
response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize,
)
return self._stubs["create_data_labeling_job"]
@property
def get_data_labeling_job(
self,
) -> Callable[
[job_service.GetDataLabelingJobRequest], data_labeling_job.DataLabelingJob
]:
r"""Return a callable for the get data labeling job method over gRPC.
Gets a DataLabelingJob.
Returns:
Callable[[~.GetDataLabelingJobRequest],
~.DataLabelingJob]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_data_labeling_job" not in self._stubs:
self._stubs["get_data_labeling_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.JobService/GetDataLabelingJob",
request_serializer=job_service.GetDataLabelingJobRequest.serialize,
response_deserializer=data_labeling_job.DataLabelingJob.deserialize,
)
return self._stubs["get_data_labeling_job"]
@property
def list_data_labeling_jobs(
self,
) -> Callable[
[job_service.ListDataLabelingJobsRequest],
job_service.ListDataLabelingJobsResponse,
]:
r"""Return a callable for the list data labeling jobs method over gRPC.
Lists DataLabelingJobs in a Location.
Returns:
Callable[[~.ListDataLabelingJobsRequest],
~.ListDataLabelingJobsResponse]:
                A
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
import argparse
import asyncio
import configparser
import os
from collections import namedtuple
from dataclasses import dataclass, field
from datetime import datetime, date, timedelta
from typing import List, Tuple, Dict
import aiohttp
import keyring
import pytz
@dataclass(order=True)
class Time(object):
seconds: float = field(compare=True, default=0.0)
@classmethod
def from_string(cls, string):
seconds = float(string[:2]) * 3600.0 + float(string[3:]) * 60.0
return Time(seconds)
@classmethod
def from_params(cls, hours=0, minutes=0, seconds=0):
seconds = hours * 3600 + minutes * 60 + seconds
return Time(seconds)
def __format__(self, t_format=None):
if not t_format:
t_format = '''{sign}{day}{hour:02}:{minute:02}'''
negative = self.seconds < 0
total_seconds = round(abs(self.seconds))
m_sec = 60
h_sec = 60 * m_sec
d_sec = 24 * h_sec
day = total_seconds // d_sec
total_seconds -= day * d_sec
hour = total_seconds // h_sec
total_seconds -= hour * h_sec
minute = total_seconds // m_sec
total_seconds -= minute * m_sec
second = total_seconds
repr_str = t_format.format(sign='-' if negative else '',
day=f'{day} day ' if day > 0 else '',
hour=hour,
minute=minute,
second=second)
return repr_str
    def ceil(self, seconds):
        # Round up to the next multiple of `seconds`; copy first so the
        # original Time instance is not mutated in place.
        tmp = Time(self.seconds)
        if tmp.seconds % seconds != 0:
            tmp.seconds = tmp.seconds // seconds * seconds
            tmp.seconds += seconds
        return tmp
def __repr__(self):
return self.__format__('''{sign}{day}{hour:02}:{minute:02}''')
def __add__(self, other):
if other == 0:
return self
return Time(self.seconds + other.seconds)
def __radd__(self, other):
if type(other) is int:
return self
else:
return self.__add__(other)
def __sub__(self, other):
return self.__add__(Time(-other.seconds))
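# A small illustrative helper (assumption, not in the original script): shows how
# Time values are parsed, combined and rounded up to 5-minute steps.
def _example_time_arithmetic():
    worked = Time.from_string('07:45')     # 7 h 45 min -> Time(27900.0)
    launch = Time.from_params(minutes=48)  # Time(2880.0)
    total = worked + launch                # Time(30780.0)
    rounded = total.ceil(5 * 60)           # next 5-minute step -> Time(30900.0)
    return total, rounded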
def colored(text, color):
colors = {'black': '\033[30m',
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'magenta': '\033[35m',
'cyan': '\033[36m',
'white': '\033[37m',
'reset': '\033[39m',
'normal': '\033[39m'}
if text:
return colors[color] + text + colors['reset']
else:
return text
@dataclass
class Entry(object):
id: int
billable: bool
spent: Time
note: str = ''
@dataclass
class TicketingSystem:
config: configparser.RawConfigParser
report_date: datetime = datetime.today()
json: Dict = None
entries: List[Entry] = field(default_factory=list)
auth: Tuple[str] = field(init=False)
params: Dict[str, str] = field(init=False)
url: str = field(init=False)
api_url: str = field(init=False)
entry_url: str = field(init=False)
max_retries: int = field(init=False, default=5)
timeout: int = field(init=False, default=5)
def __str__(self):
res = []
for entry in self.entries:
res.append(
f'''{self.entry_url}{entry.id}'''
f'''\n\t{'Bill' if entry.billable else 'Free'}: {entry.spent} {entry.note}''')
return '\n'.join(res)
def __repr__(self):
return __name__ + self.__str__()
async def get_json(self):
async with aiohttp.ClientSession() as session:
try:
async with session.get(self.api_url, params=self.params, timeout=self.timeout, auth=self.auth) as resp:
return await resp.json()
except asyncio.TimeoutError:
print(f'Got timeout while getting {self.__class__.__name__}')
async def get_entries(self):
raise NotImplementedError
def get_bill(self):
time = Time(sum(i.spent.seconds for i in self.entries if i.billable))
return time
def get_free(self):
time = Time(sum(i.spent.seconds for i in self.entries if not i.billable))
return time
def get_total(self):
return self.get_bill() + self.get_free()
def print_if_not_empty(self):
if self.entries:
print(self)
@dataclass
class Freshdesk(TicketingSystem):
    # FIXME: someday this must become async and include self.json = self.get_json() initialisation
def __post_init__(self):
local = pytz.timezone(self.config.get('global', 'timezone'))
local_dt = local.localize(self.report_date)
utc_dt = local_dt.astimezone(pytz.utc)
self.report_date = utc_dt - timedelta(seconds=1)
self.agent_id = self.config.get('freshdesk', 'agent_id')
self.auth = aiohttp.BasicAuth(keyring.get_password('freshdesk', self.agent_id), 'X')
self.params = {'agent_id': self.agent_id,
'executed_after': self.report_date.strftime('%Y-%m-%dT%H:%M:%SZ'),
'executed_before': (self.report_date + timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%SZ')}
self.url = self.config.get('freshdesk', 'url')
self.api_url = self.url + '/api/v2/time_entries'
self.entry_url = self.url + '/a/tickets/'
self.free_tags = self.config.get('freshdesk', 'free_tags').split()
def __parse_json__(self):
if self.json:
data = sorted(self.json, key=lambda k: (k.get('ticket_id'), k.get('updated_at')))
self.entries = [Entry(id=i.get('ticket_id'),
billable=i.get('billable'),
spent=Time.from_string(i.get('time_spent')),
note=i.get('note'))
for i in data]
if self.free_tags:
for entry in self.entries:
if entry.billable:
if any(tag in entry.note for tag in self.free_tags):
entry.note += colored(' Warn! Billable entry with free tag!', 'red')
else:
if all(tag not in entry.note for tag in self.free_tags):
entry.note += colored(' Warn! Free entry without free tag!', 'red')
async def get_entries(self):
self.json = await self.get_json()
self.__parse_json__()
return self
async def get_ticket(self, ticket_num):
self.api_url = f'''{self.config.get('freshdesk', 'url')}/api/v2/tickets/{ticket_num}/time_entries'''
self.params = None
self.json = await self.get_json()
self.__parse_json__()
return (f'''Time records for ticket {ticket_num}:
Total: {self.get_total()}
Bill: {self.get_bill()}
Free: {self.get_free()}
''')
@dataclass
class TeamWork(TicketingSystem):
def __post_init__(self):
self.agent_id = self.config.get('teamwork', 'agent_id')
self.auth = aiohttp.BasicAuth(keyring.get_password('teamwork', self.agent_id), 'x')
self.url = self.config.get('teamwork', 'url')
self.api_url = self.url + '/time_entries.json'
self.entry_url = self.url + '/#tasks/'
self.params = {
'userId': self.config.get('teamwork', 'agent_id'),
'fromdate': self.report_date.strftime('%Y%m%d'),
'todate': self.report_date.strftime('%Y%m%d')
}
async def get_entries(self):
self.json = await self.get_json()
if self.json:
data = sorted(self.json.get('time-entries'), key=lambda k: (k.get('date')))
self.entries = [Entry(id=i.get('todo-item-id'),
spent=(Time(int(i.get('hours')) * 3600 + int(i.get('minutes')) * 60)),
billable=(i.get('isbillable') == 1),
note=i.get('project-name'))
for i in data]
return self
@dataclass
class Jira(TicketingSystem):
def __post_init__(self):
self.login = self.config.get('jira', 'login')
self.auth = aiohttp.BasicAuth(self.login, keyring.get_password('jira', self.login))
self.url = self.config.get('jira', 'url')
self.api_url = self.url + '/rest/api/2/search'
self.entry_url = self.url + '/browse/'
self.params = {
'jql': f'''worklogAuthor=currentUser() and worklogDate={self.report_date.strftime('%Y-%m-%d')}''',
'maxResults': 1000,
'fields': 'id'}
async def get_entries(self):
async def get_issue(url, issue_id):
async with aiohttp.ClientSession() as session:
try:
async with session.get(url, timeout=self.timeout, auth=self.auth, compress=True) as resp:
result = await resp.json()
for worklog in result['worklogs']:
if (worklog['author']['name'] == self.config.get('jira', 'login') and
worklog['started'].split('T')[0] == self.report_date.strftime('%Y-%m-%d')):
time_spent = int(worklog.get('timeSpentSeconds'))
self.entries.append(Entry(id=issue_id,
billable=False,
spent=Time(time_spent),
note=worklog.get('comment')))
except asyncio.TimeoutError:
print(f'Got timeout while getting {url}')
self.json = await self.get_json()
if self.json:
for issue in self.json.get('issues'):
await get_issue(issue.get('self') + '/worklog', issue.get('key'))
return self
def calc_stats(total_bill_time, total_free_time, time_now, report_date, config, ceil_seconds=5 * 60):
workday_begin = Time.from_string(config.get('global', 'workday_begin'))
workday_end = Time.from_string(config.get('global', 'workday_end'))
launch_begin = Time.from_string(config.get('global', 'launch_begin'))
launch_end = Time.from_string(config.get('global', 'launch_end'))
launch_duration = launch_end - launch_begin
workday_duration = workday_end - workday_begin - launch_duration
total_tracked_time = total_bill_time + total_free_time
report_is_now = report_date.date() == date.today() and workday_begin <= time_now <= workday_end
if report_is_now:
if time_now < launch_begin:
time_from_wd_begin = time_now - workday_begin
elif launch_begin <= time_now <= launch_end:
time_from_wd_begin = launch_begin - workday_begin
else:
time_from_wd_begin = time_now - workday_begin - launch_duration
untracked_time = time_from_wd_begin - total_tracked_time
till_end_of_work_time = workday_duration - time_from_wd_begin
else:
untracked_time = workday_duration - total_tracked_time
till_end_of_work_time = Time(0)
# Ceil to 5 minutes
untracked_time = untracked_time.ceil(ceil_seconds)
stats = namedtuple('Stats', ['total_tracked_time',
'total_bill_time',
'total_free_time',
'untracked_time',
'till_end_of_work_time',
'workday_duration', ])
return stats(total_tracked_time,
total_bill_time,
total_free_time,
untracked_time,
till_end_of_work_time,
workday_duration)
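# A worked example (assumption, not part of the original script): with a
# 10:00-19:00 workday and a 13:00-14:00 launch break (8 h of work), tracking
# 5 h billable and 1 h free on a past day leaves 2 h untracked and nothing
# "till end of work".
def _example_calc_stats(config):
    stats = calc_stats(total_bill_time=Time.from_params(hours=5),
                       total_free_time=Time.from_params(hours=1),
                       time_now=Time.from_string('20:00'),
                       report_date=datetime(2020, 1, 1),
                       config=config)
    return stats.untracked_time  # Time(7200.0) with the config described above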
def get_stats_str(pool, stats):
res = [f'Total tracked time: {stats.total_tracked_time}']
for ts in pool:
ts_name = ts.__class__.__name__
ts_bill = ts.get_bill()
if ts_bill.seconds > 0:
res.append(f' {ts_name:<8} bill: {ts_bill}')
ts_free = ts.get_free()
if ts_free.seconds > 0:
res.append(f' {ts_name:<8} free: {ts_free}')
return '\n'.join(res)
def get_ratio_str(stats, terminal_width_chr: int = 48) -> str:
total = max(stats.total_tracked_time, stats.workday_duration)
width = total.seconds / terminal_width_chr
if stats.untracked_time.seconds > 0:
untracked_time = stats.untracked_time
else:
untracked_time = Time(0)
if stats.total_tracked_time > stats.workday_duration:
rest_time = Time(0)
else:
rest_time = stats.workday_duration - stats.total_tracked_time - untracked_time
bill_part = colored('#' * round(stats.total_bill_time.seconds / width), 'green')
free_part = colored('#' * round(stats.total_free_time.seconds / width), 'normal')
none_part = colored('#' * round(untracked_time.seconds / width), 'red')
rest_part = '_' * round(rest_time.seconds / width)
return f'Progress: [{bill_part + free_part + none_part + rest_part}]'
def setup_wizard(config, config_path):
def get_option(prompt, default=''):
if default:
res = input(prompt + f' [{default}]?: ')
else:
res = input(prompt + ': ')
if not res:
res = default
return res
print(f'''Cannot find config at {config_path}. Let's create it!''')
config.add_section('global')
config.set('global', 'workday_begin', get_option('Workday begins at', '10:00'))
config.set('global', 'workday_end', get_option('Workday ends at', '19:00'))
config.set('global', 'launch_begin', get_option('Launch begins at', '13:00'))
config.set('global', 'launch_end', get_option('Launch ends at', '14:00'))
config.set('global', 'timezone', get_option('Timezone is', 'Europe/Moscow'))
config.set('global', 'date_format', get_option('Date format is', '%d.%m.%Y'))
if 'y' in get_option('Add Freshdesk details', 'Y/n').lower():
config.add_section('freshdesk')
config.set('freshdesk', 'url', 'https://' + get_option('Company name').lower() + '.freshdesk.com')
config.set('freshdesk', 'agent_id', get_option('Agent ID'))
        keyring.set_password('freshdesk', config.get('freshdesk', 'agent_id'), get_option('API key'))
config.set('freshdesk', 'free_tags', get_option('Tags with non-billable time',
'DEVBUG SUPBUG STUDY HELP CONTR COM ORG OTHER UPDATE'))
if 'y' in get_option('Add Jira details', 'Y/n').lower():
config.add_section('jira')
config.set('jira', 'url', get_option('Jira URL'))
config.set('jira', 'login', get_option('Login'))
        keyring.set_password('jira', config.get('jira', 'login'), get_option('Password'))
if 'y' in get_option('Add TeamWork details', 'Y/n').lower():
config.add_section('teamwork')
config.set('teamwork', 'url', get_option('TeamWork URL', 'latera'))
config.set('teamwork', 'agent_id', get_option('Agent ID'))
        keyring.set_password('teamwork', config.get('teamwork', 'agent_id'), get_option('API key'))
with open(config_path, mode='w') as f:
config.write(f)
async def main():
parser = argparse.ArgumentParser(description='Simple time tracker for Freshdesk, TeamWork and Jira')
parser.add_argument('offset', default='0', type=str, nargs='?',
help='Offset in days from today or date in format dd-mm-yyyy')
parser.add_argument('-c', '--config', default='~/timer.conf', type=str, nargs='?', help='Path to config')
parser.add_argument('-t', '--ticket', type=int, nargs='?',
help='Freshdesk ticker number. If provided, return spent time for the ticket')
args = parser.parse_args()
config = configparser.RawConfigParser()
config_path = os.path.expanduser(args.config)
if not os.path.exists(config_path):
setup_wizard(config, config_path)
config.read(os.path.expanduser(args.config))
if args.ticket:
fd = Freshdesk(config)
result = await fd.get_ticket(args.ticket)
print(result)
else:
if args.offset.isdigit():
report_date = datetime.combine(date.today(), datetime.min.time()) - timedelta(days=int(args.offset))
else:
try:
report_date = datetime.strptime(args.offset, config.get('global', 'date_format'))
except ValueError:
print(
f'{args.offset} is neither an integer nor matches format {config.get("global", "date_format")}.')
print(f'Try to run script with -h to get help')
raise SystemExit(1)
        # Highlight date if report date if
"""
Downloading NeuroImaging datasets: atlas datasets
"""
import os
import warnings
import xml.etree.ElementTree
from tempfile import mkdtemp
import json
import shutil
import nibabel as nb
import numpy as np
from numpy.lib import recfunctions
from sklearn.utils import Bunch
from .utils import _get_dataset_dir, _fetch_files, _get_dataset_descr
from .._utils import check_niimg
from .._utils.compat import _basestring
from ..image import new_img_like, get_data
_TALAIRACH_LEVELS = ['hemisphere', 'lobe', 'gyrus', 'tissue', 'ba']
def fetch_atlas_craddock_2012(data_dir=None, url=None, resume=True, verbose=1):
"""Download and return file names for the Craddock 2012 parcellation
The provided images are in MNI152 space.
Parameters
----------
data_dir: string
directory where data should be downloaded and unpacked.
url: string
url of file to download.
resume: bool
        whether to resume the download of a partly-downloaded file.
verbose: int
verbosity level (0 means no message).
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, keys are:
scorr_mean, tcorr_mean,
scorr_2level, tcorr_2level,
random
References
----------
Licence: Creative Commons Attribution Non-commercial Share Alike
http://creativecommons.org/licenses/by-nc-sa/2.5/
Craddock, <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>. "A Whole Brain fMRI Atlas Generated via Spatially
Constrained Spectral Clustering". Human Brain Mapping 33, no 8 (2012):
1914-1928. doi:10.1002/hbm.21333.
See http://www.nitrc.org/projects/cluster_roi/ for more information
on this parcellation.
"""
if url is None:
url = "ftp://www.nitrc.org/home/groups/cluster_roi/htdocs" \
"/Parcellations/craddock_2011_parcellations.tar.gz"
opts = {'uncompress': True}
dataset_name = "craddock_2012"
keys = ("scorr_mean", "tcorr_mean",
"scorr_2level", "tcorr_2level",
"random")
filenames = [
("scorr05_mean_all.nii.gz", url, opts),
("tcorr05_mean_all.nii.gz", url, opts),
("scorr05_2level_all.nii.gz", url, opts),
("tcorr05_2level_all.nii.gz", url, opts),
("random_all.nii.gz", url, opts)
]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
sub_files = _fetch_files(data_dir, filenames, resume=resume,
verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = dict([('description', fdescr)] + list(zip(keys, sub_files)))
return Bunch(**params)
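# A minimal usage sketch (assumption, not part of this module): fetching the
# Craddock 2012 parcellation and loading one of the returned images; the chosen
# key ('scorr_mean') is just one of the five documented above.
def _example_fetch_craddock(data_dir=None):
    atlas = fetch_atlas_craddock_2012(data_dir=data_dir)
    img = nb.load(atlas.scorr_mean)  # 4D image, one 3D parcellation per volume
    return img.shape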
def fetch_atlas_destrieux_2009(lateralized=True, data_dir=None, url=None,
resume=True, verbose=1):
"""Download and load the Destrieux cortical atlas (dated 2009)
Parameters
----------
lateralized: boolean, optional
If True, returns an atlas with distinct regions for right and left
hemispheres.
data_dir: string, optional
        Path of the data directory. Use to force data storage in a non-
standard location. Default: None (meaning: default)
url: string, optional
Download URL of the dataset. Overwrite the default URL.
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, contains:
- Cortical ROIs, lateralized or not (maps)
- Labels of the ROIs (labels)
References
----------
<NAME>, et al. "Automatically parcellating the human cerebral
cortex." Cerebral cortex 14.1 (2004): 11-22.
<NAME>., et al. "A sulcal depth-based anatomical parcellation
of the cerebral cortex." NeuroImage 47 (2009): S151.
"""
if url is None:
url = "https://www.nitrc.org/frs/download.php/7739/"
url += "destrieux2009.tgz"
opts = {'uncompress': True}
lat = '_lateralized' if lateralized else ''
files = [
('destrieux2009_rois_labels' + lat + '.csv', url, opts),
('destrieux2009_rois' + lat + '.nii.gz', url, opts),
('destrieux2009.rst', url, opts)
]
dataset_name = 'destrieux_2009'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
files_ = _fetch_files(data_dir, files, resume=resume,
verbose=verbose)
params = dict(maps=files_[1], labels=np.recfromcsv(files_[0]))
with open(files_[2], 'r') as rst_file:
params['description'] = rst_file.read()
return Bunch(**params)
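# A minimal usage sketch (assumption, not part of this module): loading the
# lateralized Destrieux atlas and counting its region labels.
def _example_fetch_destrieux(data_dir=None):
    atlas = fetch_atlas_destrieux_2009(lateralized=True, data_dir=data_dir)
    labels_img = nb.load(atlas.maps)  # 3D label image
    return labels_img.shape, len(atlas.labels)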
def fetch_atlas_harvard_oxford(atlas_name, data_dir=None,
symmetric_split=False,
resume=True, verbose=1):
"""Load Harvard-Oxford parcellations from FSL.
This function downloads Harvard Oxford atlas packaged from FSL 5.0
and stores atlases in NILEARN_DATA folder in home directory.
This function can also load Harvard Oxford atlas from your local directory
specified by your FSL installed path given in `data_dir` argument.
See documentation for details.
Parameters
----------
atlas_name: string
Name of atlas to load. Can be:
cort-maxprob-thr0-1mm, cort-maxprob-thr0-2mm,
cort-maxprob-thr25-1mm, cort-maxprob-thr25-2mm,
cort-maxprob-thr50-1mm, cort-maxprob-thr50-2mm,
sub-maxprob-thr0-1mm, sub-maxprob-thr0-2mm,
sub-maxprob-thr25-1mm, sub-maxprob-thr25-2mm,
sub-maxprob-thr50-1mm, sub-maxprob-thr50-2mm,
cort-prob-1mm, cort-prob-2mm,
sub-prob-1mm, sub-prob-2mm
data_dir: string, optional
        Path of the data directory where the data will be stored. Optionally,
        it can also be an FSL installation directory (which depends on your
        installation).
        For example, if FSL is installed in /usr/share/fsl/, then passing
        '/usr/share/' loads the Harvard-Oxford atlas from that installation,
        since we mimic the same root directory layout as FSL.
symmetric_split: bool, optional, (default False).
        If True, lateralized versions of the cort or sub maxprob atlases are
        returned. For subcortical types (sub-maxprob), every symmetric region
        is split into left and right parts, which effectively doubles the
        number of regions.
NOTE Not implemented for full probabilistic atlas (*-prob-* atlases).
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, keys are:
- "maps": nibabel.Nifti1Image, 4D maps if a probabilistic atlas is
requested and 3D labels if a maximum probabilistic atlas was
requested.
- "labels": string list, labels of the regions in the atlas.
"""
atlas_items = ("cort-maxprob-thr0-1mm", "cort-maxprob-thr0-2mm",
"cort-maxprob-thr25-1mm", "cort-maxprob-thr25-2mm",
"cort-maxprob-thr50-1mm", "cort-maxprob-thr50-2mm",
"sub-maxprob-thr0-1mm", "sub-maxprob-thr0-2mm",
"sub-maxprob-thr25-1mm", "sub-maxprob-thr25-2mm",
"sub-maxprob-thr50-1mm", "sub-maxprob-thr50-2mm",
"cort-prob-1mm", "cort-prob-2mm",
"sub-prob-1mm", "sub-prob-2mm")
if atlas_name not in atlas_items:
raise ValueError("Invalid atlas name: {0}. Please chose an atlas "
"among:\n{1}".format(
atlas_name, '\n'.join(atlas_items)))
url = 'http://www.nitrc.org/frs/download.php/9902/HarvardOxford.tgz'
# For practical reasons, we mimic the FSL data directory here.
dataset_name = 'fsl'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
opts = {'uncompress': True}
root = os.path.join('data', 'atlases')
if atlas_name[0] == 'c':
if 'cort-maxprob' in atlas_name and symmetric_split:
split_name = atlas_name.split('cort')
atlas_name = 'cortl' + split_name[1]
label_file = 'HarvardOxford-Cortical-Lateralized.xml'
lateralized = True
else:
label_file = 'HarvardOxford-Cortical.xml'
lateralized = False
else:
label_file = 'HarvardOxford-Subcortical.xml'
lateralized = False
label_file = os.path.join(root, label_file)
atlas_file = os.path.join(root, 'HarvardOxford',
'HarvardOxford-' + atlas_name + '.nii.gz')
atlas_img, label_file = _fetch_files(
data_dir,
[(atlas_file, url, opts), (label_file, url, opts)],
resume=resume, verbose=verbose)
names = {}
from xml.etree import ElementTree
names[0] = 'Background'
for label in ElementTree.parse(label_file).findall('.//label'):
names[int(label.get('index')) + 1] = label.text
names = list(names.values())
if not symmetric_split:
return Bunch(maps=atlas_img, labels=names)
if atlas_name in ("cort-prob-1mm", "cort-prob-2mm",
"sub-prob-1mm", "sub-prob-2mm"):
raise ValueError("Region splitting not supported for probabilistic "
"atlases")
atlas_img = check_niimg(atlas_img)
if lateralized:
return Bunch(maps=atlas_img, labels=names)
atlas = get_data(atlas_img)
labels = np.unique(atlas)
# Build a mask of both halves of the brain
middle_ind = (atlas.shape[0] - 1) // 2
# Put zeros on the median plane
atlas[middle_ind, ...] = 0
# Split every zone crossing the median plane into two parts.
left_atlas = atlas.copy()
left_atlas[middle_ind:, ...] = 0
right_atlas = atlas.copy()
right_atlas[:middle_ind, ...] = 0
new_label = 0
new_atlas = atlas.copy()
# Assumes that the background label is zero.
new_names = [names[0]]
for label, name in zip(labels[1:], names[1:]):
new_label += 1
left_elements = (left_atlas == label).sum()
right_elements = (right_atlas == label).sum()
n_elements = float(left_elements + right_elements)
if (left_elements / n_elements < 0.05 or
right_elements / n_elements < 0.05):
new_atlas[atlas == label] = new_label
new_names.append(name)
continue
new_atlas[right_atlas == label] = new_label
new_names.append(name + ', left part')
new_label += 1
new_atlas[left_atlas == label] = new_label
new_names.append(name + ', right part')
atlas_img = new_img_like(atlas_img, new_atlas, atlas_img.affine)
return Bunch(maps=atlas_img, labels=new_names)
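# Example usage (illustrative sketch only; the "nilearn.datasets" import path is an
# assumption based on where these fetchers normally live):
#
#     from nilearn import datasets
#     # 3D label image of a maximum-probability atlas, split into left/right parts
#     ho = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm',
#                                              symmetric_split=True)
#     print(len(ho.labels))  # 'Background' plus one entry per (possibly split) region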
def fetch_atlas_msdl(data_dir=None, url=None, resume=True, verbose=1):
"""Download and load the MSDL brain atlas.
Parameters
----------
data_dir: string, optional
Path of the data directory. Used to force data storage in a specified
location. Default: None
url: string, optional
        Download URL of the dataset. Overrides the default URL. Used for
        testing only (or if you set up a mirror of the data).
Returns
-------
data: sklearn.datasets.base.Bunch
Dictionary-like object, the interest attributes are :
- 'maps': str, path to nifti file containing regions definition.
- 'labels': string list containing the labels of the regions.
- 'region_coords': tuple list (x, y, z) containing coordinates
of each region in MNI space.
- 'networks': string list containing names of the networks.
- 'description': description about the atlas.
References
----------
:Download:
https://team.inria.fr/parietal/files/2015/01/MSDL_rois.zip
:Paper to cite:
`Multi-subject dictionary learning to segment an atlas of brain
spontaneous activity <http://hal.inria.fr/inria-00588898/en>`_
<NAME>, <NAME>, <NAME>, <NAME>,
<NAME>. Information Processing in Medical Imaging, 2011,
pp. 562-573, Lecture Notes in Computer Science.
:Other references:
`Learning and comparing functional connectomes across subjects
<http://hal.inria.fr/hal-00812911/en>`_.
<NAME>, <NAME> NeuroImage, 2013.
"""
url = 'https://team.inria.fr/parietal/files/2015/01/MSDL_rois.zip'
opts = {'uncompress': True}
dataset_name = "msdl_atlas"
files = [(os.path.join('MSDL_rois', 'msdl_rois_labels.csv'), url, opts),
(os.path.join('MSDL_rois', 'msdl_rois.nii'), url, opts)]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
files = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
csv_data = np.recfromcsv(files[0])
labels = [name.strip() for name in csv_data['name'].tolist()]
labels = [label.decode("utf-8") for label in labels]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', module='numpy',
category=FutureWarning)
region_coords = csv_data[['x', 'y', 'z']].tolist()
net_names = [net_name.strip() for net_name in csv_data['net_name'].tolist()]
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=files[1], labels=labels, region_coords=region_coords,
networks=net_names, description=fdescr)
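# Example usage (hedged sketch; assumes the standard "nilearn.datasets" entry point):
#
#     from nilearn import datasets
#     msdl = datasets.fetch_atlas_msdl()
#     print(msdl.maps)  # path to the 4D probabilistic maps
#     print(msdl.labels[:3], msdl.networks[:3], msdl.region_coords[:3])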
def fetch_coords_power_2011():
"""Download and load the Power et al. brain atlas composed of 264 ROIs.
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, contains:
- "rois": coordinates of 264 ROIs in MNI space
References
----------
Power, <NAME>., et al. "Functional network organization of the human
brain." Neuron 72.4 (2011): 665-678.
"""
dataset_name = 'power_2011'
fdescr = _get_dataset_descr(dataset_name)
package_directory = os.path.dirname(os.path.abspath(__file__))
csv = os.path.join(package_directory, | |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import logging
import random
import string
import unittest
from abc import abstractmethod
from functools import lru_cache
from unittest import skipIf
from transformers import (
FEATURE_EXTRACTOR_MAPPING,
TOKENIZER_MAPPING,
AutoFeatureExtractor,
AutoTokenizer,
DistilBertForSequenceClassification,
IBertConfig,
RobertaConfig,
TextClassificationPipeline,
pipeline,
)
from transformers.pipelines import get_task
from transformers.pipelines.base import _pad
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch
logger = logging.getLogger(__name__)
def get_checkpoint_from_architecture(architecture):
try:
module = importlib.import_module(architecture.__module__)
except ImportError:
logger.error(f"Ignoring architecture {architecture}")
return
if hasattr(module, "_CHECKPOINT_FOR_DOC"):
return module._CHECKPOINT_FOR_DOC
else:
logger.warning(f"Can't retrieve checkpoint from {architecture.__name__}")
def get_tiny_config_from_class(configuration_class):
if "OpenAIGPT" in configuration_class.__name__:
# This is the only file that is inconsistent with the naming scheme.
# Will rename this file if we decide this is the way to go
return
model_type = configuration_class.model_type
camel_case_model_name = configuration_class.__name__.split("Config")[0]
try:
model_slug = model_type.replace("-", "_")
module = importlib.import_module(f".test_modeling_{model_slug}", package=f"tests.models.{model_slug}")
model_tester_class = getattr(module, f"{camel_case_model_name}ModelTester", None)
except (ImportError, AttributeError):
logger.error(f"No model tester class for {configuration_class.__name__}")
return
if model_tester_class is None:
logger.warning(f"No model tester class for {configuration_class.__name__}")
return
model_tester = model_tester_class(parent=None)
if hasattr(model_tester, "get_pipeline_config"):
config = model_tester.get_pipeline_config()
elif hasattr(model_tester, "get_config"):
config = model_tester.get_config()
else:
config = None
logger.warning(f"Model tester {model_tester_class.__name__} has no `get_config()`.")
return config
@lru_cache(maxsize=100)
def get_tiny_tokenizer_from_checkpoint(checkpoint):
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
if tokenizer.vocab_size < 300:
# Wav2Vec2ForCTC for instance
# ByT5Tokenizer
# all are already small enough and have no Fast version that can
# be retrained
return tokenizer
logger.info("Training new from iterator ...")
vocabulary = string.ascii_letters + string.digits + " "
tokenizer = tokenizer.train_new_from_iterator(vocabulary, vocab_size=len(vocabulary), show_progress=False)
logger.info("Trained.")
return tokenizer
def get_tiny_feature_extractor_from_checkpoint(checkpoint, tiny_config, feature_extractor_class):
try:
feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint)
except Exception:
try:
if feature_extractor_class is not None:
feature_extractor = feature_extractor_class()
else:
feature_extractor = None
except Exception:
feature_extractor = None
if hasattr(tiny_config, "image_size") and feature_extractor:
feature_extractor = feature_extractor.__class__(size=tiny_config.image_size, crop_size=tiny_config.image_size)
# Speech2TextModel specific.
if hasattr(tiny_config, "input_feat_per_channel") and feature_extractor:
feature_extractor = feature_extractor.__class__(
feature_size=tiny_config.input_feat_per_channel, num_mel_bins=tiny_config.input_feat_per_channel
)
return feature_extractor
class ANY:
def __init__(self, *_types):
self._types = _types
def __eq__(self, other):
return isinstance(other, self._types)
def __repr__(self):
return f"ANY({', '.join(_type.__name__ for _type in self._types)})"
class PipelineTestCaseMeta(type):
def __new__(mcs, name, bases, dct):
def gen_test(ModelClass, checkpoint, tiny_config, tokenizer_class, feature_extractor_class):
@skipIf(tiny_config is None, "TinyConfig does not exist")
@skipIf(checkpoint is None, "checkpoint does not exist")
def test(self):
if ModelClass.__name__.endswith("ForCausalLM"):
tiny_config.is_encoder_decoder = False
if hasattr(tiny_config, "encoder_no_repeat_ngram_size"):
                        # specific to blenderbot, which supports both decoder-only and
                        # encoder/decoder architectures, but the test config only reflects
                        # the encoder/decoder arch
tiny_config.encoder_no_repeat_ngram_size = 0
if ModelClass.__name__.endswith("WithLMHead"):
tiny_config.is_decoder = True
try:
model = ModelClass(tiny_config)
except ImportError as e:
self.skipTest(
f"Cannot run with {tiny_config} as the model requires a library that isn't installed: {e}"
)
if hasattr(model, "eval"):
model = model.eval()
if tokenizer_class is not None:
try:
tokenizer = get_tiny_tokenizer_from_checkpoint(checkpoint)
# XLNet actually defines it as -1.
if isinstance(model.config, (RobertaConfig, IBertConfig)):
tokenizer.model_max_length = model.config.max_position_embeddings - 2
elif (
hasattr(model.config, "max_position_embeddings")
and model.config.max_position_embeddings > 0
):
tokenizer.model_max_length = model.config.max_position_embeddings
                    # Rust panic exceptions are NOT Exception subclasses.
                    # Some test tokenizers contain broken vocabs or a custom PreTokenizer, so we
                    # provide some default tokenizer and hope for the best.
except: # noqa: E722
self.skipTest(f"Ignoring {ModelClass}, cannot create a simple tokenizer")
else:
tokenizer = None
feature_extractor = get_tiny_feature_extractor_from_checkpoint(
checkpoint, tiny_config, feature_extractor_class
)
if tokenizer is None and feature_extractor is None:
self.skipTest(
f"Ignoring {ModelClass}, cannot create a tokenizer or feature_extractor (PerceiverConfig with"
" no FastTokenizer ?)"
)
pipeline, examples = self.get_test_pipeline(model, tokenizer, feature_extractor)
if pipeline is None:
# The test can disable itself, but it should be very marginal
# Concerns: Wav2Vec2ForCTC without tokenizer test (FastTokenizer don't exist)
return
self.run_pipeline_test(pipeline, examples)
def run_batch_test(pipeline, examples):
# Need to copy because `Conversation` are stateful
if pipeline.tokenizer is not None and pipeline.tokenizer.pad_token_id is None:
return # No batching for this and it's OK
                    # 10 examples with batch size 4 means there needs to be an unfinished batch
                    # which is important for the unbatcher
def data(n):
for _ in range(n):
# Need to copy because Conversation object is mutated
yield copy.deepcopy(random.choice(examples))
out = []
for item in pipeline(data(10), batch_size=4):
out.append(item)
self.assertEqual(len(out), 10)
run_batch_test(pipeline, examples)
return test
for prefix, key in [("pt", "model_mapping"), ("tf", "tf_model_mapping")]:
mapping = dct.get(key, {})
if mapping:
for configuration, model_architectures in mapping.items():
if not isinstance(model_architectures, tuple):
model_architectures = (model_architectures,)
for model_architecture in model_architectures:
checkpoint = get_checkpoint_from_architecture(model_architecture)
tiny_config = get_tiny_config_from_class(configuration)
tokenizer_classes = TOKENIZER_MAPPING.get(configuration, [])
feature_extractor_class = FEATURE_EXTRACTOR_MAPPING.get(configuration, None)
feature_extractor_name = (
feature_extractor_class.__name__ if feature_extractor_class else "nofeature_extractor"
)
if not tokenizer_classes:
# We need to test even if there are no tokenizers.
tokenizer_classes = [None]
else:
# Remove the non defined tokenizers
# ByT5 and Perceiver are bytes-level and don't define
# FastTokenizer, we can just ignore those.
tokenizer_classes = [
tokenizer_class for tokenizer_class in tokenizer_classes if tokenizer_class is not None
]
for tokenizer_class in tokenizer_classes:
if tokenizer_class is not None:
tokenizer_name = tokenizer_class.__name__
else:
tokenizer_name = "notokenizer"
test_name = f"test_{prefix}_{configuration.__name__}_{model_architecture.__name__}_{tokenizer_name}_{feature_extractor_name}"
if tokenizer_class is not None or feature_extractor_class is not None:
dct[test_name] = gen_test(
model_architecture,
checkpoint,
tiny_config,
tokenizer_class,
feature_extractor_class,
)
@abstractmethod
def inner(self):
raise NotImplementedError("Not implemented test")
# Force these 2 methods to exist
dct["test_small_model_pt"] = dct.get("test_small_model_pt", inner)
dct["test_small_model_tf"] = dct.get("test_small_model_tf", inner)
return type.__new__(mcs, name, bases, dct)
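# A concrete test class opts into this metaclass by declaring the model/tokenizer
# mappings it covers and the two hooks called in gen_test. The sketch below is
# hypothetical (the mapping constants and method bodies are placeholders, not taken
# from this file):
#
#     class TextClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
#         model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING        # "pt" tests
#         tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING  # "tf" tests
#
#         def get_test_pipeline(self, model, tokenizer, feature_extractor):
#             return TextClassificationPipeline(model=model, tokenizer=tokenizer), ["This is a test"]
#
#         def run_pipeline_test(self, text_classifier, examples):
#             outputs = text_classifier(examples)
#             self.assertEqual(outputs, [{"label": ANY(str), "score": ANY(float)}])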
@is_pipeline_test
class CommonPipelineTest(unittest.TestCase):
@require_torch
def test_pipeline_iteration(self):
from torch.utils.data import Dataset
class MyDataset(Dataset):
data = [
"This is a test",
"This restaurant is great",
"This restaurant is awful",
]
def __len__(self):
return 3
def __getitem__(self, i):
return self.data[i]
text_classifier = pipeline(
task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
dataset = MyDataset()
for output in text_classifier(dataset):
self.assertEqual(output, {"label": ANY(str), "score": ANY(float)})
@require_torch
def test_check_task_auto_inference(self):
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
self.assertIsInstance(pipe, TextClassificationPipeline)
@require_torch
def test_pipeline_batch_size_global(self):
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
self.assertEqual(pipe._batch_size, None)
self.assertEqual(pipe._num_workers, None)
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", batch_size=2, num_workers=1)
self.assertEqual(pipe._batch_size, 2)
self.assertEqual(pipe._num_workers, 1)
@require_torch
def test_pipeline_override(self):
class MyPipeline(TextClassificationPipeline):
pass
text_classifier = pipeline(model="hf-internal-testing/tiny-random-distilbert", pipeline_class=MyPipeline)
self.assertIsInstance(text_classifier, MyPipeline)
def test_check_task(self):
task = get_task("gpt2")
self.assertEqual(task, "text-generation")
with self.assertRaises(RuntimeError):
# Wrong framework
get_task("espnet/siddhana_slurp_entity_asr_train_asr_conformer_raw_en_word_valid.acc.ave_10best")
@require_torch
def test_iterator_data(self):
def data(n: int):
for _ in range(n):
yield "This is a test"
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
results = []
for out in pipe(data(10)):
self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
results.append(out)
self.assertEqual(len(results), 10)
# When using multiple workers on streamable data it should still work
# This will force using `num_workers=1` with a warning for now.
results = []
for out in pipe(data(10), num_workers=2):
self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
results.append(out)
self.assertEqual(len(results), 10)
@require_tf
def test_iterator_data_tf(self):
def data(n: int):
for _ in range(n):
yield "This is a test"
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", framework="tf")
out = pipe("This is a test")
results = []
for out in pipe(data(10)):
self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
results.append(out)
self.assertEqual(len(results), 10)
@require_torch
def test_unbatch_attentions_hidden_states(self):
model = DistilBertForSequenceClassification.from_pretrained(
"hf-internal-testing/tiny-random-distilbert", output_hidden_states=True, output_attentions=True
)
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-distilbert")
text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
# Used to throw an error because `hidden_states` are a tuple of tensors
# instead of the expected tensor.
outputs = text_classifier(["This is great !"] * 20, batch_size=32)
self.assertEqual(len(outputs), 20)
@is_pipeline_test
class PipelinePadTest(unittest.TestCase):
@require_torch
def test_pipeline_padding(self):
import torch
items = [
{
"label": "label1",
"input_ids": torch.LongTensor([[1, 23, 24, 2]]),
"attention_mask": torch.LongTensor([[0, 1, 1, 0]]),
},
{
"label": "label2",
"input_ids": torch.LongTensor([[1, 23, 24, 43, 44, 2]]),
"attention_mask": torch.LongTensor([[0, 1, 1, 1, 1, 0]]),
},
]
self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"])
self.assertTrue(
torch.allclose(
_pad(items, "input_ids", 10, "right"),
torch.LongTensor([[1, 23, 24, 2, 10, 10], [1, 23, 24, 43, 44, 2]]),
)
)
self.assertTrue(
torch.allclose(
_pad(items, "input_ids", 10, "left"),
torch.LongTensor([[10, 10, 1, 23, 24, 2], [1, 23, 24, 43, 44, 2]]),
)
)
self.assertTrue(
torch.allclose(
_pad(items, "attention_mask", 0, "right"), torch.LongTensor([[0, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 0]])
)
)
@require_torch
def test_pipeline_image_padding(self):
import torch
items = [
{
"label": "label1",
"pixel_values": torch.zeros((1, 3, 10, 10)),
},
{
"label": "label2",
"pixel_values": torch.zeros((1, 3, 10, 10)),
},
]
self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"])
self.assertTrue(
| |
config]
##------------------------------------------------------------------------------------
## Old Shapes TESTING
##------------------------------------------------------------------------------------
def prep_oldshapes_test(init_with = None, FCN_layers = False, batch_sz = 5, epoch_steps = 4, training_folder= "mrcnn_oldshape_test_logs"):
import mrcnn.shapes as shapes
MODEL_DIR = os.path.join(DIR_TRAINING, training_folder)
# MODEL_DIR = os.path.join(DIR_TRAINING, "mrcnn_development_logs")
# Build configuration object -----------------------------------------------
config = shapes.ShapesConfig()
config.BATCH_SIZE = batch_sz # Batch size is 2 (# GPUs * images/GPU).
config.IMAGES_PER_GPU = batch_sz # Must match BATCH_SIZE
config.STEPS_PER_EPOCH = epoch_steps
config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]
# Build shape dataset -----------------------------------------------
dataset_test = shapes.ShapesDataset(config)
dataset_test.load_shapes(500)
dataset_test.prepare()
# Recreate the model in inference mode
try :
del model
print('delete model is successful')
gc.collect()
except:
pass
KB.clear_session()
model = modellib.MaskRCNN(mode="inference",
config=config,
model_dir=MODEL_DIR,
FCN_layers = FCN_layers )
print(' COCO Model Path : ', COCO_DIR_TRAINING)
print(' Checkpoint folder Path: ', MODEL_DIR)
print(' Model Parent Path : ', DIR_TRAINING)
    print(' Resnet Model Path : ', RESNET_DIR_TRAINING)
model.load_model_weights(init_with = init_with)
test_generator = data_generator(dataset_test, model.config, shuffle=True,
batch_size=model.config.BATCH_SIZE,
augment = False)
model.config.display()
return [model, dataset_test, test_generator, config]
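# Example call (a sketch that assumes the surrounding notebook has already set
# DIR_TRAINING and the other path globals used above):
#
#     model, dataset_test, test_generator, config = \
#         prep_oldshapes_test(init_with='last', FCN_layers=True, batch_sz=5, epoch_steps=4)
#     batch = next(test_generator)  # one batch of images/targets for inference checks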
"""
##------------------------------------------------------------------------------------
## Old Shapes DEVELOPMENT
##------------------------------------------------------------------------------------
def prep_oldshapes_dev(init_with = None, FCN_layers = False, batch_sz = 5):
import mrcnn.shapes as shapes
MODEL_DIR = os.path.join(DIR_TRAINING, "mrcnn_oldshape_dev_logs")
config = build_config(batch_sz = batch_sz)
dataset_train = shapes.ShapesDataset()
dataset_train.load_shapes(150, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()
try :
del model
print('delete model is successful')
gc.collect()
except:
pass
KB.clear_session()
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR, FCN_layers = FCN_layers)
print(' COCO Model Path : ', COCO_DIR_TRAINING)
print(' Checkpoint folder Path: ', MODEL_DIR)
print(' Model Parent Path : ', DIR_TRAINING)
    print(' Resnet Model Path : ', RESNET_DIR_TRAINING)
load_model(model, init_with = init_with)
train_generator = data_generator(dataset_train, model.config, shuffle=True,
batch_size=model.config.BATCH_SIZE,
augment = False)
model.config.display()
return [model, dataset_train, train_generator, config]
"""
##------------------------------------------------------------------------------------
## New Shapes DEVELOPMENT
##------------------------------------------------------------------------------------
def prep_newshapes_dev(init_with = "last", FCN_layers= False, batch_sz = 5):
import mrcnn.new_shapes as new_shapes
MODEL_DIR = os.path.join(DIR_TRAINING, "mrcnn_newshape_dev_logs")
config = build_config(batch_sz = batch_sz, newshapes=True)
# Build shape dataset -----------------------------------------------
# Training dataset
dataset_train = new_shapes.NewShapesDataset()
dataset_train.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()
# Validation dataset
dataset_val = new_shapes.NewShapesDataset()
dataset_val.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_val.prepare()
try :
del model, train_generator, val_generator, mm
gc.collect()
except:
pass
KB.clear_session()
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR,FCN_layers = FCN_layers)
print('DIR_TRAINING : ', DIR_TRAINING)
print('COCO_DIR_TRAINING : ', COCO_DIR_TRAINING)
print('RESNET_DIR_TRAINING : ', RESNET_DIR_TRAINING)
print('MODEL_DIR : ', MODEL_DIR)
print('Last Saved Model : ', model.find_last())
load_model(model, init_with = 'last')
train_generator = data_generator(dataset_train, model.config, shuffle=True,
batch_size=model.config.BATCH_SIZE,
augment = False)
config.display()
return [model, dataset_train, train_generator, config]
"""
# def prep_newshapes_train(init_with = "last", FCN_layers= False, batch_sz =5, epoch_steps = 4, training_folder= None):
import mrcnn.new_shapes as new_shapes
# config.CHECKPOINT_FOLDER = os.path.join(DIR_TRAINING, config.CHECKPOINT_FOLDER)
# MODEL_DIR = os.path.join(DIR_TRAINING, training_folder)
# Build configuration object -----------------------------------------------
config = new_shapes.NewShapesConfig()
config.TRAINING_PATH = os.path.join(DIR_TRAINING, training_folder)
# config.BATCH_SIZE = batch_sz # Batch size is 2 (# GPUs * images/GPU).
# config.IMAGES_PER_GPU = batch_sz # Must match BATCH_SIZE
# config.STEPS_PER_EPOCH = epoch_steps
# config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]
# Build shape dataset -----------------------------------------------
# Training dataset
dataset_train = new_shapes.NewShapesDataset(config)
dataset_train.load_shapes(10000)
dataset_train.prepare()
# Validation dataset
dataset_val = new_shapes.NewShapesDataset(config)
dataset_val.load_shapes(2500)
dataset_val.prepare()
# print('DIR_TRAINING : ', DIR_TRAINING)
# print('COCO_DIR_TRAINING : ', COCO_DIR_TRAINING)
# print('RESNET_DIR_TRAINING : ', RESNET_DIR_TRAINING)
# print('MODEL_DIR : ', MODEL_DIR)
# print('Last Saved Model : ', model.find_last())
# exclude_layers = \
# ['fcn_block1_conv1'
# ,'fcn_block1_conv2'
# ,'fcn_block1_pool'
# ,'fcn_block2_conv1'
# ,'fcn_block2_conv2'
# ,'fcn_block2_pool'
# ,'fcn_block3_conv1'
# ,'fcn_block3_conv2'
# ,'fcn_block3_conv3'
# ,'fcn_block3_pool'
# ,'fcn_block4_conv1'
# ,'fcn_block4_conv2'
# ,'fcn_block4_conv3'
# ,'fcn_block4_pool'
# ,'fcn_block5_conv1'
# ,'fcn_block5_conv2'
# ,'fcn_block5_conv3'
# ,'fcn_block5_pool'
# ,'fcn_fc1'
# ,'dropout_1'
# ,'fcn_fc2'
# ,'dropout_2'
# ,'fcn_classify'
# ,'fcn_bilinear'
# ,'fcn_heatmap_norm'
# ,'fcn_scoring'
# ,'fcn_heatmap'
# ,'fcn_norm_loss']
# load_model(model, init_with = 'last', exclude = exclude_layers)
# model.load_model_weights(init_with = init_with)
# print('=====================================')
# print(" Load second weight file ?? ")
# model.keras_model.load_weights('E:/Models/vgg16_weights_tf_dim_ordering_tf_kernels.h5', by_name= True)
dataset_train, train_generator = newshapes_dataset("train", mrcnn_config, image_count = 10000)
dataset_val , val_generator = newshapes_dataset("val" , mrcnn_config, image_count = 2500)
config.display()
return [model, dataset_train, dataset_val, train_generator, val_generator, config]
"""
##------------------------------------------------------------------------------------
## build_trainfcn_pipeline()
##------------------------------------------------------------------------------------
def build_trainfcn_pipeline( fcn_weight_file = 'last', batch_size = 2):
start_time = datetime.now().strftime("%m-%d-%Y @ %H:%M:%S")
print()
print('--> Execution started at:', start_time)
print(" Tensorflow Version: {} Keras Version : {} ".format(tf.__version__,keras.__version__))
# Parse command line arguments
#------------------------------------------------------------------------------------
parser = command_line_parser()
input_parms = "--epochs 2 --steps_in_epoch 32 --last_epoch 0 "
input_parms +="--batch_size "+str(batch_size)+ " --lr 0.00001 --val_steps 8 "
# input_parms +="--mrcnn_logs_dir train_mrcnn_newshapes "
# input_parms +="--fcn_logs_dir train_fcn8_newshapes "
input_parms +="--mrcnn_logs_dir train_mrcnn_coco "
input_parms +="--fcn_logs_dir train_fcn8_coco_adam "
input_parms +="--mrcnn_model last "
input_parms +="--fcn_model init "
input_parms +="--opt adagrad "
input_parms +="--fcn_arch fcn8 "
input_parms +="--fcn_layers all "
input_parms +="--sysout screen "
input_parms +="--new_log_folder "
# input_parms +="--fcn_model /home/kbardool/models/train_fcn_adagrad/shapes20180709T1732/fcn_shapes_1167.h5"
print(input_parms)
args = parser.parse_args(input_parms.split())
# args = parser.parse_args()
# if debug is true set stdout destination to stringIO
#----------------------------------------------------------------------------------------------
# debug = False
if args.sysout == 'FILE':
sys.stdout = io.StringIO()
# print(" Dataset : ", args.dataset)
# print(" Logs : ", args.logs)
# print(" Limit : ", args.limit)
print(" MRCNN Model : ", args.mrcnn_model)
print(" FCN Model : ", args.fcn_model)
print(" MRCNN Log Dir : ", args.mrcnn_logs_dir)
print(" FCN Log Dir : ", args.fcn_logs_dir)
print(" FCN Arch : ", args.fcn_arch)
print(" FCN Log Dir : ", args.fcn_layers)
print(" Last Epoch : ", args.last_epoch)
print(" Epochs to run : ", args.epochs)
print(" Steps in each epoch: ", args.steps_in_epoch)
print(" Validation steps : ", args.val_steps)
print(" Batch Size : ", args.batch_size)
print(" Optimizer : ", args.opt)
print(" sysout : ", args.sysout)
# setup project directories
#---------------------------------------------------------------------------------
paths = Paths(fcn_training_folder = args.fcn_logs_dir, mrcnn_training_folder = args.mrcnn_logs_dir)
paths.display()
# Build configuration object
#------------------------------------------------------------------------------------
mrcnn_config = CocoConfig()
# import mrcnn.new_shapes as new_shapes
# mrcnn_config = new_shapes.NewShapesConfig()
mrcnn_config.NAME = 'mrcnn'
mrcnn_config.TRAINING_PATH = paths.MRCNN_TRAINING_PATH
mrcnn_config.COCO_DATASET_PATH = paths.COCO_DATASET_PATH
mrcnn_config.COCO_MODEL_PATH = paths.COCO_MODEL_PATH
mrcnn_config.RESNET_MODEL_PATH = paths.RESNET_MODEL_PATH
mrcnn_config.VGG16_MODEL_PATH = paths.VGG16_MODEL_PATH
mrcnn_config.COCO_CLASSES = None
mrcnn_config.DETECTION_PER_CLASS = 200
mrcnn_config.HEATMAP_SCALE_FACTOR = 4
mrcnn_config.BATCH_SIZE = int(args.batch_size) # Batch size is 2 (# GPUs * images/GPU).
mrcnn_config.IMAGES_PER_GPU = int(args.batch_size) # Must match BATCH_SIZE
mrcnn_config.STEPS_PER_EPOCH = int(args.steps_in_epoch)
mrcnn_config.LEARNING_RATE = float(args.lr)
mrcnn_config.EPOCHS_TO_RUN = int(args.epochs)
mrcnn_config.FCN_INPUT_SHAPE = mrcnn_config.IMAGE_SHAPE[0:2]
mrcnn_config.LAST_EPOCH_RAN = int(args.last_epoch)
mrcnn_config.NEW_LOG_FOLDER = True
mrcnn_config.SYSOUT = args.sysout
# mrcnn_config.WEIGHT_DECAY = 2.0e-4
# mrcnn_config.VALIDATION_STEPS = int(args.val_steps)
# mrcnn_config.REDUCE_LR_FACTOR = 0.5
# mrcnn_config.REDUCE_LR_COOLDOWN = 30
# mrcnn_config.REDUCE_LR_PATIENCE = 40
# mrcnn_config.EARLY_STOP_PATIENCE= 80
# mrcnn_config.EARLY_STOP_MIN_DELTA = 1.0e-4
# mrcnn_config.MIN_LR = 1.0e-10
# mrcnn_config.OPTIMIZER = args.opt.upper()
# mrcnn_config.display()
# Build MRCNN Model
#------------------------------------------------------------------------------------
from mrcnn.prep_notebook import mrcnn_coco_train
mrcnn_model, mrcnn_config = mrcnn_coco_train(mode = 'trainfcn', mrcnn_config = mrcnn_config)
# Build configuration for FCN model
#------------------------------------------------------------------------------------
fcn_config = CocoConfig()
fcn_config.COCO_DATASET_PATH = paths.COCO_DATASET_PATH
fcn_config.COCO_HEATMAP_PATH = paths.COCO_HEATMAP_PATH
# mrcnn_config.COCO_MODEL_PATH = COCO_MODEL_PATH
# mrcnn_config.RESNET_MODEL_PATH = RESNET_MODEL_PATH
fcn_config.NAME = 'fcn'
fcn_config.TRAINING_PATH = paths.FCN_TRAINING_PATH
fcn_config.VGG16_MODEL_PATH = paths.FCN_VGG16_MODEL_PATH
fcn_config.HEATMAP_SCALE_FACTOR = 4
fcn_config.FCN_INPUT_SHAPE = fcn_config.IMAGE_SHAPE[0:2] // fcn_config.HEATMAP_SCALE_FACTOR
fcn_config.BATCH_SIZE = int(args.batch_size) # Batch size is 2 (# GPUs * images/GPU).
fcn_config.IMAGES_PER_GPU = int(args.batch_size) # Must match BATCH_SIZE
fcn_config.EPOCHS_TO_RUN = int(args.epochs)
fcn_config.STEPS_PER_EPOCH = int(args.steps_in_epoch)
fcn_config.LAST_EPOCH_RAN = int(args.last_epoch)
fcn_config.LEARNING_RATE = float(args.lr)
fcn_config.VALIDATION_STEPS = int(args.val_steps)
fcn_config.BATCH_MOMENTUM = 0.9
fcn_config.WEIGHT_DECAY = 2.0e-4
fcn_config.REDUCE_LR_FACTOR = 0.5
fcn_config.REDUCE_LR_COOLDOWN = 5
fcn_config.REDUCE_LR_PATIENCE = 5
fcn_config.EARLY_STOP_PATIENCE = 15
fcn_config.EARLY_STOP_MIN_DELTA = 1.0e-4
fcn_config.MIN_LR = 1.0e-10
fcn_config.NEW_LOG_FOLDER = args.new_log_folder
fcn_config.OPTIMIZER = args.opt
fcn_config.SYSOUT = args.sysout
fcn_config.display()
# Build FCN Model in Training Mode
#------------------------------------------------------------------------------------
try :
del fcn_model
gc.collect()
except:
pass
fcn_model = fcn_modellib.FCN(mode="training", arch = 'FCN8', config=fcn_config)
#### Display FCN model info
# fcn_model.config.display()
fcn_model.layer_info()
# exclude=["mrcnn_class_logits"] # ,"mrcnn_bbox_fc"] #, "mrcnn_bbox", "mrcnn_mask"])
mrcnn_model.load_model_weights(init_with = 'last', exclude = None)
# Load FCN Model weights
#------------------------------------------------------------------------------------
fcn_model.load_model_weights(init_with = fcn_weight_file)
return mrcnn_model, fcn_model
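# Example usage (illustrative only; the exact training entry point on the returned
# models is assumed, not shown in this file):
#
#     mrcnn_model, fcn_model = build_trainfcn_pipeline(fcn_weight_file='last', batch_size=2)
#     # mrcnn_model.config and fcn_model.config now describe the coupled MRCNN -> FCN setup;
#     # training would then be launched with the project's own train routine.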
##------------------------------------------------------------------------------------
## build_inference_pipeline()
##------------------------------------------------------------------------------------
def build_inference_pipeline( fcn_weight_file = 'last', batch_size = 2):
start_time = datetime.now().strftime("%m-%d-%Y @ %H:%M:%S")
print()
print('--> Execution started at:', start_time)
print(" Tensorflow Version: {} Keras Version : {} ".format(tf.__version__,keras.__version__))
# Parse command line arguments
#------------------------------------------------------------------------------------
parser = command_line_parser()
input_parms = "--batch_size "+str(batch_size)+ " "
input_parms +="--mrcnn_logs_dir train_mrcnn_coco "
input_parms +="--fcn_logs_dir train_fcn8_coco_adam "
input_parms +="--mrcnn_model last | |
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from numpy import linalg as LA
from matplotlib.animation import FuncAnimation
from matplotlib.ticker import FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
from operator import itemgetter, attrgetter, truediv
import statistics as stat
from scipy import signal
from scipy.optimize import minimize
import math
import matplotlib.image as mpimg
#from scipy import signal
import librosa
import librosa.display
import wave
import sys
import soundfile as sf
import os
import pyaudio
import threading
import pickle
np.seterr(divide='ignore', invalid='ignore')
import params
WINDOW = params.WINDOW
BINS_PER_OCTAVE = params.BINS_PER_OCTAVE
BINS_PER_OCTAVE_ONSETS = params.BINS_PER_OCTAVE_ONSETS
FILTER_SCALE = params.FILTER_SCALE
STEP = 512
α = params.α
ω = params.ω
H = params.H
T = params.T
T_att = params.T_att
cmap = params.cmap
ϵ = sys.float_info.epsilon
class AudioFile:
chunk = 1024
def __init__(self, file):
""" Init audio stream """
self.file = file
self.wf = wave.open(file, 'rb')
self.p = pyaudio.PyAudio()
self.stream = self.p.open(
format = self.p.get_format_from_width(self.wf.getsampwidth()),
channels = self.wf.getnchannels(),
rate = self.wf.getframerate(),
output = True
)
def __truePlay(self):
data = self.wf.readframes(self.chunk)
while data != '':
self.stream.write(data)
data = self.wf.readframes(self.chunk)
def play(self):
""" Play entire file """
x = threading.Thread(target=self.__truePlay, args=())
x.start()
def close(self):
""" Graceful shutdown """
self.stream.close()
self.p.terminate()
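# Example usage (minimal sketch; "example.wav" is a placeholder path):
#
#     audio = AudioFile("example.wav")
#     audio.play()   # playback runs in a background thread
#     ...            # analysis work can proceed while the file plays
#     audio.close()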
class SignalSepare:
""" Prend en entrée en signal et le signal des pistes audio séparées. """
def __init__(self, signal, sr, pistes, Notemin = 'D3', Notemax = 'D9', onset_frames = [], delOnsets = [], addOnsets = [], score = [], instrument = ''):
self.y = signal
self.pistes = pistes
self.sr = sr
self.n_pistes = len(pistes)
self.Notemin = Notemin
self.Notemax = Notemax
self.delOnsets = delOnsets
self.addOnsets = addOnsets
self.score = score
self.instrument = instrument
self.n_bins_ONSETS = 0
self.n_bins = 0
self.N = 0
self.fmin = 0
self.fmax = 0
self.n_bins = 0
self.n_att = 0
self.n_frames = 0
self.N_sample = []
self.Dev = []
self.Seuil = []
self.times = []
self.Onset_given = True
self.onset_times = []
self.onset_times_graph = []
self.onset_frames = onset_frames
self.Percu = []
self.Chrom_ONSETS = []
self.ChromDB_ONSETS = []
self.ChromSync_ONSETS = []
self.ChromPistesSync_ONSETS = []
self.ChromDB_reloc = []
self.Chrom = []
self.chromSync = []
self.chromSyncDB = []
self.chromPistesSync = []
self.chromSyncSimpl = []
self.chromPistesSyncSimpl = []
self.ChromNoHpss = []
self.energy = []
self.energyPistes = []
self.activation = []
self.n_notes = []
self.chrom_concordance = []
self.concordance = []
self.chrom_concordanceTot = []
self.concordanceTot = []
self.chrom_concordance3 = []
self.concordance3 = []
self.tension = []
self.roughness = []
self.chrom_harmonicity = []
self.liste_partials = []
self.tensionSignal = []
self.chrom_roughness = []
self.roughnessSignal = []
self.chrom_harmonicChange = []
self.harmonicChange = []
self.chrom_crossConcordance = []
self.crossConcordance = []
self.chrom_crossConcordanceTot = []
self.crossConcordanceTot = []
self.chrom_diffConcordance = []
self.diffRoughness = []
self.chrom_diffRoughness = []
self.diffConcordance = []
self.harmonicity = []
self.virtualPitch = []
self.context = []
self.contextSimpl = []
self.energyContext = []
self.chrom_harmonicNovelty = []
self.harmonicNovelty = []
self.harmonicityContext = []
self.virtualPitchContext = []
self.roughnessContext = []
self.chrom_roughnessContext = []
self.diffConcordanceContext = []
self.chrom_diffConcordanceContext = []
self.diffRoughnessContext = []
self.chrom_diffRoughnessContext = []
def DetectionOnsets(self):
self.fmin = librosa.note_to_hz(self.Notemin)
self.fmax = librosa.note_to_hz(self.Notemax)
#Nmin = int((sr/(fmax*(2**(1/BINS_PER_OCTAVE)-1))))
#Nmax = int((sr/(fmin*(2**(1/BINS_PER_OCTAVE)-1))))
self.n_bins_ONSETS = int((librosa.note_to_midi(self.Notemax) - librosa.note_to_midi(self.Notemin))*BINS_PER_OCTAVE_ONSETS/12)
self.Chrom_ONSETS = np.abs(librosa.cqt(y=self.y, sr=self.sr, hop_length = STEP, fmin= self.fmin, bins_per_octave=BINS_PER_OCTAVE_ONSETS, n_bins=self.n_bins_ONSETS, window=WINDOW))
self.ChromDB_ONSETS = librosa.amplitude_to_db(self.Chrom_ONSETS, ref=np.max)
self.N = len(self.ChromDB_ONSETS[0])
self.times = librosa.frames_to_time(np.arange(self.N), sr=self.sr, hop_length=STEP)
        # ONSET DETECTION (for precomputed onsets, pass them in self.onset_frames at initialization)
if len(self.onset_frames) == 0:
self.Onset_given = False
Diff = np.zeros((self.n_bins_ONSETS,self.N))
self.Dev = np.zeros(self.N)
for j in range(1,self.N):
for i in range(self.n_bins_ONSETS):
Diff[i,j] = np.abs(self.ChromDB_ONSETS[i,j]-self.ChromDB_ONSETS[i,j-1])
self.Dev[j] = sum(Diff[:,j])
            # THRESHOLD FUNCTION
            # Pad the head and tail with zeros
l = []
Onsets = []
for k in range(int(H/2)):
l.append(0)
for val in self.Dev:
l.append(val)
for k in range(int(H/2)):
l.append(0)
            # Compute the running median
for i in range(self.N):
self.Seuil.append(α + ω*stat.median(l[i:i+H]))
if self.Dev[i] > self.Seuil[i]:
Onsets.append(i)
            # ONSET FILTERING
            # Keep onsets at least T apart
i=0
while i<(len(Onsets)-1):
while (i<(len(Onsets)-1)) and (self.times[Onsets[i+1]]< self.times[Onsets[i]]+T):
if (self.Dev[Onsets[i+1]]-self.Seuil[Onsets[i+1]]) < (self.Dev[Onsets[i]]-self.Seuil[Onsets[i]]): del Onsets[i+1]
#if (Dev[Onsets[i+1]]) < (Dev[Onsets[i]]): del Onsets[i+1]
else: del Onsets[i]
i=i+1
            # Manual removal of extra onsets (requires having displayed the onsets detected so far)
if isinstance(self.delOnsets, str): Onsets = []
else:
self.delOnsets.sort(reverse = True)
for o in self.delOnsets:
Onsets.pop(o-1)
            # Manual addition of onsets
for t in self.addOnsets:
Onsets.append(librosa.time_to_frames(t, sr=self.sr, hop_length=STEP))
Onsets.sort()
self.onset_frames = librosa.util.fix_frames(Onsets, x_min=0, x_max=self.ChromDB_ONSETS.shape[1]-1)
self.onset_frames = librosa.util.fix_frames(self.onset_frames, x_min=0, x_max=self.ChromDB_ONSETS.shape[1]-1)
self.onset_times = librosa.frames_to_time(self.onset_frames, sr=self.sr, hop_length = STEP)
self.n_frames = len(self.onset_frames)-1
self.n_notes = np.ones(self.n_frames)
        # TRANSFORM with the precision required for the analysis
self.n_bins = int((librosa.note_to_midi(self.Notemax) - librosa.note_to_midi(self.Notemin))*BINS_PER_OCTAVE/12)
self.Chrom = np.abs(librosa.cqt(y=self.y, sr=self.sr, hop_length = STEP, fmin= self.fmin, bins_per_octave=BINS_PER_OCTAVE, n_bins=self.n_bins, window=WINDOW, filter_scale = FILTER_SCALE))
# self.Chrom[np.isnan(self.Chrom)] = 0
        # Spectral relocation
if params.spectral_reloc:
freq_analyse = [self.fmin*2**(k/BINS_PER_OCTAVE) for k in range(self.n_bins)]
N = [round(self.sr * params.FILTER_SCALE/(f*(2**(1/BINS_PER_OCTAVE)-1))) for f in freq_analyse]
self.N_sample = [round(n/STEP) for n in N]
Chrom_copy = np.copy(self.Chrom)
for k in range(self.n_bins):
for n in reversed(range(self.N)):
if n <= self.N_sample[k]: self.Chrom[k,n] = Chrom_copy[k,n]
else: self.Chrom[k,n] = Chrom_copy[k,n-int(self.N_sample[k]/2)]
        # Harmonic / percussive decomposition
if params.decompo_hpss:
self.ChromNoHpss = np.copy(self.Chrom)
self.Chrom = librosa.decompose.hpss(self.Chrom, margin=params.margin)[0]
self.ChromDB = librosa.amplitude_to_db(self.Chrom, ref=np.max)
        # Synchronize on the onsets, removing the start and end of long frames
self.chromSync = np.zeros((self.n_bins,self.n_frames))
self.n_att = int(librosa.time_to_frames(T_att, sr=self.sr, hop_length = STEP))
# for j in range(self.n_frames):
# if j==0:
# for i in range(self.n_bins):
# self.chromSync[i,j] = np.median(self.Chrom[i][self.onset_frames[j]:self.onset_frames[j+1]])
# else:
# for i in range(self.n_bins):
# self.chromSync[i,j] = np.median(self.Chrom[i][(self.onset_frames[j]+self.n_att):(self.onset_frames[j+1])])
        Δmin = 0.1  # in seconds
for i in range(self.n_bins):
f = self.fmin*2**(i/BINS_PER_OCTAVE)
T_ret = 1.5 / (f * (2**(1.0/(12*4)) - 1))
for j in range(self.n_frames):
if j==0: self.chromSync[i,j] = np.median(self.Chrom[i][self.onset_frames[j]:self.onset_frames[j+1]])
else:
                    if T_ret < (self.onset_times[j+1] - self.onset_times[j]) - Δmin:
self.chromSync[i,j] = np.median(self.Chrom[i][(self.onset_frames[j]+int(librosa.time_to_frames(T_ret, sr=self.sr, hop_length = STEP))):(self.onset_frames[j+1])])
else:
self.chromSync[i,j] = np.median(self.Chrom[i][(self.onset_frames[j+1]-int(librosa.time_to_frames(Δmin, sr=self.sr, hop_length = STEP))):(self.onset_frames[j+1])])
self.chromSync[np.isnan(self.chromSync)] = 0
self.chromSync[:,0] = np.zeros(self.n_bins)
self.chromSync[:,-1] = np.zeros(self.n_bins)
self.chromSyncDB = librosa.amplitude_to_db(self.chromSync, ref=np.max)
        # Compute the energy
for t in range(self.n_frames):
self.energy.append(LA.norm(self.chromSync[:,t])**2)
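        # Summary of the onset rule implemented above (α, ω, H, T come from params):
        # Dev[j] is the frame-to-frame spectral flux of the CQT in dB, and the adaptive
        # threshold is Seuil[j] = α + ω * median(Dev[j : j+H]) computed on the zero-padded
        # flux. A frame is an onset candidate when Dev[j] > Seuil[j], and candidates closer
        # than T seconds are merged by keeping the one with the largest Dev - Seuil margin.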
def Clustering(self):
""" Découpe et synchronise les pistes séparées sur les ONSETS, stoque le spectrogramme
synchronisé en construisant self.chromPistesSync"""
if len(self.pistes) != 0:
            # Build chromPistesSync
ChromPistes = []
for k, voice in enumerate(self.pistes):
if params.decompo_hpss:
ChromPistes.append(np.nan_to_num(librosa.decompose.hpss(np.abs(librosa.cqt(y=voice, sr=self.sr, hop_length = STEP, fmin= self.fmin, bins_per_octave=BINS_PER_OCTAVE, n_bins=self.n_bins)), margin=params.margin)[0],False))
else: ChromPistes.append(np.nan_to_num(np.abs(librosa.cqt(y=voice, sr=self.sr, hop_length = STEP, fmin= self.fmin, bins_per_octave=BINS_PER_OCTAVE, n_bins=self.n_bins)),False))
for k, voice in enumerate(self.pistes):
# if params.spectral_reloc:
# ChromPistesCopy = np.copy(ChromPistes)
# for f in range(self.n_bins):
# for n in reversed(range(self.N)):
# if n <= self.N_sample[f]: ChromPistes[k][f,n] = ChromPistesCopy[k][f,n]
# else: ChromPistes[k][f,n] = ChromPistesCopy[k][f,n-int(self.N_sample[f]/2)]
self.chromPistesSync.append(np.zeros((self.n_bins,self.n_frames)))
for j in range(self.n_frames):
if j==0:
for i in range(self.n_bins):
self.chromPistesSync[k][i,j] = np.median(ChromPistes[k][i][self.onset_frames[j]:self.onset_frames[j+1]])
else:
for i in range(self.n_bins):
self.chromPistesSync[k][i,j] = np.median(ChromPistes[k][i][(self.onset_frames[j]+self.n_att):(self.onset_frames[j+1]-self.n_att)])
            # Compute the energy of each track
self.energyPistes = np.zeros((self.n_pistes, self.n_frames))
for t in range(self.n_frames):
for k in range(self.n_pistes):
self.energyPistes[k,t] = np.sum(np.multiply(self.chromPistesSync[k][:,t], self.chromPistesSync[k][:,t]))
            # Compute the activation matrix (which voices contain notes at which times) and zero out tracks without a note
            # By default everything is 1, i.e. in each frame every track plays a note
self.activation = np.ones((self.n_pistes, self.n_frames))
if title in params.list_calcul_nombre_notes:
max_energy = np.amax(self.energyPistes, axis = 1)
for k in range(self.n_pistes):
for t in range(self.n_frames):
if (self.energyPistes[k,t] < params.seuil_activation * max_energy[k]):
self.activation[k,t] = 0
self.chromPistesSync[k][:,t] = 0
self.activation[:,0] = 0
self.activation[:,self.n_frames-1] = 0
elif title in params.list_3_voix:
self.activation[-1] = np.zeros(self.n_frames)
            # Count the number of notes
self.n_notes = np.sum(self.activation, axis=0)
self.n_notes[0] = 0
self.n_notes[-1] = 0
def Context(self, type = params.memory_type, size = params.memory_size - 1, decr = params.memory_decr_ponderation):
        # Build the harmonic context
self.context = np.zeros((self.n_bins,self.n_frames))
self.context[:,0] = self.chromSync[:,0]
# Memory = "full"
if isinstance(size,str):
if type == 'max':
for t in range(1,self.n_frames):
self.context[:,t] = np.fmax(self.context[:,t-1],self.chromSync[:,t])
elif type == 'mean':
                # Build the weight vector
weights = [(1/i**decr) for i in range(1,self.n_frames+2)]
                # Averaging
for t in range(1,self.n_frames):
self.context[:,t] = np.average(self.chromSync[:,:(t+1)], axis=1, weights=[weights[t-i] for i in range(t+1)])
# Memory = int
elif isinstance(size,int):
if type == 'max':
for t in range(1,size+1):
self.context[:,t] = np.fmax(self.chromSync[:,t], self.context[:,t-1])
for t in range(size+1,self.n_frames):
self.context[:,t] = np.amax(self.chromSync[:,(t-size):(t+1)], axis = 1)
elif type == 'mean':
                # Build the weight vector
weights = [(1/i**decr) for i in range(1,size+2)]
                # Averaging
for t in range(1,size+1):
self.context[:,t] = np.average(self.chromSync[:,1:(t+1)], axis=1, weights=[weights[t-i] for i in range(1,t+1)])
for t in range(size+1,self.n_frames):
self.context[:,t] = np.average(self.chromSync[:,(t-size):(t+1)], axis=1, weights=[weights[size-i] for i | |
##
## Instructions:
## 1) Install python-protobuf for your IDAPython installation. This probably means
## downloading it from https://protobuf.googlecode.com/files/protobuf-2.5.0.tar.gz
## and manually running setup.py
## 2) This script should be run via IDA's batch mode. See the output
## of --help for more details on the command line options.
##
import idautils
import idaapi
import idc
import sys
from os import path
import os
import argparse
import struct
#hack for IDAPython to see google protobuf lib
sys.path.append('/usr/lib/python2.7/dist-packages')
import CFG_pb2
_DEBUG = False
EXTERNALS = set()
DATA_SEGMENTS = []
RECOVERED_EAS = set()
ACCESSED_VIA_JMP = set()
EMAP = {}
EMAP_DATA = {}
SPECIAL_REP_HANDLING = [
[0xC3],
]
TRAPS = [
idaapi.NN_int3,
idaapi.NN_icebp,
]
CALLS = [
idaapi.NN_call,
idaapi.NN_callfi,
idaapi.NN_callni]
RETS = [
idaapi.NN_retf,
idaapi.NN_retfd,
idaapi.NN_retfq,
idaapi.NN_retfw,
idaapi.NN_retn,
idaapi.NN_retnd,
idaapi.NN_retnq,
idaapi.NN_retnw]
COND_BRANCHES = [\
idaapi.NN_ja,\
idaapi.NN_jae,\
idaapi.NN_jb,\
idaapi.NN_jbe,\
idaapi.NN_jc,\
idaapi.NN_jcxz,\
idaapi.NN_je,\
idaapi.NN_jecxz,\
idaapi.NN_jg,\
idaapi.NN_jge,\
idaapi.NN_jl,\
idaapi.NN_jle,\
idaapi.NN_jna,\
idaapi.NN_jnae,\
idaapi.NN_jnb,\
idaapi.NN_jnbe,\
idaapi.NN_jnc,\
idaapi.NN_jne,\
idaapi.NN_jng,\
idaapi.NN_jnge,\
idaapi.NN_jnl,\
idaapi.NN_jnle,\
idaapi.NN_jno,\
idaapi.NN_jnp,\
idaapi.NN_jns,\
idaapi.NN_jnz,\
idaapi.NN_jo,\
idaapi.NN_jp,\
idaapi.NN_jpe,\
idaapi.NN_jpo,\
idaapi.NN_jrcxz,\
idaapi.NN_js,\
idaapi.NN_jz,]
UCOND_BRANCHES = [\
idaapi.NN_jmp,\
idaapi.NN_jmpfi,\
idaapi.NN_jmpni,\
idaapi.NN_jmpshort]
def DEBUG(s):
if _DEBUG:
sys.stdout.write(s)
def readDword(ea):
bytestr = readBytesSlowly(ea, ea+4);
dword = struct.unpack("<L", bytestr)[0]
return dword
def isLinkedElf():
return idc.GetLongPrm(INF_FILETYPE) == idc.FT_ELF and \
idc.BeginEA() !=0xffffffffL
def fixExternalName(fn):
if fn in EMAP:
return fn
if fn in EMAP_DATA:
return fn
if not isLinkedElf() and fn[0] == '_':
return fn[1:]
if fn.endswith("_0"):
newfn = fn[:-2]
if newfn in EMAP:
return newfn
return fn
def nameInMap(themap, fn):
return fixExternalName(fn) in themap
def getFromEMAP(fname):
fixname = fixExternalName(fname)
return EMAP[fixname]
def doesNotReturn(fname):
try:
args, conv, ret, sign = getFromEMAP(fname)
if ret == "Y":
return True
    except KeyError as ke:
raise Exception("Unknown external: " + fname)
return False
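# EMAP entries are 4-tuples (argument_count, calling_convention, noreturn_flag, signature),
# as unpacked in getFromEMAP/doesNotReturn above. A purely hypothetical example of the
# expected shape (the concrete values are illustrative, not taken from any std_defs file):
#
#     EMAP["exit"] = (1, "C", "Y", None)    # one arg, C convention, does not return
#     EMAP["malloc"] = (1, "C", "N", None)  # returns normally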
def isHlt(ea):
insn_t = idautils.DecodeInstruction(ea)
return insn_t.itype in [idaapi.NN_hlt]
def isJmpTable(ea):
insn_t = idautils.DecodeInstruction(ea)
is_jmp = insn_t.itype in [idaapi.NN_jmp,
idaapi.NN_jmpfi,
idaapi.NN_jmpni]
if not is_jmp: return False
if idaapi.get_switch_info_ex(ea):
return True
return False
def addFunction(M, ep):
F = M.internal_funcs.add()
F.entry_address = ep
return F
def entryPointHandler(M, ep, name, args_from_stddef=False):
EP = M.entries.add()
EP.entry_name = name
EP.entry_address = ep
have_edata = False
    # should we get argument count,
    # calling convention, and return type from std_defs?
if args_from_stddef:
try:
(argc, conv, ret, sign) = getFromEMAP(name)
have_edata = True
except KeyError as ke:
pass
if not have_edata:
(argc, conv, ret) = getExportType(name, ep)
EP.entry_extra.entry_argc = argc
EP.entry_extra.entry_cconv = conv
if ret == 'Y':
EP.entry_extra.does_return = False
else:
EP.entry_extra.does_return = True
F = addFunction(M, ep)
DEBUG("At EP {0}:{1:x}\n".format(name,ep))
return F
def basicBlockHandler(F, block, blockset, processed_blocks):
B = F.blocks.add()
B.base_address = block.startEA
DEBUG("BB: {0:x}\n".format(block.startEA))
B.block_follows.extend(block.succs)
if _DEBUG:
str_l = ["{0:x}".format(i) for i in block.succs]
if len(str_l) > 0:
DEBUG("Successors: {0}\n".format(", ".join(str_l)))
return B
def readInstructionBytes(inst):
insn_t = idautils.DecodeInstruction(inst)
return [idc.Byte(b) for b in xrange(inst, inst+insn_t.size)]
def isInternalCode(ea):
pf = idc.GetFlags(ea)
return idc.isCode(pf) and not idc.isData(pf)
def isNotCode(ea):
pf = idc.GetFlags(ea)
return not idc.isCode(pf)
def isExternalReference(ea):
# see if this is in an internal or external code ref
DEBUG("Testing {0:x} for externality\n".format(ea))
ext_types = [idc.SEG_XTRN]
seg = idc.SegStart(ea)
if seg == idc.BADADDR:
DEBUG("WARNING: Could not get segment addr for: {0:x}\n".format(ea))
return False
segtype = idc.GetSegmentAttr(seg, idc.SEGATTR_TYPE)
if segtype in ext_types:
return True
return False
def getFunctionName(ea):
return idc.GetTrueNameEx(ea,ea)
def addInst(block, addr, inst_bytes, true_target=None, false_target=None):
# check if there is a lock prefix:
insn_t = idautils.DecodeInstruction(addr)
if insn_t is not None and (insn_t.auxpref & 0x1) == 0x1:
# has LOCK
i_lock = block.insts.add()
i_lock.inst_addr = addr
i_lock.inst_bytes = chr(inst_bytes[0])
i_lock.inst_len = 1
addr += 1
inst_bytes = inst_bytes[1:]
if insn_t is not None and (insn_t.auxpref & 0x3) == 0x2:
DEBUG("REP Prefix at: 0x{0:x}\n".format(addr))
# special handling of certain REP pairs
rest_bytes = inst_bytes[1:]
if rest_bytes in SPECIAL_REP_HANDLING:
# generate a separate REP_PREFIX instruction
i_rep = block.insts.add()
i_rep.inst_addr = addr
i_rep.inst_bytes = chr(inst_bytes[0])
i_rep.inst_len = 1
addr += 1
inst_bytes = inst_bytes[1:]
inst = block.insts.add()
inst.inst_addr = addr
str_val = "".join([chr(b) for b in inst_bytes])
inst.inst_bytes = str_val
inst.inst_len = len(inst_bytes)
if true_target != None: inst.true_target = true_target
if false_target != None: inst.false_target = false_target
return inst
def isConditionalJump(ea):
insn_t = idautils.DecodeInstruction(ea)
return insn_t.itype in COND_BRANCHES
def isUnconditionalJump(ea):
insn_t = idautils.DecodeInstruction(ea)
return insn_t.itype in UCOND_BRANCHES
def isCall(ea):
insn_t = idautils.DecodeInstruction(ea)
return insn_t.itype in CALLS
def isRet(ea):
insn_t = idautils.DecodeInstruction(ea)
return insn_t.itype in RETS
def isTrap(ea):
insn_t = idautils.DecodeInstruction(ea)
return insn_t.itype in TRAPS
def findRelocOffset(ea, size):
for i in xrange(ea,ea+size):
if idc.GetFixupTgtOff(i) != -1:
return i-ea
return -1
def handleExternalRef(fn):
# Don't mangle symbols for fully linked ELFs... yet
in_a_map = fn in EMAP or fn in EMAP_DATA
if not isLinkedElf():
if fn.startswith("__imp_"):
fn = fn[6:]
if fn.endswith("_0"):
fn = fn[:-2]
if fn.startswith("_") and not in_a_map:
fn = fn[1:]
if fn.startswith("@") and not in_a_map:
fn = fn[1:]
if '@' in fn:
fn = fn[:fn.find('@')]
fixfn = fixExternalName(fn)
EXTERNALS.add(fixfn)
return fixfn
def isInData(start_ea, end_ea):
for (start,end) in DATA_SEGMENTS:
if start_ea >= start and start_ea < end:
DEBUG("Data Range: {0:x} <= {1:x} < {2:x}\n".format(start, start_ea, end))
DEBUG("Data Range: {:x} - {:x}\n".format(start_ea, end_ea))
if end_ea <= end:
return True
else:
DEBUG("{0:x} NOT <= {1:x}\n".format(end_ea, end))
DEBUG("{0:x}-{1:x} overlaps with: {2:x}-{3:x}\n".format(start_ea, end_ea, start, end))
raise Exception("Overlapping data segments!")
else:
if end_ea > start and end_ea <= end:
DEBUG("Overlaps with: {0:x}-{1:x}\n".format(start, end))
raise Exception("Overlapping data segments!")
return False
def isExternalData(fn):
indata = fn in EMAP_DATA
incode = fn in EMAP
if indata and not incode:
return True
elif indata and incode:
raise Exception("Symbol "+fn+" defined as both code and data!")
else:
return False
def handleJmpTable(I, inst, new_eas):
si = idaapi.get_switch_info_ex(inst)
jsize = si.get_jtable_element_size()
jstart = si.jumps
# only handle size 4 cases
if jsize != 4:
raise Exception("Jump table size not 4!")
return
DEBUG("\tJMPTable Start: {0:x}\n".format(jstart))
seg_start = idc.SegStart(jstart)
if seg_start != idc.BADADDR:
I.jump_table.offset_from_data = jstart - seg_start
DEBUG("\tJMPTable offset from data: {:x}\n".format(I.jump_table.offset_from_data))
I.jump_table.zero_offset = 0
i = 0
entries = si.get_jtable_size()
for i in xrange(entries):
je = readDword(jstart+i*jsize)
I.jump_table.table_entries.append(je)
if je not in RECOVERED_EAS and isStartOfFunction(je):
new_eas.add(je)
DEBUG("\t\tAdding JMPTable {0}: {1:x}\n".format(i, je))
#je = idc.GetFixupTgtOff(jstart+i*jsize)
#while je != -1:
# I.jump_table.table_entries.append(je)
# if je not in RECOVERED_EAS:
# new_eas.add(je)
# DEBUG("\t\tAdding JMPTable {0}: {1:x}\n".format( i, je))
# i += 1
# je = idc.GetFixupTgtOff(jstart+i*jsize)
def isElfThunk(ea):
if not isLinkedElf():
return False, None
if isUnconditionalJump(ea):
have_ext_ref = False
for cref in idautils.CodeRefsFrom(ea, 0):
if isExternalReference(cref):
have_ext_ref = True
break
if have_ext_ref:
fn = getFunctionName(cref)
return True, fn
return False, None
def addDataReference(M, I, inst, dref, new_eas):
if inValidSegment(dref):
if isExternalReference(dref):
fn = getFunctionName(dref)
fn = handleExternalRef(fn)
if isExternalData(fn):
I.ext_data_name = fn
sys.stdout.write("EXTERNAL DATA REF FROM {0:x} to {1}\n".format(inst, fn))
else:
I.ext_call_name = fn
sys.stdout.write("EXTERNAL CODE REF FROM {0:x} to {1}\n".format(inst, fn))
elif isInternalCode(dref):
I.call_target = dref
if dref not in RECOVERED_EAS:
new_eas.add(dref)
else:
dref_size = idc.ItemSize(dref)
DEBUG("\t\tData Ref: {0:x}, size: {1}\n".format(
dref, dref_size))
I.data_offset = handleDataRelocation(M, dref, new_eas)
else:
DEBUG("WARNING: Data not in valid segment {0:x}\n".format(dref))
def instructionHandler(M, B, inst, new_eas):
insn_t = idautils.DecodeInstruction(inst)
if not insn_t:
# handle jumps after noreturn functions
if idc.Byte(inst) == 0xCC:
I = addInst(B, inst, [0xCC])
return I, True
else:
raise Exception("Cannot read instruction at: {0:x}".format(inst))
# skip HLTs -- they are privileged, and are used in ELFs after a noreturn call
if isHlt(inst):
return None, False
DEBUG("\t\tinst: {0}\n".format(idc.GetDisasm(inst)))
inst_bytes = readInstructionBytes(inst)
DEBUG("\t\tBytes: {0}\n".format(inst_bytes))
I = addInst(B, inst, inst_bytes)
if isJmpTable(inst):
handleJmpTable(I, inst, new_eas)
return I, False
crefs_from_here = idautils.CodeRefsFrom(inst, 0)
#check for code refs from here
crefs = []
# pull code refs from generator into a list
for cref_i in crefs_from_here:
crefs.append(cref_i)
is_call = isCall(inst)
isize = insn_t.size
next_ea = inst+isize
had_refs = False
# this is a call $+5, needs special handling
if len(crefs) == 0 and is_call and isize == 5:
selfCallEA = next_ea
DEBUG("INTERNAL CALL $+5: {0:x}\n".format(selfCallEA))
sys.stdout.write("LOCAL NORETURN CALL!\n")
I.local_noreturn = True
if selfCallEA not in RECOVERED_EAS:
DEBUG("Adding new EA: {0:x}\n".format(selfCallEA))
new_eas.add(selfCallEA)
I.call_target = selfCallEA
return I, True
for cref in crefs:
had_refs = True
fn = getFunctionName(cref)
if is_call:
elfy, fn_replace = isElfThunk(cref)
if elfy:
fn = fn_replace
if isExternalReference(cref) or elfy:
fn = handleExternalRef(fn)
I.ext_call_name = fn
DEBUG("EXTERNAL CALL: {0}\n".format(fn))
if doesNotReturn(fn):
return I, True
else:
I.call_target = cref
if cref not in RECOVERED_EAS:
new_eas.add(cref)
DEBUG("INTERNAL CALL: {0}\n".format(fn))
    # standard supervised training
error_metrics = train_super(basic_setting['epochs'], model, optimizer, train_loader, test_loader, basic_setting['b'])
super_results[str(k)] = error_metrics
# semi-supervised training using pseudo-labels
# error_metrics = train_semisuper(basic_setting['epochs'], model, optimizer, train_loader, unlabeled_loader, test_loader, basic_setting['b'])
# save list to pickle file
with open(file_name, 'w') as f:
json.dump(super_results, f)
"""### Experiment 2 - Epoch-wise DD vs. n_samples"""
basic_setting = {
'k': 64,
'epochs': 300,
'label_noise': 0.2,
'n_batch': 128,
'n_classes': 10,
'lr': 1e-4,
'b': 0.15,
    'n_labeled': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
'augmentation': True
}
super_results = {}
semisuper_results = {}
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
file_name = '/gdrive/My Drive/CMSC 828W Research/Code (Won & Amartya)/super_epoch_n_samples.json'
open_file = open(file_name, "ab")
# define transformations for training and test set
if basic_setting['augmentation']:
transform_cifar = transforms.Compose([transforms.ToTensor(),transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()])
else:
transform_cifar = transforms.Compose([transforms.ToTensor()])
transform_test = transforms.Compose([transforms.ToTensor()])
print(basic_setting)
for ratio in basic_setting['n_labeled']:
train = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_cifar)
test = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
total_samples = len(train)
# assign random labels to (label_noise)% of the training set (needed for semi-supervised learning)
rands = np.random.choice(total_samples, int(basic_setting['label_noise']*total_samples), replace=False)
for rand in rands:
train.targets[rand] = torch.randint(high=10, size=(1,1)).item()
    # split training data into labeled and unlabeled
    n_labeled = int(total_samples * ratio)
    n_unlabeled = total_samples - n_labeled
    train, val = random_split(train, [n_labeled, n_unlabeled])
print("number of labeled: {}, number of unlabeled: {}\n".format(len(train), len(val)))
train_loader = DataLoader(train, batch_size=basic_setting['n_batch'], shuffle=True, num_workers=2)
test_loader = DataLoader(test, batch_size=basic_setting['n_batch'], shuffle=True, num_workers=2)
model = CIFARResNet(basic_setting['n_classes'], basic_setting['k']) # define model with the number of parameter
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=basic_setting['lr'])
# optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# standard supervised training
error_metrics = train_super(basic_setting['epochs'], model, optimizer, train_loader, test_loader, basic_setting['b'])
super_results[str(ratio)] = error_metrics
# save list to pickle file
with open(file_name, 'w') as f:
json.dump(super_results, f)
"""### Experiment 3 - Epoch-wise DD vs. flooding (Finished!)"""
basic_setting = {
'k': 64,
'epochs': 200,
'label_noise': 0.2,
'n_batch': 128,
'n_classes': 10,
'lr': 1e-4,
'b': [0.1, 0.15, 0.2],
'n_labeled': (50000, 0),
'augmentation': True
}
super_results = {}
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
file_name = '/gdrive/My Drive/CMSC 828W Research/Code (Won & Amartya)/Supervised Experiments/super_epoch_flooding.json'
open_file = open(file_name, "ab")
# define transformations for training and test set
if basic_setting['augmentation']:
transform_cifar = transforms.Compose([transforms.ToTensor(),transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()])
else:
transform_cifar = transforms.Compose([transforms.ToTensor()])
transform_test = transforms.Compose([transforms.ToTensor()])
print(basic_setting)
train = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_cifar)
test = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
total_samples = len(train)
# assign random labels to (label_noise)% of the training set (needed for semi-supervised learning)
rands = np.random.choice(total_samples, int(basic_setting['label_noise']*total_samples), replace=False)
for rand in rands:
train.targets[rand] = torch.randint(high=10, size=(1,1)).item()
# split training data into labeled and unlabeled
n_labeled, n_unlabeled = basic_setting['n_labeled']
train, val = random_split(train, [n_labeled, n_unlabeled])
print("number of labeled: {}, number of unlabeled: {}\n".format(len(train), len(val)))
train_loader = DataLoader(train, batch_size=basic_setting['n_batch'], shuffle=True, num_workers=2)
test_loader = DataLoader(test, batch_size=basic_setting['n_batch'], shuffle=True, num_workers=2)
for flood in basic_setting['b']:
model = CIFARResNet(basic_setting['n_classes'], basic_setting['k']) # define model with the number of parameter
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=basic_setting['lr'])
# optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# standard supervised training
error_metrics = train_super(basic_setting['epochs'], model, optimizer, train_loader, test_loader, flood)
super_results[str(flood)] = error_metrics
# save list to pickle file
with open(file_name, 'w') as f:
json.dump(super_results, f)
"""### Experiment 4 - Epoch-wise DD vs. label noise (Finished!)"""
basic_setting = {
'k': 64,
'epochs': 200,
'noise': [0.1, 0.15, 0.2],
'n_batch': 128,
'n_classes': 10,
'lr': 1e-4,
'b': 0.1,
'n_labeled': (20000, 30000),
'augmentation': True
}
super_results = {}
semisuper_results = {}
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
file_name = '/gdrive/My Drive/CMSC 828W Research/Code (Won & Amartya)/Supervised Experiments/super_epoch_label_noise.json'
open_file = open(file_name, "ab")
# define transformations for training and test set
if basic_setting['augmentation']:
transform_cifar = transforms.Compose([transforms.ToTensor(),transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()])
else:
transform_cifar = transforms.Compose([transforms.ToTensor()])
transform_test = transforms.Compose([transforms.ToTensor()])
print(basic_setting)
n_labeled, n_unlabeled = basic_setting['n_labeled']
for noise in basic_setting['noise']:
# train = datasets.MNIST(root='./data', train=True, download=True, transform=transform_cifar)
# test = datasets.MNIST(root='./data', train=False, download=True, transform=transform_test)
train = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_cifar)
test = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
total_samples = len(train)
# assign random labels to (label_noise)% of the training set (needed for semi-supervised learning)
rands = np.random.choice(total_samples, int(noise*total_samples), replace=False)
for rand in rands:
train.targets[rand] = torch.randint(high=10, size=(1,1)).item()
# split training data into labeled and unlabeled
train, val = random_split(train, [n_labeled, n_unlabeled])
print("number of labeled: {}, number of unlabeled: {}\n".format(len(train), len(val)))
train_loader = DataLoader(train, batch_size=basic_setting['n_batch'], shuffle=True, num_workers=2)
test_loader = DataLoader(test, batch_size=basic_setting['n_batch'], shuffle=True, num_workers=2)
model = CIFARResNet(basic_setting['n_classes'], basic_setting['k']) # define model with the number of parameter
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=basic_setting['lr'])
# optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# standard supervised training
error_metrics = train_super(basic_setting['epochs'], model, optimizer, train_loader, test_loader, basic_setting['b'])
super_results[str(noise)] = error_metrics
# save list to pickle file
with open(file_name, 'w') as f:
json.dump(super_results, f)
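# Helper sketch: the label-noise injection used inline in the experiments above,
# factored into a reusable function. The function name and signature are ours
# (illustrative), not part of the original notebook.
def add_label_noise(dataset, noise_ratio, n_classes=10):
    """Flip a fraction of the dataset's targets to uniformly random classes, in place."""
    total = len(dataset)
    flipped = np.random.choice(total, int(noise_ratio * total), replace=False)
    for idx in flipped:
        dataset.targets[idx] = torch.randint(high=n_classes, size=(1,)).item()
    return dataset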
"""# Semi-Supervised Experiments
### Experiment 1 - Epoch-wise DD vs. labeled ratio
"""
basic_setting = {
'k': 64,
'epochs': 200,
'n_batch': 128,
'n_classes': 10,
'lr': 1e-4,
'b': 0.1,
'n_labeled': [(20000, 30000),(10000, 40000)],
'augmentation': True
}
# We observe all forms of double descent most strongly in settings with label noise in the train set (as is often the case when collecting train data in the real-world).
# lr = (n_unlabeled)**(-0.5) # SGD learning rate
print(basic_setting)
semisuper_results = {}
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
file_name = '/gdrive/My Drive/CMSC 828W Research/Code (Won & Amartya)/Semi-supervised Experiments/semisuper_epoch_ratio.json'
open_file = open(file_name, "ab")
# define transformations for training and test set
if basic_setting['augmentation']:
transform_cifar = transforms.Compose([transforms.ToTensor(),transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()])
else:
transform_cifar = transforms.Compose([transforms.ToTensor()])
transform_test = transforms.Compose([transforms.ToTensor()])
# load either MNIST or CIFAR-10
# train = datasets.MNIST(root='./data', train=True, download=True, transform=transform_mnist)
# test = datasets.MNIST(root='./data', train=False, download=True, transform=transform_mnist)
for n_labeled, n_unlabeled in basic_setting['n_labeled']:
train = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_cifar)
test = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
train, val = random_split(train, [n_labeled, n_unlabeled])
print("number of labeled: {}, number of unlabeled: {}\n".format(len(train), len(val)))
train_loader = DataLoader(train, batch_size=int(len(train)/basic_setting['n_batch']), shuffle=True, num_workers=2)
unlabeled_loader = DataLoader(val, batch_size=int(len(val)/basic_setting['n_batch']), shuffle=True, num_workers=2)
test_loader = DataLoader(test, batch_size=basic_setting['n_batch'], shuffle=True, num_workers=2)
model = CIFARResNet(basic_setting['n_classes'], basic_setting['k']) # define model with the number of parameter
model.to(device)
# total_params = sum(p.numel() for p in model.parameters())
# print("number of model parameters = {} when k={}".format(total_params, basic_setting['k']))
optimizer = torch.optim.Adam(model.parameters(), lr=basic_setting['lr'])
# optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# semi-supervised training using pseudo-labels
error_metrics = train_semisuper(basic_setting['epochs'], model, optimizer, train_loader, unlabeled_loader, test_loader, basic_setting['b'])
    semisuper_results[str(n_labeled)] = error_metrics
# save list to pickle file
with open(file_name, 'w') as f:
json.dump(semisuper_results, f)
"""### Experiment 2 - Model-wise DD"""
basic_setting = {
'k': 64,
'epochs': 300,
'n_batch': 128,
'n_classes': 10,
'lr': 1e-4,
'b': 0.15,
'n_labeled': [0.6, 0.4, 0.2],
'augmentation': True
}
# We observe all forms of double descent most strongly in settings with label noise in the train set (as is often the case when collecting train data in the real-world).
# lr = (n_unlabeled)**(-0.5) # SGD learning rate
print(basic_setting)
semisuper_results = {}
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
file_name = '/gdrive/My Drive/CMSC 828W Research/Code (Won & Amartya)/semisuper_epoch.json'
open_file = open(file_name, "ab")
# define transformations for training and test set
if basic_setting['augmentation']:
transform_cifar = transforms.Compose([transforms.ToTensor(),transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()])
else:
transform_cifar = transforms.Compose([transforms.ToTensor()])
transform_test = transforms.Compose([transforms.ToTensor()])
# load either MNIST or CIFAR-10
# train = datasets.MNIST(root='./data', train=True, download=True, transform=transform_mnist)
# test = datasets.MNIST(root='./data', train=False, download=True, transform=transform_mnist)
for ratio in basic_setting['n_labeled']:
train = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_cifar)
test = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
    # split training data into labeled and unlabeled
    total_samples = len(train)
    n_labeled = int(total_samples * ratio)
    n_unlabeled = total_samples - n_labeled
train, val = random_split(train, [n_labeled, n_unlabeled])
print("number of labeled: {}, number of unlabeled: {}\n".format(len(train), len(val)))
train_loader = DataLoader(train, batch_size=int(len(train)/basic_setting['n_batch']), shuffle=True, num_workers=2)
unlabeled_loader = DataLoader(val, batch_size=int(len(val)/basic_setting['n_batch']), shuffle=True, num_workers=2)
test_loader = DataLoader(test, batch_size=basic_setting['n_batch'], shuffle=True, num_workers=2)
model = CIFARResNet(basic_setting['n_classes'], basic_setting['k']) # define model with the number of parameter
model.to(device)
# total_params = sum(p.numel() for p in model.parameters())
# print("number of model parameters = {} when k={}".format(total_params, basic_setting['k']))
optimizer = torch.optim.Adam(model.parameters(), lr=basic_setting['lr'])
# optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# semi-supervised training using pseudo-labels
error_metrics = train_semisuper(basic_setting['epochs'], model, optimizer, train_loader, unlabeled_loader, test_loader, basic_setting['b'])
    semisuper_results[str(ratio)] = error_metrics
# save list to pickle file
with open(file_name, 'w') as f:
        json.dump(semisuper_results, f)
"""# Plotting"""
# matplotlib.rcParams.update({'font.size': 25})
def plot_modelwise(results, fname):
titles = ['error', 'loss']
colors = ['blue', 'lime']
labels = ['train', 'test']
k = [] # fixed x-axis
    metrics = []
exp_type = fname.split('/')[-2]
exp_name = fname.split('/')[-1].split('.')[0]
# extract model size and metrics into separate lists
for model_size, metric in results.items():
k.append(int(model_size))
metrics.append(metric[-1])
metrics = np.array(metrics)
# test/train error and loss
for i, title in enumerate(titles):
fig, axes = plt.subplots(figsize=(8, 6), dpi=300)
axes.grid()
axes.plot(k, metrics[:, 2*i], label=labels[0], color=colors[0]) # train
axes.plot(k, metrics[:, 2*i+1], label=labels[1], color=colors[1]) # test
axes.set_xlabel('resnet width=k')
axes.set_ylabel(title)
axes.set_title("model width vs. {}".format(title))
axes.legend(loc='upper right', prop={'size': 15})
fig.savefig('/gdrive/My Drive/CMSC 828W Research/Code (Won & Amartya)/{}/{}_{}.png'.format(exp_type, exp_name, title), dpi=300)
def plot_epochwise(results, fname):
titles = ['error', 'loss']
colors = ['red', 'lime', 'blue']
    labels =
# -*- coding: utf-8 -*-
"""
Source:
http://www.turingfinance.com/random-walks-down-wall-street-stochastic-processes-in-python/
https://github.com/StuartGordonReid/Python-Notebooks/blob/master/Stochastic%20Process%20Algorithms.ipynb
Processes that can be simulated in this module are:
- Brownian Motion
- Geometric Brownian Motion
- The Merton Jump Diffusion Model
- The Heston Stochastic Volatility Model
- The Cox Ingersoll Ross Model
- The Ornstein Uhlenbeck Model
"""
import math
import os
from pathlib import Path
from typing import Tuple
import numpy as np
import numpy.random as nrand
import plotly.graph_objs as go
from plotly.offline import plot
from openseries.load_plotly import load_plotly_dict
class ModelParameters(object):
"""
Encapsulates model parameters
"""
def __init__(
self,
all_s0: float,
all_time: int,
all_delta: float,
all_sigma: float,
gbm_mu: float,
jumps_lamda: float = 0.0,
jumps_sigma: float = 0.0,
jumps_mu: float = 0.0,
cir_a: float = 0.0,
cir_mu: float = 0.0,
all_r0: float = 0.0,
cir_rho: float = 0.0,
ou_a: float = 0.0,
ou_mu: float = 0.0,
heston_a: float = 0.0,
heston_mu: float = 0.0,
heston_vol0: float = 0.0,
):
"""
:param all_s0: This is the starting asset value
:param all_time: This is the amount of time to simulate for
:param all_delta: This is the delta, the rate of time
e.g. 1/252 = daily, 1/12 = monthly
:param all_sigma: This is the volatility of the stochastic processes
:param all_r0: This is the starting interest rate value
:param gbm_mu: This is the annual drift factor for geometric
brownian motion
:param jumps_lamda: This is the probability of a jump happening at
each point in time
:param jumps_sigma: This is the volatility of the jump size
:param jumps_mu: This is the average jump size
:param cir_a: This is the rate of mean reversion for Cox Ingersoll Ross
:param cir_mu: This is the long run average interest rate for
Cox Ingersoll Ross
:param cir_rho: This is the correlation between the wiener processes
of the Heston model
:param ou_a: This is the rate of mean reversion for Ornstein Uhlenbeck
:param ou_mu: This is the long run average interest rate for
Ornstein Uhlenbeck
:param heston_a: This is the rate of mean reversion for volatility in
the Heston model
:param heston_mu: This is the long run average volatility for
the Heston model
:param heston_vol0: This is the starting volatility value for
the Heston model
"""
self.all_s0 = all_s0
self.all_time = all_time
self.all_delta = all_delta
self.all_sigma = all_sigma
self.gbm_mu = gbm_mu
self.lamda = jumps_lamda
self.jumps_sigma = jumps_sigma
self.jumps_mu = jumps_mu
self.cir_a = cir_a
self.cir_mu = cir_mu
self.all_r0 = all_r0
self.cir_rho = cir_rho
self.ou_a = ou_a
self.ou_mu = ou_mu
self.heston_a = heston_a
self.heston_mu = heston_mu
self.heston_vol0 = heston_vol0
def convert_to_returns(log_returns: np.ndarray) -> np.ndarray:
"""
This method exponentiates a sequence of log returns to get daily returns.
    :param log_returns: the log returns to be exponentiated
:return: the exponentiated returns
"""
return np.exp(log_returns)
def convert_to_prices(
param: ModelParameters, log_returns: np.ndarray
) -> np.ndarray:
"""
This method converts a sequence of log returns into normal returns
(exponentiation) and then computes a price
sequence given a starting price, param.all_s0.
:param param: the model parameters object
    :param log_returns: the log returns to be exponentiated
    :return: the price sequence, starting at param.all_s0
"""
assert isinstance(
param, ModelParameters
), "param must be an object of Class ModelParameters"
returns = convert_to_returns(log_returns)
# A sequence of prices starting with param.all_s0
price_sequence: list = [param.all_s0]
for n in range(1, len(returns)):
# Add the price at t-1 * return at t
price_sequence.append(price_sequence[n - 1] * returns[n - 1])
return np.array(price_sequence)
def plot_stochastic_processes(
processes: list, title: str = None
) -> Tuple[go.Figure, str]:
"""
This method plots a list of stochastic processes with a specified title
:param processes:
:param title:
"""
file_name = (
title.replace("/", "").replace("#", "").replace(" ", "").upper()
)
plotfile = os.path.join(
os.path.abspath(str(Path.home())), "Documents", f"{file_name}.html"
)
fig, logo = load_plotly_dict()
figure = go.Figure(fig)
x_axis = np.arange(0, len(processes[0]), 1)
for n in range(len(processes)):
figure.add_trace(
go.Scatter(
x=x_axis,
y=processes[n],
mode="lines",
hovertemplate="%{y}<br>%{x}",
line=dict(width=2.5, dash="solid"),
)
)
figure.update_layout(
title=dict(text=title),
xaxis_title="Time, t",
yaxis_title="simulated asset price",
showlegend=False,
yaxis=dict(tickformat=None),
)
figure.add_layout_image(logo)
plot(
figure,
filename=plotfile,
auto_open=True,
link_text="",
include_plotlyjs="cdn",
)
return figure, plotfile
def brownian_motion_log_returns(
param: ModelParameters, seed: int = None
) -> np.ndarray:
"""
This method returns a Wiener process. The Wiener process is also called
Brownian motion. For more information about the Wiener process check out
the Wikipedia page: http://en.wikipedia.org/wiki/Wiener_process
:param param: the model parameters object
:param seed:
:return: brownian motion log returns
"""
assert isinstance(
param, ModelParameters
), "param must be an object of Class ModelParameters"
if seed is not None:
nrand.seed(seed)
sqrt_delta_sigma = math.sqrt(param.all_delta) * param.all_sigma
return nrand.normal(loc=0, scale=sqrt_delta_sigma, size=param.all_time)
def brownian_motion_levels(
param: ModelParameters, seed: int = None
) -> np.ndarray:
"""
Returns a price sequence whose returns evolve according to
a brownian motion
:param param: model parameters object
:param seed:
:return: returns a price sequence which follows a brownian motion
"""
assert isinstance(
param, ModelParameters
), "param must be an object of Class ModelParameters"
return convert_to_prices(
param, brownian_motion_log_returns(param, seed=seed)
)
def geometric_brownian_motion_log_returns(
param: ModelParameters, seed: int = None
) -> np.ndarray:
"""
This method constructs a sequence of log returns which, when
exponentiated, produce a random Geometric Brownian Motion (GBM).
GBM is the stochastic process underlying the Black Scholes
options pricing formula.
:param param: model parameters object
:param seed:
:return: returns the log returns of a geometric brownian motion process
"""
assert isinstance(
param, ModelParameters
), "param must be an object of Class ModelParameters"
wiener_process = np.array(brownian_motion_log_returns(param, seed=seed))
sigma_pow_mu_delta = (
param.gbm_mu - 0.5 * math.pow(param.all_sigma, 2.0)
) * param.all_delta
return wiener_process + sigma_pow_mu_delta
def geometric_brownian_motion_levels(
param: ModelParameters, seed: int = None
) -> np.ndarray:
"""
Returns a sequence of price levels for an asset which evolves according
to a geometric brownian motion
:param param: model parameters object
:param seed:
:return: the price levels for the asset
"""
assert isinstance(
param, ModelParameters
), "param must be an object of Class ModelParameters"
return convert_to_prices(
param, geometric_brownian_motion_log_returns(param, seed=seed)
)
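# Example usage (a minimal sketch; the parameter values are illustrative
# assumptions, not defaults of this module):
#
#   mp = ModelParameters(
#       all_s0=100.0,         # starting asset value
#       all_time=252,         # one year of daily steps
#       all_delta=1.0 / 252,  # daily time increment
#       all_sigma=0.2,        # annualised volatility
#       gbm_mu=0.05,          # annual drift
#   )
#   paths = [geometric_brownian_motion_levels(mp, seed=s) for s in range(5)]
#   figure, plotfile = plot_stochastic_processes(paths, title="GBM example")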
def jump_diffusion_process(param: ModelParameters, seed: int = None) -> list:
"""
This method produces a sequence of Jump Sizes which represent a jump
diffusion process. These jumps are combined with a geometric brownian
motion (log returns) to produce the Merton model.
:param param: the model parameters object
:param seed:
:return: jump sizes for each point in time
(mostly zeroes if jumps are infrequent)
"""
assert isinstance(
param, ModelParameters
), "param must be an object of Class ModelParameters"
if seed is not None:
nrand.seed(seed)
s_n = time = 0
small_lamda = -(1.0 / param.lamda)
jump_sizes = []
for k in range(0, param.all_time):
jump_sizes.append(0.0)
while s_n < param.all_time:
s_n += small_lamda * math.log(nrand.uniform(0, 1))
for j in range(0, param.all_time):
if (
time * param.all_delta
<= s_n * param.all_delta
<= (j + 1) * param.all_delta
):
jump_sizes[j] += nrand.normal(
param.jumps_mu, param.jumps_sigma
)
break
time += 1
return jump_sizes
def geometric_brownian_motion_jump_diffusion_log_returns(
param: ModelParameters, seed: int = None
) -> np.ndarray:
"""
    This method combines a geometric brownian motion process
(log returns) with a jump diffusion process (log returns) to produce a
sequence of gbm jump returns.
:param param: model parameters object
:param seed:
:return: returns a GBM process with jumps in it
"""
assert isinstance(
param, ModelParameters
), "param must be an object of Class ModelParameters"
jump_diffusion = jump_diffusion_process(param, seed=seed)
geometric_brownian_motion = geometric_brownian_motion_log_returns(
param, seed=seed
)
return np.add(jump_diffusion, geometric_brownian_motion)
def geometric_brownian_motion_jump_diffusion_levels(
param: ModelParameters, seed: int = None
) -> np.ndarray:
"""
    This method converts a sequence of gbm jump returns into a price sequence
which evolves according to a geometric brownian motion but can contain
jumps at any point in time.
:param param: model parameters object
:param seed:
:return: the price levels
"""
assert isinstance(
param, ModelParameters
), "param must be an object of Class ModelParameters"
return convert_to_prices(
param,
geometric_brownian_motion_jump_diffusion_log_returns(param, seed=seed),
)
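# Example usage (a sketch with illustrative jump parameters, not values taken
# from this module): the ModelParameters example above extended with Merton
# jump terms,
#
#   mp = ModelParameters(
#       all_s0=100.0, all_time=252, all_delta=1.0 / 252, all_sigma=0.2,
#       gbm_mu=0.05, jumps_lamda=0.1, jumps_sigma=0.05, jumps_mu=-0.02,
#   )
#   prices = geometric_brownian_motion_jump_diffusion_levels(mp, seed=42)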
def heston_construct_correlated_path(
param: ModelParameters, brownian_motion_one: np.ndarray, seed: int = None
) -> Tuple[np.ndarray, np.ndarray]:
"""
This method is a simplified version of the Cholesky decomposition method
for just two assets. It does not make use of matrix algebra and is
therefore quite easy to implement.
:param param: model parameters object
:param brownian_motion_one: A first path to correlate against
:param seed:
:return: a correlated brownian motion path
"""
assert isinstance(
param, ModelParameters
), "param must be an object of Class ModelParameters"
if seed is not None:
nrand.seed(seed)
    # We do not multiply by sigma here, we do that in
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Dict, Union
import cirq
import numpy as np
from . import qsim
# List of parameter names that appear in valid Cirq protos.
GATE_PARAMS = [
"exponent",
"phase_exponent",
"global_shift",
"x_exponent",
"z_exponent",
"axis_phase_exponent",
"phi",
"theta",
]
def _cirq_gate_kind(gate: cirq.ops.Gate):
if isinstance(gate, cirq.ops.ControlledGate):
return _cirq_gate_kind(gate.sub_gate)
if isinstance(gate, cirq.ops.identity.IdentityGate):
# Identity gates will decompose to no-ops.
pass
if isinstance(gate, cirq.ops.XPowGate):
# cirq.rx also uses this path.
if gate.exponent == 1 and gate.global_shift == 0:
return qsim.kX
return qsim.kXPowGate
if isinstance(gate, cirq.ops.YPowGate):
# cirq.ry also uses this path.
if gate.exponent == 1 and gate.global_shift == 0:
return qsim.kY
return qsim.kYPowGate
if isinstance(gate, cirq.ops.ZPowGate):
# cirq.rz also uses this path.
if gate.global_shift == 0:
if gate.exponent == 1:
return qsim.kZ
if gate.exponent == 0.5:
return qsim.kS
if gate.exponent == 0.25:
return qsim.kT
return qsim.kZPowGate
if isinstance(gate, cirq.ops.HPowGate):
if gate.exponent == 1 and gate.global_shift == 0:
return qsim.kH
return qsim.kHPowGate
if isinstance(gate, cirq.ops.CZPowGate):
if gate.exponent == 1 and gate.global_shift == 0:
return qsim.kCZ
return qsim.kCZPowGate
if isinstance(gate, cirq.ops.CXPowGate):
if gate.exponent == 1 and gate.global_shift == 0:
return qsim.kCX
return qsim.kCXPowGate
if isinstance(gate, cirq.ops.PhasedXPowGate):
return qsim.kPhasedXPowGate
if isinstance(gate, cirq.ops.PhasedXZGate):
return qsim.kPhasedXZGate
if isinstance(gate, cirq.ops.XXPowGate):
if gate.exponent == 1 and gate.global_shift == 0:
return qsim.kXX
return qsim.kXXPowGate
if isinstance(gate, cirq.ops.YYPowGate):
if gate.exponent == 1 and gate.global_shift == 0:
return qsim.kYY
return qsim.kYYPowGate
if isinstance(gate, cirq.ops.ZZPowGate):
if gate.exponent == 1 and gate.global_shift == 0:
return qsim.kZZ
return qsim.kZZPowGate
if isinstance(gate, cirq.ops.SwapPowGate):
if gate.exponent == 1 and gate.global_shift == 0:
return qsim.kSWAP
return qsim.kSwapPowGate
if isinstance(gate, cirq.ops.ISwapPowGate):
# cirq.riswap also uses this path.
if gate.exponent == 1 and gate.global_shift == 0:
return qsim.kISWAP
return qsim.kISwapPowGate
if isinstance(gate, cirq.ops.PhasedISwapPowGate):
# cirq.givens also uses this path.
return qsim.kPhasedISwapPowGate
if isinstance(gate, cirq.ops.FSimGate):
return qsim.kFSimGate
if isinstance(gate, cirq.ops.TwoQubitDiagonalGate):
return qsim.kTwoQubitDiagonalGate
if isinstance(gate, cirq.ops.ThreeQubitDiagonalGate):
return qsim.kThreeQubitDiagonalGate
if isinstance(gate, cirq.ops.CCZPowGate):
if gate.exponent == 1 and gate.global_shift == 0:
return qsim.kCCZ
return qsim.kCCZPowGate
if isinstance(gate, cirq.ops.CCXPowGate):
if gate.exponent == 1 and gate.global_shift == 0:
return qsim.kCCX
return qsim.kCCXPowGate
if isinstance(gate, cirq.ops.CSwapGate):
return qsim.kCSwapGate
if isinstance(gate, cirq.ops.MatrixGate):
if gate.num_qubits() <= 6:
return qsim.kMatrixGate
raise NotImplementedError(
f"Received matrix on {gate.num_qubits()} qubits; "
+ "only up to 6-qubit gates are supported."
)
if isinstance(gate, cirq.ops.MeasurementGate):
# needed to inherit SimulatesSamples in sims
return qsim.kMeasurement
# Unrecognized gates will be decomposed.
return None
def _control_details(gate: cirq.ops.ControlledGate, qubits):
control_qubits = []
control_values = []
# TODO: support qudit control
for i, cvs in enumerate(gate.control_values):
if 0 in cvs and 1 in cvs:
# This qubit does not affect control.
continue
elif 0 not in cvs and 1 not in cvs:
# This gate will never trigger.
warnings.warn(f"Gate has no valid control value: {gate}", RuntimeWarning)
return (None, None)
# Either 0 or 1 is in cvs, but not both.
control_qubits.append(qubits[i])
if 0 in cvs:
control_values.append(0)
elif 1 in cvs:
control_values.append(1)
return (control_qubits, control_values)
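# Illustrative example of _control_details (hypothetical qubit indices): for a
# gate controlled on two qubits with control_values ((1,), (0,)) and
# qubits == [5, 4, 3], it returns ([5, 4], [1, 0]); a control value of (0, 1)
# on some qubit is skipped because that qubit does not restrict the gate.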
def add_op_to_opstring(
qsim_op: cirq.GateOperation,
qubit_to_index_dict: Dict[cirq.Qid, int],
opstring: qsim.OpString,
):
"""Adds an operation to an opstring (observable).
Raises:
ValueError if qsim_op is not a single-qubit Pauli (I, X, Y, or Z).
"""
qsim_gate = qsim_op.gate
gate_kind = _cirq_gate_kind(qsim_gate)
if gate_kind not in {qsim.kX, qsim.kY, qsim.kZ, qsim.kI1}:
raise ValueError(f"OpString should only have Paulis; got {gate_kind}")
if len(qsim_op.qubits) != 1:
raise ValueError(f"OpString ops should have 1 qubit; got {len(qsim_op.qubits)}")
is_controlled = isinstance(qsim_gate, cirq.ops.ControlledGate)
if is_controlled:
raise ValueError(f"OpString ops should not be controlled.")
qubits = [qubit_to_index_dict[q] for q in qsim_op.qubits]
qsim.add_gate_to_opstring(gate_kind, qubits, opstring)
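# Example usage (a sketch; assumes the compiled qsim module exposes OpString()
# as suggested by the type hints above):
#
#   qubits = cirq.LineQubit.range(2)
#   qubit_to_index = {q: i for i, q in enumerate(reversed(qubits))}
#   opstring = qsim.OpString()
#   add_op_to_opstring(cirq.Z(qubits[0]), qubit_to_index, opstring)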
def add_op_to_circuit(
qsim_op: cirq.GateOperation,
time: int,
qubit_to_index_dict: Dict[cirq.Qid, int],
circuit: Union[qsim.Circuit, qsim.NoisyCircuit],
):
"""Adds an operation to a noisy or noiseless circuit."""
qsim_gate = qsim_op.gate
gate_kind = _cirq_gate_kind(qsim_gate)
qubits = [qubit_to_index_dict[q] for q in qsim_op.qubits]
qsim_qubits = qubits
is_controlled = isinstance(qsim_gate, cirq.ops.ControlledGate)
if is_controlled:
control_qubits, control_values = _control_details(qsim_gate, qubits)
if control_qubits is None:
# This gate has no valid control, and will be omitted.
return
num_targets = qsim_gate.num_qubits() - qsim_gate.num_controls()
if num_targets > 4:
raise NotImplementedError(
f"Received control gate on {num_targets} target qubits; "
+ "only up to 4-qubit gates are supported."
)
qsim_qubits = qubits[qsim_gate.num_controls() :]
qsim_gate = qsim_gate.sub_gate
if (
gate_kind == qsim.kTwoQubitDiagonalGate
or gate_kind == qsim.kThreeQubitDiagonalGate
):
if isinstance(circuit, qsim.Circuit):
qsim.add_diagonal_gate(
time, qsim_qubits, qsim_gate._diag_angles_radians, circuit
)
else:
qsim.add_diagonal_gate_channel(
time, qsim_qubits, qsim_gate._diag_angles_radians, circuit
)
elif gate_kind == qsim.kMatrixGate:
m = [
val for i in list(cirq.unitary(qsim_gate).flat) for val in [i.real, i.imag]
]
if isinstance(circuit, qsim.Circuit):
qsim.add_matrix_gate(time, qsim_qubits, m, circuit)
else:
qsim.add_matrix_gate_channel(time, qsim_qubits, m, circuit)
else:
params = {}
for p, val in vars(qsim_gate).items():
key = p.strip("_")
if key not in GATE_PARAMS:
continue
if isinstance(val, (int, float, np.integer, np.floating)):
params[key] = val
else:
raise ValueError("Parameters must be numeric.")
if isinstance(circuit, qsim.Circuit):
qsim.add_gate(gate_kind, time, qsim_qubits, params, circuit)
else:
qsim.add_gate_channel(gate_kind, time, qsim_qubits, params, circuit)
if is_controlled:
if isinstance(circuit, qsim.Circuit):
qsim.control_last_gate(control_qubits, control_values, circuit)
else:
qsim.control_last_gate_channel(control_qubits, control_values, circuit)
class QSimCircuit(cirq.Circuit):
def __init__(
self,
cirq_circuit: cirq.Circuit,
device: cirq.devices = cirq.devices.UNCONSTRAINED_DEVICE,
allow_decomposition: bool = False,
):
if allow_decomposition:
super().__init__([], device=device)
for moment in cirq_circuit:
for op in moment:
# This should call decompose on the gates
self.append(op)
else:
super().__init__(cirq_circuit, device=device)
def __eq__(self, other):
if not isinstance(other, QSimCircuit):
return False
# equality is tested, for the moment, for cirq.Circuit
return super().__eq__(other)
def _resolve_parameters_(
self, param_resolver: cirq.study.ParamResolver, recursive: bool = True
):
return QSimCircuit(
cirq.resolve_parameters(super(), param_resolver, recursive),
device=self.device,
)
def translate_cirq_to_qsim(
self, qubit_order: cirq.ops.QubitOrderOrList = cirq.ops.QubitOrder.DEFAULT
) -> qsim.Circuit:
"""
Translates this Cirq circuit to the qsim representation.
:qubit_order: Ordering of qubits
:return: a tuple of (C++ qsim Circuit object, moment boundary
gate indices)
"""
qsim_circuit = qsim.Circuit()
ordered_qubits = cirq.ops.QubitOrder.as_qubit_order(qubit_order).order_for(
self.all_qubits()
)
qsim_circuit.num_qubits = len(ordered_qubits)
# qsim numbers qubits in reverse order from cirq
ordered_qubits = list(reversed(ordered_qubits))
def has_qsim_kind(op: cirq.ops.GateOperation):
            return _cirq_gate_kind(op.gate) is not None
def to_matrix(op: cirq.ops.GateOperation):
mat = cirq.unitary(op.gate, None)
if mat is None:
return NotImplemented
return cirq.ops.MatrixGate(mat).on(*op.qubits)
qubit_to_index_dict = {q: i for i, q in enumerate(ordered_qubits)}
time_offset = 0
gate_count = 0
moment_indices = []
for moment in self:
ops_by_gate = [
cirq.decompose(op, fallback_decomposer=to_matrix, keep=has_qsim_kind)
for op in moment
]
moment_length = max((len(gate_ops) for gate_ops in ops_by_gate), default=0)
# Gates must be added in time order.
for gi in range(moment_length):
for gate_ops in ops_by_gate:
if gi >= len(gate_ops):
continue
qsim_op = gate_ops[gi]
time = time_offset + gi
add_op_to_circuit(qsim_op, time, qubit_to_index_dict, qsim_circuit)
gate_count += 1
time_offset += moment_length
moment_indices.append(gate_count)
return qsim_circuit, moment_indices
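    # Example usage (sketch): wrap an ordinary cirq.Circuit and translate it,
    #
    #   q0, q1 = cirq.LineQubit.range(2)
    #   circuit = QSimCircuit(cirq.Circuit([cirq.H(q0), cirq.CNOT(q0, q1)]))
    #   qsim_circuit, moment_indices = circuit.translate_cirq_to_qsim()
    #
    # moment_indices marks the cumulative gate count at each moment boundary,
    # as built in the loop above.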
def translate_cirq_to_qtrajectory(
self, qubit_order: cirq.ops.QubitOrderOrList = cirq.ops.QubitOrder.DEFAULT
) -> qsim.NoisyCircuit:
"""
Translates this noisy Cirq circuit to the qsim representation.
:qubit_order: Ordering of qubits
:return: a tuple of (C++ qsim NoisyCircuit object, moment boundary
gate indices)
"""
qsim_ncircuit = qsim.NoisyCircuit()
ordered_qubits = cirq.ops.QubitOrder.as_qubit_order(qubit_order).order_for(
self.all_qubits()
)
# qsim numbers qubits in reverse order from cirq
ordered_qubits = list(reversed(ordered_qubits))
qsim_ncircuit.num_qubits = len(ordered_qubits)
def has_qsim_kind(op: cirq.ops.GateOperation):
            return _cirq_gate_kind(op.gate) is not None
def to_matrix(op: cirq.ops.GateOperation):
mat = cirq.unitary(op.gate, None)
if mat is None:
return NotImplemented
return cirq.ops.MatrixGate(mat).on(*op.qubits)
qubit_to_index_dict = {q: i for i, q in enumerate(ordered_qubits)}
time_offset = 0
gate_count = 0
moment_indices = []
for moment in self:
moment_length = 0
ops_by_gate = []
ops_by_mix = []
ops_by_channel = []
# Capture ops of each type in the appropriate list.
for qsim_op in moment:
if cirq.has_unitary(qsim_op) or cirq.is_measurement(qsim_op):
oplist = cirq.decompose(
qsim_op, fallback_decomposer=to_matrix, keep=has_qsim_kind
)
ops_by_gate.append(oplist)
moment_length = max(moment_length, len(oplist))
pass
elif cirq.has_mixture(qsim_op):
ops_by_mix.append(qsim_op)
moment_length = max(moment_length, 1)
pass
elif cirq.has_kraus(qsim_op):
ops_by_channel.append(qsim_op)
moment_length = max(moment_length, 1)
pass
else:
raise ValueError(f"Encountered unparseable op: {qsim_op}")
# Gates must be added in time order.
for gi in range(moment_length):
# Handle gate output.
for gate_ops in ops_by_gate:
if gi >= len(gate_ops):
continue
qsim_op = gate_ops[gi]
#!/usr/bin/env python
#%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%#
#%!%!% ----------------------------- FPTE_Result_Stress_2nd ----------------------------- %!%!%#
#%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%#
#
# AUTHORs:
# <NAME>
# <EMAIL>
#
# <NAME>
# <EMAIL>
#
# DATE:
# July 01 00:00:00 2018
#
# SYNTAX:
# python FPTE_Result_Stress_2nd.py
# FPTE_Result_Stress_2nd
#
# EXPLANATION:
#
#__________________________________________________________________________________________________
from sys import stdin
# from numpy import *
import numpy as np
import subprocess
import os.path
import shutil
import math
import time
import sys
import os
def fpte_results():
#%!%!%--- Dictionaries ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%
head = {\
'CI':'\
for, space group-number between 207 and 230, Cubic I structure. \n\n\
C11 C12 C12 0 0 0 \n\
C12 C11 C12 0 0 0 \n\
C12 C12 C11 0 0 0 \n\
0 0 0 C44 0 0 \n\
0 0 0 0 C44 0 \n\
0 0 0 0 0 C44 \n',\
'CII':'\
for, space group-number between 195 and 206, Cubic II structure. \n\n\
C11 C12 C12 0 0 0 \n\
C12 C11 C12 0 0 0 \n\
C12 C12 C11 0 0 0 \n\
0 0 0 C44 0 0 \n\
0 0 0 0 C44 0 \n\
0 0 0 0 0 C44 \n',\
'HI':'\
for, space group-number between 177 and 194, Hexagonal I structure. \n\n\
C11 C12 C13 0 0 0 \n\
C12 C11 C13 0 0 0 \n\
C13 C13 C33 0 0 0 \n\
0 0 0 C44 0 0 \n\
0 0 0 0 C44 0 \n\
0 0 0 0 0 (C11-C12)/2 \n',\
'HII':'\
for, space group-number between 168 and 176, Hexagonal II structure. \n\n\
C11 C12 C13 0 0 0 \n\
C12 C11 C13 0 0 0 \n\
C13 C13 C33 0 0 0 \n\
0 0 0 C44 0 0 \n\
0 0 0 0 C44 0 \n\
0 0 0 0 0 (C11-C12)/2 \n',\
'RI':'\
for, space group-number between 149 and 167, Rhombohedral I structure. \n\n\
C11 C12 C13 C14 0 0 \n\
C12 C11 C13 -C14 0 0 \n\
C13 C13 C33 0 0 0 \n\
C14 -C14 0 C44 0 0 \n\
0 0 0 0 C44 C14 \n\
0 0 0 0 C14 (C11-C12)/2 \n',\
'RII':'\
for, space group-number between 143 and 148, Rhombohedral II structure.\n\n\
C11 C12 C13 C14 C15 0 \n\
C12 C11 C13 -C14 -C15 0 \n\
C13 C13 C33 0 0 0 \n\
C14 -C14 0 C44 0 -C15 \n\
C15 -C15 0 0 C44 C14 \n\
0 0 0 -C15 C14 (C11-C12)/2 \n',\
'TI':'\
for, space group-number between 89 and 142, Tetragonal I structure. \n\n\
C11 C12 C13 0 0 0 \n\
C12 C11 C13 0 0 0 \n\
C13 C13 C33 0 0 0 \n\
0 0 0 C44 0 0 \n\
0 0 0 0 C44 0 \n\
0 0 0 0 0 C66 \n',\
'TII':'\
for, space group-number between 75 and 88, Tetragonal II structure. \n\n\
C11 C12 C13 0 0 C16 \n\
C12 C11 C13 0 0 -C16 \n\
C13 C13 C33 0 0 0 \n\
0 0 0 C44 0 0 \n\
0 0 0 0 C44 0 \n\
C16 -C16 0 0 0 C66 \n',\
'O':'\
for, space group-number between 16 and 74, Orthorhombic structure. \n\n\
C11 C12 C13 0 0 0 \n\
C12 C22 C23 0 0 0 \n\
C13 C23 C33 0 0 0 \n\
0 0 0 C44 0 0 \n\
0 0 0 0 C55 0 \n\
0 0 0 0 0 C66 \n',\
'M':'\
for, space group-number between 3 and 15, Monoclinic structure. \n\n\
C11 C12 C13 0 0 C16 \n\
C12 C22 C23 0 0 C26 \n\
C13 C23 C33 0 0 C36 \n\
0 0 0 C44 C45 0 \n\
0 0 0 C45 C55 0 \n\
C16 C26 C36 0 0 C66 \n',\
'N':'\
for, space group-number between 1 and 2, Triclinic structure. \n\n\
C11 C12 C13 C14 C15 C16 \n\
C12 C22 C23 C24 C25 C26 \n\
C13 C23 C33 C34 C35 C36 \n\
C14 C24 C34 C44 C45 C46 \n\
C15 C25 C35 C45 C55 C56 \n\
C16 C26 C36 C46 C56 C66 \n'}
#--------------------------------------------------------------------------------------------------
#%!%!%--- Reading the "INFO_FPTE" file ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!
INFO=open('INFO_FPTE', 'r')
l1 = INFO.readline()
ordr= int(l1.split()[-1])
if (ordr != 2 and ordr != 3):
sys.exit('\n.... Oops ERROR: The order of the elastic constant is NOT clear !?!?!?'\
'\n Something is WRONG in the "INFO_FPTE" file.\n')
l2 = INFO.readline()
mthd= l2.split()[-1]
if (mthd != 'Stress' and mthd != 'Energy'):
sys.exit('\n.... Oops ERROR: The method of the calculation is NOT clear !?!?!?'\
'\n Something is WRONG in the "INFO_FPTE" file.\n')
l3 = INFO.readline()
cod = l3.split()[-1]
if (cod != 'WIEN2k' and cod != 'exciting' and cod != 'ESPRESSO' and cod != 'VASP'):
sys.exit('\n.... Oops ERROR: The DFT code is NOT clear !?!?!?'\
'\n Something is WRONG in the "INFO_FPTE" file.\n')
#l5 = INFO.readline()
#V0 = float(l5.split()[-2])
l6 = INFO.readline()
mdr = float(l6.split()[-1])
l7 = INFO.readline()
NoP = int(l7.split()[-1])
l4 = INFO.readline()
SGN = int(l4.split()[-1])
INFO.close()
#--------------------------------------------------------------------------------------------------
#%%%--- Calculating the Space-Group Number and classifying it ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!
# NoD = Number of Deformation
if (1 <= SGN and SGN <= 2): # Triclinic
LC = 'N'
NoD= 6
elif(3 <= SGN and SGN <= 15): # Monoclinic
LC = 'M'
NoD= 5
elif(16 <= SGN and SGN <= 74): # Orthorhombic
LC = 'O'
NoD= 3
elif(75 <= SGN and SGN <= 88): # Tetragonal II
LC = 'TII'
NoD= 2
elif(89 <= SGN and SGN <= 142): # Tetragonal I
LC = 'TI'
NoD= 2
elif(143 <= SGN and SGN <= 148): # Rhombohedral II
LC = 'RII'
NoD= 2
elif(149 <= SGN and SGN <= 167): # Rhombohedral I
LC = 'RI'
NoD= 2
elif(168 <= SGN and SGN <= 176): # Hexagonal II
LC = 'HII'
NoD= 2
elif(177 <= SGN and SGN <= 194): # Hexagonal I
LC = 'HI'
NoD= 2
elif(195 <= SGN and SGN <= 206): # Cubic II
LC = 'CII'
NoD= 1
elif(207 <= SGN and SGN <= 230): # Cubic I
LC = 'CI'
NoD= 1
else: sys.exit('\n.... Oops ERROR: WRONG Space-Group Number !?!?!? \n')
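    # Example (illustrative): a rock-salt structure with space-group number 225
    # falls in the 207-230 range above, so LC = 'CI' and NoD = 1; the cubic
    # cases use a single deformation type, and the corresponding stiffness
    # matrix in head['CI'] has the three independent constants C11, C12, C44.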
#--------------------------------------------------------------------------------------------------
lineseparator=' '
for i in range(0,79):
lineseparator=lineseparator+'%'
#%%%--- Making the Matrix ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%
if (LC == 'CI' or \
LC == 'CII'):
Matrix = np.mat([[1.0, 5.0, 0.0],
[2.0, 4.0, 0.0],
[3.0, 3.0, 0.0],
[0.0, 0.0, 4.0],
[0.0, 0.0, 5.0],
[0.0, 0.0, 6.0]])
if (LC == 'HI' or \
LC == 'HII'):
Matrix = np.mat([[ 1, 2, 3, 0, 0],
[ 2, 1, 3, 0, 0],
[ 0, 0, 3, 3, 0],
[ 0, 0, 0, 0, 4],
[ 0, 0, 0, 0, 5],
[ 3,-3, 0, 0, 0],
[ 3,-5,-1, 0, 0],
[-5, 3,-1, 0, 0],
[ 0, 0,-2,-1, 0],
[ 0, 0, 0, 0, 6],
[ 0, 0, 0, 0, 2],
[-2, 2, 0, 0, 0]])
if (LC == 'RI'):
Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],
[ 2, 1, 3,-4, 0, 0],
[ 0, 0, 3, 0, 3, 0],
[ 0, 0, 0,-1, 0, 4],
[ 0, 0, 0, 6, 0, 5],
[ 3,-3, 0, 5, 0, 0],
[ 3,-5,-1, 6, 0, 0],
[-5, 3,-1,-6, 0, 0],
[ 0, 0,-2, 0,-1, 0],
[ 0, 0, 0, 8, 0, 6],
[ 0, 0, 0,-4, 0, 2],
[-2, 2, 0, 2, 0, 0]])
if (LC == 'RII'):
Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],
[ 2, 1, 3,-4,-5, 0, 0],
[ 0, 0, 3, 0, 0, 3, 0],
[ 0, 0, 0,-1,-6, 0, 4],
[ 0, 0, 0, 6,-1, 0, 5],
[ 3,-3, 0, 5,-4, 0, 0],
[ 3,-5,-1, 6, 2, 0, 0],
[-5, 3,-1,-6,-2, 0, 0],
[ 0, 0,-2, 0, 0,-1, 0],
[ 0, 0, 0, 8, 4, 0, 6],
[ 0, 0, 0,-4, 8, 0, 2],
[-2, 2, 0, 2,-6, 0, 0]])
if (LC == 'TI'):
Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],
[ 2, 1, 3, 0, 0, 0],
[ 0, 0, 3, 3, 0, 0],
[ 0, 0, 0, 0, 4, 0],
council to deal only with the important and necessary matters, we should not only refuse to obey you, but consider you insane or criminals.",
"Even Lucifer was not guilty of so great a sacrilege in heaven, for he only presumed to be God's equal. God help us!",
"You condemned the holy gospel and replaced it with the teaching of the dragon from hell.",
"Your words are un-Christian, antichristian, and spoken by the inspiration of the evil spirit.",
"What happened to the house built on sand in Matthewwill also happen to you.",
"Must we believe your nightmares?",
"Look how this great heretic speaks brazenly and sacrilegiously.",
"You run against God with the horns of your pride up in the air and thus plunge into the abyss of hell. Woe unto you, Antichrist!",
"You are the devil's most dangerous tool!",
"It seems I must have liars and villains for opponents. I am not worthy in the sight of God that a godly and honorable person should discuss these matters with me in a Christian way. This is my greatest lament.May the Lord Jesus protect me and all devout souls from your contagion and your company!",
"This venom - the mere smell of which kills a man!",
"You are a Baal-zebub - that is, a man of flies.",
"You are full of poisonous refuse and insane foolishness.",
"You are ignorant, stupid, godless blasphemers.",
"You moderate enforcer and eulogizer of moderation. You are one of those bloody and deceitful people who affect modesty in words and appearance, but who meanwhile breathe out threats and blood.",
"We leave you to your own devices, for nothing properly suits you except hypocrisy, flattery, and lies.",
"In lying fashion you ignore what even children know.",
"The reward of such flattery is what your crass stupidity deserves. Therefore, we shall turn from you, a sevenfold stupid and blasphemous wise person.",
"People of your sort are hirelings, dumb dogs unable to bark, who see the wolf coming and flee or, rather, join up with the wolf.",
"You are a wolf and apostle of Satan.",
"You are the ultimate scourges of the world, the Antichrist together with your sophists and bishops.",
"You cowardly slave, you corrupt sycophant, with your sickening advice!",
"You are idiots and swine.",
"Every letter of yours breathes Moabitish pride. So much can a single bull inflate a single bubble that you practically make distinguished asses into gods.",
"You sophistic worms, grasshoppers, locusts, frogs and lice!",
"You completely close your mind and do nothing but shout, 'Anathema, anathema, anathema!' so that by your own voice you are judged mad.",
"Let this generation of vipers prepare itself for unquenchable fire!",
"You rush forward as an ass under the pelt of a lion.",
"In appearance and words you simulate modesty, but you are so swollen with haughtiness, arrogance, pride, malice, villainy, rashness, superciliousness, ignorance, and stupidity that there is nothing to surpass you.",
"Blind moles!",
"We despise your whorish impudence.",
"You arsonists, enemies of languages and truth!",
"Before God and men I accuse all of you as arsonists, blasphemers, murderers, and ravagers of Christian piety.",
"My soul, like Ezekiel's, is nauseated at eating your bread covered with human dung. Do you know what this means?",
"You pant after the garlic and melons of Egypt and have already long suffered from perverted tastes.",
"You people are more stupid than a block of wood.",
"You foster in your heart a Lucian, or some other pig from Epicurus' sty.",
"You reek of nothing but Lucian, and you breathe out on me the vast drunken folly of Epicurus.",
"You find things irreverent, inquisitive, and vain just as all ungodly men do, or rather, as the demons and the damned find things hateful and detestable.",
"You seem to be wrangling about goat's wool, like the man who watched the play in an empty theater.",
"You are dumber than Seriphian frogs and fishes.",
"You conduct yourself like one drunk or asleep, belching out between your snores, 'Yes, No.'",
"How is it, then, that you drivel like people in their second childhood?",
"Just as in a picture or dream you might see the king of the flies with his lances of straw and shields of hay arrayed against a real and regular army of seasoned human troops, that is how you go to war.",
"Proteus is no Proteus compared with you.",
"You do nothing with all your profusion of words but fight a fire with dry straw.",
"Perhaps you want me to die of unrelieved boredom while you keep on talking.",
"Are you ignorant of what it means to be ignorant?",
"You speak and act only as an ungodly person does.",
"I would not smell the foul odor of your name.",
"Are you not making an elephant out of a fly? What wonder workers!",
"You worship a Dagon and a god of your stomachs.",
"You have a priesthood of Satan.",
"As for the signs of your peculiar priesthood, we are willing to let you boast of these mean things, for we know it would be quite easy to shave, anoint, and clothe in a long robe even a pig or a block of wood.",
"In your hiding place you use the most fearless language as though you were full of three holy spirits. Such unseemly boasting reveals clearly what kind of a spirit you are.",
"Truly, I never imagined, and at the same time was shocked, to see how deeply you still cling to your errors.",
"You are a coarse devil who hurts me but little.",
"Such loose, lame, empty talk, set forth on the basis of your own reason and idiosyncrasy, would lead me to believe first of all that your opinions amount to nothing.",
"Get out in the name of a thousand devils, and break your neck before you are out of the city.",
"You have a perverted spirit that thinks only of murdering the conscience.",
"You teach the disorderly masses to break into this field in disorder like pigs.",
"Phooey on you, you servant of idols!",
"You are a toad eater and a fawner.",
"Take care, you evil and wrathful spirits. God may ordain that in swallowing you may choke to death.",
"Perhaps you like to hear yourself talk, as the stork its own clattering.",
"You are the cousins of the Antichrist.",
"You are the sin-master and soul-murdered.",
"Just as the devil is disorderly and jumbles things together, so your writings and head are equally disordered and mixed up, so that it is exceedingly annoying to read and difficult to remember what you write.",
"Do you not see here the devil, the enemy of God's order?",
"Who ever does not know the devil might be misled by these many splendid words to think that five holy spirits were in possession of you. Whoever differs from you is a papist twice over who crucifies or murders Christ; indeed, those who differ from you are Scribes. Whoever agrees with you, however, is up to their boots in the spirit and is a learned light. O wonderful saints! What do you think of yourselves? Do you fully grasp what kind of a spirit you have?",
"You plunge in like a sow to devour pearls, and like a dog tearing holy things to pieces.",
"In devil's fashion you go out where God would enter and enter where God goes out. It ought surprise no one that I call you a devil.",
"Listen, murdered of souls and sinful spirit!",
"Stupid spirit.",
"What light can there be in heads that hold such tangible darkness?",
"We may confidently suppose and be sure that your spirit will produce evidence and proof when the devil becomes God.",
"The devil rides you.",
"You have | |
the layer's input: all the outputs of
the previous layers, resulting in [x_0, ..., x_l-1, x_l].
Returns the layer's output, as well as the input of its conv2d.
Args:
_input: tensor, the operation's input;
layer: `int`, identifier number for this layer (within a block);
growth_rate: `int`, number of new convolutions per dense layer.
"""
with tf.variable_scope("layer_%d" % layer):
# use the composite function H_l (3x3 kernel conv)
if not self.bc_mode:
comp_out, filter_ref, kernels, in_cv = self.composite_function(
_input, out_features=growth_rate, kernel_size=3)
# in DenseNet-BC mode, add a bottleneck layer before H_l (1x1 conv)
elif self.bc_mode:
bottleneck_out, filter_ref = self.bottleneck(
_input, out_features=growth_rate)
if self.ft_filters or self.should_self_construct:
self.filter_ref_list[-1].append(filter_ref)
comp_out, filter_ref, kernels, in_cv = self.composite_function(
bottleneck_out, out_features=growth_rate, kernel_size=3)
# save a reference to the composite function's filter
if self.ft_filters or self.should_self_construct:
self.filter_ref_list[-1].append(filter_ref)
if self.ft_kernels or self.should_self_construct:
self.kernel_name_counter = growth_rate-1
self.kernels_ref_list[-1].append(kernels)
# concatenate output of H_l with layer input (all previous outputs)
if TF_VERSION[0] >= 1 and TF_VERSION[1] >= 0:
output = tf.concat(axis=3, values=(_input, comp_out))
else:
output = tf.concat(3, (_input, comp_out))
# If self-constructing at kernel level, keep track of kernel CS.
if self.has_micro_algo:
self.kCS_FIFO = [
deque(maxlen=self.dkCS_softening) for i in range(growth_rate)]
self.dkCS_FIFO = [
deque(maxlen=self.dkCS_std_window) for i in range(growth_rate)]
return output, in_cv
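    # Worked example of the concatenation above (illustrative numbers): if a
    # layer receives 64 feature maps and growth_rate is 12, H_l adds 12 new
    # maps and the concat makes the next layer's input 64 + 12 = 76 maps.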
def add_block(self, _input, block, growth_rate, layers_in_block, is_last):
"""
Adds a new block containing several convolutional (dense) layers.
These are connected together following a DenseNet architecture,
as defined in the paper.
Returns the block's output, as well as the inputs to the last layer
and to its conv2d.
Args:
_input: tensor, the operation's input;
block: `int`, identifier number for this block;
growth_rate: `int`, number of new convolutions per dense layer;
layers_in_block: `int`, number of dense layers in this block;
is_last: `bool`, is this the last block in the network or not.
"""
if self.ft_filters or self.should_self_construct:
self.filter_ref_list.append([])
if self.ft_kernels or self.should_self_construct:
self.kernels_ref_list.append([])
if is_last:
self.cross_entropy = []
with tf.variable_scope("Block_%d" % block) as self.current_block:
output = _input
for layer in range(layers_in_block):
# The inputs of the last layer and its conv2d must be saved
# (useful for self-construction kernel by kernel)
input_lt_lay = output
output, input_lt_cnv = self.add_internal_layer(
input_lt_lay, layer, growth_rate)
if self.ft_cross_entropies and is_last:
# Save the cross-entropy for all layers except the last one
# (it is always saved as part of the end-graph operations)
if layer != layers_in_block-1:
_, cross_entropy = self.cross_entropy_loss(
output, self.labels, block, layer,
preserve_transition=self.preserve_transition_l)
self.cross_entropy.append(cross_entropy)
return output, input_lt_lay, input_lt_cnv
# TRANSITION LAYERS -------------------------------------------------------
# -------------------------------------------------------------------------
def transition_layer(self, _input, block):
"""
Adds a new transition layer after a block. This layer's inputs are the
concatenated feature maps of each layer in the block.
The layer first runs the composite function with kernel size 1:
- In DenseNet mode, it produces as many feature maps as the input had.
- In DenseNet-BC mode, it produces reduction (theta) times as many,
compressing the output.
Afterwards, an average pooling operation (of size 2) is carried to
change the output's size.
Args:
_input: tensor, the operation's input;
block: `int`, identifier number for the previous block.
"""
with tf.variable_scope("Transition_after_block_%d" % block):
# add feature map compression in DenseNet-BC mode
out_features = int(int(_input.get_shape()[-1]) * self.reduction)
# use the composite function H_l (1x1 kernel conv)
output, filter_ref, kernels, in_cv = self.composite_function(
_input, out_features=out_features, kernel_size=1)
# save a reference to the composite function's filter
if self.ft_filters or self.should_self_construct:
self.filter_ref_list[-1].append(filter_ref)
if self.ft_kernels or self.should_self_construct:
self.kernels_ref_list[-1].append(kernels)
# use average pooling to reduce feature map size
output = self.avg_pool(output, k=2)
return output
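    # Worked example of the compression above (illustrative numbers): with
    # reduction (theta) = 0.5 and a block output of 256 feature maps, the 1x1
    # composite function produces int(256 * 0.5) = 128 maps, and the k=2
    # average pooling then halves the spatial dimensions.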
def transition_layer_to_classes(self, _input, block, layer):
"""
Adds the transition layer after the last block. This layer outputs the
estimated probabilities by classes.
It performs:
- batch normalisation,
- ReLU activation function,
- wider-than-normal average pooling,
- reshaping the output into a 1d tensor,
- fully-connected layer (matrix multiplication, weights and biases).
Args:
_input: tensor, the operation's input;
block: `int`, identifier number for the last block;
layer: `int`, identifier number for the last layer in that block.
"""
self.features_total = int(_input.get_shape()[-1])
var_scope = "Transition_to_FC_block_%d" % block
FC_name = "FC_block_%d" % block
if not self.preserve_transition_l:
var_scope += "_layer_%d" % layer
FC_name += "_layer_%d" % layer
with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):
# Batch normalisation.
self.batch_norm_counter = 0
output = self.batch_norm(
_input, scope='BatchNorm_'+str(self.batch_norm_counter))
# ReLU activation function.
output = tf.nn.relu(output)
# Wide average pooling.
last_pool_kernel = int(output.get_shape()[-2])
output = self.avg_pool(output, k=last_pool_kernel)
# Reshaping the output into 1d.
output = tf.reshape(output, [-1, self.features_total])
# FC (fully-connected) layer.
self.FC_W = []
for i in range(self.features_total):
self.FC_W.append(self.weight_variable_xavier(
[self.n_classes], name=FC_name+("_W%d" % i)))
self.FC_W_counter = self.features_total-1
self.FC_bias = self.bias_variable(
[self.n_classes], name=FC_name+"_bias")
stacked_FC_W = tf.stack(self.FC_W, axis=0)
logits = tf.matmul(output, stacked_FC_W) + self.FC_bias
return logits
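    # Added commentary (not in the original source): the FC weights are kept as
    # a Python list with one [n_classes] row per input feature and are only
    # stacked with tf.stack right before the matmul. This makes it cheap to
    # append rows when self-construction adds kernels, or to drop rows after
    # pruning; see reconstruct_transition_to_classes and
    # reconstruct_transition_to_classes_post_pruning below.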
def reconstruct_transition_to_classes(self, _input, block, layer):
"""
Reconstruct the transition layer to classes after adding a new kernel
or layer in the last block (in such a case, the transition layer must
remain mostly unchanged except for the new weights).
Args:
_input: tensor, the operation's input;
block: `int`, identifier number for the last block;
layer: `int`, identifier number for the last layer in that block.
"""
new_features_total = int(_input.get_shape()[-1])
var_scope = "Transition_to_FC_block_%d" % block
FC_name = "FC_block_%d" % block
if not self.preserve_transition_l:
var_scope += "_layer_%d" % layer
FC_name += "_layer_%d" % layer
with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):
# The batch norm contains beta and gamma params for each kernel,
# we first copy the param values from old kernels.
beta_values = self.sess.run(tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/beta",
[self.features_total]))
gamma_values = self.sess.run(tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/gamma",
[self.features_total]))
# Then we create a new batch norm and initialize its params.
self.batch_norm_counter += 1
output = self.batch_norm(
_input, scope='BatchNorm_'+str(self.batch_norm_counter))
new_beta = tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/beta",
[new_features_total])
new_gamma = tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/gamma",
[new_features_total])
self.sess.run(tf.variables_initializer([new_beta, new_gamma]))
# For these params, we copy the old param values, and leave
# the remaining new values for the new kernels.
new_beta_values = self.sess.run(new_beta)
new_gamma_values = self.sess.run(new_gamma)
difference = new_features_total-self.features_total
new_beta_values[:-difference] = beta_values
new_gamma_values[:-difference] = gamma_values
# Then we assign the modified values to reconstruct the batch norm.
self.sess.run(new_beta.assign(new_beta_values))
self.sess.run(new_gamma.assign(new_gamma_values))
self.features_total = new_features_total
# ReLU, average pooling, and reshaping into 1d
# these do not contain any trainable params, so they are rewritten.
output = tf.nn.relu(output)
last_pool_kernel = int(output.get_shape()[-2])
output = self.avg_pool(output, k=last_pool_kernel)
features_total = int(output.get_shape()[-1])
output = tf.reshape(output, [-1, features_total])
# For the FC layer: add new weights, keep biases and old weights.
for i in range(len(self.FC_W), features_total):
self.FC_W_counter += 1
self.FC_W.append(self.weight_variable_xavier(
[self.n_classes],
name=FC_name+("_W%d" % self.FC_W_counter)))
stacked_FC_W = tf.stack(self.FC_W, axis=0)
logits = tf.matmul(output, stacked_FC_W) + self.FC_bias
return logits
def reconstruct_transition_to_classes_post_pruning(self, _input,
block, layer,
kernels_to_prune):
"""
Reconstruct the transition layer to classes after pruning kernels in
the last layer of the last block (in such a case, the transition layer
must remain mostly unchanged and unused weights must be removed).
Args:
_input: tensor, the operation's input;
block: `int`, identifier number for the last block;
layer: `int`, identifier number for the last layer in that block;
kernels_to_prune: `list` of `int`, gives the specific kernels that
were pruned in the last layer.
"""
new_features_total = int(_input.get_shape()[-1])
var_scope = "Transition_to_FC_block_%d" % block
if not self.preserve_transition_l:
var_scope += "_layer_%d" % layer
with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):
# Copy the batch norm beta and gamma param values from old kernels.
beta_values = self.sess.run(tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/beta",
[self.features_total]))
gamma_values = self.sess.run(tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/gamma",
[self.features_total]))
# Create a new batch norm and get its param variables.
self.batch_norm_counter += 1
output = self.batch_norm(
_input, scope='BatchNorm_'+str(self.batch_norm_counter))
new_beta = tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/beta",
[new_features_total])
new_gamma = tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/gamma",
[new_features_total])
# self.sess.run(tf.variables_initializer([new_beta, new_gamma]))
# Copy the param values corresponding to the remaining kernels.
prepruning_kernel_count = len(self.kernels_ref_list[block][-1])
prepruning_kernel_count += len(kernels_to_prune)
difference = self.features_total - prepruning_kernel_count
new_beta_values = beta_values[:difference]
new_gamma_values = gamma_values[:difference]
for k in range(prepruning_kernel_count):
if k not in kernels_to_prune:
new_beta_values = np.append(
new_beta_values, beta_values[k+difference])
new_gamma_values = np.append(
new_gamma_values, gamma_values[k+difference])
            # Debug output: both decompositions should give the same offset.
            print("new_features_total: %d" % new_features_total)
            print("len(new_beta_values): %d" % len(new_beta_values))
            print("%d (difference) = %d (new_features_total) - %d (post-pruning kernel count)" % (
                difference, new_features_total, prepruning_kernel_count-len(kernels_to_prune)))
            print("%d (difference) = %d (old features_total) - %d (pre-pruning kernel count)" % (
                difference, self.features_total, prepruning_kernel_count))
# Assign those param values to reconstruct the batch norm.
self.sess.run(new_beta.assign(new_beta_values))
self.sess.run(new_gamma.assign(new_gamma_values))
self.features_total = new_features_total
            # Rewrite: ReLU, average pooling, and reshaping into 1d
sage: x.is_gamma0_equiv(y, 3, 'matrix')
(False, None)
sage: Cusp(1/2).is_gamma0_equiv(1/3,11,'corner')
(True, 19)
sage: Cusp(1,0)
Infinity
sage: z = Cusp(1,0)
sage: x.is_gamma0_equiv(z, 3, 'matrix')
(
[-1 1]
True, [-3 2]
)
ALGORITHM: See Proposition 2.2.3 of Cremona's book 'Algorithms for
Modular Elliptic Curves', or Prop 2.27 of Stein's Ph.D. thesis.
"""
if transformation not in [False, True, "matrix", None, "corner"]:
raise ValueError("Value %s of the optional argument transformation is not valid.")
if not isinstance(other, Cusp):
other = Cusp(other)
N = ZZ(N)
u1 = self.__a
v1 = self.__b
u2 = other.__a
v2 = other.__b
zero = ZZ.zero()
one = ZZ.one()
if transformation == "matrix":
from sage.matrix.constructor import matrix
if v1 == v2 and u1 == u2:
if not transformation:
return True
elif transformation == "matrix":
return True, matrix(ZZ, [[1, 0], [0, 1]])
else:
return True, one
# a necessary, but not sufficient condition unless N is square-free
if v1.gcd(N) != v2.gcd(N):
if not transformation:
return False
else:
return False, None
if (u1, v1) != (zero, one):
if v1 in [zero, one]:
s1 = one
else:
s1 = u1.inverse_mod(v1)
else:
            s1 = zero
if (u2, v2) != (zero, one):
if v2 in [zero, one]:
s2 = one
else:
s2 = u2.inverse_mod(v2)
else:
s2 = zero
g = (v1 * v2).gcd(N)
a = s1 * v2 - s2 * v1
if a % g != 0:
if not transformation:
return False
else:
return False, None
if not transformation:
return True
# Now we know the cusps are equivalent. Use the proof of Prop 2.2.3
# of Cremona to find a matrix in Gamma_0(N) relating them.
if v1 == 0: # the first is oo
if v2 == 0: # both are oo
if transformation == "matrix":
return (True, matrix(ZZ, [[1, 0], [0, 1]]))
else:
return (True, one)
else:
dum, s2, r2 = u2.xgcd(-v2)
assert dum.is_one()
if transformation == "matrix":
return (True, matrix(ZZ, [[u2, r2], [v2, s2]]))
else:
return (True, u2)
elif v2 == 0: # the second is oo
dum, s1, r1 = u1.xgcd(-v1)
assert dum.is_one()
if transformation == "matrix":
return (True, matrix(ZZ, [[s1, -r1], [-v1, u1]]))
else:
return (True, s1)
dum, s2, r2 = u2.xgcd(-v2)
assert dum.is_one()
dum, s1, r1 = u1.xgcd(-v1)
assert dum.is_one()
a = s1 * v2 - s2 * v1
assert (a % g).is_zero()
# solve x*v1*v2 + a = 0 (mod N).
d, x0, y0 = (v1 * v2).xgcd(N) # x0*v1*v2 + y0*N = d = g.
# so x0*v1*v2 - g = 0 (mod N)
x = -x0 * ZZ(a / g)
# now x*v1*v2 + a = 0 (mod N)
# the rest is all added in trac #10926
s1p = s1 + x * v1
M = N // g
if transformation == "matrix":
C = s1p * v2 - s2 * v1
if C % (M * v1 * v2) == 0:
k = - C // (M * v1 * v2)
else:
k = - (C / (M * v1 * v2)).round()
s1pp = s1p + k * M * v1
# C += k*M*v1*v2 # is now the smallest in absolute value
C = s1pp * v2 - s2 * v1
A = u2 * s1pp - r2 * v1
r1pp = r1 + (x + k * M) * u1
B = r2 * u1 - r1pp * u2
D = s2 * u1 - r1pp * v2
ga = matrix(ZZ, [[A, B], [C, D]])
assert ga.det() == 1
assert C % N == 0
assert (A * u1 + B * v1) / (C * u1 + D * v1) == u2 / v2
return (True, ga)
else:
# mainly for backwards compatibility and
# for how it is used in modular symbols
A = (u2 * s1p - r2 * v1)
if u2 != 0 and v1 != 0:
A = A % (u2 * v1 * M)
return (True, A)
def is_gamma1_equiv(self, other, N):
"""
Return whether self and other are equivalent modulo the action of
Gamma_1(N) via linear fractional transformations.
INPUT:
- ``other`` - Cusp
- ``N`` - an integer (specifies the group
Gamma_1(N))
OUTPUT:
- ``bool`` - True if self and other are equivalent
- ``int`` - 0, 1 or -1, gives further information
about the equivalence: If the two cusps are u1/v1 and u2/v2, then
they are equivalent if and only if v1 = v2 (mod N) and u1 = u2 (mod
gcd(v1,N)) or v1 = -v2 (mod N) and u1 = -u2 (mod gcd(v1,N)) The
sign is +1 for the first and -1 for the second. If the two cusps
are not equivalent then 0 is returned.
EXAMPLES::
sage: x = Cusp(2,3)
sage: y = Cusp(4,5)
sage: x.is_gamma1_equiv(y,2)
(True, 1)
sage: x.is_gamma1_equiv(y,3)
(False, 0)
sage: z = Cusp(QQ(x) + 10)
sage: x.is_gamma1_equiv(z,10)
(True, 1)
sage: z = Cusp(1,0)
sage: x.is_gamma1_equiv(z, 3)
(True, -1)
sage: Cusp(0).is_gamma1_equiv(oo, 1)
(True, 1)
sage: Cusp(0).is_gamma1_equiv(oo, 3)
(False, 0)
"""
if not isinstance(other, Cusp):
other = Cusp(other)
N = ZZ(N)
u1 = self.__a
v1 = self.__b
u2 = other.__a
v2 = other.__b
g = v1.gcd(N)
if ((v2 - v1) % N == 0 and (u2 - u1) % g == 0):
return True, 1
elif ((v2 + v1) % N == 0 and (u2 + u1) % g == 0):
return True, -1
return False, 0
def is_gamma_h_equiv(self, other, G):
r"""
Return a pair (b, t), where b is True or False as self and other
are equivalent under the action of G, and t is 1 or -1, as
described below.
Two cusps `u1/v1` and `u2/v2` are equivalent modulo
Gamma_H(N) if and only if `v1 = h*v2 (\mathrm{mod} N)` and
`u1 = h^{(-1)}*u2 (\mathrm{mod} gcd(v1,N))` or
        `v1 = -h*v2 (\mathrm{mod} N)` and
`u1 = -h^{(-1)}*u2 (\mathrm{mod} gcd(v1,N))` for some
`h \in H`. Then t is 1 or -1 as c and c' fall into the
first or second case, respectively.
INPUT:
- ``other`` - Cusp
- ``G`` - a congruence subgroup Gamma_H(N)
OUTPUT:
- ``bool`` - True if self and other are equivalent
- ``int`` - -1, 0, 1; extra info
EXAMPLES::
sage: x = Cusp(2,3)
sage: y = Cusp(4,5)
sage: x.is_gamma_h_equiv(y,GammaH(13,[2]))
(True, 1)
sage: x.is_gamma_h_equiv(y,GammaH(13,[5]))
(False, 0)
sage: x.is_gamma_h_equiv(y,GammaH(5,[]))
(False, 0)
sage: x.is_gamma_h_equiv(y,GammaH(23,[4]))
(True, -1)
Enumerating the cusps for a space of modular symbols uses this
function.
::
sage: G = GammaH(25,[6]) ; M = G.modular_symbols() ; M
Modular Symbols space of dimension 11 for Congruence Subgroup Gamma_H(25) with H generated by [6] of weight 2 with sign 0 over Rational Field
sage: M.cusps()
[8/25, 1/3, 6/25, 1/4, 1/15, -7/15, 7/15, 4/15, 1/20, 3/20, 7/20, 9/20]
sage: len(M.cusps())
12
This is always one more than the associated space of weight 2 Eisenstein
series.
::
sage: G.dimension_eis(2)
11
sage: M.cuspidal_subspace()
Modular Symbols subspace of dimension 0 of Modular Symbols space of dimension 11 for Congruence Subgroup Gamma_H(25) with H generated by [6] of weight 2 with sign 0 over Rational Field
sage: G.dimension_cusp_forms(2)
0
"""
from sage.modular.arithgroup.all import is_GammaH
if not isinstance(other, Cusp):
other = Cusp(other)
if not is_GammaH(G):
raise TypeError("G must be a group GammaH(N).")
H = G._list_of_elements_in_H()
N = ZZ(G.level())
u1 = self.__a
v1 = self.__b
u2 = other.__a
v2 = other.__b
g = v1.gcd(N)
for h in H:
v_tmp = (h * v1) % N
u_tmp = (h * u2) % N
if (v_tmp - v2) % N == 0 and (u_tmp - u1) % g == 0:
return True, 1
if (v_tmp + v2) % N == 0 and (u_tmp + u1) % g == 0:
return True, -1
return False, 0
def _acted_upon_(self, g, self_on_left):
r"""
Implement the left action of `SL_2(\ZZ)` on self.
EXAMPLES::
sage: g = matrix(ZZ, 2, [1,1,0,1]); g
[1 1]
[0 1]
sage: g * Cusp(2,5)
7/5
sage: Cusp(2,5) * g
            Traceback (most recent call last):
import cv2
import math
import numpy as np
import random
import torch
from scipy import special
from scipy.stats import multivariate_normal
from torchvision.transforms.functional_tensor import rgb_to_grayscale
# -------------------------------------------------------------------- #
# --------------------------- blur kernels --------------------------- #
# -------------------------------------------------------------------- #
# --------------------------- util functions --------------------------- #
def sigma_matrix2(sig_x, sig_y, theta):
"""Calculate the rotated sigma matrix (two dimensional matrix).
Args:
sig_x (float):
sig_y (float):
theta (float): Radian measurement.
Returns:
ndarray: Rotated sigma matrix.
"""
d_matrix = np.array([[sig_x**2, 0], [0, sig_y**2]])
u_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
return np.dot(u_matrix, np.dot(d_matrix, u_matrix.T))
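# Illustrative sketch (added, not part of the original module): for theta = 0
# the rotated covariance reduces to diag(sig_x**2, sig_y**2); a non-zero theta
# rotates that ellipse. The parameter values below are arbitrary.
def _demo_sigma_matrix2():
    no_rotation = sigma_matrix2(2.0, 1.0, 0.0)      # approx [[4, 0], [0, 1]]
    rotated = sigma_matrix2(2.0, 1.0, math.pi / 4)  # variances mixed by the rotation
    return no_rotation, rotated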
def mesh_grid(kernel_size):
"""Generate the mesh grid, centering at zero.
Args:
kernel_size (int):
Returns:
xy (ndarray): with the shape (kernel_size, kernel_size, 2)
xx (ndarray): with the shape (kernel_size, kernel_size)
yy (ndarray): with the shape (kernel_size, kernel_size)
"""
ax = np.arange(-kernel_size // 2 + 1., kernel_size // 2 + 1.)
xx, yy = np.meshgrid(ax, ax)
    xy = np.hstack((xx.reshape((kernel_size * kernel_size, 1)),
                    yy.reshape((kernel_size * kernel_size, 1)))).reshape(kernel_size, kernel_size, 2)
return xy, xx, yy
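# Illustrative sketch (added, not part of the original module): for
# kernel_size = 3 the axes run over [-1, 0, 1], so the grid is centred at zero.
def _demo_mesh_grid():
    xy, xx, yy = mesh_grid(3)
    # xy.shape == (3, 3, 2); xx[0] == array([-1., 0., 1.])
    return xy.shape, xx[0]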
def pdf2(sigma_matrix, grid):
"""Calculate PDF of the bivariate Gaussian distribution.
Args:
sigma_matrix (ndarray): with the shape (2, 2)
grid (ndarray): generated by :func:`mesh_grid`,
with the shape (K, K, 2), K is the kernel size.
Returns:
        kernel (ndarray): un-normalized kernel.
"""
inverse_sigma = np.linalg.inv(sigma_matrix)
kernel = np.exp(-0.5 * np.sum(np.dot(grid, inverse_sigma) * grid, 2))
return kernel
def cdf2(d_matrix, grid):
"""Calculate the CDF of the standard bivariate Gaussian distribution.
Used in skewed Gaussian distribution.
Args:
        d_matrix (ndarray): skew matrix.
grid (ndarray): generated by :func:`mesh_grid`,
with the shape (K, K, 2), K is the kernel size.
Returns:
cdf (ndarray): skewed cdf.
"""
rv = multivariate_normal([0, 0], [[1, 0], [0, 1]])
grid = np.dot(grid, d_matrix)
cdf = rv.cdf(grid)
return cdf
def bivariate_Gaussian(kernel_size, sig_x, sig_y, theta, grid=None, isotropic=True):
"""Generate a bivariate isotropic or anisotropic Gaussian kernel.
    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
Args:
kernel_size (int):
sig_x (float):
sig_y (float):
theta (float): Radian measurement.
grid (ndarray, optional): generated by :func:`mesh_grid`,
with the shape (K, K, 2), K is the kernel size. Default: None
isotropic (bool):
Returns:
kernel (ndarray): normalized kernel.
"""
if grid is None:
grid, _, _ = mesh_grid(kernel_size)
if isotropic:
sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
else:
sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
kernel = pdf2(sigma_matrix, grid)
kernel = kernel / np.sum(kernel)
return kernel
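# Illustrative sketch (added, not part of the original module): a 21x21
# isotropic Gaussian blur kernel. The kernel is normalized to sum to 1, so
# convolving an image with it preserves overall brightness. Parameter values
# are arbitrary.
def _demo_bivariate_gaussian():
    kernel = bivariate_Gaussian(kernel_size=21, sig_x=2.0, sig_y=2.0,
                                theta=0.0, isotropic=True)
    assert abs(kernel.sum() - 1.0) < 1e-8
    return kernel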
def bivariate_generalized_Gaussian(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
"""Generate a bivariate generalized Gaussian kernel.
Described in `Parameter Estimation For Multivariate Generalized
Gaussian Distributions`_
    by Pascal et al. (2013).
    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
Args:
kernel_size (int):
sig_x (float):
sig_y (float):
theta (float): Radian measurement.
beta (float): shape parameter, beta = 1 is the normal distribution.
grid (ndarray, optional): generated by :func:`mesh_grid`,
            with the shape (K, K, 2), K is the kernel size. Default: None
        isotropic (bool):
Returns:
kernel (ndarray): normalized kernel.
.. _Parameter Estimation For Multivariate Generalized Gaussian
Distributions: https://arxiv.org/abs/1302.6498
"""
if grid is None:
grid, _, _ = mesh_grid(kernel_size)
if isotropic:
sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
else:
sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
inverse_sigma = np.linalg.inv(sigma_matrix)
kernel = np.exp(-0.5 * np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta))
kernel = kernel / np.sum(kernel)
return kernel
def bivariate_plateau(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
"""Generate a plateau-like anisotropic kernel.
1 / (1+x^(beta))
Ref: https://stats.stackexchange.com/questions/203629/is-there-a-plateau-shaped-distribution
    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
Args:
kernel_size (int):
sig_x (float):
sig_y (float):
theta (float): Radian measurement.
beta (float): shape parameter, beta = 1 is the normal distribution.
grid (ndarray, optional): generated by :func:`mesh_grid`,
            with the shape (K, K, 2), K is the kernel size. Default: None
        isotropic (bool):
Returns:
kernel (ndarray): normalized kernel.
"""
if grid is None:
grid, _, _ = mesh_grid(kernel_size)
if isotropic:
sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
else:
sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
inverse_sigma = np.linalg.inv(sigma_matrix)
kernel = np.reciprocal(np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta) + 1)
kernel = kernel / np.sum(kernel)
return kernel
def random_bivariate_Gaussian(kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
noise_range=None,
isotropic=True):
"""Randomly generate bivariate isotropic or anisotropic Gaussian kernels.
    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
Args:
kernel_size (int):
sigma_x_range (tuple): [0.6, 5]
sigma_y_range (tuple): [0.6, 5]
        rotation_range (tuple): [-math.pi, math.pi]
noise_range(tuple, optional): multiplicative kernel noise,
[0.75, 1.25]. Default: None
Returns:
kernel (ndarray):
"""
assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
if isotropic is False:
assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
rotation = np.random.uniform(rotation_range[0], rotation_range[1])
else:
sigma_y = sigma_x
rotation = 0
kernel = bivariate_Gaussian(kernel_size, sigma_x, sigma_y, rotation, isotropic=isotropic)
# add multiplicative noise
if noise_range is not None:
assert noise_range[0] < noise_range[1], 'Wrong noise range.'
noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
kernel = kernel * noise
kernel = kernel / np.sum(kernel)
return kernel
def random_bivariate_generalized_Gaussian(kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
beta_range,
noise_range=None,
isotropic=True):
"""Randomly generate bivariate generalized Gaussian kernels.
    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
Args:
kernel_size (int):
sigma_x_range (tuple): [0.6, 5]
sigma_y_range (tuple): [0.6, 5]
        rotation_range (tuple): [-math.pi, math.pi]
beta_range (tuple): [0.5, 8]
noise_range(tuple, optional): multiplicative kernel noise,
[0.75, 1.25]. Default: None
Returns:
kernel (ndarray):
"""
assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
if isotropic is False:
assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
rotation = np.random.uniform(rotation_range[0], rotation_range[1])
else:
sigma_y = sigma_x
rotation = 0
# assume beta_range[0] < 1 < beta_range[1]
if np.random.uniform() < 0.5:
beta = np.random.uniform(beta_range[0], 1)
else:
beta = np.random.uniform(1, beta_range[1])
kernel = bivariate_generalized_Gaussian(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
# add multiplicative noise
if noise_range is not None:
assert noise_range[0] < noise_range[1], 'Wrong noise range.'
noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
kernel = kernel * noise
kernel = kernel / np.sum(kernel)
return kernel
def random_bivariate_plateau(kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
beta_range,
noise_range=None,
isotropic=True):
"""Randomly generate bivariate plateau kernels.
    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
Args:
kernel_size (int):
sigma_x_range (tuple): [0.6, 5]
sigma_y_range (tuple): [0.6, 5]
        rotation_range (tuple): [-math.pi/2, math.pi/2]
beta_range (tuple): [1, 4]
noise_range(tuple, optional): multiplicative kernel noise,
[0.75, 1.25]. Default: None
Returns:
kernel (ndarray):
"""
assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
if isotropic is False:
assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
rotation = np.random.uniform(rotation_range[0], rotation_range[1])
else:
sigma_y = sigma_x
rotation = 0
# TODO: this may be not proper
if np.random.uniform() < 0.5:
beta = np.random.uniform(beta_range[0], 1)
else:
beta = np.random.uniform(1, beta_range[1])
kernel = bivariate_plateau(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
# add multiplicative noise
if noise_range is not None:
assert noise_range[0] < noise_range[1], 'Wrong noise range.'
noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
kernel = kernel * noise
kernel = kernel / np.sum(kernel)
return kernel
def random_mixed_kernels(kernel_list,
kernel_prob,
kernel_size=21,
sigma_x_range=(0.6, 5),
sigma_y_range=(0.6, 5),
rotation_range=(-math.pi, math.pi),
betag_range=(0.5, 8),
betap_range=(0.5, 8),
noise_range=None):
"""Randomly generate mixed kernels.
Args:
        kernel_list (tuple): a list of kernel type names; the types handled
            here are ['iso', 'aniso', 'generalized_iso', 'generalized_aniso',
            'plateau_iso', 'plateau_aniso']
kernel_prob (tuple): corresponding kernel probability for each
kernel type
kernel_size (int):
sigma_x_range (tuple): [0.6, 5]
sigma_y_range (tuple): [0.6, 5]
        rotation_range (tuple): [-math.pi, math.pi]
        betag_range (tuple): [0.5, 8], shape range for the generalized
            Gaussian kernels
        betap_range (tuple): [0.5, 8], shape range for the plateau kernels
noise_range(tuple, optional): multiplicative kernel noise,
[0.75, 1.25]. Default: None
Returns:
kernel (ndarray):
"""
kernel_type = random.choices(kernel_list, kernel_prob)[0]
if kernel_type == 'iso':
kernel = random_bivariate_Gaussian(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True)
elif kernel_type == 'aniso':
kernel = random_bivariate_Gaussian(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False)
elif kernel_type == 'generalized_iso':
kernel = random_bivariate_generalized_Gaussian(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
betag_range,
noise_range=noise_range,
isotropic=True)
elif kernel_type == 'generalized_aniso':
kernel = random_bivariate_generalized_Gaussian(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
betag_range,
noise_range=noise_range,
isotropic=False)
elif kernel_type == 'plateau_iso':
kernel = random_bivariate_plateau(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True)
elif kernel_type == 'plateau_aniso':
kernel = random_bivariate_plateau(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False)
return kernel
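# Illustrative sketch (added, not part of the original module): draw one random
# kernel, with the type sampled from kernel_list according to kernel_prob. The
# probabilities and ranges below are made up for the demo; only the type names
# actually handled above are listed.
def _demo_random_mixed_kernels():
    return random_mixed_kernels(
        kernel_list=['iso', 'aniso', 'generalized_iso', 'generalized_aniso',
                     'plateau_iso', 'plateau_aniso'],
        kernel_prob=[0.45, 0.25, 0.12, 0.03, 0.12, 0.03],
        kernel_size=21,
        sigma_x_range=(0.2, 3),
        sigma_y_range=(0.2, 3),
        rotation_range=(-math.pi, math.pi),
        betag_range=(0.5, 4),
        betap_range=(1, 2))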
np.seterr(divide='ignore', invalid='ignore')
def circular_lowpass_kernel(cutoff, kernel_size, pad_to=0):
"""2D sinc filter, ref: | |
# Source: queueit/KnownUser.V3.Python - SDK/test_integration_config_helpers.py
import unittest
from queueit_knownuserv3.integration_config_helpers import *
from queueit_knownuserv3.http_context_providers import HttpContextProvider
class HttpContextProviderMock(HttpContextProvider):
def __init__(self):
self.headers = {}
self.cookies = {}
self.body = ""
def getHeader(self, header_name):
if header_name not in self.headers:
return None
return self.headers[header_name]
def getCookie(self, cookie_name):
if cookie_name not in self.cookies:
return None
return self.cookies[cookie_name]
def getRequestBodyAsString(self):
return self.body
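# Illustrative sketch (added, not part of the original test suite): how the
# mock and IntegrationEvaluator fit together outside of unittest. The trigger
# values and URL below are made up for the demo.
def _example_manual_evaluation():
    hcp = HttpContextProviderMock()
    hcp.cookies = {"member": "yes"}
    config = {
        "Integrations": [{
            "Name": "demo-integration",
            "Triggers": [{
                "LogicalOperator": "And",
                "TriggerParts": [{
                    "CookieName": "member",
                    "Operator": "Equals",
                    "ValueToCompare": "yes",
                    "ValidatorType": "CookieValidator",
                    "IsIgnoreCase": True,
                    "IsNegative": False
                }]
            }]
        }]
    }
    # Returns the matched integration dict, or None if nothing matches.
    return IntegrationEvaluator().getMatchedIntegrationConfig(
        config, "https://example.com/queue", hcp)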
class TestIntegrationEvaluator(unittest.TestCase):
def test_getMatchedIntegrationConfig_oneTrigger_and_notMatched(self):
integrationConfig = {
"Integrations": [{
"Triggers": [{
"LogicalOperator":
"And",
"TriggerParts": [{
"CookieName": "c1",
"Operator": "Equals",
"ValueToCompare": "value1",
"ValidatorType": "CookieValidator",
"IsIgnoreCase": False,
"IsNegative": False
}, {
"UrlPart": "PageUrl",
"ValidatorType": "UrlValidator",
"ValueToCompare": "test",
"Operator": "Contains",
"IsIgnoreCase": False,
"IsNegative": False
}]
}]
}]
}
url = "http://test.testdomain.com:8080/test?q=2"
testObject = IntegrationEvaluator()
matchedConfig = testObject.getMatchedIntegrationConfig(
integrationConfig, url, HttpContextProviderMock())
assert (matchedConfig == None)
def test_getMatchedIntegrationConfig_oneTrigger_and_matched(self):
integrationConfig = {
"Integrations": [{
"Name":
"integration1",
"Triggers": [{
"LogicalOperator":
"And",
"TriggerParts": [{
"CookieName": "c1",
"Operator": "Equals",
"ValueToCompare": "value1",
"ValidatorType": "CookieValidator",
"IsIgnoreCase": True,
"IsNegative": False
}, {
"UrlPart": "PageUrl",
"ValidatorType": "UrlValidator",
"ValueToCompare": "test",
"Operator": "Contains",
"IsIgnoreCase": False,
"IsNegative": False
}]
}]
}]
}
url = "http://test.testdomain.com:8080/test?q=2"
hcpMock = HttpContextProviderMock()
hcpMock.cookies = {"c2": "ddd", "c1": "Value1"}
testObject = IntegrationEvaluator()
matchedConfig = testObject.getMatchedIntegrationConfig(
integrationConfig, url, hcpMock)
assert (matchedConfig["Name"] == "integration1")
def test_getMatchedIntegrationConfig_oneTrigger_and_notmatched_UserAgent(
self):
integrationConfig = {
"Integrations": [{
"Name":
"integration1",
"Triggers": [{
"LogicalOperator":
"And",
"TriggerParts": [{
"CookieName": "c1",
"Operator": "Equals",
"ValueToCompare": "value1",
"ValidatorType": "CookieValidator",
"IsIgnoreCase": True,
"IsNegative": False
}, {
"UrlPart": "PageUrl",
"ValidatorType": "UrlValidator",
"ValueToCompare": "test",
"Operator": "Contains",
"IsIgnoreCase": False,
"IsNegative": False
}, {
"ValidatorType": "userAgentValidator",
"ValueToCompare": "Googlebot",
"Operator": "Contains",
"IsIgnoreCase": True,
"IsNegative": True
}]
}]
}]
}
url = "http://test.testdomain.com:8080/test?q=2"
hcpMock = HttpContextProviderMock()
hcpMock.headers = {"user-agent": "bot.html google.com googlebot test"}
hcpMock.cookies = {"c2": "ddd", "c1": "Value1"}
testObject = IntegrationEvaluator()
matchedConfig = testObject.getMatchedIntegrationConfig(
integrationConfig, url, hcpMock)
assert (matchedConfig == None)
def test_getMatchedIntegrationConfig_oneTrigger_or_notMatched(self):
integrationConfig = {
"Integrations": [{
"Name":
"integration1",
"Triggers": [{
"LogicalOperator":
"Or",
"TriggerParts": [{
"CookieName": "c1",
"Operator": "Equals",
"ValueToCompare": "value1",
"ValidatorType": "CookieValidator",
"IsIgnoreCase": True,
"IsNegative": True
}, {
"UrlPart": "PageUrl",
"ValidatorType": "UrlValidator",
"ValueToCompare": "test",
"Operator": "Equals",
"IsIgnoreCase": False,
"IsNegative": False
}]
}]
}]
}
url = "http://test.testdomain.com:8080/test?q=2"
hcpMock = HttpContextProviderMock()
hcpMock.cookies = {"c2": "ddd", "c1": "Value1"}
testObject = IntegrationEvaluator()
matchedConfig = testObject.getMatchedIntegrationConfig(
integrationConfig, url, hcpMock)
assert (matchedConfig == None)
def test_getMatchedIntegrationConfig_oneTrigger_or_matched(self):
integrationConfig = {
"Integrations": [{
"Name":
"integration1",
"Triggers": [{
"LogicalOperator":
"Or",
"TriggerParts": [{
"CookieName": "c1",
"Operator": "Equals",
"ValueToCompare": "value1",
"ValidatorType": "CookieValidator",
"IsIgnoreCase": True,
"IsNegative": True
}, {
"UrlPart": "PageUrl",
"ValidatorType": "UrlValidator",
"ValueToCompare": "test",
"Operator": "Equals",
"IsIgnoreCase": False,
"IsNegative": True
}]
}]
}]
}
url = "http://test.testdomain.com:8080/test?q=2"
hcpMock = HttpContextProviderMock()
hcpMock.cookies = {"c2": "ddd", "c1": "Value1"}
testObject = IntegrationEvaluator()
matchedConfig = testObject.getMatchedIntegrationConfig(
integrationConfig, url, hcpMock)
assert (matchedConfig["Name"] == "integration1")
def test_getMatchedIntegrationConfig_twoTriggers_matched(self):
integrationConfig = {
"Integrations": [{
"Name":
"integration1",
"Triggers": [{
"LogicalOperator":
"And",
"TriggerParts": [{
"CookieName": "c1",
"Operator": "Equals",
"ValueToCompare": "value1",
"ValidatorType": "CookieValidator",
"IsIgnoreCase": True,
"IsNegative": True
}]
}, {
"LogicalOperator":
"And",
"TriggerParts": [{
"CookieName": "c1",
"Operator": "Equals",
"ValueToCompare": "Value1",
"ValidatorType": "CookieValidator",
"IsIgnoreCase": False,
"IsNegative": False
}, {
"UrlPart": "PageUrl",
"ValidatorType": "UrlValidator",
"ValueToCompare": "test",
"Operator": "Contains",
"IsIgnoreCase": False,
"IsNegative": False
}]
}]
}]
}
url = "http://test.testdomain.com:8080/test?q=2"
hcpMock = HttpContextProviderMock()
hcpMock.cookies = {"c2": "ddd", "c1": "Value1"}
testObject = IntegrationEvaluator()
matchedConfig = testObject.getMatchedIntegrationConfig(
integrationConfig, url, hcpMock)
assert (matchedConfig["Name"] == "integration1")
def test_getMatchedIntegrationConfig_threeIntegrationsInOrder_secondMatched(
self):
integrationConfig = {
"Integrations": [{
"Name":
"integration0",
"Triggers": [{
"LogicalOperator":
"And",
"TriggerParts": [{
"UrlPart": "PageUrl",
"ValidatorType": "UrlValidator",
"ValueToCompare": "Test",
"Operator": "Contains",
"IsIgnoreCase": False,
"IsNegative": False
}]
}]
}, {
"Name":
"integration1",
"Triggers": [{
"LogicalOperator":
"And",
"TriggerParts": [{
"UrlPart": "PageUrl",
"ValidatorType": "UrlValidator",
"ValueToCompare": "test",
"Operator": "Contains",
"IsIgnoreCase": False,
"IsNegative": False
}]
}]
}, {
"Name":
"integration2",
"Triggers": [{
"LogicalOperator":
"And",
"TriggerParts": [{
"CookieName": "c1",
"ValidatorType": "CookieValidator",
"ValueToCompare": "c1",
"Operator": "Equals",
"IsIgnoreCase": True,
"IsNegative": False
}]
}]
}]
}
url = "http://test.testdomain.com:8080/test?q=2"
hcpMock = HttpContextProviderMock()
hcpMock.cookies = {"c2": "ddd", "c1": "Value1"}
testObject = IntegrationEvaluator()
matchedConfig = testObject.getMatchedIntegrationConfig(
integrationConfig, url, hcpMock)
assert (matchedConfig["Name"] == "integration1")
class TestUrlValidatorHelper(unittest.TestCase):
def test_evaluate(self):
assert (not UrlValidatorHelper.evaluate(None, "notimportant"))
assert (not UrlValidatorHelper.evaluate({}, "notimportant"))
triggerPart = {
"UrlPart": "PageUrl",
"Operator": "Contains",
"IsIgnoreCase": True,
"IsNegative": False,
"ValueToCompare": "http://test.testdomain.com:8080/test?q=1"
}
assert (not UrlValidatorHelper.evaluate(
triggerPart, "http://test.testdomain.com:8080/test?q=2"))
triggerPart = {
"UrlPart": "PagePath",
"Operator": "Equals",
"IsIgnoreCase": True,
"IsNegative": False,
"ValueToCompare": "/Test/t1"
}
assert (UrlValidatorHelper.evaluate(
triggerPart, "http://test.testdomain.com:8080/test/t1?q=2&y02"))
triggerPart = {
"UrlPart": "HostName",
"Operator": "Contains",
"IsIgnoreCase": True,
"IsNegative": False,
"ValueToCompare": "test.testdomain.com"
}
assert (UrlValidatorHelper.evaluate(
triggerPart, "http://m.test.testdomain.com:8080/test?q=2"))
triggerPart = {
"UrlPart": "HostName",
"Operator": "Contains",
"IsIgnoreCase": True,
"IsNegative": True,
"ValueToCompare": "test.testdomain.com"
}
assert (not UrlValidatorHelper.evaluate(
triggerPart, "http://m.test.testdomain.com:8080/test?q=2"))
class TestCookieValidatorHelper(unittest.TestCase):
def test_evaluate(self):
hcpMock = HttpContextProviderMock()
assert (not CookieValidatorHelper.evaluate(None, hcpMock))
assert (not CookieValidatorHelper.evaluate({}, hcpMock))
triggerPart = {
"CookieName": "c1",
"Operator": "Contains",
"IsIgnoreCase": True,
"IsNegative": False,
"ValueToCompare": "1"
}
hcpMock.cookies = {"c1": "hhh"}
assert (not CookieValidatorHelper.evaluate(triggerPart, hcpMock))
triggerPart = {
"CookieName": "c1",
"Operator": "Contains",
"ValueToCompare": "1"
}
hcpMock.cookies = {"c2": "ddd", "c1": "3"}
assert (not CookieValidatorHelper.evaluate(triggerPart, hcpMock))
triggerPart = {
"CookieName": "c1",
"Operator": "Contains",
"IsIgnoreCase": True,
"IsNegative": False,
"ValueToCompare": "1"
}
hcpMock.cookies = {"c2": "ddd", "c1": "1"}
assert (CookieValidatorHelper.evaluate(triggerPart, hcpMock))
triggerPart = {
"CookieName": "c1",
"Operator": "Contains",
"IsIgnoreCase": True,
"IsNegative": True,
"ValueToCompare": "1"
}
hcpMock.cookies = {"c2": "ddd", "c1": "1"}
assert (not CookieValidatorHelper.evaluate(triggerPart, hcpMock))
class TestUserAgentValidatorHelper(unittest.TestCase):
def test_evaluate(self):
hcpMock = HttpContextProviderMock()
assert (not UserAgentValidatorHelper.evaluate(None, hcpMock))
assert (not UserAgentValidatorHelper.evaluate({}, hcpMock))
triggerPart = {
"Operator": "Contains",
"IsIgnoreCase": False,
"IsNegative": False,
"ValueToCompare": "googlebot"
}
hcpMock.headers = {"user-agent": "Googlebot sample useraagent"}
assert (not UserAgentValidatorHelper.evaluate(triggerPart, hcpMock))
triggerPart = {
"Operator": "Equals",
"IsIgnoreCase": True,
"IsNegative": True,
"ValueToCompare": "googlebot"
}
hcpMock.headers = {"user-agent": "ooglebot sample useraagent"}
assert (UserAgentValidatorHelper.evaluate(triggerPart, hcpMock))
triggerPart = {
"Operator": "Contains",
"IsIgnoreCase": False,
"IsNegative": True,
"ValueToCompare": "googlebot"
}
hcpMock.headers = {"user-agent": "googlebot"}
assert (not UserAgentValidatorHelper.evaluate(triggerPart, hcpMock))
triggerPart = {
"Operator": "Contains",
"IsIgnoreCase": True,
"IsNegative": False,
"ValueToCompare": "googlebot"
}
hcpMock.headers = {"user-agent": "Googlebot"}
assert (UserAgentValidatorHelper.evaluate(triggerPart, hcpMock))
class TestHttpHeaderValidatorHelper(unittest.TestCase):
def test_evaluate(self):
hcpMock = HttpContextProviderMock()
assert (not HttpHeaderValidatorHelper.evaluate(None, hcpMock))
assert (not HttpHeaderValidatorHelper.evaluate({}, hcpMock))
triggerPart = {
"HttpHeaderName": "a-header",
"Operator": "Contains",
"IsIgnoreCase": True,
"IsNegative": False,
"ValueToCompare": "value"
}
hcpMock.headers = {'a-header': "VaLuE"}
assert (HttpHeaderValidatorHelper.evaluate(triggerPart, hcpMock))
triggerPart = {
"HttpHeaderName": "a-header",
"Operator": "Contains",
"IsIgnoreCase": True,
"IsNegative": False,
"ValueToCompare": "value"
}
hcpMock.headers = {'a-header': "not"}
assert (not HttpHeaderValidatorHelper.evaluate(triggerPart, hcpMock))
triggerPart = {
"HttpHeaderName": "a-header",
"Operator": "Contains",
"IsNegative": True,
"IsIgnoreCase": False,
"ValueToCompare": "value"
}
hcpMock.headers = {'a-header': "not"}
assert (HttpHeaderValidatorHelper.evaluate(triggerPart, hcpMock))
class TestComparisonOperatorHelper(unittest.TestCase):
def test_evaluate_equals_operator(self):
assert (ComparisonOperatorHelper.evaluate("Equals", False, False, None,
None, None))
assert (ComparisonOperatorHelper.evaluate("Equals", False, False,
"test1", "test1", None))
assert (not ComparisonOperatorHelper.evaluate("Equals", False, False,
"test1", "Test1", None))
assert (ComparisonOperatorHelper.evaluate("Equals", False, True,
"test1", "Test1", None))
assert (ComparisonOperatorHelper.evaluate("Equals", True, False,
"test1", "Test1", None))
assert (not ComparisonOperatorHelper.evaluate("Equals", True, False,
"test1", "test1", None))
assert (not ComparisonOperatorHelper.evaluate("Equals", True, True,
"test1", "Test1", None))
def test_evaluate_contains_operator(self):
assert (ComparisonOperatorHelper.evaluate("Contains", False, False,
None, None, None))
assert (ComparisonOperatorHelper.evaluate(
"Contains", False, False, "test_test1_test", "test1", None))
assert (not ComparisonOperatorHelper.evaluate(
"Contains", False, False, "test_test1_test", "Test1", None))
assert (ComparisonOperatorHelper.evaluate(
"Contains", False, True, "test_test1_test", "Test1", None))
assert (ComparisonOperatorHelper.evaluate(
"Contains", True, False, "test_test1_test", "Test1", None))
assert (not ComparisonOperatorHelper.evaluate(
"Contains", True, True, "test_test1", "Test1", None))
assert (not ComparisonOperatorHelper.evaluate(
"Contains", True, False, "test_test1", "test1", None))
assert (ComparisonOperatorHelper.evaluate(
"Contains", False, False, "test_dsdsdsdtest1", "*", None))
assert (not ComparisonOperatorHelper.evaluate(
"Contains", False, False, "", "*", None))
def test_evaluate_equalsAny_operator(self):
assert (ComparisonOperatorHelper.evaluate("EqualsAny", False, False,
"test1", None, ["test1"]))
assert (not ComparisonOperatorHelper.evaluate(
"EqualsAny", False, False, "test1", None, ["Test1"]))
assert (ComparisonOperatorHelper.evaluate("EqualsAny", False, True,
"test1", None, ["Test1"]))
assert (ComparisonOperatorHelper.evaluate("EqualsAny", True, False,
"test1", None, ["Test1"]))
assert (not ComparisonOperatorHelper.evaluate(
"EqualsAny", True, False, "test1", None, ["test1"]))
assert (not ComparisonOperatorHelper.evaluate(
"EqualsAny", True, True, "test1", None, ["Test1"]))
def test_evaluate_containsAny_operator(self):
assert (ComparisonOperatorHelper.evaluate(
"ContainsAny", False, False, "test_test1_test", None, ["test1"]))
assert (not ComparisonOperatorHelper.evaluate(
"ContainsAny", False, False, "test_test1_test", None, ["Test1"]))
assert (ComparisonOperatorHelper.evaluate(
"ContainsAny", False, True, "test_test1_test", None, ["Test1"]))
assert (ComparisonOperatorHelper.evaluate(
"ContainsAny", True, False, "test_test1_test", None, ["Test1"]))
assert (not ComparisonOperatorHelper.evaluate(
"ContainsAny", True, True, "test_test1", None, ["Test1"]))
assert (not ComparisonOperatorHelper.evaluate(
"ContainsAny", True, False, "test_test1", None, ["test1"]))
assert (ComparisonOperatorHelper.evaluate(
"ContainsAny", False, False, "test_dsdsdsdtest1", None, ["*"]))
def test_evaluate_unsupported_operator(self):
assert (not ComparisonOperatorHelper.evaluate("-not-supported-", False,
False, None, None, None))
class TestRequestBodyValidatorHelper(unittest.TestCase):
def test_evaluate(self):
hcp_mock = HttpContextProviderMock()
assert (not RequestBodyValidatorHelper.evaluate(None, hcp_mock))
assert (not RequestBodyValidatorHelper.evaluate({}, hcp_mock))
trigger_part = {
"Operator": "Contains",
"IsIgnoreCase": True,
"IsNegative": False,
"ValueToCompare": "test body"
}
assert (not RequestBodyValidatorHelper.evaluate(trigger_part, hcp_mock))
hcp_mock.body = "my test body is here"
assert (RequestBodyValidatorHelper.evaluate(trigger_part, hcp_mock))
trigger_part = {
"Operator": "Equals",
"IsIgnoreCase": True,
"IsNegative": False,
"ValueToCompare": "Test"
}
assert (not RequestBodyValidatorHelper.evaluate(trigger_part, hcp_mock))
trigger_part = {
"Operator": "Contains",
"IsIgnoreCase": | |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import os.path
import numpy as np
from collections import namedtuple
from collections import OrderedDict
import json
import re
import gzip
import yaml
import deeplift
import tensorflow as tf
NEAR_ZERO_THRESHOLD = 10**(-7)
_SESS = None
def get_session():
try:
#use the keras session if there is one
import keras.backend as K
return K.get_session()
except:
#Warning: I haven't really tested this behaviour out...
global _SESS
if _SESS is None:
print("MAKING A SESSION")
_SESS = tf.Session()
_SESS.run(tf.global_variables_initializer())
return _SESS
def compile_func(inputs, outputs):
if (isinstance(inputs, list)==False):
print("Wrapping the inputs in a list...")
inputs = [inputs]
assert isinstance(inputs, list)
def func_to_return(inp):
if len(inp) > len(inputs) and len(inputs)==1:
print("Wrapping the inputs in a list...")
inp = [inp]
assert len(inp)==len(inputs),\
("length of provided list should be "
+str(len(inputs))+" for tensors "+str(inputs)
+" but got input of length "+str(len(inp)))
feed_dict = {}
for input_tensor, input_val in zip(inputs, inp):
feed_dict[input_tensor] = input_val
sess = get_session()
return sess.run(outputs, feed_dict=feed_dict)
return func_to_return
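# Illustrative sketch (added, not part of the original module): wrap a tiny
# TF1-style graph in a callable. The placeholder and the doubling op are made
# up for the demo.
def _demo_compile_func():
    inp = tf.placeholder(tf.float32, shape=[None, 3])
    doubled = inp * 2.0
    func = compile_func([inp], doubled)
    # Returns a (2, 3) array filled with 2.0
    return func([np.ones((2, 3), dtype="float32")])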
def enum(**enums):
class Enum(object):
pass
to_return = Enum;
for key,val in enums.items():
if hasattr(val, '__call__'):
setattr(to_return, key, staticmethod(val))
else:
setattr(to_return,key,val);
to_return.vals = [x for x in enums.values()];
to_return.the_dict = enums
return to_return;
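# Illustrative sketch (added, not part of the original module): the enum helper
# simply attaches its keyword arguments as class attributes. The names below
# are made up.
def _demo_enum():
    Modes = enum(dna="DNA", rna="RNA")
    assert Modes.dna == "DNA"
    assert "RNA" in Modes.vals
    return Modes.the_dict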
def assert_is_type(instance, the_class, instance_var_name):
return assert_type(instance, the_class, instance_var_name, True)
def assert_is_not_type(instance, the_class, instance_var_name):
return assert_type(instance, the_class, instance_var_name, False)
def assert_type(instance, the_class, instance_var_name, is_type_result):
assert (is_type(instance, the_class) == is_type_result),\
instance_var_name+" should be an instance of "\
+the_class.__name__+" but is "+str(instance.__class__)
return True
def is_type(instance, the_class):
return superclass_in_base_classes(instance.__class__.__bases__, the_class)
def superclass_in_base_classes(base_classes, the_class):
"""
recursively determine if the_class is among or is a superclass of
one of the classes in base_classes. The comparison is done by
name so that even if reload is called on a module, this still
works.
"""
for base_class in base_classes:
if base_class.__name__ == the_class.__name__:
return True
else:
#if the base class in turn has base classes of its own
if len(base_class.__bases__)!=1 or\
base_class.__bases__[0].__name__ != 'object':
#check them. If there's a hit, return True
if (superclass_in_base_classes(
base_classes=base_class.__bases__,
the_class=the_class)):
return True
#if 'True' was not returned in the code above, that means we don't
#have a superclass
return False
def run_function_in_batches(func,
input_data_list,
learning_phase=None,
batch_size=10,
progress_update=1000,
multimodal_output=False):
#func has a return value such that the first index is the
#batch. This function will run func in batches on the inputData
#and will extend the result into one big list.
#if multimodal_output=True, func has a return value such that first
#index is the mode and second index is the batch
assert isinstance(input_data_list, list), "input_data_list must be a list"
#input_datas is an array of the different input_data modes.
to_return = [];
i = 0;
while i < len(input_data_list[0]):
if (progress_update is not None):
if (i%progress_update == 0):
print("Done",i)
func_output = func(([x[i:i+batch_size] for x in input_data_list]
+([] if learning_phase is
None else [learning_phase])
))
if (multimodal_output):
assert isinstance(func_output, list),\
"multimodal_output=True yet function return value is not a list"
if (len(to_return)==0):
to_return = [[] for x in func_output]
for to_extend, batch_results in zip(to_return, func_output):
to_extend.extend(batch_results)
else:
to_return.extend(func_output)
i += batch_size;
return to_return
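# Illustrative sketch (added, not part of the original module): run a toy
# function over a single input mode in batches of 2; the function just doubles
# its batch, so the result is the doubled input in order.
def _demo_run_function_in_batches():
    doubler = lambda batch_list: list(batch_list[0] * 2)
    data = np.arange(10)
    return run_function_in_batches(doubler,
                                   input_data_list=[data],
                                   batch_size=2,
                                   progress_update=None)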
def mean_normalise_weights_for_sequence_convolution(weights,
bias,
axis_of_normalisation,
dim_ordering):
print("Normalising weights for one-hot encoded sequence convolution")
print("axis of normalisation is: "+str(axis_of_normalisation))
print("Weight shape on that axis is: "
+str(weights.shape[axis_of_normalisation]))
mean_weights_at_positions=np.mean(weights,axis=axis_of_normalisation)
if (dim_ordering=='th'):
print("Theano dimension ordering; output channel axis is first one "
"which has a length of "+str(weights.shape[0]))
#sum across remaining dimensions except output channel which is first
new_bias = bias + np.sum(np.sum(mean_weights_at_positions,
axis=1),axis=1)
elif (dim_ordering=='tf'):
print("Tensorflow dimension ordering; output channel axis is last one "
"which has a length of "+str(weights.shape[-1]))
#sum across remaining dimensions except output channel which is last
new_bias = bias + np.sum(np.sum(mean_weights_at_positions,
axis=0),axis=0)
else:
raise RuntimeError("Unsupported dim ordering "+str(dim_ordering))
mean_weights_at_positions = np.expand_dims(
mean_weights_at_positions,
axis_of_normalisation)
renormalised_weights=weights-mean_weights_at_positions
return renormalised_weights, new_bias
def load_yaml_data_from_file(file_name):
file_handle = get_file_handle(file_name)
data = yaml.load(file_handle)
file_handle.close()
return data
def get_file_handle(file_name, mode='r'):
use_gzip_open = False
#if want to read from file, check that is gzipped and set
#use_gzip_open to True if it is
if (mode=="r" or mode=="rb"):
if (is_gzipped(file_name)):
mode="rb"
use_gzip_open = True
#Also check if gz or gzip is in the name, and use gzip open
#if writing to the file.
    if (re.search(r'\.gz$', file_name) or re.search(r'\.gzip', file_name)):
#check for the case where the file name implies the file
#is gzipped, but the file is not actually detected as gzipped,
#and warn the user accordingly
if (mode=="r" or mode=="rb"):
if (use_gzip_open==False):
print("Warning: file has gz or gzip in name, but was not"
" detected as gzipped")
if (mode=="w"):
use_gzip_open = True
#I think write will actually append if the file already
#exists...so you want to remove it if it exists
if os.path.isfile(file_name):
os.remove(file_name)
if (use_gzip_open):
return gzip.open(file_name,mode)
else:
return open(file_name,mode)
def is_gzipped(file_name):
file_handle = open(file_name, 'rb')
magic_number = file_handle.read(2)
file_handle.close()
is_gzipped = (magic_number == b'\x1f\x8b' )
return is_gzipped
def connect_list_of_layers(deeplift_layers):
if (len(deeplift_layers) > 1):
#string the layers together so that subsequent layers take the previous
#layer as input
last_layer_processed = deeplift_layers[0]
for layer in deeplift_layers[1:]:
layer.set_inputs(last_layer_processed)
last_layer_processed = layer
return deeplift_layers
def get_integrated_gradients_function(gradient_computation_function,
num_intervals):
def compute_integrated_gradients(
task_idx, input_data_list, input_references_list,
batch_size, progress_update=None):
outputs = []
#remember, input_data_list and input_references_list are
#a list with one entry per mode
input_references_list =\
[np.ones_like(np.array(input_data)) *input_reference for
input_data, input_reference in
zip(input_data_list, input_references_list)]
#will flesh out multimodal case later...
assert len(input_data_list)==1
assert len(input_references_list)==1
vectors = []
interpolated_inputs = []
interpolated_inputs_references = []
for an_input, a_reference in zip(input_data_list[0],
input_references_list[0]):
#interpolate between reference and input with num_intervals
vector = an_input - a_reference
vectors.append(vector)
step = vector/float(num_intervals)
#prepare the array that has the inputs at different steps
for i in range(num_intervals):
interpolated_inputs.append(
a_reference + step*(i+0.5))
interpolated_inputs_references.append(a_reference)
#find the gradients at different steps for all the inputs
interpolated_gradients =\
np.array(gradient_computation_function(
task_idx=task_idx,
input_data_list=[interpolated_inputs],
input_references_list=[interpolated_inputs_references],
batch_size=batch_size,
progress_update=progress_update))
#reshape for taking the mean over all the steps
#the first dim is the sample idx, second dim is the step
#I've checked this is the appropriate axis ordering for the reshape
interpolated_gradients = np.reshape(
interpolated_gradients,
[input_data_list[0].shape[0], num_intervals]
+list(input_data_list[0].shape[1:]))
#take the mean gradient over all the steps, multiply by vector
#equivalent to the stepwise integral
mean_gradient = np.mean(interpolated_gradients,axis=1)
contribs = mean_gradient*np.array(vectors)
return contribs
return compute_integrated_gradients
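# Illustrative sketch (added, not part of the original module): with a fake
# gradient function that returns 1 everywhere, the integrated-gradients
# contributions reduce to (input - reference), which is a handy sanity check.
# The fake function and the toy data below are made up for the demo.
def _fake_unit_gradient_func(task_idx, input_data_list, input_references_list,
                             batch_size, progress_update=None):
    return np.ones_like(np.array(input_data_list[0], dtype=float))
def _demo_integrated_gradients():
    ig_func = get_integrated_gradients_function(_fake_unit_gradient_func,
                                                num_intervals=5)
    inputs = np.random.rand(2, 4)
    contribs = ig_func(task_idx=0,
                       input_data_list=[inputs],
                       input_references_list=[np.zeros(4)],
                       batch_size=10)
    # contribs == inputs - reference (here the reference is all zeros)
    return contribs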
def get_hypothetical_contribs_func_onehot(multipliers_function):
"""
Meant for models with one-hot encoded sequence input.
Inputs:
multipliers_function: a function (usually produced by
model.get_target_multipliers_func) that takes task_idx,
input_data_list, input_references_list, batch_size
and progress_update as inputs and returns the multipliers
on one-hot encoded sequence input. The first
entry of input_data_list is assumed to be a 3-dimensional
array where the first dimension is the example index,
the second dimension is length and the
last dimension is the one-hot encoded channel axis.
Returns:
a function that takes the same arguments as multipliers_func
and returns an estimate of what the contributions would
be for each of the one-hot encoding possibilities.
The calculation is as follows: At each
position, we iterate over the one-hot encoding
possibilities (eg: for genomic sequence, this is ACGT i.e.
1000, 0100, 0010 and 0001) and compute the hypothetical
difference-from-reference in each case.
We then multiply the hypothetical
differences-from-reference with the
multipliers to get the hypothetical contributions.
For each of the one-hot encoding possibilities,
the hypothetical contributions are summed across the
channel axis to estimate the total hypothetical
contribution at each position.
The reason this is only an estimate
is that the multipliers were computed
using the actual input and not the hypothetical inputs.
"""
def hypothetical_contribs_func(task_idx,
input_data_list,
input_references_list,
batch_size,
progress_update):
        assert len(input_data_list[0].shape)==3, input_data_list[0].shape
multipliers = multipliers_function(
task_idx=task_idx,
input_data_list=input_data_list,
input_references_list=input_references_list,
batch_size=batch_size,
progress_update=progress_update)
to_return = np.zeros_like(input_data_list[0]).astype("float")
for i in range(input_data_list[0].shape[-1]):
hypothetical_input = np.zeros_like(input_data_list[0])\
.astype("float")
hypothetical_input[:,:,i] = 1.0
difference_from_reference =\
(hypothetical_input-np.array(input_references_list[0]))
hypothetical_contribs = difference_from_reference*multipliers
to_return[:,:,i] = np.sum(hypothetical_contribs,axis=-1)
return to_return
return hypothetical_contribs_func
def get_shuffle_seq_ref_function(score_computation_function,
shuffle_func, one_hot_func=None):
def compute_scores_with_shuffle_seq_refs(
task_idx, input_data_sequences, num_refs_per_seq,
batch_size, seed=1, progress_update=None):
import numpy as np
np.random.seed(seed)
import random
random.seed(seed)
to_run_input_data_seqs = []
to_run_input_data_refs = []
references_generated = 0
for seq in input_data_sequences:
for i in range(num_refs_per_seq):
references_generated += 1
if (progress_update is not None and
references_generated%progress_update==0):
print(str(references_generated)
+" reference seqs generated")
if isinstance(seq,np.ndarray):
seq=seq.squeeze()
to_run_input_data_seqs.append(seq)
to_run_input_data_refs.append(shuffle_func(seq))
if one_hot_func is not None:
if (progress_update is not None):
import os, json, sys
import os.path as osp
import argparse
import warnings
from tqdm import tqdm
import cv2
import numpy as np
from skimage.io import imsave
from skimage.util import img_as_ubyte
from skimage.transform import resize
from skimage.color import label2rgb
import shutil
from PIL import Image
from torchvision.utils import save_image
import torch
import torch.nn.functional as F
import torchvision
from models.get_model import get_arch
from utils.get_loaders import get_test_dataset
from utils.model_saving_loading import load_model
from skimage import filters, measure
import skimage
import pandas as pd
from skimage.morphology import skeletonize,remove_small_objects
# argument parsing
parser = argparse.ArgumentParser()
required_named = parser.add_argument_group('required arguments')
parser.add_argument('--experiment_path', help='experiments/subfolder where checkpoint is', default=None)
parser.add_argument('--config_file', type=str, default=None,
help='experiments/name_of_config_file, overrides everything')
# in case no config file is passed
parser.add_argument('--im_size', help='delimited list input, could be 600,400', type=str, default='512')
parser.add_argument('--device', type=str, default='cuda:0', help='where to run the training code (e.g. "cpu" or "cuda:0") [default: %(default)s]')
parser.add_argument('--results_path', type=str, default='results', help='path to save predictions (defaults to results')
def intersection(mask,vessel_, it_x, it_y):
"""
Remove the intersection in case the whole vessel is too long
"""
x_less = max(0, it_x - 1)
y_less = max(0, it_y - 1)
x_more = min(vessel_.shape[0] - 1, it_x + 1)
y_more = min(vessel_.shape[1] - 1, it_y + 1)
active_neighbours = (vessel_[x_less, y_less]>0).astype('float')+ \
(vessel_[x_less, it_y]>0).astype('float')+ \
(vessel_[x_less, y_more]>0).astype('float')+ \
(vessel_[it_x, y_less]>0).astype('float')+ \
(vessel_[it_x, y_more]>0).astype('float')+ \
(vessel_[x_more, y_less]>0).astype('float')+ \
(vessel_[x_more, it_y]>0).astype('float')+ \
(vessel_[x_more, y_more]>0).astype('float')
if active_neighbours > 2:
cv2.circle(mask,(it_y,it_x),radius=1,color=(0,0,0),thickness=-1)
return mask,active_neighbours
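# Illustrative sketch (added, not part of the original script): a 5x5 skeleton
# shaped like a plus sign; the centre pixel has four active neighbours, so it
# is detected as an intersection and erased from the mask.
def _demo_intersection():
    vessel = np.zeros((5, 5), dtype=np.uint8)
    vessel[2, :] = 1
    vessel[:, 2] = 1
    mask = vessel.copy()
    mask, n_neighbours = intersection(mask, vessel, 2, 2)
    # n_neighbours == 4.0 and mask[2, 2] == 0 after the call
    return mask, n_neighbours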
def optic_disc_centre(result_path, binary_vessel_path, artery_vein_path):
if os.path.exists(result_path+'.ipynb_checkpoints'):
shutil.rmtree(result_path+'.ipynb_checkpoints')
optic_binary_result_path = '../Results/M3/Disc_centred/'
macular_binary_result_path = '../Results/M3/Macular_centred/'
B_optic_process_binary_vessel_path = binary_vessel_path + 'Zone_B_disc_centred_binary_process/'
B_optic_process_artery_path = artery_vein_path + 'Zone_B_disc_centred_artery_process/'
B_optic_process_vein_path = artery_vein_path + 'Zone_B_disc_centred_vein_process/'
B_optic_skeleton_binary_vessel_path = binary_vessel_path + 'Zone_B_disc_centred_binary_skeleton/'
B_optic_skeleton_artery_path = artery_vein_path + 'Zone_B_disc_centred_artery_skeleton/'
B_optic_skeleton_vein_path = artery_vein_path + 'Zone_B_disc_centred_vein_skeleton/'
C_optic_process_binary_vessel_path = binary_vessel_path + 'Zone_C_disc_centred_binary_process/'
C_optic_process_artery_path = artery_vein_path + 'Zone_C_disc_centred_artery_process/'
C_optic_process_vein_path = artery_vein_path + 'Zone_C_disc_centred_vein_process/'
C_optic_skeleton_binary_vessel_path = binary_vessel_path + 'Zone_C_disc_centred_binary_skeleton/'
C_optic_skeleton_artery_path = artery_vein_path + 'Zone_C_disc_centred_artery_skeleton/'
C_optic_skeleton_vein_path = artery_vein_path + 'Zone_C_disc_centred_vein_skeleton/'
macular_process_binary_vessel_path = binary_vessel_path + 'macular_centred_binary_process/'
macular_process_artery_path = artery_vein_path + 'macular_centred_artery_process/'
macular_process_vein_path = artery_vein_path + 'macular_centred_vein_process/'
macular_skeleton_binary_vessel_path = binary_vessel_path + 'macular_centred_binary_skeleton/'
macular_skeleton_artery_path = artery_vein_path + 'macular_centred_artery_skeleton/'
macular_skeleton_vein_path = artery_vein_path + 'macular_centred_vein_skeleton/'
#2021/11/2
zone_b_macular_process_binary_vessel_path = binary_vessel_path + 'macular_Zone_B_centred_binary_process/'
zone_b_macular_process_artery_path = artery_vein_path + 'macular_Zone_B_centred_artery_process/'
zone_b_macular_process_vein_path = artery_vein_path + 'macular_Zone_B_centred_vein_process/'
zone_b_macular_skeleton_binary_vessel_path = binary_vessel_path + 'macular_Zone_B_centred_binary_skeleton/'
zone_b_macular_skeleton_artery_path = artery_vein_path + 'macular_Zone_B_centred_artery_skeleton/'
zone_b_macular_skeleton_vein_path = artery_vein_path + 'macular_Zone_B_centred_vein_skeleton/'
#2021/11/2
zone_c_macular_process_binary_vessel_path = binary_vessel_path + 'macular_Zone_C_centred_binary_process/'
zone_c_macular_process_artery_path = artery_vein_path + 'macular_Zone_C_centred_artery_process/'
zone_c_macular_process_vein_path = artery_vein_path + 'macular_Zone_C_centred_vein_process/'
zone_c_macular_skeleton_binary_vessel_path = binary_vessel_path + 'macular_Zone_C_centred_binary_skeleton/'
zone_c_macular_skeleton_artery_path = artery_vein_path + 'macular_Zone_C_centred_artery_skeleton/'
zone_c_macular_skeleton_vein_path = artery_vein_path + 'macular_Zone_C_centred_vein_skeleton/'
# create all output directories for this run
for dir_path in [
optic_binary_result_path, macular_binary_result_path,
B_optic_process_binary_vessel_path, B_optic_process_artery_path, B_optic_process_vein_path,
B_optic_skeleton_binary_vessel_path, B_optic_skeleton_artery_path, B_optic_skeleton_vein_path,
C_optic_process_binary_vessel_path, C_optic_process_artery_path, C_optic_process_vein_path,
C_optic_skeleton_binary_vessel_path, C_optic_skeleton_artery_path, C_optic_skeleton_vein_path,
macular_process_binary_vessel_path, macular_process_artery_path, macular_process_vein_path,
macular_skeleton_binary_vessel_path, macular_skeleton_artery_path, macular_skeleton_vein_path,
zone_b_macular_process_binary_vessel_path, zone_b_macular_process_artery_path, zone_b_macular_process_vein_path,
zone_b_macular_skeleton_binary_vessel_path, zone_b_macular_skeleton_artery_path, zone_b_macular_skeleton_vein_path,
zone_c_macular_process_binary_vessel_path, zone_c_macular_process_artery_path, zone_c_macular_process_vein_path,
zone_c_macular_skeleton_binary_vessel_path, zone_c_macular_skeleton_artery_path, zone_c_macular_skeleton_vein_path,
]:
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
optic_vertical_CDR,optic_vertical_disc,optic_vertical_cup = [],[],[]
optic_horizontal_CDR,optic_horizontal_disc,optic_horizontal_cup = [],[],[]
macular_vertical_CDR,macular_vertical_disc,macular_vertical_cup = [],[],[]
macular_horizontal_CDR,macular_horizontal_disc,macular_horizontal_cup = [],[],[]
optic_centre_list = []
macular_centre_list = []
disc_cup_list = sorted(os.listdir(result_path))
for i in disc_cup_list:
path_ = result_path+i
disc_cup_ = cv2.imread(path_)
disc_cup_912 = cv2.resize(disc_cup_,(912,912),interpolation = cv2.INTER_NEAREST)
#image_ = cv2.imread('../Results/M1/Good_quality/'+i)
#IMAGE_912 = cv2.resize(image_,(912,912),interpolation = cv2.INTER_AREA)
#disc_cup_912 = disc_cup_
try:
disc_ = disc_cup_912[...,2]
cup_ = disc_cup_912[...,0]
## sanity check the optic disc/cup segmentation before measuring it
disc_mask = measure.label(disc_)
regions = measure.regionprops(disc_mask)
regions.sort(key=lambda x: x.area, reverse=True)
if len(regions) > 1:
for rg in regions[2:]:
disc_mask[rg.coords[:,0], rg.coords[:,1]] = 0
disc_[disc_mask!=0] = 255
cup_mask = measure.label(cup_)
regions = measure.regionprops(cup_mask)
regions.sort(key=lambda x: x.area, reverse=True)
if len(regions) > 1:
for rg in regions[2:]:
cup_mask[rg.coords[:,0], rg.coords[:,1]] = 0
cup_[cup_mask!=0] = 255
disc_index = np.where(disc_>0)
disc_index_width = disc_index[1]
disc_index_height = disc_index[0]
disc_horizontal_width = np.max(disc_index_width)-np.min(disc_index_width)
disc_vertical_height = np.max(disc_index_height)-np.min(disc_index_height)
cup_index = np.where(cup_>0)
cup_index_width = cup_index[1]
cup_index_height = cup_index[0]
cup_horizontal_width = np.max(cup_index_width)-np.min(cup_index_width)
cup_vertical_height = np.max(cup_index_height)-np.min(cup_index_height)
cup_width_centre = np.mean(cup_index_width)
cup_height_centre = np.mean(cup_index_height)
if (disc_horizontal_width < (disc_.shape[0]/3) and disc_vertical_height < (disc_.shape[1]/3)
        and np.min(disc_index_width) <= cup_width_centre <= np.max(disc_index_width)
        and np.min(disc_index_height) <= cup_height_centre <= np.max(disc_index_height)
        and cup_vertical_height < disc_vertical_height
        and cup_horizontal_width < disc_horizontal_width):
whole_index = np.where(disc_cup_912>0)
whole_index_width = whole_index[1]
whole_index_height = whole_index[0]
horizontal_distance = np.absolute(np.mean(whole_index_height)-disc_cup_912.shape[1]/2)
vertical_distance = np.absolute(np.mean(whole_index_width)-disc_cup_912.shape[0]/2)
distance_ = np.sqrt(np.square(horizontal_distance)+np.square(vertical_distance))
binary_process_ = cv2.imread(binary_vessel_path+'binary_process/'+i)[...,0]
artery_process_ = cv2.imread(artery_vein_path+'artery_binary_process/'+i)[...,0]
vein_process_ = cv2.imread(artery_vein_path+'vein_binary_process/'+i)[...,0]
binary_skeleton_ = cv2.imread(binary_vessel_path+'binary_skeleton/'+i)[...,0]
artery_skeleton_ = cv2.imread(artery_vein_path+'artery_binary_skeleton/'+i)[...,0]
vein_skeleton_ = cv2.imread(artery_vein_path+'vein_binary_skeleton/'+i)[...,0]
# remove the intersection of binary_skeleton_
ignored_pixels = 1
mask_ = np.ones((binary_skeleton_.shape))
for it_x in range(ignored_pixels, mask_.shape[0] - ignored_pixels):
for it_y in range(ignored_pixels, mask_.shape[1] - ignored_pixels):
if binary_skeleton_[it_x, it_y] > 0:
mask,active_neighbours = intersection(mask_,binary_skeleton_, it_x, it_y)
binary_skeleton_ = binary_skeleton_ * mask
# remove the intersection of artery_skeleton_
mask_ = np.ones((artery_skeleton_.shape))
for it_x in range(ignored_pixels, mask_.shape[0] - ignored_pixels):
for it_y in range(ignored_pixels, mask_.shape[1] - ignored_pixels):
if artery_skeleton_[it_x, it_y] > 0:
mask,active_neighbours = intersection(mask_,artery_skeleton_, it_x, it_y)
artery_skeleton_ = artery_skeleton_ * mask
# remove the intersection of vein_skeleton_
mask_ = np.ones((vein_skeleton_.shape))
for it_x in range(ignored_pixels, mask_.shape[0] - ignored_pixels):
for it_y in range(ignored_pixels, mask_.shape[1] - ignored_pixels):
if vein_skeleton_[it_x, it_y] > 0:
mask,active_neighbours = intersection(mask_,vein_skeleton_, it_x, it_y)
vein_skeleton_ = vein_skeleton_ * mask
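# Zone masks (descriptive note): both zones are annuli centred on the centroid of
# the disc/cup segmentation, with R = max(disc_width, disc_height)/2. Zone B keeps
# the region between radii 2R and 3R (a 3R disc is drawn, then the inner 2R disc
# is blanked), and Zone C keeps the region between radii 2R and 5R, as implemented
# by the cv2.circle calls below.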
zone_mask_B = np.zeros(binary_process_.shape)
zone_mask_C = np.zeros(binary_process_.shape)
zone_centre = (int(np.mean(whole_index_width)), int(np.mean(whole_index_height)))
radius = max(int(disc_horizontal_width/2),int(disc_vertical_height/2))
cv2.circle(zone_mask_B,zone_centre,radius=3*radius,color=(255,255,255),thickness=-1)
cv2.circle(zone_mask_B,zone_centre,radius=2*radius,color=(0,0,0),thickness=-1)
zone_mask_B = zone_mask_B/255
binary_process_B = binary_process_*zone_mask_B
artery_process_B = artery_process_*zone_mask_B
vein_process_B = vein_process_*zone_mask_B
binary_skeleton_B = binary_skeleton_*zone_mask_B
artery_skeleton_B = artery_skeleton_*zone_mask_B
vein_skeleton_B = vein_skeleton_*zone_mask_B
cv2.circle(zone_mask_C,zone_centre,radius=5*radius,color=(255,255,255),thickness=-1)
cv2.circle(zone_mask_C,zone_centre,radius=2*radius,color=(0,0,0),thickness=-1)
zone_mask_C = zone_mask_C/255
binary_process_C = binary_process_*zone_mask_C
artery_process_C = artery_process_*zone_mask_C
vein_process_C = vein_process_*zone_mask_C
binary_skeleton_C = binary_skeleton_*zone_mask_C
artery_skeleton_C = artery_skeleton_*zone_mask_C
vein_skeleton_C = vein_skeleton_*zone_mask_C
if (distance_/disc_cup_912.shape[1])<0.1:
optic_centre_list.append(i)
cv2.imwrite(B_optic_process_binary_vessel_path+i,binary_process_B)
cv2.imwrite(B_optic_process_artery_path+i,artery_process_B)
cv2.imwrite(B_optic_process_vein_path+i,vein_process_B)
cv2.imwrite(B_optic_skeleton_binary_vessel_path+i,binary_skeleton_B)
cv2.imwrite(B_optic_skeleton_artery_path+i,artery_skeleton_B)
cv2.imwrite(B_optic_skeleton_vein_path+i,vein_skeleton_B)
cv2.imwrite(C_optic_process_binary_vessel_path+i,binary_process_C)
cv2.imwrite(C_optic_process_artery_path+i,artery_process_C)
cv2.imwrite(C_optic_process_vein_path+i,vein_process_C)
cv2.imwrite(C_optic_skeleton_binary_vessel_path+i,binary_skeleton_C)
cv2.imwrite(C_optic_skeleton_artery_path+i,artery_skeleton_C)
cv2.imwrite(C_optic_skeleton_vein_path+i,vein_skeleton_C)
optic_vertical_disc.append(disc_vertical_height)
optic_horizontal_disc.append(disc_horizontal_width)
optic_vertical_cup.append(cup_vertical_height)
optic_horizontal_cup.append(cup_horizontal_width)
optic_vertical_CDR.append(cup_vertical_height/disc_vertical_height)
optic_horizontal_CDR.append(cup_horizontal_width/disc_horizontal_width)
else:
macular_centre_list.append(i)
cv2.imwrite(zone_b_macular_process_binary_vessel_path+i,binary_process_B)
cv2.imwrite(zone_b_macular_process_artery_path+i,artery_process_B)
cv2.imwrite(zone_b_macular_process_vein_path+i,vein_process_B)
cv2.imwrite(zone_b_macular_skeleton_binary_vessel_path+i,binary_skeleton_B)
cv2.imwrite(zone_b_macular_skeleton_artery_path+i,artery_skeleton_B)
cv2.imwrite(zone_b_macular_skeleton_vein_path+i,vein_skeleton_B)
cv2.imwrite(zone_c_macular_process_binary_vessel_path+i,binary_process_C)
cv2.imwrite(zone_c_macular_process_artery_path+i,artery_process_C)
cv2.imwrite(zone_c_macular_process_vein_path+i,vein_process_C)
cv2.imwrite(zone_c_macular_skeleton_binary_vessel_path+i,binary_skeleton_C)
cv2.imwrite(zone_c_macular_skeleton_artery_path+i,artery_skeleton_C)
cv2.imwrite(zone_c_macular_skeleton_vein_path+i,vein_skeleton_C)
shutil.copy(binary_vessel_path+'binary_process/'+i,macular_process_binary_vessel_path+i)
shutil.copy(artery_vein_path+'artery_binary_process/'+i,macular_process_artery_path+i)
shutil.copy(artery_vein_path+'vein_binary_process/'+i,macular_process_vein_path+i)
shutil.copy(binary_vessel_path+'binary_skeleton/'+i,macular_skeleton_binary_vessel_path+i)
shutil.copy(artery_vein_path+'artery_binary_skeleton/'+i,macular_skeleton_artery_path+i)
shutil.copy(artery_vein_path+'vein_binary_skeleton/'+i,macular_skeleton_vein_path+i)
macular_vertical_disc.append(disc_vertical_height)
macular_horizontal_disc.append(disc_horizontal_width)
macular_vertical_cup.append(cup_vertical_height)
macular_horizontal_cup.append(cup_horizontal_width)
macular_vertical_CDR.append(cup_vertical_height/disc_vertical_height)
macular_horizontal_CDR.append(cup_horizontal_width/disc_horizontal_width)
else:
macular_centre_list.append(i)
shutil.copy(binary_vessel_path+'binary_process/'+i,macular_process_binary_vessel_path+i)
shutil.copy(artery_vein_path+'artery_binary_process/'+i,macular_process_artery_path+i)
shutil.copy(artery_vein_path+'vein_binary_process/'+i,macular_process_vein_path+i)
shutil.copy(binary_vessel_path+'binary_skeleton/'+i,macular_skeleton_binary_vessel_path+i)
shutil.copy(artery_vein_path+'artery_binary_skeleton/'+i,macular_skeleton_artery_path+i)
shutil.copy(artery_vein_path+'vein_binary_skeleton/'+i,macular_skeleton_vein_path+i)
macular_vertical_disc.append(-1)
macular_horizontal_disc.append(-1)
macular_vertical_cup.append(-1)
macular_horizontal_cup.append(-1)
macular_vertical_CDR.append(-1)
macular_horizontal_CDR.append(-1)
except Exception:
macular_centre_list.append(i)
shutil.copy(binary_vessel_path+'binary_process/'+i,macular_process_binary_vessel_path+i)
shutil.copy(artery_vein_path+'artery_binary_process/'+i,macular_process_artery_path+i)
shutil.copy(artery_vein_path+'vein_binary_process/'+i,macular_process_vein_path+i)
shutil.copy(binary_vessel_path+'binary_skeleton/'+i,macular_skeleton_binary_vessel_path+i)
shutil.copy(artery_vein_path+'artery_binary_skeleton/'+i,macular_skeleton_artery_path+i)
shutil.copy(artery_vein_path+'vein_binary_skeleton/'+i,macular_skeleton_vein_path+i)
macular_vertical_disc.append(-1)
macular_horizontal_disc.append(-1)
macular_vertical_cup.append(-1)
macular_horizontal_cup.append(-1)
macular_vertical_CDR.append(-1)
macular_horizontal_CDR.append(-1)
Pd_optic_centre = pd.DataFrame({'Name':optic_centre_list, 'Disc_height':optic_vertical_disc, 'Disc_width':optic_horizontal_disc, 'Cup_height': optic_vertical_cup, 'Cup_width': optic_horizontal_cup, 'CDR_vertical': optic_vertical_CDR, 'CDR_horizontal': optic_horizontal_CDR})
Pd_optic_centre.to_csv(optic_binary_result_path + 'Disc_cup_results.csv', index = None, encoding='utf8')
Pd_macular_centre = pd.DataFrame({'Name':macular_centre_list, 'Disc_height':macular_vertical_disc, 'Disc_width':macular_horizontal_disc, 'Cup_height': macular_vertical_cup, 'Cup_width': macular_horizontal_cup, 'CDR_vertical': macular_vertical_CDR, 'CDR_horizontal': macular_horizontal_CDR})
Pd_macular_centre.to_csv(macular_binary_result_path + 'Disc_cup_results.csv', index = None, encoding='utf8')
def misc_measures(true_vessel_arr, pred_vessel_arr):
cm=confusion_matrix(true_vessel_arr, pred_vessel_arr)
mse = mean_squared_error(true_vessel_arr, pred_vessel_arr)
try:
acc=1.*(cm[0,0]+cm[1,1])/np.sum(cm)
sensitivity=1.*cm[1,1]/(cm[1,0]+cm[1,1])
specificity=1.*cm[0,0]/(cm[0,1]+cm[0,0])
precision=1.*cm[1,1]/(cm[1,1]+cm[0,1])
G = np.sqrt(sensitivity*specificity)
F1_score = 2*precision*sensitivity/(precision+sensitivity)
iou = 1.*cm[1,1]/(cm[1,0]+cm[0,1]+cm[1,1])
return acc, sensitivity, specificity, precision, G, F1_score, mse, iou
except Exception:
return 0,0,0,0,0,0,0,0
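# --- Illustrative sketch (not part of the original pipeline) ---
# A minimal, hypothetical usage of misc_measures(): both inputs are flattened
# binary arrays (ground truth first, prediction second) with 1 marking the
# segmented class. The helper name and the toy values below are illustrative only.
def _demo_misc_measures():
    y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
    y_pred = np.array([0, 1, 1, 1, 0, 0, 1, 0])
    acc, sen, spe, pre, g, f1, mse, iou = misc_measures(y_true, y_pred)
    return acc, f1, iou   # e.g. acc = 0.75 for this toy example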
def evaluate_disc(results_path, label_path):
if os.path.exists(results_path+'.ipynb_checkpoints'):
shutil.rmtree(results_path+'.ipynb_checkpoints')
if not os.path.exists(results_path):
os.makedirs(results_path)
seg_list = os.listdir(results_path)
tot=[]
sent=[]
spet=[]
pret=[]
G_t=[]
F1t=[]
mset=[]
iout=[]
n_val = len(seg_list)
for i in seg_list:
label_name = i.split('.')[0] + '_OD.png'
label_ = cv2.imread(label_path+label_name)/255
label_=label_[...,0]
seg_ = cv2.imread(results_path + i)
seg_ = (seg_<255).astype('float')[...,0]
acc, sensitivity, specificity, precision, G, F1_score, mse, iou = misc_measures(label_.flatten(), seg_.flatten())
tot.append(acc)
sent.append(sensitivity)
spet.append(specificity)
pret.append(precision)
G_t.append(G)
F1t.append(F1_score)
mset.append(mse)
iout.append(iou)
Data4stage2 = pd.DataFrame({'ACC':tot, 'Sensitivity':sent, 'Specificity':spet, 'Precision': pret, 'G_value': G_t, \
'F1-score': F1t, 'MSE': mset, 'IOU': iout})
Data4stage2.to_csv('./results/IDRID_optic/performance.csv', index = None, encoding='utf8')
#return tot / n_val, sent / n_val, spet / n_val, pret / n_val, G_t / n_val, F1t / n_val, auc_roct / n_val, auc_prt / n_val, iout/n_val, mset/n_val
def prediction_eval(model_1,model_2,model_3,model_4,model_5,model_6,model_7,model_8, test_loader):
n_val = len(test_loader)
seg_results_small_path = '../Results/M2/optic_disc_cup/resized/'
seg_results_raw_path = '../Results/M2/optic_disc_cup/raw/'
if not os.path.isdir(seg_results_small_path):
os.makedirs(seg_results_small_path)
if not os.path.isdir(seg_results_raw_path):
os.makedirs(seg_results_raw_path)
seg_uncertainty_small_path = '../Results/M2/optic_disc_cup/resize_uncertainty/'
if not os.path.isdir(seg_uncertainty_small_path):
os.makedirs(seg_uncertainty_small_path)
seg_uncertainty_raw_path = '../Results/M2/optic_disc_cup/raw_uncertainty/'
if not os.path.isdir(seg_uncertainty_raw_path):
os.makedirs(seg_uncertainty_raw_path)
with tqdm(total=n_val, desc='Validation round', unit='batch', leave=False) as pbar:
for batch in test_loader:
imgs = batch['image']
img_name = batch['name']
ori_width=batch['original_sz'][0]
ori_height=batch['original_sz'][1]
mask_pred_tensor_small_all = 0
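# mask_pred_tensor_small_all accumulates the softmax probability maps produced by
# the eight models below (simple ensemble averaging); the accumulated sum is
# presumably normalised/argmax-ed further down to obtain the final disc/cup mask.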
imgs = imgs.to(device=device, dtype=torch.float32)
with torch.no_grad():
_,mask_pred = model_1(imgs)
mask_pred_tensor_small = mask_pred.clone().detach()
mask_pred_tensor_small_1 = F.softmax(mask_pred_tensor_small,dim=1)
mask_pred_tensor_small_all+=mask_pred_tensor_small_1.type(torch.FloatTensor)
_,mask_pred= model_2(imgs)
mask_pred_tensor_small = mask_pred.clone().detach()
mask_pred_tensor_small_2 = F.softmax(mask_pred_tensor_small,dim=1)
mask_pred_tensor_small_all+=mask_pred_tensor_small_2.type(torch.FloatTensor)
_,mask_pred = model_3(imgs)
mask_pred_tensor_small | |
# coding: utf-8
# Welcome to the fourth Lab session of the "Advanced algorithmics and graph theory with Python" MOOC. In this exercise you will learn how to find solutions to the Traveling Salesman Problem.
#
# The main objective of this exercise is to:
# * Describe the classic traveling salesman problem and use it as a reduction for other problems.
# * Write functions to solve an NP-complete problem using backtracking.
#
# Based on what we learned in the previous exercises, we will define advanced functions and use them to implement the Traveling Salesman algorithm.
# ## List manipulation
#
# Before starting this session we will do a quick recap on list manipulation with python.
#
# We would like to point out a major difference between the '+' operator, which concatenates two lists, and the append function. Using the '+' operator creates a new list that is the concatenation of the two given lists and therefore does not modify the input lists, whereas the append function modifies the initial list in place.
#
# Let us illustrate:
# In[3]:
import matplotlib.pyplot as plt
import time
import pyrat
from imports import maze
input_list = [3, 5, 4, 1, 0, 7]
# add a new element to the list using both methods
print("We first use '+' to generate a new list containing the new element and the input list")
output_list = input_list+[8]
print("the output list is: ")
print(output_list)
print("input list is: ")
print(input_list)
print("The element was added only to the output_list not to the input list")
input_list = [3, 5, 4, 1, 0, 7]
print("Now we use append function to add an element to the list")
input_list.append(8)
print("input list is: ")
print(input_list)
print("The element was added to input_list")
# ## Exercise A (1pt)
#
# We will first define our metagraph. To do this, the first thing we need to define is its set of vertices: the positions of the cheeses in the maze and the position of the player.
#
# The function we are going to define is create_vertices_meta_graph. This function takes two inputs, a list of vertices 'pieces_of_cheese', which contains the positions of cheeses in the maze, and a vertex 'player_position' which contains the position of the player in the maze.
#
# This function returns a list containing 'piece_of_cheese' elements and 'player_position', but does not modify the list 'piece_of_cheese'.
# In[4]:
def create_vertices_meta_graph(piece_of_cheese, player_location):
#
# YOUR CODE HERE
#
meta = sorted(piece_of_cheese+[player_location])
return meta
# In[5]:
#
# AUTOGRADER TEST - DO NOT REMOVE
#
piece_of_cheese = [(3, 4), (1, 2), (5, 4), (0, 3), (8, 9)]
player_position = (0, 1)
vertices_meta_graph = create_vertices_meta_graph(
piece_of_cheese, player_position)
print("Positions of pieces of cheese are:")
# we add sorted in order to always get the same result.
print(sorted(piece_of_cheese))
print("Player position is:")
print(player_position)
print("The vertices of the meta graph are:")
# The player position can be added at any position in the list, so we use sorted in order to always get the same result.
print(sorted(vertices_meta_graph))
# The obtained result after executing the cell above should be:
# ```
# Positions of pieces of cheese are:
# [(0, 3), (1, 2), (3, 4), (5, 4), (8, 9)]
# Player position is:
# (0, 1)
# The vertices of the meta graph are:
# [(0, 1), (0, 3), (1, 2), (3, 4), (5, 4), (8, 9)]
# ```
# ## Exercise B (1pt)
#
# With the vertices defined we now need to connect them with weighted edges. To do this we are going to define the create_edge_weight_maze_graph function.
#
# This function has two inputs, 'maze_graph' and 'vertices', and returns an adjacency matrix containing distances between each pair of vertices in the meta graph. Note that for the moment we are only interested in the shortest distance, but we disregard the corresponding shortest path.
#
# Tip: You don't need to recode the Dijkstra function or to copy the code from Lab 3. The Dijkstra functions is already defined in the utils package and can be called as follows:
#
# utils.Dijkstra(maze_graph,initial_vertex).
#
# This function returns an array containing three elements:
#
# 1. an array containing the explored vertices in the order that they were explored
# 2. the parent dictionary containing the source vertex for each one of the vertices of the maze.
# 3. a dictionary where each key is a vertex on the maze graph and each value is the distance from the initial vertex to the given vertex
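# For example, the three outputs can be unpacked as follows (sketch):
#
#     explored_vertices, parent_dict, distances = utils.Dijkstra(maze_graph, source_vertex)
#     distances[target_vertex]   # shortest distance from source_vertex to target_vertex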
# In[103]:
def create_edge_weight_maze_graph(maze_graph, vertices):
import utils
adjacency_matrix = {}
for initial_vertex in vertices:
explored_vertices, _, distances = utils.Dijkstra(
maze_graph, initial_vertex)
adjacency_matrix[initial_vertex] = {}
for vertex in vertices:
if vertex != initial_vertex:
adjacency_matrix[initial_vertex][vertex] = distances[vertex]
return adjacency_matrix
# In[104]:
#
# AUTOGRADER TEST - DO NOT REMOVE
#
width = 3
height = 3
number_of_cheeses = 3
_, _, _, maze_graph = maze.generate_maze(
width, height, 0, True, False, 0.5, 5, "", 0)
pieces_of_cheese, player1_location, _ = maze.generate_pieces_of_cheese(
number_of_cheeses, width, height, False, None, None, False)
vertices = create_vertices_meta_graph(pieces_of_cheese, player1_location)
adjacency_matrix = create_edge_weight_maze_graph(maze_graph, vertices)
print("the adjacency matrix is:")
for key, value in sorted(adjacency_matrix.items()):
print("{}:{}".format(key, value))
# After coding create_edge_weight_maze_graph and running the test code, the result should be:
#
# ```
# the adjacency matrix is:
# (0, 0):{(0, 1): 3, (2, 0): 2, (2, 1): 3}
# (0, 1):{(2, 0): 3, (0, 0): 3, (2, 1): 3}
# (2, 0):{(0, 1): 3, (0, 0): 2, (2, 1): 1}
# (2, 1):{(0, 1): 3, (2, 0): 1, (0, 0): 3}
# ```
# ## Exercise C (1pt)
#
# With the metagraph in hand you are now ready to program a bruteforce search for the Traveling Salesman Problem (TSP).
#
# You will define the bruteforceTSP function which uses bruteforce search to find the shortest walk going through all vertices. Note that these are the vertices from the metagraph, which represent positions of the player and cheeses in the maze.
#
# To implement this algorithm, you will be using a recursive function as introduced in the lessons. The function bruteforceTSP defines all the variables needed, computes the metagraph and then executes a recursive function called auxbf to find the shortest walk through the vertices.
#
# The auxbf function recursively extends a partial walk that has already been started and keeps track of the best complete walk (and its length) found so far.
# In[98]:
def auxbf(current_walk, best_walk, adjacency_matrix, vertices, current_distance, best_distance):
# if the current walk already contains every vertex of the graph:
# if moreover the current distance is shorter than the best distance obtained so far:
# update the value of the best distance
# update the corresponding best walk
# otherwise:
# for any vertex in the graph:
# if the vertex is not in the current walk:
# obtain potential values for best walk and best distance by recursively calling auxbf() with updated current walk and current distance
# if potential distance is shorter than best distance:
# update the value of the best distance
# update the corresponding best walk
#
if(len(current_walk) == len(vertices)):
if(current_distance < best_distance):
best_distance = current_distance
best_walk = current_walk
else:
for next_vertex in vertices:
if not(next_vertex in current_walk):
current_walk_temp = current_walk+[next_vertex]
current_distance_temp = current_distance + \
adjacency_matrix[current_walk[-1]][next_vertex]
best_walk_temp, best_distance_temp = auxbf(
current_walk_temp, best_walk, adjacency_matrix, vertices, current_distance_temp, best_distance)
if best_distance_temp < best_distance:
best_distance = best_distance_temp
best_walk = best_walk_temp
return best_walk, best_distance
def bruteforceTSP(maze_graph, pieces_of_cheese, player_location):
# first we compute the vertices of the meta_graph:
vertices = create_vertices_meta_graph(pieces_of_cheese, player_location)
# then we create the adjacency matrix of the meta graph
adjacency_matrix = create_edge_weight_maze_graph(maze_graph, vertices)
# now we can start defining our variables
# current_distance is the length of the walk for the current exploration branch
current_distance = 0
# current_walk is a container for the current exploration branch
current_walk = [player_location]
# best_distance is an indicator of the shortest walk found so far
best_distance = float('inf')
# best_walk is a container for the corresponding walk
best_walk = []
# we start the exploration:
best_walk, best_distance = auxbf(
current_walk, best_walk, adjacency_matrix, vertices, current_distance, best_distance)
return best_walk, best_distance
# In[99]:
#
# AUTOGRADER TEST - DO NOT REMOVE
#
width = 3
height = 3
number_of_cheeses = 3
_, _, _, maze_graph = maze.generate_maze(
width, height, 0, True, False, 0.5, 5, "", 0)
pieces_of_cheese, player1_location, _ = maze.generate_pieces_of_cheese(
number_of_cheeses, width, height, False, None, None, False)
best_walk, best_distance = bruteforceTSP(
maze_graph, pieces_of_cheese, player1_location)
print("one best walk is: {}".format(best_walk))
print("the best distance is: {}".format(best_distance))
# The result of test cell | |
import numpy as np
import scipy.interpolate as interpolate
import h5py as h5
import os
from lxml import objectify, etree
import sharpy.utils.generator_interface as generator_interface
import sharpy.utils.settings as settings
import sharpy.utils.cout_utils as cout
@generator_interface.generator
class TurbVelocityField(generator_interface.BaseGenerator):
r"""
Turbulent Velocity Field Generator
``TurbVelocityField`` is a class inherited from ``BaseGenerator``
The ``TurbVelocityField`` class generates a velocity field based on the input from an [XDMF](http://www.xdmf.org) file.
It supports time-dependent fields as well as frozen turbulence.
To call this generator, the ``generator_id = TurbVelocityField`` shall be used.
This is parsed as the value for the ``velocity_field_generator`` key in the desired aerodynamic solver's settings.
Supported files:
- `field_id.xdmf`: Steady or Unsteady XDMF file
This generator also performs time interpolation between two different time steps. For now, only linear interpolation is possible.
Space interpolation is done through `scipy.interpolate` trilinear interpolation. However, turbulent fields are
read directly from the binary file and not copied into memory. This is performed using `np.memmap`.
The overhead of this procedure is ~18% for the interpolation stage; however, the initial read of the binary velocity field
(an operation that will be much more common in time-domain simulations) is faster by a factor of 1e4.
Also, memory savings are quite substantial: from 6 GB for a typical field to a handful of megabytes for the whole program.
Args:
in_dict (dict): Input data in the form of dictionary. See acceptable entries below:
Attributes:
See Also:
.. py:class:: sharpy.utils.generator_interface.BaseGenerator
"""
generator_id = 'TurbVelocityField'
generator_classification = 'velocity-field'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_types['print_info'] = 'bool'
settings_default['print_info'] = True
settings_description['print_info'] = 'Output solver-specific information in runtime.'
settings_types['turbulent_field'] = 'str'
settings_default['turbulent_field'] = None
settings_description['turbulent_field'] = 'XDMF file path of the velocity field'
settings_types['offset'] = 'list(float)'
settings_default['offset'] = np.zeros((3,))
settings_description['offset'] = 'Spatial offset in the 3 dimensions'
settings_types['centre_y'] = 'bool'
settings_default['centre_y'] = True
settings_description['centre_y'] = 'Flag for changing the domain to [``-y_max/2``, ``y_max/2``]'
settings_types['periodicity'] = 'str'
settings_default['periodicity'] = 'xy'
settings_description['periodicity'] = 'Axes in which periodicity is enforced'
settings_types['frozen'] = 'bool'
settings_default['frozen'] = True
settings_description['frozen'] = 'If ``True``, the turbulent field will not be updated in time'
settings_types['store_field'] = 'bool'
settings_default['store_field'] = False
settings_description['store_field'] = 'If ``True``, the xdmf snapshots are stored in memory. Only two at a time for the linear interpolation'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description)
def __init__(self):
self.in_dict = dict()
self.settings = dict()
self.file = None
self.extension = None
self.grid_data = dict()
self.interpolator = 3*[None]
self.x_periodicity = False
self.y_periodicity = False
# variables for interpolator wrapper
self._t0 = -1
self._t1 = -1
self._it0 = -1
self._it1 = -1
self._interpolator0 = None
self._interpolator1 = None
self.coeff = 0.
self.double_initialisation = True
self.vel_holder0 = 3*[None]
self.vel_holder1 = 3*[None]
def initialise(self, in_dict):
self.in_dict = in_dict
settings.to_custom_types(self.in_dict, self.settings_types, self.settings_default)
self.settings = self.in_dict
_, self.extension = os.path.splitext(self.settings['turbulent_field'])
if self.extension == '.h5':
self.read_btl(self.settings['turbulent_field'])
elif self.extension == '.xdmf':
self.read_xdmf(self.settings['turbulent_field'])
if 'z' in self.settings['periodicity']:
raise ValueError('Periodicity setting in TurbVelocityField cannot be z.\n A turbulent boundary layer is not periodic in the z direction!')
if 'x' in self.settings['periodicity']:
self.x_periodicity = True
if 'y' in self.settings['periodicity']:
self.y_periodicity = True
# ADC: VERY VERY UGLY. NEED A BETTER WAY
def interpolator_wrapper0(self, coords, i_dim=0):
coeff = self.get_coeff()
return (1.0 - coeff)*self._interpolator0[i_dim](coords) + coeff*self._interpolator1[i_dim](coords)
def interpolator_wrapper1(self, coords, i_dim=1):
coeff = self.get_coeff()
return (1.0 - coeff)*self._interpolator0[i_dim](coords) + coeff*self._interpolator1[i_dim](coords)
def interpolator_wrapper2(self, coords, i_dim=2):
coeff = self.get_coeff()
return (1.0 - coeff)*self._interpolator0[i_dim](coords) + coeff*self._interpolator1[i_dim](coords)
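# Descriptive note: each wrapper evaluates one velocity component as a linear
# blend in time of the two cached snapshot interpolators,
#     u_i(coords) = (1 - coeff)*interpolator0_i(coords) + coeff*interpolator1_i(coords),
# where coeff is set in update_coeff() (presumably (t - t0)/(t1 - t0)) and lies in
# [0, 1]. Three wrappers exist because self.interpolator holds one callable per
# velocity component (i_dim = 0, 1, 2).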
def get_coeff(self):
return self.coeff
def init_interpolator(self):
if self.settings['frozen']:
self.interpolator = self._interpolator0
return
# continuing the ugliness
self.interpolator[0] = self.interpolator_wrapper0
self.interpolator[1] = self.interpolator_wrapper1
self.interpolator[2] = self.interpolator_wrapper2
# these functions need to define the interpolators
def read_btl(self, in_file):
"""
Legacy function, not using the custom format based on HDF5 anymore.
"""
raise NotImplementedError('The BTL reader is not up to date!')
def read_xdmf(self, in_file):
"""
Reads the xml file `<case_name>.xdmf`. Writes the self.grid_data data structure
with all the information necessary.
Note: this function does not load any turbulence data (such as ux000, ...),
it only reads the header information contained in the xdmf file.
"""
# store route of file for the other files
self.route = os.path.dirname(os.path.abspath(in_file))
# file to string
with open(in_file, 'r') as self.file:
data = self.file.read().replace('\n', '')
# parse data
# this next line is necessary to avoid problems with parsing in the Time part:
# <!--Start....
# 0.0, 1.0 ...
# see https://stackoverflow.com/a/18313932
parser = objectify.makeparser(remove_comments=True)
tree = objectify.fromstring(data, parser=parser)
# mesh dimensions
self.grid_data['dimensions'] = np.fromstring(tree.Domain.Topology.attrib['Dimensions'],
sep=' ',
count=3,
dtype=int)
# origin
self.grid_data['origin'] = np.fromstring(tree.Domain.Geometry.DataItem[0].text,
sep=' ',
count=int(tree.Domain.Geometry.DataItem[0].attrib['Dimensions']),
dtype=float)
# dxdydz
# because of how XDMF does it, it is actually dzdydx
self.grid_data['dxdydz'] = (
np.fromstring(tree.Domain.Geometry.DataItem[1].text,
sep=' ',
count=int(tree.Domain.Geometry.DataItem[1].attrib['Dimensions']),
dtype=float))
# now onto the grid
# time information
# [0] is start, [1] is stride
self.grid_data['time'] = np.fromstring(tree.Domain.Grid.Time.DataItem.text,
sep=' ',
count=2,
dtype=float)
self.grid_data['n_grid'] = len(tree.Domain.Grid.Grid)
# self.grid_data['grid'] = [dict()]*self.grid_data['n_grid']
self.grid_data['grid'] = []
for i, i_grid in enumerate(tree.Domain.Grid.Grid):
self.grid_data['grid'].append(dict())
# cycle through attributes
for k_attrib, v_attrib in i_grid.attrib.items():
self.grid_data['grid'][i][k_attrib] = v_attrib
# get Attributes (upper case A is not a mistake)
for i_attrib, attrib in enumerate(i_grid.Attribute):
self.grid_data['grid'][i][attrib.attrib['Name']] = dict()
self.grid_data['grid'][i][attrib.attrib['Name']]['file'] = (
attrib.DataItem.text.replace(' ', ''))
if attrib.DataItem.attrib['Precision'].strip() == '4':
self.grid_data['grid'][i][attrib.attrib['Name']]['Precision'] = np.float32
elif attrib.DataItem.attrib['Precision'].strip() == '8':
self.grid_data['grid'][i][attrib.attrib['Name']]['Precision'] = np.float64
# now we have the file names and the dimensions
self.grid_data['initial_x_grid'] = np.array(np.arange(0,
self.grid_data['dimensions'][2]))*self.grid_data['dxdydz'][2]
# z in the file is -y for us in sharpy (y_sharpy = right)
self.grid_data['initial_y_grid'] = np.array(np.arange(0,
self.grid_data['dimensions'][1]))*self.grid_data['dxdydz'][1]
# y in the file is z for us in sharpy (up)
self.grid_data['initial_z_grid'] = np.array(np.arange(0,
self.grid_data['dimensions'][0]))*self.grid_data['dxdydz'][0]
# the domain now goes:
# x \in [0, dimensions[0]*dx]
# y \in [-dimensions[2]*dz, 0]
# z \in [0, dimensions[1]*dy]
centre_z_offset = 0.
if self.settings['centre_y']:
centre_z_offset = -0.5*(self.grid_data['initial_z_grid'][-1] - self.grid_data['initial_z_grid'][0])
self.grid_data['initial_x_grid'] += self.settings['offset'][0] + self.grid_data['origin'][0]
self.grid_data['initial_x_grid'] -= np.max(self.grid_data['initial_x_grid'])
self.grid_data['initial_y_grid'] += self.settings['offset'][1] + self.grid_data['origin'][1]
self.grid_data['initial_z_grid'] += self.settings['offset'][2] + self.grid_data['origin'][2] + centre_z_offset
self.bbox = self.get_field_bbox(self.grid_data['initial_x_grid'],
self.grid_data['initial_y_grid'],
self.grid_data['initial_z_grid'],
frame='G')
if self.settings['print_info']:
cout.cout_wrap('The domain bbox is:', 1)
cout.cout_wrap(' x = [' + str(self.bbox[0, 0]) + ', ' + str(self.bbox[0, 1]) + ']', 1)
cout.cout_wrap(' y = [' + str(self.bbox[1, 0]) + ', ' + str(self.bbox[1, 1]) + ']', 1)
cout.cout_wrap(' z = [' + str(self.bbox[2, 0]) + ', ' + str(self.bbox[2, 1]) + ']', 1)
def generate(self, params, uext):
zeta = params['zeta']
for_pos = params['for_pos']
t = params['t']
self.update_cache(t)
self.update_coeff(t)
self.init_interpolator()
self.interpolate_zeta(zeta,
for_pos,
uext)
def update_cache(self, t):
self.double_initialisation = False
if self.settings['frozen']:
if self._interpolator0 is None:
self._t0 = self.timestep_2_time(0)
self._it0 = 0
self._interpolator0 = self.read_grid(self._it0, i_cache=0)
return
# most common case: t already in the [t0, t1] interval
if self._t0 <= t <= self._t1:
return
# t < t0, something weird (time going backwards)
if t < self._t0:
raise ValueError('Please make sure everything is ok. Your time is going backwards.')
# t > t1, need initialisation
if t > self._t1:
new_it = self.time_2_timestep(t)
# new timestep requires initialising the two of them (not likely at all)
# this means that the simulation timestep > LES timestep
if new_it > self._it1:
self.double_initialisation = True
else:
# t1 goes to t0
self._t0 = self._t1
self._it0 = self._it1
self._interpolator0 = self._interpolator1.copy()
# t1 updates to the next (new_it + 1)
self._it1 = new_it + 1
self._t1 = self.timestep_2_time(self._it1)
self._interpolator1 = self.read_grid(self._it1, i_cache=1)
return
# last case, both interp need to be initialised
if (self._t0 is None or self.double_initialisation):
self._t0 = self.timestep_2_time(new_it)
self._it0 = new_it
self._interpolator0 = self.read_grid(self._it0, i_cache=0)
self._it1 = new_it + 1
self._t1 = self.timestep_2_time(self._it1)
self._interpolator1 = self.read_grid(self._it1, i_cache=1)
def update_coeff(self, t):
if self.settings['frozen']:
self.coeff = 0.0
return
self.coeff = self.linear_coeff([self._t0, self._t1], t)
return
def time_2_timestep(self, t):
return int(max(0, np.floor((t - self.grid_data['time'][0])/self.grid_data['time'][1])))
def timestep_2_time(self, it):
return it*self.grid_data['time'][1] + self.grid_data['time'][0]
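# Descriptive note: grid_data['time'][0] is the time of the first snapshot and
# grid_data['time'][1] is the constant stride between snapshots, so
#     t(it) = time[0] + it*time[1]   and   it(t) = floor((t - time[0])/time[1]),
# with the timestep clipped at 0 for times before the first snapshot.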
def get_field_bbox(self, x_grid, y_grid, z_grid, frame='G'):
bbox = np.zeros((3, 2))
bbox[0, :] = [np.min(x_grid), np.max(x_grid)]
bbox[1, :] = [np.min(y_grid), np.max(y_grid)]
bbox[2, :] = [np.min(z_grid), np.max(z_grid)]
if frame == 'G':
bbox[:, 0] = self.gstar_2_g(bbox[:, 0])
bbox[:, 1] = self.gstar_2_g(bbox[:, 1])
return bbox
def create_interpolator(self, data, x_grid, y_grid, z_grid, i_dim):
interpolator = interpolate.RegularGridInterpolator((x_grid, y_grid, z_grid),
data,
bounds_error=False,
fill_value=0.0)
return interpolator
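# Usage sketch (illustrative, assuming `data` has shape
# (len(x_grid), len(y_grid), len(z_grid)) and the grids are ascending 1-D arrays):
#
#     interp = self.create_interpolator(u_x_data, x_grid, y_grid, z_grid, i_dim=0)
#     interp(np.array([[x, y, z]]))   # value at (x, y, z); 0.0 outside the box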
def interpolate_zeta(self, zeta, for_pos, u_ext, interpolator=None, offset=np.zeros((3))):
if interpolator is None:
| |
logger.info("HA Deployment set to DISABLED")
HA_DEPLOYMENT = False
if write_flag == 0:
# reload odim-controller conf with haDeployment param
yaml.SafeDumper.org_represent_str = yaml.SafeDumper.represent_str
yaml.add_representer(str, represent_yaml_multline_str, Dumper=yaml.SafeDumper)
with open(CONTROLLER_CONF_FILE, 'w') as f:
yaml.safe_dump(CONTROLLER_CONF_DATA, f, default_flow_style=False)
# operation_odimra is used for deploying/removing ODIMRA
# in the nodes provided in odim-controller conf based on the operation input
def operation_odimra(operation):
cur_dir = os.getcwd()
os.chdir(ODIMRA_SRC_PATH)
host_file = os.path.join(KUBESPRAY_SRC_PATH, DEPLOYMENT_SRC_DIR, 'hosts.yaml')
if not os.path.exists(host_file):
logger.error("Host file not found for deployment id %s" %(DEPLOYMENT_ID))
exit(1)
if not DRY_RUN_SET:
load_password_from_vault(cur_dir)
# set options based on the operation type
helm_config_file = ""
odimra_config_file = ""
if operation == "install":
helm_config_file = os.path.join(ODIMRA_SRC_PATH, 'roles/pre-install/files/helmcharts/helm_config_values.yaml')
odimra_config_file = os.path.join(ODIMRA_SRC_PATH, 'roles/odimra-copy-image/files/odimra_config_values.yaml')
perform_odimra_deploy_prereqs()
elif operation == "uninstall":
helm_config_file = os.path.join(ODIMRA_SRC_PATH, 'roles/post-uninstall/files/odim_controller_config.yaml')
odimra_config_file = os.path.join(ODIMRA_SRC_PATH, 'roles/odimra-delete-image/files/odimra_config_values.yaml')
shutil.copyfile(CONTROLLER_CONF_FILE, helm_config_file)
shutil.copyfile(CONTROLLER_CONF_FILE, odimra_config_file)
# as rollback of failed operation is not handled yet
# will try on first master node and exit on failure
master_node = list(K8S_INVENTORY_DATA['all']['children']['kube-master']['hosts'].keys())[0]
logger.info("Starting odimra %s on master node %s", operation, master_node)
odimra_deploy_cmd = 'ansible-playbook -i {host_conf_file} --become --become-user=root \
--extra-vars "host={master_node} helm_config_file={helm_config_file} ignore_err={ignore_err}" \
{operation_conf_file}.yaml'.format(host_conf_file=host_file, master_node=master_node, helm_config_file=CONTROLLER_CONF_FILE, \
operation_conf_file=operation,ignore_err=IGNORE_ERRORS_SET)
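# Note (descriptive): the playbook file name is derived from the operation, i.e.
# install.yaml or uninstall.yaml under ODIMRA_SRC_PATH, and the sudo password
# loaded from the vault is handed to ansible through the ANSIBLE_BECOME_PASS
# environment variable in the exec() call below.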
ret = exec(odimra_deploy_cmd, {'ANSIBLE_BECOME_PASS': ANSIBLE_BECOME_PASS})
# remove copy of controller config file created
os.remove(helm_config_file)
os.remove(odimra_config_file)
if ret != 0:
logger.critical("ODIMRA %s failed on master node %s", operation, master_node)
os.chdir(cur_dir)
exit(1)
if operation == "uninstall" and os.path.exists(os.path.join(CONTROLLER_CONF_DATA['odimCertsPath'], '.gen_odimra_certs.ok')):
logger.info("Cleaning up certificates generated for the deployment")
shutil.rmtree(CONTROLLER_CONF_DATA['odimCertsPath'])
logger.info("Completed ODIMRA %s operation", operation)
os.chdir(cur_dir)
def cleanUp():
if DEPLOYMENT_SRC_DIR != "":
path = os.path.join(KUBESPRAY_SRC_PATH, DEPLOYMENT_SRC_DIR)
logger.info("Cleaning up temp directory : %s", path)
shutil.rmtree(path)
# install_k8s is for performing all the necessary steps
# for deploying k8s cluster
def install_k8s():
logger.info("Installing kubernetes")
# Parse the conf file passed
read_conf()
# Validate conf parameters passed
perform_checks()
# Check for HA deployment
perform_check_ha_deploy()
# Initiate k8s deployment
deploy_k8s()
exit(0)
# reset_k8s is for performing all the necessary steps
# for removing k8s from the deployed nodes
def reset_k8s():
logger.info("Resetting kubernetes")
# Parse the conf file passed
read_conf()
# Validate conf parameters passed
perform_checks(skip_opt_param_check=True)
# Remove k8s from the deployed nodes
remove_k8s()
exit(0)
# install_odimra is for performing all the necessary steps for installing ODIMRA
def install_odimra():
logger.info("Installing ODIMRA")
# Parse the conf file passed
read_conf()
# Validate conf parameters passed
perform_checks()
# load existing hosts.yaml created for the deployment_id
load_k8s_host_conf()
# Initiate ODIMRA deployment
operation_odimra("install")
exit(0)
# uninstall_odimra is used for performing all the necessary steps for uninstalling ODIMRA
def uninstall_odimra():
logger.info("Uninstalling ODIMRA")
# Parse the conf file passed
read_conf()
# Validate conf parameters passed
perform_checks()
# load existing hosts.yaml created for the deployment_id
load_k8s_host_conf()
# Initiate ODIMRA removal
operation_odimra("uninstall")
exit(0)
# add_k8s_node is for performing all the necessary steps
# for adding a new node to existing k8s cluster
def add_k8s_node():
logger.info("Adding new node to existing kubernetes cluster")
# Parse the conf file passed
read_conf()
# Validate conf parameters passed
perform_checks()
# load existing hosts.yaml created for the deployment_id
load_k8s_host_conf()
# Initiate k8s deployment on new nodes
scale_out_k8s()
exit(0)
# rm_k8s_node is for performing all the necessary steps
# for removing a node from the existing k8s cluster
def rm_k8s_node():
logger.info("Removing a node from the existing kubernetes cluster")
# Parse the conf file passed
read_conf()
# Validate conf parameters passed
perform_checks()
# load existing hosts.yaml created for the deployment_id
load_k8s_host_conf()
# Initiate node removal from k8s deployment
scale_in_k8s()
exit(0)
# generateRandomAlphaNum generates a random
# string of requested length containing alphanumeric and
# special characters from the defined set
def generateRandomAlphaNum(length):
random_char_set = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_-{}<>+[]$?@:;()%,'
return ''.join((random.choice(random_char_set) for i in range(length)))
# store_vault_key checks if vault password file exists,
# if not creates the file by asking user for the password
# else returns without performing any action.
def store_vault_key():
global ODIMRA_VAULT_KEY_FILE
user_home = os.getenv("HOME")
odimra_vault_dir = os.path.join(user_home, '.odimra')
if not os.path.exists(odimra_vault_dir):
os.mkdir(odimra_vault_dir, mode = 0o700)
ODIMRA_VAULT_KEY_FILE = os.path.join(odimra_vault_dir, '.key_dnd.dat')
if not os.path.exists(ODIMRA_VAULT_KEY_FILE):
print("\nProvide password for vault")
pw_from_prompt = lambda: (getpass.getpass('Enter Password: '), getpass.getpass('Confirm Password: '))
first_pw, second_pw = pw_from_prompt()
if first_pw != second_pw:
logger.critical("Passwords provided do not match")
exit(1)
fd = open(ODIMRA_VAULT_KEY_FILE, "wb")
fd.write(first_pw.encode('utf-8'))
fd.close()
encode_cmd = '{vault_bin} -encode {key_file}'.format(vault_bin=ODIMRA_VAULT_BIN, key_file=ODIMRA_VAULT_KEY_FILE)
ret = exec(encode_cmd, {})
if ret != 0:
logger.critical("storing vault key failed")
exit(1)
return
# store_password_in_vault stores the nodes' sudo
# password securely by encrypting using odimra vault
def store_password_in_vault():
global ANSIBLE_BECOME_PASS
print("\nProvide sudo password of the nodes")
pw_from_prompt = lambda: (getpass.getpass('Enter Password: '), getpass.getpass('Confirm Password: '))
first_pw, second_pw = pw_from_prompt()
if first_pw != second_pw:
logger.critical("Passwords provided do not match")
exit(1)
fd = open(ANSIBLE_SUDO_PW_FILE, "wb")
fd.write(first_pw.encode('utf-8'))
fd.close()
encrypt_cmd = '{vault_bin} -key {key_file} -encrypt {data_file}'.format(vault_bin=ODIMRA_VAULT_BIN,
key_file=ODIMRA_VAULT_KEY_FILE, data_file=ANSIBLE_SUDO_PW_FILE)
ret = exec(encrypt_cmd, {})
if ret != 0:
logger.critical("storing node password failed")
exit(1)
ANSIBLE_BECOME_PASS = first_pw
# load_password_from_vault loads the sudo password of the nodes
# of the present cluster, securely stored using the odimra vault
def load_password_from_vault(cur_dir):
global ANSIBLE_BECOME_PASS
decrypt_cmd = '{vault_bin} -key {key_file} -decrypt {data_file}'.format(vault_bin=ODIMRA_VAULT_BIN,
key_file=ODIMRA_VAULT_KEY_FILE, data_file=ANSIBLE_SUDO_PW_FILE)
execHdlr = subprocess.Popen(decrypt_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
universal_newlines=True)
try:
std_out, std_err = execHdlr.communicate()
except subprocess.TimeoutExpired:
execHdlr.kill()
if execHdlr.returncode != 0 or std_out == "":
print(std_out.strip())
logger.critical("failed to read node password")
os.chdir(cur_dir)
exit(1)
ANSIBLE_BECOME_PASS = std_out.rstrip('\n')
# check_extract_kubespray_src checks whether the kubespray source
# bundle has already been extracted and, if not, invokes the script
# that extracts and configures it
def check_extract_kubespray_src():
if not os.path.isdir(os.path.join(KUBESPRAY_SRC_PATH, "inventory")):
kubespray_extract_tool = os.path.join(KUBESPRAY_SRC_PATH, 'configure-kubespray.sh')
kubespray_extract_cmd = '/bin/bash {kubespray_extract_tool} {kubespray_src_path}'.format( \
kubespray_extract_tool=kubespray_extract_tool, kubespray_src_path=KUBESPRAY_SRC_PATH)
ret = exec(kubespray_extract_cmd, {})
if ret != 0:
logger.critical("Extracting and configuring kubespray failed")
exit(1)
def read_groupvar():
global GROUP_VAR_DATA
group_var_file = ODIMRA_SRC_PATH+'/group_vars/all'
if not os.path.isfile(group_var_file):
logger.critical("invalid group_var file %s passed, exiting!!!", group_var_file)
exit(1)
logger.debug("Reading group_var file %s", group_var_file)
with open(group_var_file) as f:
GROUP_VAR_DATA = yaml.load(f, Loader=yaml.FullLoader)
# upgrade_config_map updates the config maps
def upgrade_config_map(config_map_name):
logger.info("Upgrading config map"+config_map_name)
# Parse the conf file passed
read_conf()
# Validate conf parameters passed
perform_checks()
# load existing hosts.yaml created for the deployment_id
load_k8s_host_conf()
#loading the group_all yaml and finding helm chart full name
read_groupvar()
helm_chart_list=config_map_name.split(",")
for data in helm_chart_list:
if data == "all":
odiraConfigHelmChartData= GROUP_VAR_DATA["odim_pv_pvc_secrets_helmcharts"]
for helm_chart_name in odiraConfigHelmChartData:
if 'pv-pvc' in helm_chart_name:
continue
update_helm_charts(helm_chart_name)
odimHelmChartData= GROUP_VAR_DATA["odim_svc_helmcharts"]
for helm_chart_name in odimHelmChartData:
update_helm_charts(helm_chart_name)
thirdPartyHelmCharts=GROUP_VAR_DATA["odim_third_party_helmcharts"]
for helm_chart_name in thirdPartyHelmCharts:
update_helm_charts(helm_chart_name)
deploy_plugin('all')
elif data == "odimra":
odiraConfigHelmChartData= GROUP_VAR_DATA["odim_pv_pvc_secrets_helmcharts"]
for helm_chart_name in odiraConfigHelmChartData:
if 'pv-pvc' in helm_chart_name:
continue
update_helm_charts(helm_chart_name)
odimHelmChartData= GROUP_VAR_DATA["odim_svc_helmcharts"]
for helm_chart_name in odimHelmChartData:
update_helm_charts(helm_chart_name)
deploy_plugin('all')
elif data == 'thirdparty':
thirdPartyHelmCharts=GROUP_VAR_DATA["odim_third_party_helmcharts"]
for helm_chart_name in thirdPartyHelmCharts:
update_helm_charts(helm_chart_name)
else:
update_helm_charts(data)
# update_helm_charts is for upgrading the deployed
# helm releases
def update_helm_charts(config_map_name):
optionHelmChartInfo = {
"odimra-config":"odim_pv_pvc_secrets_helmcharts",
"odimra-platformconfig":"odim_pv_pvc_secrets_helmcharts",
"odimra-secret":"odim_pv_pvc_secrets_helmcharts",
"kafka-secret":"odim_pv_pvc_secrets_helmcharts",
"zookeeper-secret":"odim_pv_pvc_secrets_helmcharts",
"configure-hosts":"odim_pv_pvc_secrets_helmcharts",
"odimra-k8s-access-config":"odim_pv_pvc_secrets_helmcharts",
"account-session":"odim_svc_helmcharts",
"aggregation":"odim_svc_helmcharts",
"api":"odim_svc_helmcharts",
"events":"odim_svc_helmcharts",
"fabrics":"odim_svc_helmcharts",
"telemetry":"odim_svc_helmcharts",
"managers":"odim_svc_helmcharts",
"systems":"odim_svc_helmcharts",
"task":"odim_svc_helmcharts",
"update":"odim_svc_helmcharts",
"kafka":"odim_third_party_helmcharts",
"zookeeper":"odim_third_party_helmcharts",
"redis":"odim_third_party_helmcharts",
"etcd":"odim_third_party_helmcharts"
}
operationHelmChartInfo={
"odimra-config":"upgrade-config",
"odimra-platformconfig":"upgrade-config",
"odimra-secret":"upgrade-config",
"kafka-secret":"upgrade-config",
"zookeeper-secret":"upgrade-config",
"configure-hosts":"upgrade-config",
"odimra-k8s-access-config":"upgrade-config",
"account-session":"upgrade-config",
"aggregation":"upgrade-config",
"api":"upgrade-config",
"events":"upgrade-config",
"fabrics":"upgrade-config",
"telemetry":"upgrade-config",
"managers":"upgrade-config",
"systems":"upgrade-config",
"task":"upgrade-config",
"update":"upgrade-config",
"kafka":"upgrade_thirdparty",
"zookeeper":"upgrade_thirdparty",
"redis":"upgrade_thirdparty",
"etcd":"upgrade_thirdparty"
}
if config_map_name not in optionHelmChartInfo:
logger.critical("%s upgrade is not supported!!!", config_map_name)
exit(1)
helmCharatGroupName=optionHelmChartInfo[config_map_name]
if 'haDeploymentEnabled' in CONTROLLER_CONF_DATA['odimra'] and \
CONTROLLER_CONF_DATA['odimra']['haDeploymentEnabled'] and \
helmCharatGroupName == 'odim_third_party_helmcharts':
helmCharatGroupName = 'odim_third_party_ha_helmcharts'
operationName=operationHelmChartInfo[config_map_name]
helmchartData=GROUP_VAR_DATA[helmCharatGroupName]
fullHelmChartName = helmchartData[config_map_name]
if fullHelmChartName=='':
logger.critical("%s upgrade is not supported!!!", config_map_name)
exit(1)
logger.info('Full helm chart name %s',fullHelmChartName)
cur_dir = os.getcwd()
os.chdir(ODIMRA_SRC_PATH)
host_file = os.path.join(KUBESPRAY_SRC_PATH, DEPLOYMENT_SRC_DIR, 'hosts.yaml')
if not os.path.exists(host_file):
logger.error("Host file not found for deployment id %s" %(DEPLOYMENT_ID))
exit(1)
if not DRY_RUN_SET:
load_password_from_vault(cur_dir)
# check if certs needs to be generated or loaded again
if 'secret' in config_map_name:
reload_odimra_certs()
upgrade_flag = False
if "third_party" in helmCharatGroupName or helmCharatGroupName =='odim_svc_helmcharts':
if ODIMRA_IMAGE_PATH == "":
logger.warning("odimra image source path not configured, expecting user to copy & load all the required odimra docker images on cluster nodes !!!")
else:
nodes_list = ""
for node, attrs in K8S_INVENTORY_DATA['all']['hosts'].items():
nodes_list += '{hostname},'.format(hostname=node)
nodes_list = nodes_list.rstrip(',')
dockerImageName=GROUP_VAR_DATA['odim_docker_images'][config_map_name]
logger.info("Start copying of docker images for %s",config_map_name)
docker_copy_image_command= 'ansible-playbook -i {host_conf_file} --become --become-user=root \
--extra-vars "docker_image_name={docker_image_name} helm_config_file={helm_config_file} host={nodes} ignore_err={ignore_err}" pre_upgrade.yaml'.format(\
host_conf_file=host_file,docker_image_name=dockerImageName,\
helm_config_file=CONTROLLER_CONF_FILE,\
nodes=nodes_list,\
ignore_err=IGNORE_ERRORS_SET)
ret = exec(docker_copy_image_command, {'ANSIBLE_BECOME_PASS': ANSIBLE_BECOME_PASS})
if ret != 0:
logger.critical("ODIMRA %s failed to copy docker image %s", operationName, dockerImageName)
os.chdir(cur_dir)
exit(1)
else:
logger.info("ODIMRA %s success copy docker image %s", operationName, dockerImageName)
for master_node in K8S_INVENTORY_DATA['all']['children']['kube-master']['hosts'].items():
logger.info("Starting upgrade of %s on master node %s", fullHelmChartName, master_node[0])
odimra_upgrade_cmd = 'ansible-playbook -i {host_conf_file} --become --become-user=root \
--extra-vars "host={master_node} helm_chart_name={helm_chart_name} helm_chart_name_version={helm_chart_name_version} helm_config_file={helm_config_file} ignore_err={ignore_err}" {operation_conf_file}.yaml'.format( \
host_conf_file=host_file, master_node=master_node[0], \
helm_chart_name=config_map_name, \
helm_chart_name_version=fullHelmChartName, \
helm_config_file=CONTROLLER_CONF_FILE, \
operation_conf_file=operationName,ignore_err=IGNORE_ERRORS_SET)
ret = exec(odimra_upgrade_cmd, {'ANSIBLE_BECOME_PASS': ANSIBLE_BECOME_PASS})
if ret != 0:
logger.critical("ODIMRA %s failed when tried on master node %s", operationName, master_node[0])
else:
logger.info("ODIMRA %s success on master node %s", operationName, master_node[0])
upgrade_flag=True
break
if upgrade_flag:
logger.info("Completed ODIMRA %s operation", operationName)
else:
logger.info("Could not %s ODIMRA on any master nodes", operationName)
os.chdir(cur_dir)
exit(1)
os.chdir(cur_dir)
# list_deployments is for listing the
# helm deployed releases
def list_deployments():
# Parse the conf file passed
read_conf()
# Validate conf parameters passed
perform_checks()
if 'namespace' not in CONTROLLER_CONF_DATA['odimra'] or \
CONTROLLER_CONF_DATA['odimra']['namespace'] == None or \
CONTROLLER_CONF_DATA['odimra']['namespace'] == "":
logger.critical("namespace not configured, exiting!!!")
exit(1)
# load existing hosts.yaml created for the deployment_id
load_k8s_host_conf()
list_flag = False
for master_node in K8S_INVENTORY_DATA['all']['children']['kube-master']['hosts'].items():
ip = K8S_INVENTORY_DATA['all']['hosts'][master_node[0]]['ip']
list_deps_cmd = '/usr/bin/ssh {ip} helm list -n {namespace}'.format( \
namespace=CONTROLLER_CONF_DATA['odimra']['namespace'], ip=ip)
ret = exec(list_deps_cmd, {'ANSIBLE_BECOME_PASS': ANSIBLE_BECOME_PASS})
if ret == 0:
list_flag = True
break
if not list_flag:
exit(1)
# list_deployment_history is for listing the
# details of a particular helm deployed release
def list_deployment_history(depName):
# Parse the conf file passed
read_conf()
# Validate conf parameters passed
perform_checks()
if 'namespace' not in CONTROLLER_CONF_DATA['odimra'] or \
CONTROLLER_CONF_DATA['odimra']['namespace'] == None or \
CONTROLLER_CONF_DATA['odimra']['namespace'] == "":
logger.critical("namespace not configured, exiting!!!")
exit(1)
# load existing hosts.yaml created for the deployment_id
load_k8s_host_conf()
list_flag = False
for master_node in K8S_INVENTORY_DATA['all']['children']['kube-master']['hosts'].items():
ip = K8S_INVENTORY_DATA['all']['hosts'][master_node[0]]['ip']
list_history_cmd = '/usr/bin/ssh {ip} helm history {deployment} -n {namespace}'.format( \
ip=ip, deployment=depName, \
namespace=CONTROLLER_CONF_DATA['odimra']['namespace'])
ret = exec(list_history_cmd, {'ANSIBLE_BECOME_PASS': ANSIBLE_BECOME_PASS})
if ret == 0:
list_flag = True
break
if not list_flag:
exit(1)
# rollback_deployment is for doing rollback of a
# particular helm deployed release
def rollback_deployment(depName, revision):
logger.info("rollback %s deployment to revision %d", depName, revision)
# Parse the conf file passed
read_conf()
# Validate conf parameters passed
perform_checks()
# load existing hosts.yaml created for the deployment_id
load_k8s_host_conf()
cur_dir = os.getcwd()
if not DRY_RUN_SET:
os.chdir(ODIMRA_SRC_PATH)
load_password_from_vault(cur_dir)
rollback_flag = False
host_file = os.path.join(KUBESPRAY_SRC_PATH, DEPLOYMENT_SRC_DIR, 'hosts.yaml')
for master_node in K8S_INVENTORY_DATA['all']['children']['kube-master']['hosts'].items():
logger.info("Starting rollback of %s deployment on master node %s", depName, master_node[0])
rollback_dep_cmd = 'ansible-playbook -i {host_conf_file} --become --become-user=root \
--extra-vars "host={master_node} release={depName} revision={revision}" rollback.yaml'.format( \
host_conf_file=host_file, master_node=master_node[0], \
depName=depName, revision=revision)
ret = exec(rollback_dep_cmd, {'ANSIBLE_BECOME_PASS': ANSIBLE_BECOME_PASS})
if ret != 0:
logger.critical("rollback of %s deployment failed on master node %s", depName, master_node[0])
else:
rollback_flag=True
break
if rollback_flag:
logger.info("rollback of %s deployment to revision %d was successful", depName, revision)
else:
logger.info("rollback of %s deployment to revision %d failed", depName, revision)
os.chdir(cur_dir)
exit(1)
os.chdir(cur_dir)
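# Illustrative call only (the release name and revision below are made up):
#   rollback_deployment("odimra", 2)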
# scale_plugin is for scaling the helm deployed
# plugin release
def scale_plugin(plugin_name, replica_count):
logger.info("scaling plugin %s deployment to replicas %d", plugin_name, replica_count)
# Parse the conf file | |
# -*- coding: utf-8 -*-
# $Id: itgTableDaa.py $
"""
DAA (instruction) result table.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
"""
__version__ = "$Revision: 100868 $";
## The 32-bit GCC (C99) program that produced the table below.
g_sItgCProgramDaa = \
"""
#include <stdio.h>
int main()
{
for (unsigned uInputAL = 0; uInputAL < 256; uInputAL++)
for (unsigned fAux = 0; fAux < 2; fAux++)
for (unsigned fCarry = 0; fCarry < 2; fCarry++)
{
unsigned uInputEFlags = fCarry | (fAux << 4);
unsigned uResultAL;
unsigned uResultEFlags;
__asm__ __volatile__("pushl %1\\n"
"popfl\\n"
"daa\\n"
"pushf\\n"
"pop %1\\n"
: "=a" (uResultAL),
"=r" (uResultEFlags)
: "0" (uInputAL),
"1" (uInputEFlags)
: "memory"
);
printf(" ( 0x%02x, 0x%02x ), # AL=0x%02x, AF=%u CF=%u\\n",
uResultAL, uResultEFlags & 0xd5, uInputAL, fAux, fCarry);
/* 0xd5 = CF, PF, AF, ZF, SF */
}
return 0;
}
""";
#
# Compile and run the above program if requested to do so.
#
if __name__ == '__main__':
import sys;
if len(sys.argv) > 1 and sys.argv[1] == 'gen':
import subprocess;
oProc = subprocess.Popen(['gcc', '-x', 'c', '-std=gnu99', '-m32', '-o', './itgTableDaa', '-'], stdin = subprocess.PIPE);
oProc.communicate(g_sItgCProgramDaa);
oProc.wait();
oProc = subprocess.Popen(['./itgTableDaa',]).wait();
sys.exit(0);
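## A minimal lookup helper sketch ('itgLookupDaa' is an illustrative name), assuming
#  the entries follow the generator's loop order, i.e. index = (AL << 2) | (AF << 1) | CF.
def itgLookupDaa(uInputAL, fAux, fCarry):
    """ Returns (result AL, result EFLAGS & 0xd5) for the given DAA inputs. """
    return g_aItgDaaResults[(uInputAL << 2) | (fAux << 1) | fCarry];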
##
# The DAA results.
#
# The index / input relation is: index = (AL << 2) | (AF << 1) | CF
#
g_aItgDaaResults = \
[
( 0x00, 0x44 ), # AL=0x00, AF=0 CF=0
( 0x60, 0x05 ), # AL=0x00, AF=0 CF=1
( 0x06, 0x14 ), # AL=0x00, AF=1 CF=0
( 0x66, 0x15 ), # AL=0x00, AF=1 CF=1
( 0x01, 0x00 ), # AL=0x01, AF=0 CF=0
( 0x61, 0x01 ), # AL=0x01, AF=0 CF=1
( 0x07, 0x10 ), # AL=0x01, AF=1 CF=0
( 0x67, 0x11 ), # AL=0x01, AF=1 CF=1
( 0x02, 0x00 ), # AL=0x02, AF=0 CF=0
( 0x62, 0x01 ), # AL=0x02, AF=0 CF=1
( 0x08, 0x10 ), # AL=0x02, AF=1 CF=0
( 0x68, 0x11 ), # AL=0x02, AF=1 CF=1
( 0x03, 0x04 ), # AL=0x03, AF=0 CF=0
( 0x63, 0x05 ), # AL=0x03, AF=0 CF=1
( 0x09, 0x14 ), # AL=0x03, AF=1 CF=0
( 0x69, 0x15 ), # AL=0x03, AF=1 CF=1
( 0x04, 0x00 ), # AL=0x04, AF=0 CF=0
( 0x64, 0x01 ), # AL=0x04, AF=0 CF=1
( 0x0a, 0x14 ), # AL=0x04, AF=1 CF=0
( 0x6a, 0x15 ), # AL=0x04, AF=1 CF=1
( 0x05, 0x04 ), # AL=0x05, AF=0 CF=0
( 0x65, 0x05 ), # AL=0x05, AF=0 CF=1
( 0x0b, 0x10 ), # AL=0x05, AF=1 CF=0
( 0x6b, 0x11 ), # AL=0x05, AF=1 CF=1
( 0x06, 0x04 ), # AL=0x06, AF=0 CF=0
( 0x66, 0x05 ), # AL=0x06, AF=0 CF=1
( 0x0c, 0x14 ), # AL=0x06, AF=1 CF=0
( 0x6c, 0x15 ), # AL=0x06, AF=1 CF=1
( 0x07, 0x00 ), # AL=0x07, AF=0 CF=0
( 0x67, 0x01 ), # AL=0x07, AF=0 CF=1
( 0x0d, 0x10 ), # AL=0x07, AF=1 CF=0
( 0x6d, 0x11 ), # AL=0x07, AF=1 CF=1
( 0x08, 0x00 ), # AL=0x08, AF=0 CF=0
( 0x68, 0x01 ), # AL=0x08, AF=0 CF=1
( 0x0e, 0x10 ), # AL=0x08, AF=1 CF=0
( 0x6e, 0x11 ), # AL=0x08, AF=1 CF=1
( 0x09, 0x04 ), # AL=0x09, AF=0 CF=0
( 0x69, 0x05 ), # AL=0x09, AF=0 CF=1
( 0x0f, 0x14 ), # AL=0x09, AF=1 CF=0
( 0x6f, 0x15 ), # AL=0x09, AF=1 CF=1
( 0x10, 0x10 ), # AL=0x0a, AF=0 CF=0
( 0x70, 0x11 ), # AL=0x0a, AF=0 CF=1
( 0x10, 0x10 ), # AL=0x0a, AF=1 CF=0
( 0x70, 0x11 ), # AL=0x0a, AF=1 CF=1
( 0x11, 0x14 ), # AL=0x0b, AF=0 CF=0
( 0x71, 0x15 ), # AL=0x0b, AF=0 CF=1
( 0x11, 0x14 ), # AL=0x0b, AF=1 CF=0
( 0x71, 0x15 ), # AL=0x0b, AF=1 CF=1
( 0x12, 0x14 ), # AL=0x0c, AF=0 CF=0
( 0x72, 0x15 ), # AL=0x0c, AF=0 CF=1
( 0x12, 0x14 ), # AL=0x0c, AF=1 CF=0
( 0x72, 0x15 ), # AL=0x0c, AF=1 CF=1
( 0x13, 0x10 ), # AL=0x0d, AF=0 CF=0
( 0x73, 0x11 ), # AL=0x0d, AF=0 CF=1
( 0x13, 0x10 ), # AL=0x0d, AF=1 CF=0
( 0x73, 0x11 ), # AL=0x0d, AF=1 CF=1
( 0x14, 0x14 ), # AL=0x0e, AF=0 CF=0
( 0x74, 0x15 ), # AL=0x0e, AF=0 CF=1
( 0x14, 0x14 ), # AL=0x0e, AF=1 CF=0
( 0x74, 0x15 ), # AL=0x0e, AF=1 CF=1
( 0x15, 0x10 ), # AL=0x0f, AF=0 CF=0
( 0x75, 0x11 ), # AL=0x0f, AF=0 CF=1
( 0x15, 0x10 ), # AL=0x0f, AF=1 CF=0
( 0x75, 0x11 ), # AL=0x0f, AF=1 CF=1
( 0x10, 0x00 ), # AL=0x10, AF=0 CF=0
( 0x70, 0x01 ), # AL=0x10, AF=0 CF=1
( 0x16, 0x10 ), # AL=0x10, AF=1 CF=0
( 0x76, 0x11 ), # AL=0x10, AF=1 CF=1
( 0x11, 0x04 ), # AL=0x11, AF=0 CF=0
( 0x71, 0x05 ), # AL=0x11, AF=0 CF=1
( 0x17, 0x14 ), # AL=0x11, AF=1 CF=0
( 0x77, 0x15 ), # AL=0x11, AF=1 CF=1
( 0x12, 0x04 ), # AL=0x12, AF=0 CF=0
( 0x72, 0x05 ), # AL=0x12, AF=0 CF=1
( 0x18, 0x14 ), # AL=0x12, AF=1 CF=0
( 0x78, 0x15 ), # AL=0x12, AF=1 CF=1
( 0x13, 0x00 ), # AL=0x13, AF=0 CF=0
( 0x73, 0x01 ), # AL=0x13, AF=0 CF=1
( 0x19, 0x10 ), # AL=0x13, AF=1 CF=0
( 0x79, 0x11 ), # AL=0x13, AF=1 CF=1
( 0x14, 0x04 ), # AL=0x14, AF=0 CF=0
( 0x74, 0x05 ), # AL=0x14, AF=0 CF=1
( 0x1a, 0x10 ), # AL=0x14, AF=1 CF=0
( 0x7a, 0x11 ), # AL=0x14, AF=1 CF=1
( 0x15, 0x00 ), # AL=0x15, AF=0 CF=0
( 0x75, 0x01 ), # AL=0x15, AF=0 CF=1
( 0x1b, 0x14 ), # AL=0x15, AF=1 CF=0
( 0x7b, 0x15 ), # AL=0x15, AF=1 CF=1
( 0x16, 0x00 ), # AL=0x16, AF=0 CF=0
( 0x76, 0x01 ), # AL=0x16, AF=0 CF=1
( 0x1c, 0x10 ), # AL=0x16, AF=1 CF=0
( 0x7c, 0x11 ), # AL=0x16, AF=1 CF=1
( 0x17, 0x04 ), # AL=0x17, AF=0 CF=0
( 0x77, 0x05 ), # AL=0x17, AF=0 CF=1
( 0x1d, 0x14 ), # AL=0x17, AF=1 CF=0
( 0x7d, 0x15 ), # AL=0x17, AF=1 CF=1
( 0x18, 0x04 ), # AL=0x18, AF=0 CF=0
( 0x78, 0x05 ), # AL=0x18, AF=0 CF=1
( 0x1e, 0x14 ), # AL=0x18, AF=1 CF=0
( 0x7e, 0x15 ), # AL=0x18, AF=1 CF=1
( 0x19, 0x00 ), # AL=0x19, AF=0 CF=0
( 0x79, 0x01 ), # AL=0x19, AF=0 CF=1
( 0x1f, 0x10 ), # AL=0x19, AF=1 CF=0
( 0x7f, 0x11 ), # AL=0x19, AF=1 CF=1
( 0x20, 0x10 ), # AL=0x1a, AF=0 CF=0
( 0x80, 0x91 ), # AL=0x1a, AF=0 CF=1
( 0x20, 0x10 ), # AL=0x1a, AF=1 CF=0
( 0x80, 0x91 ), # AL=0x1a, AF=1 CF=1
( 0x21, 0x14 ), # AL=0x1b, AF=0 CF=0
( 0x81, 0x95 ), # AL=0x1b, AF=0 CF=1
( 0x21, 0x14 ), # AL=0x1b, AF=1 CF=0
( 0x81, 0x95 ), # AL=0x1b, AF=1 CF=1
( 0x22, 0x14 ), # AL=0x1c, AF=0 CF=0
( 0x82, 0x95 ), # AL=0x1c, AF=0 CF=1
( 0x22, 0x14 ), # AL=0x1c, AF=1 CF=0
( 0x82, 0x95 ), # AL=0x1c, AF=1 CF=1
( 0x23, 0x10 ), # AL=0x1d, AF=0 CF=0
( 0x83, 0x91 ), # AL=0x1d, AF=0 CF=1
( 0x23, 0x10 ), # AL=0x1d, AF=1 CF=0
( 0x83, 0x91 ), # AL=0x1d, AF=1 CF=1
( 0x24, 0x14 ), # AL=0x1e, AF=0 CF=0
( 0x84, 0x95 ), # AL=0x1e, AF=0 CF=1
( 0x24, 0x14 ), # AL=0x1e, AF=1 CF=0
( 0x84, 0x95 ), # AL=0x1e, AF=1 CF=1
( 0x25, 0x10 ), # AL=0x1f, AF=0 CF=0
( 0x85, 0x91 ), # AL=0x1f, AF=0 CF=1
( 0x25, 0x10 ), # AL=0x1f, AF=1 CF=0
( 0x85, 0x91 | |
# coding: utf-8
# <NAME> (<EMAIL>, twitter @hirax)
# This code is based on Brun0oO's work(MIT License)
# https://github.com/Brun0oO/Pythonista/blob/master/arkit/main.py
# and Char<NAME>'s work.
# https://github.com/scj643/objc_tools/blob/master/objc_tools/scenekit/sk_scene.py
# The following thread will be helpful for understanding how Pythonista can call ARKit.
# https://forum.omz-software.com/topic/4362/solved-first-attempt-to-integrate-arkit-and-first-questions/29
import ui, os, sys, time, math
#from math import *
from enum import IntFlag
from objc_util import *
from objc_tools.scenekit.util import SUPPORTED_FORMATS, LightType, ShadowMode, DebugOptions, RenderingAPI, LightingModel
from objc_tools.scenekit.structures import Vector3, Vector4, Matrix4
from objc_tools.ui.editorview import TabView, tabVC
load_framework('SceneKit')
load_framework('ARKit')
load_framework('SpriteKit')
SCNNode = ObjCClass('SCNNode')
SCNLight = ObjCClass('SCNLight')
SCNSphere = ObjCClass('SCNSphere')
SCNBox = ObjCClass('SCNBox')
SCNCone = ObjCClass('SCNCone')
SCNCapsule = ObjCClass('SCNCapsule')
SCNCylinder = ObjCClass('SCNCylinder')
SCNScene = ObjCClass('SCNScene')
SCNNode = ObjCClass('SCNNode')
SCNLight = ObjCClass('SCNLight')
SCNView = ObjCClass('SCNView')
SCNCamera = ObjCClass('SCNCamera')
UIViewController = ObjCClass('UIViewController')
SCNMaterial = ObjCClass('SCNMaterial')
# ------------ SeneKit View -----------------
class SKView (object):
'''SKView
This object is used for subclassing
'''
def __init__(self):
self._create_objc()
self.attach()
def _create_objc(self):
self._scene_objc = SCNView.alloc().initWithFrame_options_(((0, 0),(100, 100)), ns({'SCNViewOptionPreferredRenderingAPI': 1})).autorelease()
self._scene_objc.setAutoresizingMask_(18) # Fill superview
self._scene_objc.setNeedsDisplayOnBoundsChange_(True) # fill on change
self._scene_ref = None
self._pointOfView_ref = Node(self._scene_objc.pointOfView())
def attach(self):
'''attach
This function is called after __init__
'''
pass
@property
def showsStatistics(self):
return self._scene_objc.showsStatistics()
@showsStatistics.setter
def showsStatistics(self, state):
if type(state) == bool:
self._scene_objc.setShowsStatistics_(state)
else:
raise TypeError('Must be a bool')
@property
def preferredFramesPerSecond(self):
return self._scene_objc.preferredFramesPerSecond()
@preferredFramesPerSecond.setter
def preferredFramesPerSecond(self, value):
self._scene_objc.setPreferredFramesPerSecond_(value)
@property
def allowsCameraControl(self):
return self._scene_objc.allowsCameraControl()
@allowsCameraControl.setter
def allowsCameraControl(self, state):
if type(state) == bool:
self._scene_objc.setAllowsCameraControl_(state)
else:
raise TypeError('Must be a bool')
@property
def scene(self):
if self._scene_ref:
return self._scene_ref
        elif self._scene_objc.scene():
            import warnings
            warnings.warn('The scene does not have a reference')
            return Scene(self._scene_objc.scene())
else:
return None
@scene.setter
def scene(self, value):
if isinstance(value, (Scene)):
self._scene_ref = value
self._scene_objc.setScene_(value._objc)
elif isinstance(value, (ObjCInstance)):
self._scene_ref = Scene(value)
self._scene_objc.setScene_(value)
else:
raise TypeError("Not able to set scene")
@property
def debugOptions(self):
return DebugOptions(self._scene_objc.debugOptions())
@debugOptions.setter
def debugOptions(self, value):
if isinstance(value, (DebugOptions)):
self._scene_objc.setDebugOptions_(value.value)
else:
self._scene_objc.setDebugOptions_(int(value))
@property
def pointOfView(self):
if self._scene_objc.pointOfView().ptr != self._pointOfView_ref._objc.ptr:
self._pointOfView_ref = Node(self._scene_objc.pointOfView())
return self._pointOfView_ref
def setPointOfView(self, value, animate = True):
if isinstance(value, (ObjCInstance)):
self._pointOfView_ref = Node(value)
self._scene_objc.setPointOfView_animate_(value, animate)
if isinstance(value, (Node)):
self._pointOfView_ref = value
self._scene_objc.setPointOfView_animate_(value._objc, animate)
def stop(self):
self._scene_objc.stop_(None)
def pause(self):
self._scene_objc.pause_(None)
def play(self):
self._scene_objc.play_(None)
#--------Scene View---------
class SceneView (SKView):
def attach(self):
self.uiView = ui.View()
self.present = self.uiView.present
ObjCInstance(self.uiView).addSubview_(self._scene_objc)
#--------Scene Tab---------
class SceneTab (SceneView, TabView):
def __init__(self):
SceneView.__init__(self)
TabView.__init__(self)
@on_main_thread
def makeSelf(self):
self.name = "SceneKit"
@on_main_thread
def customVC(self):
return create_objc_class(
"CustomViewController",
UIViewController,
methods = [],
protocols = ["OMTabContent"],
).new()
@on_main_thread
def show(self):
self.newVC.View = ObjCInstance(self.uiView)
self.newVC.title = self.name
self.newVC.navigationItem().rightBarButtonItems = self.right_button_items
tabVC.addTabWithViewController_(self.newVC)
#--------Scene---------
class Scene (object):
def __init__(self, scene = None):
if scene:
            self._objc = scene
else:
self._objc = SCNScene.scene()
        self._node_ref = Node(self._objc.rootNode())
@property
def playbackSpeed(self):
return self._objc.playbackSpeed()
@playbackSpeed.setter
def playbackSpeed(self, value):
self._objc.setPlaybackSpeed_(value)
@property
def framerate(self):
return self._objc.frameRate()
@framerate.setter
def framerate(self, value):
self._objc.setFrameRate_(value)
@property
def fogDensityExponent(self):
'''
Controls the attenuation between the start and end fog distances.
0 means a constant fog, 1 a linear fog and 2 a quadratic fog,
but any positive value will work.
'''
return self._objc.fogDensityExponent()
@fogDensityExponent.setter
def fogDensityExponent(self, value):
self._objc.setFogDensityExponent_(value)
@property
def fogStartDistance(self):
return self._objc.fogStartDistance()
@fogStartDistance.setter
def fogStartDistance(self, value):
self._objc.setFogStartDistance_(value)
@property
def fogEndDistance(self):
return self._objc.fogEndDistance()
@fogEndDistance.setter
def fogEndDistance(self, value):
self._objc.setFogEndDistance_(value)
@property
def paused(self):
return self._objc.isPaused()
@paused.setter
def paused(self, value):
self._objc.setPaused_(value)
@property
def node(self):
        if self._node_ref._objc.ptr == self._objc.rootNode().ptr: # check so we don't use more memory
            return self._node_ref
        else:
            self._node_ref = Node(self._objc.rootNode())
return self._node_ref
def removeAllParticleSystems(self):
self._objc.removeAllParticleSystems()
def save_to_file(self, file_name):
        if SUPPORTED_FORMATS.match(file_name.rsplit('.', 1)[-1]):
            options = ns({'SCNSceneExportDestinationURL': nsurl(file_name)})
            url = nsurl(file_name)
            return self._objc.writeToURL_options_(url, options)
else:
raise TypeError('Not a supported export type')
def __repr__(self):
return '<Scene <Framerate: {}, node: {}>>'.format(self.framerate, self.node)
# ------ Node ----------
class Node (object):
def __init__(self, objc = None):
self._light = None
self._geometry = None
self._camera = None
self._child_ref = []
if objc:
self._objc = objc
if self._objc.light():
self._light = Light(objc=self._objc.light())
if self._objc.geometry():
self._geometry = Geometry(self._objc.geometry())
if self._objc.camera():
self._camera = Camera(self._objc.camera())
else:
self._objc = SCNNode.node()
@property
def childNodes(self):
return self._child_ref
@property
def name(self):
if self._objc.name():
return str(self._objc.name())
else:
return None
@name.setter
def name(self, value):
self._objc.setName_(value)
@property
def scale(self):
return self._objc.scale()
@scale.setter
def scale(self, value):
self._objc.setScale_(value)
@property
def transform(self):
        '''transform
        Note: with this you cannot set properties directly
'''
return self._objc.transform(argtypes = [], restype = Matrix4)
@transform.setter
def transform(self, value):
self._objc.setTransform_(value, argtypes = [Matrix4], restype = None)
@property
def position(self):
return self._objc.position(argtypes = [], restype = Vector3)
@position.setter
def position(self, value):
self._objc.setPosition_(value, argtypes = [Vector3], restype = None)
@property
def rotation(self):
return self._objc.rotation()
@rotation.setter
def rotation(self, value):
self._objc.setRotation_(value)
@property
def light(self):
return self._light
@light.setter
def light(self, value):
if isinstance(value, (ObjCInstance)):
self._objc.setLight_(value)
self._light = Light(value)
if isinstance(value, (Light)):
self._objc.setLight_(value._objc)
self._light = value
if value == None:
self._objc.setLight_(value)
self._light = value
@property
def geometry(self):
return self._geometry
@geometry.setter
def geometry(self, value):
if isinstance(value, (ObjCInstance)):
self._objc.setGeometry_(value)
self._geometry = Geometry(value)
        if isinstance(value, (Geometry)):
            self._objc.setGeometry_(value._objc)
            self._geometry = value
        if value == None:
            self._objc.setGeometry_(value)
            self._geometry = value
@property
def camera(self):
return self._camera
@camera.setter
def camera(self, value):
if isinstance(value, (ObjCInstance)):
self._objc.setCamera_(value)
self._camera = Camera(value)
if isinstance(value, (Camera)):
self._objc.setCamera_(value._objc)
self._camera = value
if value == None:
self._objc.setCamera_(value)
self._camera = value
def clone(self):
'''clone
The copy is recursive: every child node will be cloned, too.
The copied nodes will share their attached objects (light, geometry, camera, ...) with the original instances
'''
clone = self._objc.clone()
return Node(clone)
def flattenedClone(self):
        '''flattenedClone
A copy of the node with all geometry combined
'''
clone = self._objc.flattenedClone()
return Node(clone)
def addChild(self, value):
if isinstance(value, (ObjCInstance)):
if self._objc.canAddChildNode_(value):
self._objc.addChildNode_(value)
self._child_ref += [Node(value)]
if isinstance(value, (Node)):
if self._objc.canAddChildNode_(value._objc) and value not in self._child_ref:
self._objc.addChildNode_(value._objc)
self._child_ref += [value]
#--------- Light ------------
class Light (object):
def __init__(self, kind = LightType.Omni, casts_shadow = True, shadow_sample_count = 1000, objc = None):
if objc:
self._objc = objc
else:
self._objc = SCNLight.light()
self.type = kind
self.castsShadow = casts_shadow
self.shadowSampleCount = shadow_sample_count
@property
def type(self):
return self._objc.type()
@type.setter
def type(self, kind):
self._objc.setType_(kind)
@property
def castsShadow(self):
return self._objc.castsShadow()
@castsShadow.setter
def castsShadow(self, value):
self._objc.setCastsShadow_(value)
@property
def intensity(self):
return self._objc.intensity()
@intensity.setter
def intensity(self, value):
self._objc.setIntensity_(value)
@property
def shadowSampleCount(self):
return self._objc.shadowSampleCount()
@shadowSampleCount.setter
def shadowSampleCount(self, value):
self._objc.setShadowSampleCount_(value)
@property
def name(self):
if self._objc.name():
return str(self._objc.name())
else:
return None
@name.setter
def name(self, value):
self._objc.setName_(value)
@property
def color(self):
return self._objc.color()
@color.setter
def color(self, value):
self._objc.setColor_(value)
@property
def shadowColor(self):
        return self._objc.shadowColor()
@shadowColor.setter
def shadowColor(self, value):
self._objc.setShadowColor_(value)
@property
def shadowRadius(self):
return self._objc.shadowRadius()
@shadowRadius.setter
def shadowRadius(self, value):
        self._objc.setShadowRadius_(value)
@property
def shadowMapSize(self):
return self._objc.shadowMapSize()
@shadowMapSize.setter
def shadowMapSize(self, value):
        self._objc.setShadowMapSize_(value)
#--------- Camera ------------
class Camera (object):
def __init__(self, objc = None):
if objc:
self._objc = objc
else:
self._objc = SCNCamera.camera()
@property
def name(self):
if self._objc.name():
return str(self._objc.name())
else:
return None
@name.setter
def name(self, value):
self._objc.setName_(value)
@property
def xFov(self):
'''Setting to 0 resets it to normal'''
return self._objc.xFov()
@xFov.setter
def xFov(self, value):
self._objc.setXFov_(value)
@property
def yFov(self):
'''Setting to 0 resets it to normal'''
return self._objc.yFov()
@yFov.setter
def yFov(self, value):
self._objc.setYFov_(value)
# ---------- geometry ----------------
class Geometry (object):
def __init__(self, objc = None):
self._objc = objc
@property
def name(self):
if self._objc.name():
return str(self._objc.name())
else:
return None
@name.setter
def name(self, value):
self._objc.setName_(value)
@property
def material(self):
return Material(self._objc.material())
# --------- Material ------------
class Material (object):
def __init__(self, objc = None):
self._objc = objc
@property
def lightingModel(self):
return str(self._objc.lightingModelName())
@lightingModel.setter
def lightingModel(self, value):
if type(value) == str:
self._objc.setLightingModelName_(value)
else:
print('not a valid type')
def load_scene(file):
    url = nsurl(file)
s = SCNScene.sceneWithURL_options_(url, ns({}))
return Scene(s)
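# A minimal, untested usage sketch of the wrappers above (assumption: running inside
# Pythonista with objc_tools available; the node setup and names below are illustrative).
def _demo_scenekit_wrappers():
    view = SceneView()
    view.scene = Scene()
    camera_node = Node()
    camera_node.camera = Camera()
    camera_node.position = Vector3(0.0, 0.0, 3.0)
    view.scene.node.addChild(camera_node)
    view.setPointOfView(camera_node)
    view.present()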
#---------------
# Some 'constants' used by ARkit
class ARWorldAlignment(IntFlag):
ARWorldAlignmentGravity = 0
ARWorldAlignmentGravityAndHeading = 1
ARWorldAlignmentCamera = 2
class ARPlaneDetection(IntFlag):
ARPlaneDetectionNone = 0
ARPlaneDetectionHorizontal = 1 << 0
ARPlaneDetectionVertical = 1 << 1
# Work in progress here, Brun0oO is deciphering the ARKit constants...
#class ARSCNDebugOption(IntFlag):
# ARSCNDebugOptionNone = 0
# ARSCNDebugOptionShowWorldOrigin = int("ffffffff80000000", 16)
# ARSCNDebugOptionShowFeaturePoints = int("ffffffff40000000", 16)
class ARSessionRunOptions(IntFlag):
ARSessionRunOptionsNone = 0
ARSessionRunOptionResetTracking = 1 << 0
ARSessionRunOptionRemoveExistingAnchors = 1 << 1
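# Illustrative sketch (the variable name is hypothetical): the IntFlag values above are
# meant to be OR-combined, e.g. when (re)running an ARSession.
_sample_run_options = (ARSessionRunOptions.ARSessionRunOptionResetTracking
                       | ARSessionRunOptions.ARSessionRunOptionRemoveExistingAnchors)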
NSError = ObjCClass('NSError')
SCNScene = ObjCClass('SCNScene')
ARSCNView = ObjCClass('ARSCNView')
ARWorldTrackingConfiguration = ObjCClass('ARWorldTrackingConfiguration')
ARSession = ObjCClass('ARSession')
UIViewController = ObjCClass('UIViewController')
ARPlaneAnchor = ObjCClass('ARPlaneAnchor')
sceneview = None
#========= setup an initial scene ===================
def createSampleScene():
global scene
global root_node
global cube_node
scene = SCNScene.scene()
root_node = scene.rootNode()
| |
import sys
import datetime
import reportconfig
import projectmetrics
import os
import numpy as np
import matplotlib
import matplotlib.dates as mdates
# check for headless executions
if "DISPLAY" not in os.environ:
if os.system('python -c "import matplotlib.pyplot as plt; plt.figure()"') != 0:
print("INFO: Lack of display should generate an expected ImportError. Changing MatPlotLib backend.")
matplotlib.use('Agg')
import matplotlib.pyplot as plt
else:
import matplotlib.pyplot as plt
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA
plt.style.use('ggplot')
# plt.style.use('fivethirtyeight')
# plt.style.use('classic')
# plt.style.use('seaborn')
YEARS = mdates.YearLocator() # every year
MONTHS = mdates.MonthLocator() # every month
WEEKDAYS = mdates.WeekdayLocator(byweekday=(MO, TU, WE, TH, FR)) # every weekday
WEEKFINISH = mdates.WeekdayLocator(byweekday=SA) # every week start
YEARS_FORMAT = mdates.DateFormatter('%Y')
MONTHS_FORMAT = mdates.DateFormatter('%b %Y')
# DEFAULT_CMAP = "Set2"
# DEFAULT_CMAP = "Set3"
# DEFAULT_CMAP = "prism"
DEFAULT_CMAP = "tab10"
SECONDARY_CMAP = "gist_ncar"
DEFAULT_TREND_RANGE = [60, 30, 14, 7]
DEFAULT_SLOC_TYPES = ["HAND", "AC", "XML"]
DEFAULT_COMP_TYPES = ["Channels", "Commands", "Events", "Parameters", "Total Ports"]
DEFAULT_ISSUE_LABELS = ["Bug", "Req. Change", "Enhancement", "Process", "Issue"]
DEFAULT_BAR_WIDTH = 0.8
I = 'issues'
S = 'sloc'
C = 'comp'
class GitHubMetricsReport:
def __init__(self, args):
if "--config" in args:
config_file = args[args.index("--config") + 1]
else:
config_file = args[2]
self.config_opts = reportconfig.ReportConfiguration(config_file)
if "--username" in args:
self.config_opts.username = args[args.index("--username") + 1]
if "--git-api-key" in args:
self.config_opts.git_api_key = args[args.index("--git-api-key") + 1]
if "--zen-api-key" in args:
self.config_opts.zen_api_key = args[args.index("--zen-api-key") + 1]
if "--show" in args:
self.show = True
else:
self.show = False
self.metrics = projectmetrics.ProjectMetrics(None, config_opts=self.config_opts)
def create_graph_colors_list(data_types):
if len(data_types) > 20:
cmap = plt.get_cmap(SECONDARY_CMAP)
colors_list = cmap(np.linspace(0., 1., len(data_types)))
else:
cmap = plt.get_cmap(DEFAULT_CMAP)
colors_list = cmap(np.arange(len(data_types)))
return colors_list
def format_label_chart(fig, axs, x_data):
try:
for ax in axs:
ax.legend()
ax.set_xticks(np.array(list(range(len(x_data)))))
ax.set_xticklabels(x_data, rotation=90)
x_lim = ax.get_xlim()
ax.set_xlim(-1, len(x_data))
y_lim = ax.get_ylim()
ax.set_ylim(y_lim[0], 1.05 * y_lim[1])
except TypeError:
axs.legend()
axs.set_xticks(np.array(list(range(len(x_data)))))
axs.set_xticklabels(x_data, rotation=90)
x_lim = axs.get_xlim()
axs.set_xlim(-1, len(x_data))
y_lim = axs.get_ylim()
axs.set_ylim(y_lim[0], 1.05*y_lim[1])
fig.tight_layout()
return fig
def format_date_chart(fig, axs, x_data):
try:
for ax in axs:
ax.xaxis_date()
ax.legend()
ax.xaxis.set_major_locator(MONTHS)
ax.xaxis.set_major_formatter(MONTHS_FORMAT)
y_lim = ax.get_ylim()
ax.set_ylim(y_lim[0], 1.05 * y_lim[1])
# if len(data_x) <= 120:
# ax.xaxis.set_minor_locator(WEEKDAYS)
# else:
# ax.xaxis.set_minor_locator(WEEKFINISH)
except TypeError:
axs.xaxis_date()
axs.legend()
axs.xaxis.set_major_locator(MONTHS)
axs.xaxis.set_major_formatter(MONTHS_FORMAT)
y_lim = axs.get_ylim()
axs.set_ylim(y_lim[0], 1.05*y_lim[1])
# if len(data_x) <= 120:
# axs.xaxis.set_minor_locator(WEEKDAYS)
# else:
# axs.xaxis.set_minor_locator(WEEKFINISH)
fig.autofmt_xdate()
fig.tight_layout()
return fig
def finalize_figure(fig, title, directory=None, show=False):
if show:
plt.show()
plt.close(fig)
return
if directory is not None:
output_file = directory + title + ".png"
output_file = output_file.replace(" ", "_")
plt.savefig(output_file)
plt.close(fig)
return output_file
def generate_table(table_columns, data, title="", directory=None, show=False):
fig, ax = plt.subplots(1, 1, figsize=(10, (len(data) + 2) / 4 + 1))
# fig.patch.set_visible(False)
ax.axis('off')
table = ax.table(cellText=data, colLabels=table_columns, loc='center')
for index, header in enumerate(table_columns):
table.auto_set_column_width(index)
table.auto_set_font_size(True)
ax.set_title(title)
fig.tight_layout()
output_file = finalize_figure(fig, title, directory, show)
return output_file
def generate_line_plot(x_data, y_data, filled=None, data_labels=None, title="", directory=None, show=False,
date_plot=False, stacked=False):
if data_labels is None:
data_labels = list(y_data.keys())
if date_plot:
x_index = x_data
else:
x_index = np.array(list(range(len(x_data))))
y_offset = np.zeros((len(x_index),))
colors = create_graph_colors_list(data_labels)
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
for index, label in enumerate(data_labels):
if isinstance(x_data, dict):
if stacked:
raise ValueError("Stacked line charts require shared x_data basis.")
x = x_data[label]
y_offset = np.zeros((len(x),))
else:
x = x_index
y = y_data[label]
if stacked:
y += y_offset
if date_plot:
ax.plot_date(x, y, '-', color=colors[index], label=label)
else:
ax.plot(x, y, '-', color=colors[index], label=label)
if filled and label in filled:
ax.fill_between(x, y, y_offset, color=colors[index], alpha=0.4)
if stacked:
y_offset += y
ax.set_title(title)
# format the ticks
if date_plot:
format_date_chart(fig, ax, x_data)
else:
format_label_chart(fig, ax, x_data)
# handles, labels = _sort_legend(ax)
# ax.legend(handles, labels)
output_file = finalize_figure(fig, title, directory, show)
return output_file
def _generate_complicated_bar_plot(x_data, y_data, data_labels=None, title="", directory=None, show=False,
date_plot=False, split=False, adjacent=False, stacked=False):
if data_labels is None:
data_labels = list(y_data.keys())
bar_width = DEFAULT_BAR_WIDTH
colors = create_graph_colors_list(data_labels)
if date_plot:
# TODO consider re-enabling; expand chart when range > 60 days
# sorted_x_data = sorted(x_data)
# fig_x = max(10., ((sorted_x_data[-1] - sorted_x_data[0]).days + 1) / 6.)
fig_x = 10
else:
# expand chart when components > 25
fig_x = max(10., len(x_data) / 2.5)
if split and len(data_labels) > 1:
fig, axs = plt.subplots(len(data_labels), 1, figsize=(fig_x, 5 * len(data_labels)))
ax = axs[0]
else:
axs = []
fig, ax = plt.subplots(1, 1, figsize=(fig_x, 10))
if date_plot:
x = x_data
else:
x = np.array(list(range(len(x_data))))
if adjacent:
bar_width /= len(data_labels)
x = x - (len(data_labels) - 1) * bar_width / 2
y_offset = np.zeros((len(x),))
for index, label in enumerate(data_labels):
if isinstance(x_data, dict):
if stacked:
raise ValueError("Stacked line charts require shared x_data basis.")
x = x_data[label]
y_offset = np.zeros((len(x),))
if split and len(data_labels) > 1:
ax = axs[index]
y = y_data[label]
bars = ax.bar(x, y, width=bar_width, bottom=y_offset, color=colors[index], label=label)
if not date_plot:
if adjacent:
x = x + bar_width
for position, bar in enumerate(bars):
height = bar.get_height()
if height != 0:
ax.text(bar.get_x() + bar.get_width() / 2., height + y_offset[position], " {} ".format(height),
ha='center', va='bottom')
# ha='center', va='bottom', rotation=90)
if stacked:
y_offset = y_offset + y_data[label]
if index == 0:
ax.set_title(title)
if split:
ax = axs
if date_plot:
format_date_chart(fig, ax, x_data)
else:
format_label_chart(fig, ax, x_data)
output_file = finalize_figure(fig, title, directory, show)
return output_file
def generate_stacked_bar_plot(x_data, y_data, data_labels=None, title="", directory=None, show=False, date_plot=False):
return _generate_complicated_bar_plot(x_data, y_data, data_labels, title, directory, show, date_plot, stacked=True)
def generate_split_bar_plot(x_data, y_data, data_labels=None, title="", directory=None, show=False, date_plot=False):
return _generate_complicated_bar_plot(x_data, y_data, data_labels, title, directory, show, date_plot, split=True)
def generate_adjacent_bar_plot(x_data, y_data, data_labels=None, title="", directory=None, show=False, date_plot=False):
return _generate_complicated_bar_plot(x_data, y_data, data_labels, title, directory, show, date_plot, adjacent=True)
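# Illustrative only: a sketch of how the three bar-plot wrappers above might be called
# with the same data; the component names and counts below are made up, not real metrics.
def _demo_bar_plots(show=True):
    components = ["comp_a", "comp_b", "comp_c"]
    sloc = {"HAND": np.array([120, 80, 45]), "AC": np.array([300, 150, 90])}
    generate_stacked_bar_plot(components, sloc, title="SLOC stacked", show=show)
    generate_split_bar_plot(components, sloc, title="SLOC split", show=show)
    generate_adjacent_bar_plot(components, sloc, title="SLOC adjacent", show=show)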
def generate_pie_plot():
raise NotImplementedError()
def generate_stacked_pie_plot():
raise NotImplementedError()
def table_project_summary(reporter, categories=None, period=None, show=False, directory=None, title="Project Summary"):
metrics = reporter.metrics
table_columns = [""] + ["Current Value"]
table_data = []
# TODO evaluate issue label filter approach
# issue label counts, starting with overall
if categories[I]:
total = metrics.issue_totals[metrics.OV][metrics.NEW][-1] - metrics.issue_totals[metrics.OV][metrics.DONE][-1]
table_data.append([metrics.OV + " issues", total])
for key in categories[I]:
if key == metrics.OV:
continue
total = metrics.issue_totals[key][metrics.NEW][-1] - metrics.issue_totals[key][metrics.DONE][-1]
table_data.append([key + " issues", total])
# sloc
for category in categories[S]:
total = 0
for key in (list(metrics.sloc_data.keys())):
total += metrics.sloc_data[key].get(category) \
if metrics.sloc_data[key].get(category) is not None else 0
table_data.append([category, total])
# component counts
for comp in categories[C]:
total = 0
for key in list(metrics.comp_data.keys()):
total += metrics.comp_data[key].get(comp) \
if metrics.comp_data[key].get(comp) is not None else 0
table_data.append([comp, total])
output_file = generate_table(table_columns, table_data, title, directory, show)
return output_file
def table_issue_label_summary(reporter, categories=None, period=None, show=False, directory=None, title="Issue Label Summary"):
categories = {I: categories[I], S: [], C: []}
return table_project_summary(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_sloc_summary(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC Summary"):
categories = {I: [], S: categories[S], C: []}
return table_project_summary(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_comp_summary(reporter, categories=None, period=None, show=False, directory=None, title="Component Structure Summary"):
categories = {I: [], S: [], C: categories[C]}
return table_project_summary(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_project_diffs(reporter, categories=None, period=None, show=False, directory=None, title="Project Changes"):
metrics = reporter.metrics
table_columns = [""] + ["%d Day Change" % x for x in period]
table_data = []
# issue label diffs, starting with overall
if categories[I]:
# TODO evaluate issue label filter approach
label_totals = metrics.issue_totals[metrics.OV]
table_data += [[metrics.OV] + ["+" + str(label_totals[metrics.NEW][-1] - label_totals[metrics.NEW][-x]) +
" / -" + str(label_totals[metrics.DONE][-1] - label_totals[metrics.DONE][-x])
if x <= len(metrics.issue_dates) else "" for x in period]]
for key in categories[I]:
if key == metrics.OV:
continue
label_totals = metrics.issue_totals[key]
row = [key] + ["+" + str(label_totals[metrics.NEW][-1] - label_totals[metrics.NEW][-x]) +
" / -" + str(label_totals[metrics.DONE][-1] - label_totals[metrics.DONE][-x])
if x <= len(metrics.issue_dates) else "" for x in period]
table_data.append(row)
# manual sloc diffs
if categories[S]:
dates = metrics.sloc_totals[metrics.DATE]
for key in categories[S]:
if key == metrics.DATE:
continue
label_totals = metrics.sloc_totals.get(key)
if label_totals is None:
continue
row = [key] + [str(label_totals[-1] - label_totals[-x])
if x <= len(dates) else "" for x in period]
for index, value in enumerate(row):
if index == 0:
continue
if value and int(value) >= 0:
row[index] = '+' + value
table_data.append(row)
# component counts
if categories[C]:
dates = metrics.comp_totals[metrics.DATE]
for key in categories[C]:
if key == metrics.DATE:
continue
label_totals = metrics.comp_totals.get(key)
if label_totals is None:
continue
row = [key] + [str(label_totals[-1] - label_totals[-x])
if x <= len(dates) else "" for x in period]
for index, value in enumerate(row):
if index == 0:
continue
if value and int(value) >= 0:
row[index] = '+' + value
table_data.append(row)
output_file = generate_table(table_columns, table_data, title, directory, show)
return output_file
def table_issue_label_diffs(reporter, categories=None, period=None, show=False, directory=None, | |
"Q25228": {
"description": "British Overseas Territory in the Caribbean",
"label": "Anguilla"
},
"Q25279": {
"description": "island country in the Caribbean, part of the Kingdom of the Netherlands",
"label": "Curaçao"
},
"Q25305": {
"description": "British overseas territory in the Caribbean",
"label": "British Virgin Islands"
},
"Q258": {
"description": "sovereign state in Southern Africa",
"label": "South Africa"
},
"Q262": {
"description": "sovereign country in North Africa",
"label": "Algeria"
},
"Q26273": {
"description": "country on the Caribbean island of Saint Martin, part of the Kingdom of the Netherlands",
"label": "Sint Maarten"
},
"Q265": {
"description": "sovereign state in Central Asia",
"label": "Uzbekistan"
},
"Q26988": {
"description": "state in the South Pacific Ocean",
"label": "Cook Islands"
},
"Q27": {
"description": "sovereign state in northwestern Europe covering five-sixths of the island of Ireland",
"label": "Ireland"
},
"Q27275": {
"description": "core region of the Asian continent",
"label": "Central Asia"
},
"Q28": {
"description": "country in Central Europe",
"label": "Hungary"
},
"Q29": {
"description": "country in southwestern Europe",
"label": "Spain"
},
"Q298": {
"description": "sovereign state in South America",
"label": "Chile"
},
"Q29999": {
"description": "sovereign state, constitutional monarchy",
"label": "Kingdom of the Netherlands"
},
"Q30": {
"description": "sovereign state in North America",
"label": "United States of America"
},
"Q30971": {
"description": "French overseas country in the Southern Pacific ocean",
"label": "French Polynesia"
},
"Q31": {
"description": "country in western Europe",
"label": "Belgium"
},
"Q3111454": {
"description": "Vietnamese monarchs that ruled from 1226 to 1400.",
"label": "Tran dynasty"
},
"Q32": {
"description": "country in Western Europe",
"label": "Luxembourg"
},
"Q33": {
"description": "country in northern Europe",
"label": "Finland"
},
"Q334": {
"description": "sovereign city-state in Southeast Asia",
"label": "Singapore"
},
"Q33788": {
"description": "special collectivity of France in the southwest Pacific Ocean",
"label": "New Caledonia"
},
"Q33946": {
"description": "sovereign state in Central Europe (1918-1992)",
"label": "Czechoslovakia"
},
"Q34": {
"description": "sovereign state in northern Europe",
"label": "Sweden"
},
"Q34020": {
"description": "island country in the South Pacific Ocean",
"label": "Niue"
},
"Q347": {
"description": "country in Central Europe",
"label": "Liechtenstein"
},
"Q35": {
"description": "country in northern Europe",
"label": "Denmark"
},
"Q36": {
"description": "country in Central Europe",
"label": "Poland"
},
"Q37": {
"description": "sovereign state in northeastern Europe",
"label": "Lithuania"
},
"Q3769": {
"description": "overseas French department in the Guianas region",
"label": "French Guiana"
},
"Q38": {
"description": "country in Southern Europe",
"label": "Italy"
},
"Q39": {
"description": "federal state in Western Europe",
"label": "Switzerland"
},
"Q392770": {
"description": "inter-governmental organization",
"label": "Organisation of Eastern Caribbean States"
},
"Q398": {
"description": "sovereign state in Southwest Asia",
"label": "Bahrain"
},
"Q399": {
"description": "sovereign state in the South Caucasus region of Eurasia",
"label": "Armenia"
},
"Q40": {
"description": "country in Central Europe",
"label": "Austria"
},
"Q403": {
"description": "country in southeastern Europe",
"label": "Serbia"
},
"Q407199": {
"description": "West Bank, excluding East Jerusalem annexed to the State of Israel, in the status of belligerent occupation, supported by international public law and the High Court of Justice of Israel",
"label": "Palestinian territories"
},
"Q408": {
"description": "country in the Southern Hemisphere",
"label": "Australia"
},
"Q41": {
"description": "country in southeastern Europe",
"label": "Greece"
},
"Q414": {
"description": "sovereign state in South America",
"label": "Argentina"
},
"Q419": {
"description": "sovereign state in South America",
"label": "Peru"
},
"Q423": {
"description": "sovereign state in East Asia",
"label": "North Korea"
},
"Q42314": {
"description": "archipelago in the English Channel",
"label": "Channel Islands"
},
"Q424": {
"description": "sovereign state in Southeast Asia",
"label": "Cambodia"
},
"Q43": {
"description": "sovereign state straddling Southeastern Europe and Western Asia",
"label": "Turkey"
},
"Q45": {
"description": "country in southwestern Europe",
"label": "Portugal"
},
"Q458": {
"description": "economic and political union of 27 states mostly located in Europe",
"label": "European Union"
},
"Q4628": {
"description": "autonomous constituent country of the Kingdom of Denmark",
"label": "Faroe Islands"
},
"Q49": {
"description": "continent on the Earth's northwestern quadrant",
"label": "North America"
},
"Q574": {
"description": "sovereign state situated on several islands in Southeast Asia",
"label": "East Timor"
},
"Q5785": {
"description": "British Overseas Territory in the Caribbean",
"label": "Cayman Islands"
},
"Q657": {
"description": "sovereign state in central Africa",
"label": "Chad"
},
"Q664": {
"description": "sovereign state in Oceania, situated on two main and around 600 smaller islands in the southwestern Pacific Ocean",
"label": "New Zealand"
},
"Q664609": {
"description": "region to the center-east of America composed of many islands / coastal regions surrounding the Caribbean Sea",
"label": "Caribbean"
},
"Q668": {
"description": "sovereign state in South Asia",
"label": "India"
},
"Q672": {
"description": "island sovereign state in Oceania",
"label": "Tuvalu"
},
"Q678": {
"description": "sovereign state in Oceania, situated on an archipelago",
"label": "Tonga"
},
"Q683": {
"description": "sovereign state made up of six islands in the Pacific Ocean",
"label": "Samoa"
},
"Q685": {
"description": "island sovereign state in Oceania",
"label": "Solomon Islands"
},
"Q686": {
"description": "sovereign state situated on an archipelago in the South Pacific Ocean",
"label": "Vanuatu"
},
"Q691": {
"description": "island sovereign state in Oceania",
"label": "Papua New Guinea"
},
"Q695": {
"description": "island sovereign state in Oceania",
"label": "Palau"
},
"Q697": {
"description": "island sovereign state in Oceania",
"label": "Nauru"
},
"Q702": {
"description": "island sovereign state in Oceania",
"label": "Federated States of Micronesia"
},
"Q709": {
"description": "island sovereign state in Oceania",
"label": "Marshall Islands"
},
"Q710": {
"description": "island sovereign state in the central Pacific Ocean",
"label": "Kiribati"
},
"Q711": {
"description": "sovereign state in East Asia",
"label": "Mongolia"
},
"Q712": {
"description": "island sovereign state in Oceania",
"label": "Fiji"
},
"Q717": {
"description": "sovereign state in northern South America",
"label": "Venezuela"
},
"Q7184": {
"description": "intergovernmental military alliance of Western states",
"label": "NATO"
},
"Q730": {
"description": "sovereign state in South America",
"label": "Suriname"
},
"Q733": {
"description": "sovereign state in South America",
"label": "Paraguay"
},
"Q734": {
"description": "sovereign state in South America",
"label": "Guyana"
},
"Q736": {
"description": "sovereign state in South America",
"label": "Ecuador"
},
"Q739": {
"description": "sovereign state in South America",
"label": "Colombia"
},
"Q750": {
"description": "sovereign state in South America",
"label": "Bolivia"
},
"Q752401": {
"description": "list of countries that exhibits the lowest indicators of socioeconomic development",
"label": "Least Developed Countries"
},
"Q754": {
"description": "island sovereign state in the Caribbean Sea",
"label": "Trinidad and Tobago"
},
"Q756617": {
"description": "Denmark proper and its two autonomous territories: the Faroe Islands and Greenland",
"label": "Danish Realm"
},
"Q757": {
"description": "island sovereign state in the Caribbean Sea",
"label": "Saint Vincent and the Grenadines"
},
"Q760": {
"description": "island sovereign state in the Caribbean Sea",
"label": "Saint Lucia"
},
"Q763": {
"description": "island sovereign state in the Caribbean Sea",
"label": "Saint Kitts and Nevis"
},
"Q766": {
"description": "island sovereign state in the Caribbean Sea",
"label": "Jamaica"
},
"Q769": {
"description": "island sovereign state in the Caribbean Sea",
"label": "Grenada"
},
"Q77": {
"description": "sovereign state in South America",
"label": "Uruguay"
},
"Q771405": {
"description": "southern region of Asia",
"label": "South Asia"
},
"Q774": {
"description": "sovereign state in Central America",
"label": "Guatemala"
},
"Q778": {
"description": "island sovereign state in the West Indies",
"label": "The Bahamas"
},
"Q781": {
"description": "island sovereign state in the Caribbean Sea",
"label": "Antigua and Barbuda"
},
"Q783": {
"description": "sovereign state in Central America",
"label": "Honduras"
},
"Q784": {
"description": "island sovereign state in the Caribbean Sea",
"label": "Dominica"
},
"Q785": {
"description": "British Crown dependency in the Channel Islands",
"label": "Jersey"
},
"Q786": {
"description": "island sovereign state in the Caribbean Sea",
"label": "Dominican Republic"
},
"Q79": {
"description": "sovereign state in North Africa and Asia",
"label": "Egypt"
},
"Q790": {
"description": "island sovereign state in the Caribbean Sea",
"label": "Haiti"
},
"Q792": {
"description": "sovereign state in Central America",
"label": "El Salvador"
},
"Q794": {
| |
self.m_dist2SL, self.m_dh, self.m_dt)
else:
return '(%f, %f)' % (self.m_dh, self.m_dt)
def loadTide(self, dt, dh, data, dataDir, dilution):
self.m_dt = dt
self.m_dh = dh
self.m_tideDta = {}
for i, j, a, in data:
self.m_tideDta.setdefault(i, [])
self.m_tideDta[i].append( (j,a) )
self.m_dataDir = dataDir
self.m_dilution = dilution
LOGGER.trace('OverflowPointOneTide.loadTide: dt=%s dh=%s self=%s' % (self.m_dt, self.m_dh, self))
def loadPath(self, dt, dh, data, pathDir, dilution):
assert self.m_dt == dt
assert self.m_dh == dh
self.m_pathDta = {}
for i, j, md5, dd in data:
self.m_pathDta.setdefault(i, [])
self.m_pathDta[i].append( (j,md5,bool(dd)) )
self.m_pathDirs = [pathDir]
self.m_dilution = dilution
LOGGER.trace('OverflowPointOneTide.loadPath: dt=%s dh=%s self=%s' % (self.m_dt, self.m_dh, self))
def checkPathFiles(self):
"""
Debug code:
Check that all the path files exist
"""
for ix,dta in self.m_pathDta.items():
for iy,md5,dd in dta:
fname = 'path-%s.pkl' % (md5)
found = False
for p in self.m_pathDirs:
fullPath = os.path.join(p, fname)
if os.path.isfile(fullPath):
found = True
if not found:
LOGGER.warning('Path file "%s" not found in:', fname)
for p in self.m_pathDirs:
LOGGER.warning(' %s', p)
def checkInclusion(self, other):
"""
Debug code:
Check that other is included in self.
"""
LOGGER.info('OverflowPointOneTide.checkInclusion: %s', (self.m_dt, self.m_dh))
LOGGER.trace('OverflowPointOneTide.checkInclusion: %s vs %s', self, other)
if self.m_dt != other.m_dt: raise ValueError('OverflowPointOneTide: Incoherent tide: %s vs %s' % (self.m_dh, other.m_dh))
if self.m_dh != other.m_dh: raise ValueError('OverflowPointOneTide: Incoherent tide: %s vs %s' % (self.m_dh, other.m_dh))
for ix, ovals in other.m_tideDta.items():
try:
svals = self.m_tideDta[ix]
except KeyError:
raise
for oitem in ovals:
if oitem not in svals:
missing = True
for iy, a in svals:
if iy == oitem[0]:
LOGGER.error(' Bad dilution: (%d, %d): %.2e %.2e' % (ix, iy, a, oitem[1]))
missing = False
break
if missing:
LOGGER.error(' Missing item: %d: %s' % (ix, oitem))
#raise ValueError('%d: %s' % (ix, oitem))
class OverflowPoint:
"""
OverflowPoint
Container of OverflowPointOneTide
All times are UTC
"""
def __init__(self, name='', river=None, dist=0.0):
self.m_name = name
self.m_river = river
self.m_dist2SL = dist
self.m_poly = ()
self.m_parent = None
self.m_root = None # The target as OPoint
self.m_tideRsp = []
def __str__(self):
pstr = None
if self.m_parent:
if isinstance(self.m_parent, str):
pstr = 'unlinked - %s' % self.m_parent
else:
pstr = self.m_parent.m_name
return 'Point: %s; River: %s, Dist=%f, Parent point: %s' % \
(self.m_name,
self.m_river.name if self.m_river else None,
self.m_dist2SL,
pstr)
def __getitem__(self, i):
return self.m_tideRsp[i]
def __iter__(self):
return self.m_tideRsp.__iter__()
def __reduceHits(self, t2bdg, t2bds):
"""
Reduce (max) t2bds in t2bdg
"""
# LOGGER.trace('OverflowPoint.__reduceHits')
rv = t2bds[0]
if len(t2bdg) <= 0: t2bdg = [ [] for _ in range(len(t2bds)) ]
assert len(t2bdg) == len(t2bds)
for ov,rv in zip(t2bds, t2bdg):
if len(ov) > len(rv): rv.extend( [None]*(len(ov)-len(rv)) )
for j in range(len(ov)):
if rv[j]:
if ov[j]:
rv[j] = max(rv[j], ov[j])
else:
rv[j] = ov[j]
return t2bdg
def getHitsForSpillWindow(self, t_start, t_end, dt, tide_tbl, tide_cycles=[], merge_transit_times=False):
"""
For t in [t_start, t_end] with step dt, compute the hits for all required tide cycles id.
Times are UTC
Returns:
[ # for each river transit time
[ # for each exposure window part
[h0, ... hj] hj is Hit for time index j
]
]
"""
LOGGER.trace('OverflowPoint.getHitsForSpillWindow')
LOGGER.trace(' from %s', str(t_start))
LOGGER.trace(' to %s', str(t_end))
if not tide_cycles:
cycles = self.m_tideRsp
else:
# Cython chokes at a single expression
# Rewrite as 2 expressions
cycles = [ self.getTideResponse(ii) for ii in tide_cycles ]
cycles = [ r for r in cycles if r ]
LOGGER.trace('OverFlowPoint.getHitsForSpillWindow(): cycles[%d]', len(cycles))
for tideRsp in cycles:
LOGGER.trace(' %s', tideRsp)
# --- Loop on OverflowTideResponses - result in normalized timedelta
t2bdg = []
for tideRsp in cycles:
if tideRsp:
try:
t2bds = tideRsp.getHitsForSpillWindow(t_start, t_end, dt, tide_tbl, merge_transit_times)
t2bdg = self.__reduceHits(t2bdg, t2bds)
except Exception as e:
LOGGER.exception(e)
LOGGER.warning('OverflowPoint.getHitsForSpillWindow: Skipping cycle %s', tideRsp)
else:
LOGGER.warning('OverflowPoint.getHitsForSpillWindow: Skipping cycle %s', tideRsp)
LOGGER.trace('OverflowPoint.getHitsForSpillWindow: reduced data')
LOGGER.trace(' %s', [ 1 if h else 0 for h in t2bdg[0] ] if t2bdg else [])
return t2bdg
def doPlumes(self, t_start, t_end, dt, tide_tbl, tide_cycles=[]):
"""
For t in [t_start, t_end] with step dt, returns the particule paths
as a list of Plume objects.
.
Times are UTC
Returns:
[
plume1, ...
]
"""
LOGGER.trace('OverflowPoint.doPlumes from %s to %s', t_start, t_end)
hitss = self.getHitsForSpillWindow(t_start, t_end, dt, tide_tbl, tide_cycles, merge_transit_times=True)
assert len(hitss) in [0, 1]
# ---
md5s = []
res = []
try:
res.append( ASPlume(name=self.m_root.m_name, poly=self.m_root.m_poly) )
except Exception as e:
pass
for hits in hitss:
for hit in hits:
if hit and hit.md5 not in md5s:
md5s.append(hit.md5)
ptd = hit.pnt
ptdTideData = ptd.getTideData()
kwargs = {}
kwargs['dilution'] = ptdTideData[-1]
kwargs['name'] = self.m_name
kwargs['parent'] = self.m_parent.m_name if self.m_parent else self.m_name
kwargs['poly'] = self.m_parent.m_poly if self.m_parent else self.m_poly
kwargs['tide'] = ptdTideData[:2]
kwargs['t0'] = hit.t0
kwargs['tc'] = hit.tc
#kwargs['dt'] = -1.0
kwargs['isDirect']= hit.dd
kwargs['plume'] = ptd.getPath(hit.ix, hit.iy)
res.append( ASPlume(**kwargs) )
LOGGER.trace('OverflowPoint.doPlumes done')
return res
def doOverflow(self, t_start, t_end, dt, tide_tbl, tide_cycles=[], merge_transit_times=False):
"""
For t in [t_start, t_end] with step dt, compute the
exposure time window to overflow for all required tide cycles id.
Times are UTC
Returns:
[ # for each river transit time
[ # for each exposure window part
[(t0, t1, dil), ...] time_start, time_end, dilution
]
]
"""
LOGGER.trace('OverflowPoint.doOverflow from %s to %s', t_start, t_end)
hitss = self.getHitsForSpillWindow(t_start, t_end, dt, tide_tbl, tide_cycles, merge_transit_times)
# --- Compact - back to time
res_new = []
for hits in hitss:
ihit = 0
lhits = len(hits)
ps = []
while ihit < lhits:
while ihit < lhits and not hits[ihit]: ihit += 1 # get first Hit
p = []
while ihit < lhits and (hits[ihit] or (ihit+1 < lhits and hits[ihit+1])): # interpolate simple hole
d = hits[ihit].a if hits[ihit] else (hits[ihit+1].a+hits[ihit-1].a)/2.0
t0 = t_start + ihit*DTA_DELTAT
t1 = t0 + DTA_DELTAT
p.append( (t0, t1, d) )
ihit += 1
if p: ps.append(p)
res_new.append(ps)
LOGGER.trace('OverflowPoint.doOverflow done')
return res_new
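    # Illustrative shape of the doOverflow() return value (numbers are made up):
    #   [                                                      # one entry per river transit time
    #     [ [(t0, t0+dt, 1.2e-4), (t0+dt, t0+2*dt, 3.4e-4)],   # first exposure window part
    #       [(t1, t1+dt, 5.6e-5)] ] ]                          # second exposure window part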
def dump(self):
if self.m_river:
return '%s; %s; %f' % (self.m_name, self.m_river.name, self.m_dist2SL)
else:
return '%s' % (self.m_name)
def __decodeRiver(self, data, rivers):
#LOGGER.trace('OverflowPoint.__decodeRiver: %s (%s)' % (data, self))
tks = data.split(';')
if len(tks) == 1:
self.m_name = tks[0].strip()
self.m_river = None
self.m_dist2SL = 0.0
elif len(tks) == 3:
self.m_name = tks[0].strip()
self.m_river = rivers[ tks[1].strip() ]
self.m_dist2SL = float( tks[2] )
elif len(tks) == 4:
self.m_name = tks[0].strip()
self.m_river = rivers[ tks[1].strip() ]
self.m_dist2SL = float( tks[2] )
self.m_parent = tks[3].strip()
else:
raise ValueError
def __decodeTide(self, data, dataDir, dilution):
LOGGER.trace('OverflowPoint.__decodeTide: %s (%s)' % (self.m_name, self))
self.m_tideRsp = []
for item in data:
tk_dt_dh, tk_tide = item.split(';')[1:3]
dt_dh = list(eval(tk_dt_dh))
tid = list(eval(tk_tide))
o = OverflowPointOneTide(self.m_river, self.m_dist2SL)
o.loadTide(dt_dh[0], dt_dh[1], tid, dataDir, dilution)
self.m_tideRsp.append(o)
def __decodePath(self, data, pathDir, dilution):
LOGGER.trace('OverflowPoint.__decodePath: %s (%s)' % (self.m_name, self))
for item in data:
tk_dt_dh, tk_path = item.split(';')[1:3]
dt_dh = list(eval(tk_dt_dh))
pth = eval(tk_path)
o = self.__getTideResponse(dt_dh[0], dt_dh[1])
o.loadPath(dt_dh[0], dt_dh[1], pth, pathDir, dilution)
def __decodePoly(self, data, pathDir, dilution):
LOGGER.trace('OverflowPoint.__decodePoly: %s (%s)' % (self.m_name, self))
for item in data:
tk_poly = item.split(';')[1]
poly = list(eval(tk_poly))
self.m_poly = poly
def load(self, data, rivers, dataDir, dilution, root=None):
LOGGER.trace('OverflowPoint.load: %s' % (self))
self.__decodeRiver(data[0], rivers)
subDir = self.m_name
pathDir = os.path.join(dataDir, subDir)
if data[1]:
self.__decodeTide(data[1], dataDir, dilution)
if data[2]:
self.__decodePath(data[2], pathDir, dilution)
if data[3]:
self.__decodePoly(data[3], pathDir, dilution)
if root:
self.m_root = root
def resolveLinks(self, points):
"""
Translate parent name to object
Copy OverflowPointOneTide from parent
"""
if self.m_parent:
self.m_parent = points[self.m_parent]
for m in self.m_parent.m_tideRsp:
o = OverflowPointOneTide(self.m_river, self.m_dist2SL)
o.setTideData( *m.getTideData() )
if o in self.m_tideRsp:
i = self.m_tideRsp.index(o)
self.m_tideRsp[i].mergeTideData(o)
self.m_tideRsp[i].mergePathData(o)
else:
self.m_tideRsp.append(o)
def checkInclusion(self, other):
"""
Debug Code
Check that other is included in self.
"""
LOGGER.info ('OverflowPoints.checkInclusion: %s', self.m_name)
LOGGER.trace('OverflowPoints: %s vs %s', self, other)
if self.m_name != other.m_name: raise ValueError('OverflowPoint: Incoherent name')
        if ((self.m_river is None) != (other.m_river is None) or
            (self.m_river is not None and self.m_river.name != other.m_river.name)): raise ValueError('OverflowPoint: Incoherent river')
for oitem in other.m_tideRsp:
i = self.m_tideRsp.index(oitem)
sitem = self.m_tideRsp[i]
sitem.checkInclusion(oitem)
def checkPathFiles(self):
"""
Debug Code
Check the presence of the path files
"""
for m in self.m_tideRsp:
m.checkPathFiles()
# try:
# self.m_parent.m_name
# except AttributeError:
# for m in self.m_tideRsp:
# m.checkPathFiles()
def __getTideResponse(self, td, th):
"""
Returns the tide | |
<reponame>Adiapohon/BROCANTO
# MIT License
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import itertools
import csv
import random
# Word-repetition errors in which a word class is repeated (major violations)
#D, M, N, v, v, N
#d, N, v, v, N
#D, M, N, v, v, M, N
#d, N, v, m, v, M, N
#d, N, v, m, m, M, N
#D, M, N, v, N, N
#D, M, N, v, m, N, N
#D, M, N, v, m, m, N
#D, M, N, v, N, M, N
#d, N, v, N, M, N
# Agreement errors between article, adjective and verb (agreement violations)
#D, M, N, v, d, M, N
#d, M, N, v
#d, M, N, v, d, N
#d, N, v, d, M, N
#D, M, N, v, D, N
#D, N, v, D, M, N
#D, N, v, m
#d, N, v, m, D, N
# Wrong order of noun phrase and verb phrase (phrase level violation)
#d, N, d, N, v
#d, N, d, N, v, m
#D, M, N, d, N, v
#D, M, N, d, N, v, m
#d, N, D, M, N, v
#d, N, D, M, N, v, m
#D, M, N, D, M, N, v
#D, M, N, D, M, N, v, m
x = 1 # number of randomly selected sentences per pattern set
N = ['tok', 'plox', 'gum', 'trul']
v = ['prez', 'pel', 'glif', 'rix']
M = ['füne', 'böke']
m = ['rüfi', 'nöri']
d = ['aaf']
D = ['aak']
c = ['caf']
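# Word classes, as suggested by the pattern comments above (an interpretation,
# not stated explicitly in the original script): N = nouns, v = verbs,
# M/m = adjective classes, d/D = article classes; c is defined but not used
# in the patterns below.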
data01 = []
data02 = []
data03 = []
data04 = []
data05 = []
data06 = []
data07 = []
data08 = []
data09 = []
data10 = []
data11 = []
data12 = []
data13 = []
data14 = []
data15 = []
data16 = []
data17 = []
data18 = []
data19 = []
data20 = []
data21 = []
data22 = []
data23 = []
data24 = []
data25 = []
data26 = []
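# The 26 generation blocks below all follow the same pattern: build the
# Cartesian product of the word-class lists, join each combination with
# spaces, store it and print it. A minimal helper that captures this pattern
# (a sketch of an alternative, not part of the original script) could look
# like this:
def build_sentences(*word_classes):
    """Return all space-joined combinations of the given word-class lists."""
    sentences = []
    for combo in itertools.product(*word_classes):
        sentence = ' '.join(combo) + ' '  # trailing blank mirrors the original output
        sentences.append(sentence)
        print(sentence)
    return sentences
# e.g. data01 could then be produced as: data01 = build_sentences(D, M, N, v, v, N)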
print ("Wortwiederholungsfehler")
prod = itertools.product(D, M, N, v, v, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data01.append (tmp)
print (tmp)
prod = itertools.product(d, N, v, v, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data02.append (tmp)
print (tmp)
prod = itertools.product(D, M, N, v, v, M, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data03.append (tmp)
print (tmp)
prod = itertools.product(d, N, v, m, v, M, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data04.append (tmp)
print (tmp)
prod = itertools.product(d, N, v, m, m, M, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data05.append (tmp)
print (tmp)
prod = itertools.product(D, M, N, v, N, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data06.append (tmp)
print (tmp)
prod = itertools.product(D, M, N, v, m, N, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data07.append (tmp)
print (tmp)
prod = itertools.product(D, M, N, v, m, m, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data08.append (tmp)
print (tmp)
prod = itertools.product(D, M, N, v, N, M, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data09.append (tmp)
print (tmp)
prod = itertools.product(d, N, v, N, M, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data10.append (tmp)
print (tmp)
print ("Zuordnungsfehler")
prod = itertools.product(D, M, N, v, d, M, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data11.append (tmp)
print (tmp)
prod = itertools.product(d, M, N, v)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data12.append (tmp)
print (tmp)
prod = itertools.product(d, M, N, v, d, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data13.append (tmp)
print (tmp)
prod = itertools.product(d, N, v, d, M, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data14.append (tmp)
print (tmp)
prod = itertools.product(D, M, N, v, D, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data15.append (tmp)
print (tmp)
prod = itertools.product(D, N, v, D, M, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data16.append (tmp)
print (tmp)
prod = itertools.product(D, N, v, m)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data17.append (tmp)
print (tmp)
prod = itertools.product(d, N, v, m, D, N)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data18.append (tmp)
print (tmp)
print ("Falsche Abfolge von Nominal- und Verbalphrase")
prod = itertools.product(d, N, d, N, v)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data19.append (tmp)
print (tmp)
prod = itertools.product(d, N, d, N, v, m)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data20.append (tmp)
print (tmp)
prod = itertools.product(D, M, N, d, N, v)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data21.append (tmp)
print (tmp)
prod = itertools.product(D, M, N, d, N, v, m)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data22.append (tmp)
print (tmp)
prod = itertools.product(d, N, D, M, N, v)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data23.append (tmp)
print (tmp)
prod = itertools.product(d, N, D, M, N, v, m)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data24.append (tmp)
print (tmp)
prod = itertools.product(D, M, N, D, M, N, v)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data25.append (tmp)
print (tmp)
prod = itertools.product(D, M, N, D, M, N, v, m)
for p in prod:
tmp = ''
for a in p:
tmp += a + ' '
data26.append (tmp)
print (tmp)
#for x in data:
# print (x)
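# Draw a random sample from each violation set and collect all samples in `selec`.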
selec = random.sample (data01, x)
selec = selec + random.sample (data02, x)
selec = selec + random.sample (data03, x)
selec = selec + random.sample (data04, x)
selec = selec + random.sample (data05, x)
selec = selec + random.sample (data06, x)
selec = selec + random.sample (data07, x)
selec = selec + random.sample (data08, x)
selec = selec + random.sample (data09, x)
selec = selec + random.sample (data10, x)
selec = selec + random.sample (data11, x)
selec = selec + random.sample (data12, x)
selec = selec + random.sample (data13, x)
selec = selec + random.sample (data14, x)
selec = selec + random.sample (data15, x)
selec = selec + random.sample (data16, x)
selec = selec + random.sample (data17, 2)
selec = selec + random.sample (data18, x)
selec = selec + random.sample (data19, x)
selec = selec + random.sample (data20, x)
selec = selec + random.sample (data21, x)
selec = selec + random.sample (data22, x)
selec = selec + random.sample (data23, x)
selec = selec + random.sample (data24, x)
selec = selec + random.sample (data25, x)
selec = selec + random.sample (data26, x)
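# How the sampled sentences are written out is not shown above; a minimal
# sketch using the csv module imported at the top (file name and layout are
# assumptions, not taken from the original script):
with open('violation_sentences.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    for sentence in selec:
        writer.writerow([sentence.strip()])  # strip the trailing blank added during generation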