# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane.template.embeddings` module.
Integration tests should be placed into ``test_templates.py``.
"""
# pylint: disable=protected-access,cell-var-from-loop
import pytest
from math import pi
import numpy as np
import pennylane as qml
import pennylane._queuing
from pennylane.templates.embeddings import (AngleEmbedding,
BasisEmbedding,
AmplitudeEmbedding,
QAOAEmbedding,
DisplacementEmbedding,
SqueezingEmbedding)
from pennylane import Beamsplitter
from pennylane.wires import Wires
class TestAmplitudeEmbedding:
""" Tests the AmplitudeEmbedding method."""
INPT = [np.array([0, 1, 0, 0]),
1 / np.sqrt(4) * np.array([1, 1, 1, 1]),
np.array([complex(-np.sqrt(0.1), 0.0), np.sqrt(0.3),
complex(0, -np.sqrt(0.1)), np.sqrt(0.5)])]
NOT_ENOUGH_FEATURES = [np.array([0, 1, 0]),
1 / np.sqrt(3) * np.array([1, 1, 1]),
np.array([complex(-np.sqrt(0.1), 0.0), np.sqrt(0.3),
complex(0, -np.sqrt(0.6))])]
TOO_MANY_FEATURES = [[0, 0, 0, 1, 0],
1 / np.sqrt(8) * np.array([1] * 8),
[complex(-np.sqrt(0.1), 0.0), np.sqrt(0.3),
complex(0, -np.sqrt(0.6)), 0., 0.]]
@pytest.mark.parametrize("inpt", INPT)
def test_amplitude_embedding_prepares_state(self, inpt):
"""Checks the state produced by AmplitudeEmbedding() for real and complex
inputs."""
n_qubits = 2
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit(x=None):
AmplitudeEmbedding(features=x, wires=range(n_qubits), pad=None, normalize=False)
return [qml.expval(qml.PauliZ(i)) for i in range(n_qubits)]
circuit(x=inpt)
state = dev._state.ravel()
assert np.allclose(state, inpt)
@pytest.mark.parametrize("inpt", NOT_ENOUGH_FEATURES)
@pytest.mark.parametrize("pad", [complex(0.1, 0.1), 0., 1.])
def test_amplitude_embedding_prepares_padded_state(self, inpt, pad):
"""Checks the state produced by AmplitudeEmbedding() for real and complex padding constants."""
n_qubits = 2
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit(x=None):
AmplitudeEmbedding(features=x, wires=range(n_qubits), pad=pad, normalize=False)
return [qml.expval(qml.PauliZ(i)) for i in range(n_qubits)]
circuit(x=inpt)
state = dev._state.ravel()
assert len(set(state[len(inpt):])) == 1
@pytest.mark.parametrize("inpt", INPT)
def test_amplitude_embedding_throws_exception_if_not_normalized(self, inpt):
"""Checks that AmplitudeEmbedding() throws exception when state is not normalized and `normalize=False`."""
not_nrmlzd = 2 * inpt
n_qubits = 2
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit(x=None):
AmplitudeEmbedding(features=x, wires=range(n_qubits), pad=None, normalize=False)
return [qml.expval(qml.PauliZ(i)) for i in range(n_qubits)]
with pytest.raises(ValueError, match="'features' must be a vector of length"):
circuit(x=not_nrmlzd)
@pytest.mark.parametrize("inpt", NOT_ENOUGH_FEATURES)
def test_amplitude_embedding_throws_exception_if_fewer_features_than_amplitudes(self, inpt):
"""Verifies that AmplitudeEmbedding() throws exception
if the number of features is fewer than the number of amplitudes, and
no automatic padding is chosen."""
n_qubits = 2
dev = qml.device('default.qubit', wires=n_qubits)
@qml.qnode(dev)
def circuit(x=None):
AmplitudeEmbedding(features=x, wires=range(n_qubits), pad=None, normalize=False)
return qml.expval(qml.PauliZ(0))
with pytest.raises(ValueError, match="'features' must be of shape"):
circuit(x=inpt)
@pytest.mark.parametrize("inpt", TOO_MANY_FEATURES)
def test_amplitude_embedding_throws_exception_if_more_features_than_amplitudes(self, inpt):
"""Verifies that AmplitudeEmbedding() throws exception
if the number of features is larger than the number of amplitudes, and
no automatic padding is chosen."""
n_qubits = 2
dev = qml.device('default.qubit', wires=n_qubits)
@qml.qnode(dev)
def circuit(x=None):
AmplitudeEmbedding(features=x, wires=range(n_qubits), pad=None, normalize=False)
return qml.expval(qml.PauliZ(0))
with pytest.raises(ValueError, match="'features' must be of shape"):
circuit(x=inpt)
@pytest.mark.parametrize("inpt", TOO_MANY_FEATURES)
def test_amplitude_embedding_with_padding_throws_exception_if_more_features_than_amplitudes(self, inpt):
"""Verifies that AmplitudeEmbedding() throws exception
if the number of features is larger than the number of amplitudes, and
automatic padding is chosen."""
n_qubits = 2
dev = qml.device('default.qubit', wires=n_qubits)
@qml.qnode(dev)
def circuit(x=None):
AmplitudeEmbedding(features=x, wires=range(n_qubits), pad=0., normalize=False)
return qml.expval(qml.PauliZ(0))
with pytest.raises(ValueError, match="'features' must be of shape"):
circuit(x=inpt)
def test_amplitude_embedding_tolerance_value(self):
"""Tests that a small enough tolerance value is used for Amplitude
Embedding."""
inputs = np.array([0.25895178024895, 0.115997030111517, 0.175840500169049, 0.16545033015906,
0.016337370015706, 0.006616800006361, 0.22326375021464, 0.161815530155566,
0.234776190225708, 0.082623190079432, 0.291982110280705, 0.295344560283937,
0.05998731005767, 0.056911140054713, 0.274260680263668, 0.163596590157278,
0.048460970046589, 0.292306260281016, 0.292451040281155, 0.007849840007547,
0.218302930209871, 0.326763300314142, 0.163634550157314, 0.275472160264832,
0.105510810101436])
tolerance = 10e-10
num_qubits = 5
dev = qml.device('default.qubit', wires=num_qubits)
assert np.isclose(np.sum(np.abs(inputs) ** 2), 1, tolerance)
@qml.qnode(dev)
def circuit(x=None):
AmplitudeEmbedding(x, list(range(num_qubits)), pad=0., normalize=True)
return qml.expval(qml.PauliZ(0))
# No normalization error is raised
circuit(x=inputs)
class TestAngleEmbedding:
""" Tests the AngleEmbedding method."""
def test_angle_embedding_state_rotx(self, qubit_device, n_subsystems):
"""Checks the state produced by AngleEmbedding()
using the rotation='X' strategy."""
features = [pi / 2, pi / 2, pi / 4, 0]
@qml.qnode(qubit_device)
def circuit(x=None):
AngleEmbedding(features=x, wires=range(n_subsystems), rotation='X')
qml.PauliX(wires=0)
AngleEmbedding(features=x, wires=range(n_subsystems), rotation='X')
return [qml.expval(qml.PauliZ(i)) for i in range(n_subsystems)]
res = circuit(x=features[:n_subsystems])
target = [1, -1, 0, 1, 1]
assert np.allclose(res, target[:n_subsystems])
def test_angle_embedding_state_roty(self, qubit_device, n_subsystems):
"""Checks the state produced by AngleEmbedding()
using the rotation='Y' strategy."""
features = [pi / 2, pi / 2, pi / 4, 0]
@qml.qnode(qubit_device)
def circuit(x=None):
AngleEmbedding(features=x, wires=range(n_subsystems), rotation='Y')
qml.PauliX(wires=0)
AngleEmbedding(features=x, wires=range(n_subsystems), rotation='Y')
return [qml.expval(qml.PauliZ(i)) for i in range(n_subsystems)]
res = circuit(x=features[:n_subsystems])
target = [-1, -1, 0, 1, 1]
assert np.allclose(res, target[:n_subsystems])
def test_angle_embedding_state_rotz(self, qubit_device, n_subsystems):
"""Checks the state produced by AngleEmbedding()
using the rotation='Z' strategy."""
features = [pi / 2, pi / 2, pi / 4, 0]
@qml.qnode(qubit_device)
def circuit(x=None):
AngleEmbedding(features=x, wires=range(n_subsystems), rotation='Z')
qml.PauliX(wires=0)
AngleEmbedding(features=x, wires=range(n_subsystems), rotation='Z')
return [qml.expval(qml.PauliZ(i)) for i in range(n_subsystems)]
res = circuit(x=features[:n_subsystems])
target = [-1, 1, 1, 1, 1]
assert np.allclose(res, target[:n_subsystems])
@pytest.mark.parametrize('strategy', ['X', 'Y', 'Z'])
def test_angle_embedding_fewer_features(self, strategy):
"""Verifies that AngleEmbedding() can be used correctly if there are
fewer features than rotation gates."""
features = [pi / 2, pi / 2, pi / 4, 0]
n_subsystems = 5
dev = qml.device('default.qubit', wires=n_subsystems)
@qml.qnode(dev)
def circuit(x=None):
AngleEmbedding(features=x, wires=range(n_subsystems), rotation='Z')
qml.PauliX(wires=0)
AngleEmbedding(features=x, wires=range(n_subsystems), rotation='Z')
return [qml.expval(qml.PauliZ(i)) for i in range(n_subsystems)]
res = circuit(x=features)
target = [-1, 1, 1, 1, 1]
assert np.allclose(res, target)
@pytest.mark.parametrize('strategy', ['X', 'Y', 'Z'])
def test_angle_embedding_exception_fewer_rotations(self, strategy):
"""Verifies that AngleEmbedding() raises an exception if there are fewer
rotation gates than features."""
features = [0, 0, 0, 0]
n_subsystems = 1
dev = qml.device('default.qubit', wires=n_subsystems)
@qml.qnode(dev)
def circuit(x=None):
AngleEmbedding(features=x, wires=range(n_subsystems), rotation=strategy)
qml.PauliX(wires=0)
AngleEmbedding(features=x, wires=range(n_subsystems), rotation=strategy)
return [qml.expval(qml.PauliZ(i)) for i in range(n_subsystems)]
with pytest.raises(ValueError, match="'features' must be of shape"):
circuit(x=features)
def test_angle_embedding_exception_wrongrot(self):
"""Verifies that AngleEmbedding() raises an exception if the
rotation strategy is unknown."""
n_subsystems = 1
dev = qml.device('default.qubit', wires=n_subsystems)
@qml.qnode(dev)
def circuit(x=None):
AngleEmbedding(features=x, wires=range(n_subsystems), rotation='A')
return [qml.expval(qml.PauliZ(i)) for i in range(n_subsystems)]
with pytest.raises(ValueError, match="did not recognize option"):
circuit(x=[1])
class TestBasisEmbedding:
""" Tests the BasisEmbedding method."""
def test_basis_embedding_state(self):
"""Checks the state produced by BasisEmbedding()."""
state = np.array([0, 1])
n_qubits = 2
dev = qml.device('default.qubit', wires=n_qubits)
@qml.qnode(dev)
def circuit(x=None):
BasisEmbedding(features=x, wires=range(2))
return [qml.expval(qml.PauliZ(i)) for i in range(n_qubits)]
res = circuit(x=state)
assert np.allclose(res, [1, -1])
def test_basis_embedding_too_many_input_bits_exception(self):
"""Verifies that BasisEmbedding() throws exception if there are more features than qubits."""
n_qubits = 2
dev = qml.device('default.qubit', wires=n_qubits)
@qml.qnode(dev)
def circuit(x=None):
BasisEmbedding(features=x, wires=range(2))
return qml.expval(qml.PauliZ(0))
with pytest.raises(ValueError):
circuit(x=np.array([0, 1, 1]))
def test_basis_embedding_not_enough_input_bits_exception(self):
"""Verifies that BasisEmbedding() throws exception if there are less features than qubits."""
n_qubits = 2
dev = qml.device('default.qubit', wires=n_qubits)
@qml.qnode(dev)
def circuit(x=None):
BasisEmbedding(features=x, wires=range(2))
return qml.expval(qml.PauliZ(0))
with pytest.raises(ValueError):
circuit(x=np.array([0]))
def test_basis_embedding_input_not_binary_exception(self):
"""Verifies that BasisEmbedding() raises an exception if the features contain
values other than zero and one."""
n_subsystems = 2
dev = qml.device('default.qubit', wires=n_subsystems)
@qml.qnode(dev)
def circuit(x=None):
BasisEmbedding(features=x, wires=[0, 1])
return qml.expval(qml.PauliZ(0))
with pytest.raises(ValueError, match="'basis_state' must only consist of"):
circuit(x=[2, 3])
def test_basis_embedding_features_not_iterable_exception(self):
"""Verifies that BasisEmbedding() raises an exception if the features are not
of type Iterable."""
n_subsystems = 2
dev = qml.device('default.qubit', wires=n_subsystems)
@qml.qnode(dev)
def circuit(x=None):
BasisEmbedding(features=x, wires=[0, 1])
return qml.expval(qml.PauliZ(0))
with pytest.raises(ValueError, match="'features' must be iterable"):
circuit(x=1)
class TestIQPEmbedding:
""" Tests the IQPEmbedding method."""
QUEUES = [(0, []),
(1, [qml.Hadamard, qml.RZ]),
(2, [qml.Hadamard, qml.Hadamard, qml.RZ, qml.RZ, qml.MultiRZ]),
(3, [qml.Hadamard, qml.Hadamard, qml.Hadamard, qml.RZ, qml.RZ, qml.RZ,
qml.MultiRZ, qml.MultiRZ, qml.MultiRZ])]
@pytest.mark.parametrize('n_wires, expected_queue', QUEUES)
@pytest.mark.parametrize('n_repeats', [1, 2])
def test_queue_default_pattern(self, n_wires, expected_queue, n_repeats):
"""Checks the queue for the default pattern."""
with pennylane._queuing.OperationRecorder() as rec:
qml.templates.IQPEmbedding(features=list(range(n_wires)), wires=range(n_wires), n_repeats=n_repeats)
expected_queue = expected_queue * n_repeats
for gate, expected_gate in zip(rec.queue, expected_queue):
assert isinstance(gate, expected_gate)
@pytest.mark.parametrize('features, expected_params', [([1., 2., 3.],
[1., 2., 3., 1 * 2, 1 * 3, 2 * 3]),
([0.1, 0.2, 0.3],
[0.1, 0.2, 0.3, 0.1 * 0.2, 0.1 * 0.3, 0.2 * 0.3])])
@pytest.mark.parametrize('wires', [range(3),
[2, 0, 1]])
def test_queue_parameters(self, features, expected_params, wires):
"""Checks the queued parameters, for consecutive and non-consecutive ``wires`` argument."""
with pennylane._queuing.OperationRecorder() as rec:
qml.templates.IQPEmbedding(features=features, wires=wires)
# compare all nonempty gate parameters to expected ones
counter = 0
for gate in rec.queue:
if gate.parameters:
assert gate.parameters[0] == expected_params[counter]
counter += 1
@pytest.mark.parametrize('wires, expected_queue_wires', [(range(3), [[0], [1], [2], [0], [1], [2],
[0, 1], [0, 2], [1, 2]]),
([2, 0, 1], [[2], [0], [1], [2], [0], [1],
[2, 0], [2, 1], [0, 1]])])
def test_queue_correct_wires(self, wires, expected_queue_wires):
"""Checks the queued wires for a consecutive and | |
# Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD 3 clause
"""
Module for LBM boundary conditions
"""
import collections
import logging
import types
import numpy as np
from sympy import symbols, IndexedBase, Idx, Eq
from .storage import Array
log = logging.getLogger(__name__) #pylint: disable=invalid-name
#pylint: disable=too-few-public-methods
class BoundaryVelocity:
"""
Indices and distances for the label and the velocity ksym
"""
def __init__(self, domain, label, ksym):
self.label = label
# we look for the exterior points that have an incoming velocity (index ksym)
# on a border labeled by `label`
# we loop over all the velocities and determine the interior points whose
# symmetric velocity (index k) goes out
# then we write, in a list following the order of the scheme's velocities,
# - the indices of the corresponding exterior points
# - the associated distances
self.v = domain.stencil.unique_velocities[ksym]
v = self.v.get_symmetric()
num = domain.stencil.unum2index[v.num]
ind = np.where(domain.flag[num] == self.label)
self.indices = np.array(ind)
if self.indices.size != 0:
self.indices += np.asarray(v.v)[:, np.newaxis]
self.distance = np.array(domain.distance[(num,) + ind])
class Boundary:
"""
Construct the boundary problem by defining the list of indices on the border and the methods used on each label.
Parameters
----------
domain : pylbm.Domain
the simulation domain
dico : dictionary
describes the boundaries
- key is a label
- value is again a dictionary with
+ a "method" key that gives the boundary method class used (BounceBack, BouzidiBounceBack, ...)
+ a "value" key that gives the prescribed value on the boundary
Attributes
----------
bv_per_label : dictionary
for each label key, a list of spatial indices and distances that define, for each velocity,
the points of the domain that are on the boundary.
methods : list
list of boundary methods used in the LBM scheme
The list contains BoundaryMethod instances.
"""
#pylint: disable=too-many-locals
def __init__(self, domain, generator, dico):
self.domain = domain
# build the list of indices for each unique velocity and for each label
self.bv_per_label = {}
for label in self.domain.list_of_labels():
dummy_bv = []
for k in range(self.domain.stencil.unvtot):
dummy_bv.append(BoundaryVelocity(self.domain, label, k))
self.bv_per_label[label] = dummy_bv
# build the list of boundary information for each stencil and each label
dico_bound = dico.get('boundary_conditions', {})
stencil = self.domain.stencil
istore = collections.OrderedDict() # important to set the boundary conditions always in the same way !!!
ilabel = {}
distance = {}
value_bc = {}
#pylint: disable=too-many-nested-blocks
for label in self.domain.list_of_labels():
if label in [-1, -2]: # periodic or interface conditions
pass
else: # non periodic conditions
value_bc[label] = dico_bound[label].get('value', None)
methods = dico_bound[label]['method']
# for each method get the list of points, the labels and the distances
# where the distribution function must be updated on the boundary
for k, v in methods.items():
for inumk, numk in enumerate(stencil.num[k]):
if self.bv_per_label[label][stencil.unum2index[numk]].indices.size != 0:
indices = self.bv_per_label[label][stencil.unum2index[numk]].indices
distance_tmp = self.bv_per_label[label][stencil.unum2index[numk]].distance
velocity = (inumk + stencil.nv_ptr[k])*np.ones(indices.shape[1], dtype=np.int32)[np.newaxis, :]
ilabel_tmp = label*np.ones(indices.shape[1], dtype=np.int32)
istore_tmp = np.concatenate([velocity, indices])
if istore.get(v, None) is None:
istore[v] = istore_tmp.copy()
ilabel[v] = ilabel_tmp.copy()
distance[v] = distance_tmp.copy()
else:
istore[v] = np.concatenate([istore[v], istore_tmp], axis=1)
ilabel[v] = np.concatenate([ilabel[v], ilabel_tmp])
distance[v] = np.concatenate([distance[v], distance_tmp])
# for each method create the instance associated
self.methods = []
for k in list(istore.keys()):
self.methods.append(k(istore[k], ilabel[k], distance[k], stencil, value_bc, domain.distance.shape, generator))
#pylint: disable=protected-access
class BoundaryMethod:
"""
Set boundary method.
Parameters
----------
FIXME : add parameters documentation
Attributes
----------
feq : ndarray
the equilibrium values of the distribution function on the border
rhs : ndarray
the additional terms to fix the boundary values
distance : ndarray
distance to the border (needed for Bouzidi type conditions)
istore : ndarray
indices of points where we store the boundary condition
ilabel : ndarray
label of the boundary
iload : list
indices of points needed to compute the boundary condition
value_bc : dictionary
the prescribed values on the border
"""
def __init__(self, istore, ilabel, distance, stencil, value_bc, nspace, generator):
self.istore = istore
self.feq = np.zeros((stencil.nv_ptr[-1], istore.shape[1]))
self.rhs = np.zeros(istore.shape[1])
self.ilabel = ilabel
self.distance = distance
self.stencil = stencil
self.iload = []
self.value_bc = {}
for k in np.unique(self.ilabel):
self.value_bc[k] = value_bc[k]
self.nspace = nspace
self.generator = generator
def fix_iload(self):
"""
Transpose iload and istore.
Must be fixed in a future version.
"""
# FIXME: store istore and iload in the proper layout and with the right dtype
for i in range(len(self.iload)):
self.iload[i] = np.ascontiguousarray(self.iload[i].T, dtype=np.int32)
self.istore = np.ascontiguousarray(self.istore.T, dtype=np.int32)
#pylint: disable=too-many-locals
def prepare_rhs(self, simulation):
"""
Compute the distribution function at the equilibrium with the value on the border.
Parameters
----------
simulation : Simulation
simulation class
"""
nv = simulation.container.nv
sorder = simulation.container.sorder
nspace = [1]*(len(sorder)-1)
v = self.stencil.get_all_velocities()
gpu_support = simulation.container.gpu_support
for key, value in self.value_bc.items():
if value is not None:
indices = np.where(self.ilabel == key)
# TODO: check the index in sorder to be the most contiguous
nspace[0] = indices[0].size
k = self.istore[0, indices]
s = 1 - self.distance[indices]
coords = tuple()
for i in range(simulation.domain.dim):
x = simulation.domain.coords_halo[i][self.istore[i + 1, indices]]
x += s*v[k, i]*simulation.domain.dx
x = x.ravel()
for j in range(1, simulation.domain.dim): #pylint: disable=unused-variable
x = x[:, np.newaxis]
coords += (x,)
m = Array(nv, nspace, 0, sorder, gpu_support=gpu_support)
m.set_conserved_moments(simulation.scheme.consm)
f = Array(nv, nspace, 0, sorder, gpu_support=gpu_support)
f.set_conserved_moments(simulation.scheme.consm)
#TODO add error message and more tests
if isinstance(value, types.FunctionType):
value(f, m, *coords)
elif isinstance(value, tuple):
if len(value) != 2:
log.error("""Function set in boundary must be the function name or a tuple
of size 2 with function name and extra args.""")
args = coords + value[1]
value[0](f, m, *args)
simulation.equilibrium(m)
simulation.m2f(m, f)
if self.generator.backend.upper() == "LOOPY":
f.array_cpu[...] = f.array.get()
self.feq[:, indices[0]] = f.swaparray.reshape((nv, indices[0].size))
def _get_istore_iload_symb(self, dim):
ncond = symbols('ncond', integer=True)
istore = symbols('istore', integer=True)
istore = IndexedBase(istore, [ncond, dim+1])
iload = []
for i in range(len(self.iload)):
iloads = symbols('iload%d'%i, integer=True)
iload.append(IndexedBase(iloads, [ncond, dim+1]))
return istore, iload, ncond
@staticmethod
def _get_rhs_dist_symb(ncond):
rhs = IndexedBase('rhs', [ncond])
dist = IndexedBase('dist', [ncond])
return rhs, dist
def update(self, ff):
"""
Update distribution functions with this boundary condition.
Parameters
----------
ff : array
The distribution functions
"""
from .symbolic import call_genfunction
args = self._get_args(ff)
call_genfunction(self.function, args) #pylint: disable=no-member
#pylint: disable=possibly-unused-variable
def _get_args(self, ff):
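# Collect, as local variables, everything the generated kernel needs (f, istore,
# iload0, iload1, ..., rhs, dist, ncond) and hand them over via locals(); exec() is
# used so the iload arrays get exactly the names (iload0, iload1, ...) expected by
# the generated code.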
dim = len(ff.nspace)
nx = ff.nspace[0]
if dim > 1:
ny = ff.nspace[1]
if dim > 2:
nz = ff.nspace[2]
f = ff.array
for i in range(len(self.iload)):
exec('iload{i} = self.iload[{i}]'.format(i=i)) #pylint: disable=exec-used
istore = self.istore
rhs = self.rhs
if hasattr(self, 's'):
dist = self.s
ncond = istore.shape[0]
return locals()
def move2gpu(self):
"""
Move arrays needed to compute the boundary on the GPU memory.
"""
if self.generator.backend.upper() == "LOOPY":
try:
import pyopencl as cl
import pyopencl.array #pylint: disable=unused-variable
from .context import queue
except ImportError:
raise ImportError("Please install loo.py")
self.rhs = cl.array.to_device(queue, self.rhs)
if hasattr(self, 's'):
self.s = cl.array.to_device(queue, self.s) #pylint: disable=attribute-defined-outside-init
self.istore = cl.array.to_device(queue, self.istore)
for i in range(len(self.iload)):
self.iload[i] = cl.array.to_device(queue, self.iload[i])
class BounceBack(BoundaryMethod):
"""
Boundary condition of type bounce-back
Notes
------
.. plot:: codes/bounce_back.py
"""
def set_iload(self):
"""
Compute the indices that are needed (symmetric velocities and space indices).
"""
k = self.istore[0]
ksym = self.stencil.get_symmetric()[k][np.newaxis, :]
v = self.stencil.get_all_velocities()
indices = self.istore[1:] + v[k].T
self.iload.append(np.concatenate([ksym, indices]))
def set_rhs(self):
"""
Compute and set the additional terms to fix the boundary values.
"""
k = self.istore[0]
ksym = self.stencil.get_symmetric()[k]
self.rhs[:] = self.feq[k, np.arange(k.size)] - self.feq[ksym, np.arange(k.size)]
#pylint: disable=too-many-locals
def generate(self, sorder):
"""
Generate the numerical code.
Parameters
----------
sorder : list
the order of nv, nx, ny and nz
"""
from .generator import For
from .symbolic import nx, ny, nz, indexed, ix
ns = int(self.stencil.nv_ptr[-1])
dim = self.stencil.dim
istore, iload, ncond = self._get_istore_iload_symb(dim)
rhs, _ = self._get_rhs_dist_symb(ncond)
idx = Idx(ix, (0, ncond))
fstore = indexed('f', [ns, nx, ny, nz], index=[istore[idx, k] for k in range(dim+1)], priority=sorder)
fload = indexed('f', [ns, nx, ny, nz], index=[iload[0][idx, k] for k in range(dim+1)], priority=sorder)
self.generator.add_routine(('bounce_back', For(idx, Eq(fstore, fload + rhs[idx]))))
@property
def function(self):
"""Return the generated function"""
return self.generator.module.bounce_back
class BouzidiBounceBack(BoundaryMethod):
"""
Boundary condition of type Bouzidi bounce-back [BFL01]
Notes
------
.. plot:: codes/Bouzidi.py
"""
def __init__(self, istore, ilabel, distance, stencil, value_bc, nspace, generator):
super(BouzidiBounceBack, self).__init__(istore, ilabel, distance, stencil, value_bc, nspace, generator)
self.s
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 1.14848,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 9.93414,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.000768033,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.203292,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.00489056,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.134003,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.216141,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.109101,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.459244,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.152511,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.12944,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.000923932,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00562067,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0409021,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0415683,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.041826,
'Execution Unit/Register Files/Runtime Dynamic': 0.047189,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0863613,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.24668,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.36178,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00129107,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00129107,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00118164,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000488669,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000597132,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00436092,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0103381,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0399607,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.54184,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.103155,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.135724,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.88372,
'Instruction Fetch Unit/Runtime Dynamic': 0.293539,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0180619,
'L2/Runtime Dynamic': 0.00421009,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.46507,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.595097,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.039727,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0397271,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.65267,
'Load Store Unit/Runtime Dynamic': 0.830745,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0979601,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.195921,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0347664,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.035037,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.158042,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0169126,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.373874,
'Memory Management Unit/Runtime Dynamic': 0.0519496,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.6472,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.00242996,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0060754,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0695086,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage':
from __future__ import unicode_literals
import fiona
import os
import logging
import sys
import urllib
import datetime
from collections import OrderedDict
import cartosql
import zipfile
import requests
import json
# do you want to delete everything currently in the Carto table when you run this script?
CLEAR_TABLE_FIRST = False
# name of data directory in Docker container
DATA_DIR = 'data'
# Carto username and API key for account where we will store the data
CARTO_USER = os.getenv('CARTO_USER')
CARTO_KEY = os.getenv('CARTO_KEY')
# name of table in Carto where we will upload the data
CARTO_TABLE = 'cli_037_smoke_plumes'
# column names and types for data table
# column names should be lowercase
# column types should be one of the following: geometry, text, numeric, timestamp
CARTO_SCHEMA = OrderedDict([
('the_geom', 'geometry'),
('_UID', 'text'),
('date', 'timestamp'),
('Satellite', 'text'),
('_start', 'timestamp'),
('_end', 'timestamp'),
('duration', 'text'),
('Density', 'numeric')
])
# column of table that can be used as a unique ID
UID_FIELD = '_UID'
# column that stores datetime information
TIME_FIELD = 'date'
# how many rows can be stored in the Carto table before the oldest ones are deleted?
MAXROWS = 100000
# url for latest Hazard Mapping System (HMS) data
SOURCE_URL = 'http://satepsanone.nesdis.noaa.gov/pub/FIRE/HMS/GIS/hms_smoke{date}.zip'
# url for archive Hazard Mapping System (HMS) data
SOURCE_URL_ARCHIVE = 'http://satepsanone.nesdis.noaa.gov/pub/FIRE/HMS/GIS/ARCHIVE/hms_smoke{date}.zip'
# file name to save data retrieved from source url
FILENAME = 'hms_smoke{date}'
# format of dates in Carto table
DATE_FORMAT = '%Y%m%d'
# format of dates in source shapefiles
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
# maximum days to go back while searching for new dates
MAXAGE_UPLOAD = datetime.datetime.today() - datetime.timedelta(days=360)
# use SOURCE_URL_ARCHIVE to access data if date is older than MAX_CHECK_CURRENT
# else use SOURCE_URL
MAX_CHECK_CURRENT = datetime.datetime.today() - datetime.timedelta(days=7)
# oldest date that can be stored in the Carto table before we start deleting
MAXAGE = datetime.datetime.today() - datetime.timedelta(days=365*10)
# Resource Watch dataset API ID
# Important! Before testing this script:
# Please change this ID OR comment out the getLayerIDs(DATASET_ID) function in the script below
# Failing to do so will overwrite the last update date on a different dataset on Resource Watch
DATASET_ID = 'c667617a-44e8-4181-b96d-f99bbe73c331'
def lastUpdateDate(dataset, date):
'''
Given a Resource Watch dataset's API ID and a datetime,
this function will update the dataset's 'last update date' on the API with the given datetime
INPUT dataset: Resource Watch API dataset ID (string)
date: date to set as the 'last update date' for the input dataset (datetime)
'''
# generate the API url for this dataset
apiUrl = f'http://api.resourcewatch.org/v1/dataset/{dataset}'
# create headers to send with the request to update the 'last update date'
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
# create the json data to send in the request
body = {
"dataLastUpdated": date.isoformat() # date should be a string in the format 'YYYY-MM-DDTHH:MM:SS'
}
# send the request
try:
r = requests.patch(url = apiUrl, json = body, headers = headers)
logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code))
return 0
except Exception as e:
logging.error('[lastUpdated]: '+str(e))
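# Example usage (hypothetical timestamp; DATASET_ID is defined above):
#   lastUpdateDate(DATASET_ID, datetime.datetime.utcnow())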
'''
FUNCTIONS FOR CARTO DATASETS
The functions below must go in every near real-time script for a Carto dataset.
Their format should not need to be changed.
'''
def checkCreateTable(table, schema, id_field, time_field=''):
'''
Create the table if it does not exist, and pull list of IDs already in the table if it does
INPUT table: Carto table to check or create (string)
schema: dictionary of column names and types, used if we are creating the table for the first time (dictionary)
id_field: name of column that we want to use as a unique ID for this table; this will be used to compare the
source data to our table each time we run the script so that we only have to pull data we
haven't previously uploaded (string)
time_field: optional, name of column that will store datetime information (string)
RETURN list of existing IDs in the table, pulled from the id_field column (list of strings)
'''
# check if the table already exists in Carto
if cartosql.tableExists(table, user=CARTO_USER, key=CARTO_KEY):
# if the table does exist, get a list of all the values in the id_field column
logging.info('Fetching existing IDs')
r = cartosql.getFields(id_field, table, f='csv', post=True, user=CARTO_USER, key=CARTO_KEY)
# turn the response into a list of strings, removing the first and last entries (header and an empty space at end)
return r.text.split('\r\n')[1:-1]
else:
# if the table does not exist, create it with columns based on the schema input
logging.info('Table {} does not exist, creating'.format(table))
cartosql.createTable(table, schema, user=CARTO_USER, key=CARTO_KEY)
# if a unique ID field is specified, set it as a unique index in the Carto table; when you upload data, Carto
# will ensure no two rows have the same entry in this column and return an error if you try to upload a row with
# a duplicate unique ID
if id_field:
cartosql.createIndex(table, id_field, unique=True, user=CARTO_USER, key=CARTO_KEY)
# if a time_field is specified, set it as an index in the Carto table; this is not a unique index
if time_field:
cartosql.createIndex(table, time_field, user=CARTO_USER, key=CARTO_KEY)
# return an empty list because there are no IDs in the new table yet
return []
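# Example usage with the constants defined above (returns the existing _UID values,
# or an empty list if the table was just created):
#   existing_ids = checkCreateTable(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD)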
'''
FUNCTIONS FOR THIS DATASET
The functions below have been tailored to this specific dataset.
They should all be checked because their format likely will need to be changed.
'''
# Generate UID
def genUID(date, pos_in_shp):
'''
Generate unique id using date and feature index in retrieved GeoJSON
INPUT date: date for which we want to generate id (string)
pos_in_shp: index of the feature in GeoJSON (integer)
RETURN unique id of the feature in GeoJSON (string)
'''
return str('{}_{}'.format(date, pos_in_shp))
def getDate(uid):
'''
Split uid variable using '_' to get the first eight elements which represent the date
INPUT uid: unique ID used in Carto table (string)
RETURN date in the format YYYYMMDD (string)
'''
return uid.split('_')[0]
def formatObservationDatetime(start, end, datetime_format=DATETIME_FORMAT):
'''
Reformat the start and end date according to DATETIME_FORMAT
INPUT start: start date of smoke plume observation (string)
end: end date of smoke plume observation (string)
datetime_format: format in which this function will return the datestrings (string)
RETURN start: start date of the observation, in the format specified by datetime_format (string)
end: end date of the observation, in the format specified by datetime_format (string)
duration: duration of the observation, in the format HH:MM:SS (string)
'''
# split the start date to separate out date and time
date, time = start.split(' ')
# get year of start date from first four characters of the date
year = int(date[:4])
# get fourth and the following characters from the date and subtract 1 to get day
# 1 is subtracted because we will add the day number to January 1 to get the date. The source starts with January 1 having a day number of 1, so we would want to add 0 to January 1 to get the correct date.
day = int(date[4:])-1
# get hour from the last two characters of the time string
hour = int(time[:-2])
# get minute from the time string (up until the last two characters)
minute = int(time[-2:])
# create a datetime object for the 1st of January of the year
# generate a complete datetime object to include month, day and time
start_dt = datetime.datetime(year=year,month=1,day=1) + datetime.timedelta(days=day, hours=hour, minutes=minute)
# Use similar approach as above to reformat the end date
date, time = end.split(' ')
year = int(date[:4])
day = int(date[4:])-1
hour = int(time[:-2])
minute = int(time[-2:])
end_dt = datetime.datetime(year=year,month=1,day=1) + datetime.timedelta(days=day, hours=hour, minutes=minute)
# convert datetime object to string formatted according to datetime_format
start = start_dt.strftime(datetime_format)
# convert datetime object to string formatted according to datetime_format
end = end_dt.strftime(datetime_format)
# get duration of the event, in the format HH:MM:SS, by subtracting start date from end date
duration = str((end_dt - start_dt))
return(start,end,duration)
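# Worked example (hypothetical input): the source encodes day-of-year and HHMM time,
# so '2020245 1530' is day 245 of 2020 at 15:30, and
#   formatObservationDatetime('2020245 1530', '2020245 1830')
# returns ('2020-09-01 15:30:00', '2020-09-01 18:30:00', '3:00:00').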
def findShp(zfile):
'''
Check if the zipfile contains a shapefile and return the shapefile name
INPUT zfile: zipfile containing retrieved data from source url (string)
RETURN f: filename for the shapefile in zipfile (string)
'''
# loop through all files in the zipped file
with zipfile.ZipFile(zfile) as z:
for f in z.namelist():
# check if the file is a shapefile
'''
code_snippet_take_provide_cancel.py
This snippet is the code we used to analyze the activities in races. It decomposes the
active qty in races (qty traded and canceled) into 3 groups:
1. qty traded without an attempt to cancel
2. qty traded with a failed attempt to cancel
3. qty canceled successfully
for each firm. This is the input for Figure 4.3 Panel B (takes, cancels and liquidity
provision by firm group) and the Cancel Attempt Rate Columns in Table 4.11 (see also
code_snippet_cancel_activities.py). The code is specific to the LSE settings and may
not be applicable to other exchange message data directly.
We provide this code for the purpose of disclosing how we did the analysis and helping our users
reproduce the related results. Note that the coding convention (e.g., naming) in this script
is slightly different from the code in /PythonCode, because this is part of an earlier version of
our code.
To reproduce the results, users need to
1. Go through the code snippet carefully and understand the logic.
2. Adapt/rewrite the code to their context depending on the specific details of their setting.
Users can make use of this code snippet in this step.
3. Execute the code to obtain the active qty decomposition in races.
4. Compute Figure 4.3 Panel B data. Users have to do this by themselves.
(1) % Races won: This can be obtained directly from the Python race-level statistical data.
(2) % Successful taking in races: This can be obtained from the output of this snippet
(qty traded with a failed cancel + qty traded without an attempt to cancel)
(3) % Successful canceling in races: This can be obtained from the output of this snippet
(qty canceled successfully data).
(4) % Liquidity provided in races: This can be obtained from the trade level data. Please
refer to code_snippet_trade_level_data.py
5. Compute the number of races where a group of firms provide liquidity: This can be obtained
from the trade level data. Please refer to code_snippet_trade_level_data.py.
'''
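# A minimal sketch (not the paper's exact computation) of how the per-race output of
# decompose_active_qty (defined below) can be aggregated by firm group, assuming
# `race_recs` is the race-record DataFrame and `msgs`, `top_15`, `me` are prepared as
# in process_sym_date:
#
#   decomposed = race_recs.apply(
#       lambda race: decompose_active_qty(race, msgs, top_15, me), axis=1)
#   groups = ['top_1-3', 'top_4-6', 'top_7-10', 'top_11-15', 'nontop15']
#   # GBP successfully taken in races = traded without a cancel attempt
#   #                                   + traded despite a failed cancel attempt
#   take_gbp = {g: decomposed[f'no_cancel_{g}'].sum()
#                  + decomposed[f'cancel_fail_{g}'].sum() for g in groups}
#   # GBP successfully canceled in races
#   cancel_gbp = {g: decomposed[f'cancel_succ_{g}'].sum() for g in groups}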
###################################################################################
import pandas as pd
import numpy as np
import multiprocessing
import importlib
import os
import datetime
import warnings
import logging
from ast import literal_eval
import random
import os
from LatencyArbitrageAnalysis.utils.Dtypes import dtypes_msgs, dtypes_top
from LatencyArbitrageAnalysis.RaceDetection.Race_Msg_Outcome import get_msg_outcome
warnings.filterwarnings("ignore")
PrepData = importlib.import_module('04a_Prep_Race_Data')
now = datetime.datetime.now()
runtime = '%02d%02d%02d_%02d%02d' % (now.year, now.month, now.day, now.hour, now.minute)
logpath = '/data/proc/data_analysis/active_qty_decompose/logs/'
logger = logging.getLogger('decompose_active_qty')
logger.setLevel(logging.INFO)
handler = logging.FileHandler(logpath + f'/decompose_active_qty_{runtime}.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
def decompose_active_qty(race, msgs, top_15, me):
'''
Given a race, this function returns the decomposition of
the active qty (qty traded and cancelled) in the race
param:
race: a row in the race record dataset
msgs: msgs dataset (output of 4a)
top_15: list of firm ID of the top 15 firms.
me: outbound messages in the msgs dataset
output:
pd.Series. Decomposition of active qty in GBP
'''
# extract the information about the race, including the msgs in race,
# race price & side, and race starting time
race_msgs = msgs.loc[race['ix_race']]
race_start_time = race_msgs['MessageTimestamp'].iloc[0]
race_ID = race['SingleLvlRaceID']
S = race['S']
P_Signed = race['P']
race_msgs[f'{S}RaceRlvtMsgOutcome'] = get_msg_outcome(S, P_Signed, race_msgs, False) # Strict fail = False
Sign = 1 * (S == 'Ask') - 1 * (S == 'Bid')
price_factor = int(1e8)
P = Sign * P_Signed
P_GBP = (P/price_factor)/100
# initialize output data structure. The output is a row with following columns
output = {
# basic race information
'SingleLvlRaceID':race['SingleLvlRaceID'], 'RacePrice':P_GBP, 'Side': S,
# decompose active qty
'no_cancel_top_1-3':0,'no_cancel_top_4-6':0,'no_cancel_top_7-10':0,'no_cancel_top_11-15':0,'no_cancel_nontop15':0,
'cancel_succ_top_1-3':0,'cancel_succ_top_4-6':0,'cancel_succ_top_7-10':0,'cancel_succ_top_11-15':0,'cancel_succ_nontop15':0,
'cancel_fail_top_1-3':0, 'cancel_fail_top_4-6':0,'cancel_fail_top_7-10':0,'cancel_fail_top_11-15':0,'cancel_fail_nontop15':0,
# decompose active qty based on activity in 50 us
'no_cancel_top_1-3_in_50us':0,'no_cancel_top_4-6_in_50us':0,'no_cancel_top_7-10_in_50us':0,'no_cancel_top_11-15_in_50us':0,'no_cancel_nontop15_in_50us':0,
'cancel_succ_top_1-3_in_50us':0,'cancel_succ_top_4-6_in_50us':0,'cancel_succ_top_7-10_in_50us':0,'cancel_succ_top_11-15_in_50us':0,'cancel_succ_nontop15_in_50us':0,
'cancel_fail_top_1-3_in_50us':0, 'cancel_fail_top_4-6_in_50us':0,'cancel_fail_top_7-10_in_50us':0,'cancel_fail_top_11-15_in_50us':0,'cancel_fail_nontop15_in_50us':0
}
# get information about the firm group
# python indices are left-closed / right-open so [0:3] is elements 0, 1, 2.
top_list = {'top_11-15':top_15[10:], 'top_7-10':top_15[6:10], 'top_4-6':top_15[3:6], 'top_1-3':top_15[0:3]}
firm_group_map = {firm:firm_group for firm_group, firm_list in top_list.items() for firm in firm_list}
# get the set of uid of the cancel attempts in this race
cancel_uid = race_msgs.loc[(race_msgs[f'{S}RaceRlvtType']=='Cancel Attempt'), 'UniqueOrderID'].values
# get the set of uid of the cancel attempts in 50us in this race
cancel_uid_50us = race_msgs.loc[(race_msgs[f'{S}RaceRlvtType']=='Cancel Attempt') &
((race_msgs['MessageTimestamp'] - race_start_time).dt.total_seconds()<=0.00005)
,'UniqueOrderID'].values
# loop over race messages
for _, race_msg in race_msgs.iterrows():
# get the firm id, firm group, race message type and race outcome of the race message
firm = race_msg['FirmNum']
firm_group = firm_group_map[firm] if firm in firm_group_map.keys() else 'nontop15' # 'top 1-3', 'top 4-6' etc.
RaceRlvtType = race_msg[f'{S}RaceRlvtType'] # 'Cancel Attempt', 'Take Attempt'
outcome = race_msg[f'{S}RaceRlvtMsgOutcome'] # 'Success', 'Fail', 'Unknown'.
# if the message is a successful cancel, increment 'cancel_succ_{firm_group}' by GBP canceled
# There is a corner case where the cancel is "partially successful" -- say I have 1000 shares, get sniped for 800, cancel the last 200. This method
# will sometimes assign successful cancel to all 1000 shares: if the cancel inbound is received when I still have 1000 shares outstanding.
# this case should be rare because in a race a sniper should take all my shares
if RaceRlvtType == 'Cancel Attempt' and outcome == 'Success':
Qty = max(0, race_msg[f'{S}RaceRlvtQty'])
output[f'cancel_succ_{firm_group}'] += Qty * P_GBP
# in addition, if the successful cancel is within 50us,
# increment 'cancel_succ_{firm_group}_in_50us' by GBP canceled
if (race_msg['MessageTimestamp'] - race_start_time).total_seconds()<=0.00005:
output[f'cancel_succ_{firm_group}_in_50us'] += Qty * P_GBP
else:
# else, the firm did not try to cancel in 50 us.
# Hence, increment 'no_cancel_{firm_group}_in_50us' by GBP canceled
output[f'no_cancel_{firm_group}_in_50us'] += Qty * P_GBP
# if the message is a successful take
if RaceRlvtType == 'Take Attempt' and outcome == 'Success':
# if it is non-quote related, and the outbound messages of this inbound is not missing:
# Note: we do not deal with quote and packet loss here, because the code will be tedious and it does not
# improve the accuracy much since those are rare cases.
if (not race_msg['QuoteRelated']) and ((race_msg['UniqueOrderID'], race_msg['EventNum']) in me.index):
# get the messages in the event that the message belongs to
ev = me.loc[(race_msg['UniqueOrderID'], race_msg['EventNum'])]
# get the execution outbounds in the event
Exec_out_P = ev.loc[ev['UnifiedMessageType'].isin({'ME: Partial Fill (A)', 'ME: Full Fill (A)'}) &
(ev['ExecutedPrice'] == P)]
# loop over execution outbounds
for _, exec_msg in Exec_out_P.iterrows():
# take out the trade match id
trade_match_id = exec_msg['TradeMatchID']
# using the trade matching id, get the counterparty's execution outbound (i.e. the resting order's outbound)
# this works because the two execution outbounds in a trade share the same TradeMatchID
counterpart_msg = msgs[(msgs['TradeMatchID'] == trade_match_id) &
(msgs['TradePos'] != exec_msg['TradePos'])]
if counterpart_msg.shape[0] == 0:
continue
# get the firm id of the counterparty
counterpart_id = counterpart_msg['FirmNum'].iloc[0]
# get the unique order id for the counterparty's order
counterpart_unique_order_id = counterpart_msg['UniqueOrderID'].iloc[0]
# get counterparty's firm group based on its firm id
counterpart_firm_group = firm_group_map[counterpart_id] if counterpart_id in firm_group_map.keys() else 'nontop15'
# get the executed quantity of this trade
Qty_P = max(0,exec_msg['ExecutedQty'])
# if counterparty tried to cancel this order during the race
# i.e., the counterparty's unique order id is in cancel_uid
# this means he tried but failed to cancel the order.
# Hence increment 'cancel_fail_{counterpart_firm_group}' by GBP traded at P
if counterpart_unique_order_id in cancel_uid:
output[f'cancel_fail_{counterpart_firm_group}'] += Qty_P * P_GBP
else:
# else, the counterparty did not try to cancel the order during the race
# hence, increment 'no_cancel_{counterpart_firm_group}' by GBP traded at P
output[f'no_cancel_{counterpart_firm_group}'] += Qty_P * P_GBP
# increment 'cancel_fail_{counterpart_firm_group}_in_50us' and 'no_cancel_{counterpart_firm_group}_in_50us' similarly
if counterpart_unique_order_id in cancel_uid_50us:
output[f'cancel_fail_{counterpart_firm_group}_in_50us'] += Qty_P * P_GBP
else:
output[f'no_cancel_{counterpart_firm_group}_in_50us'] += Qty_P * P_GBP
return pd.Series(output)
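# Illustrative sketch (not part of the original pipeline): the loop above sorts each
# successful cancel into GBP-weighted buckets, split by whether the cancel inbound
# arrived within 50us of the race start. The firm group label, quantity, price and
# latency below are hypothetical values, used only to show how the counters move.
def _example_cancel_buckets():
    output = {'cancel_succ_top 1-3': 0.0,
              'cancel_succ_top 1-3_in_50us': 0.0,
              'no_cancel_top 1-3_in_50us': 0.0}
    qty, price_gbp, latency_s = 200, 25.0, 0.00003  # hypothetical trade and latency
    output['cancel_succ_top 1-3'] += qty * price_gbp
    if latency_s <= 0.00005:
        output['cancel_succ_top 1-3_in_50us'] += qty * price_gbp
    else:
        output['no_cancel_top 1-3_in_50us'] += qty * price_gbp
    return output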
def process_sym_date(symdate):
date, sym = symdate
try:
infile_msgs = '/data/proc/data/clean/%s/CleanMsgData_%s_%s.csv.gz' % (date, date, sym)
infile_top = '/data/proc/data/book/%s/BBO_%s_%s.csv.gz' % (date, date, sym)
infile_race_records = '/data/proc/output/race_stats/%s/%s/Race_Recs_%s_%s.pkl' % ('daily_500us', date, date, sym)
price_factor = int(1e8)
# This rank is from the firm dynamics analysis in the paper.
# Firms are sorted based on the proportion of races won in the whole sample
<reponame>nullstalgia/toltec<gh_stars>0
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2001 <NAME> <<EMAIL>>
# Andern Research Labs
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA. */
#
# Copyright 2001, <NAME> <<EMAIL>>
# Added reading in of packages.
# Added missing package information fields.
# Changed render_control() to __repr__().
#
# Current Issues:
# The API doesn't validate package information fields. It should be
# throwing exceptions in the right places.
# Executions of tar could silently fail.
# Executions of tar *do* fail, and loudly, because you have to specify a full filename,
# and tar complains if any files are missing, and the opkg spec doesn't require
# people to say "./control.tar.gz" or "./control" when they package files.
# It would be much better to require ./control or disallow ./control (either)
# rather than letting people pick. Some freedoms aren't worth their cost.
from __future__ import absolute_import
from __future__ import print_function
import tempfile
import os
import sys
import glob
import hashlib
import re
import subprocess
from stat import ST_SIZE
import arfile
import tarfile
import textwrap
import collections
def order(x):
if not x:
return 0
if x == "~":
return -1
if str.isdigit(x):
return 0
if str.isalpha(x):
return ord(x)
return 256 + ord(x)
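# Illustrative values (hypothetical, for reading the comparison below): order("~") == -1,
# so "~" sorts before the empty string; order("") == 0; letters map to their ASCII codes
# (order("a") == 97); any other character is pushed past the letters (order("+") == 299).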
class Version(object):
"""A class for holding parsed package version information."""
def __init__(self, epoch, version):
self.epoch = epoch
self.version = version
def _versioncompare(self, selfversion, refversion):
"""
Implementation below is a copy of the opkg version comparison algorithm
http://git.yoctoproject.org/cgit/cgit.cgi/opkg/tree/libopkg/pkg.c*n933
It alternates between numeric and non-numeric comparisons until a difference is found.
Digits are compared by value; other characters are ordered lexically via order() above.
One slight modification: the original version can return any value, whereas this one is limited to -1, 0, +1.
"""
if not selfversion: selfversion = ""
if not refversion: refversion = ""
value = list(selfversion)
ref = list(refversion)
while value or ref:
first_diff = 0
# alphanumeric comparison
while (value and not str.isdigit(value[0])) or (ref and not str.isdigit(ref[0])):
vc = order(value.pop(0) if value else None)
rc = order(ref.pop(0) if ref else None)
if vc != rc:
return -1 if vc < rc else 1
# comparing numbers
# start by skipping 0
while value and value[0] == "0":
value.pop(0)
while ref and ref[0] == "0":
ref.pop(0)
# actual number comparison
while value and str.isdigit(value[0]) and ref and str.isdigit(ref[0]):
if not first_diff:
first_diff = int(value.pop(0)) - int(ref.pop(0))
else:
value.pop(0)
ref.pop(0)
# the one that has a value remaining was the highest number
if value and str.isdigit(value[0]):
return 1
if ref and str.isdigit(ref[0]):
return -1
# in case of equal length numbers look at the first diff
if first_diff:
return 1 if first_diff > 0 else -1
return 0
def compare(self, ref):
if (self.epoch > ref.epoch):
return 1
elif (self.epoch < ref.epoch):
return -1
else:
self_ver_comps = re.match(r"(.+?)(-r.+)?$", self.version)
ref_ver_comps = re.match(r"(.+?)(-r.+)?$", ref.version)
#print((self_ver_comps.group(1), self_ver_comps.group(2)))
#print((ref_ver_comps.group(1), ref_ver_comps.group(2)))
r = self._versioncompare(self_ver_comps.group(1), ref_ver_comps.group(1))
if r == 0:
r = self._versioncompare(self_ver_comps.group(2), ref_ver_comps.group(2))
#print("compare: %s vs %s = %d" % (self, ref, r))
return r
def __str__(self):
return str(self.epoch) + ":" + self.version
def parse_version(versionstr):
epoch = 0
# check for epoch
m = re.match('([0-9]*):(.*)', versionstr)
if m:
(epochstr, versionstr) = m.groups()
epoch = int(epochstr)
return Version(epoch, versionstr)
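# Minimal usage sketch (illustrative, with hypothetical version strings): parse_version()
# splits an optional "epoch:" prefix, and Version.compare() applies the opkg ordering above.
def _example_version_ordering():
    older = parse_version("1:1.2.3-r1")
    newer = parse_version("1:1.2.10-r0")
    # Equal epochs, so the upstream parts are compared; the numeric segment 3 < 10,
    # hence compare() returns -1 ("older" sorts before "newer").
    return older.compare(newer)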
class Package(object):
"""A class for creating objects to manipulate (e.g. create) opkg
packages."""
# fn: Package file path
# relpath: If this argument is set, the file path is given relative to this
# path when a string representation of the Package object is created. If
# this argument is not set, the basename of the file path is given.
def __init__(self, fn=None, relpath=None, all_fields=None):
self.package = None
self.version = 'none'
self.parsed_version = None
self.architecture = None
self.maintainer = None
self.source = None
self.description = None
self.depends = None
self.provides = None
self.replaces = None
self.conflicts = None
self.recommends = None
self.suggests = None
self.section = None
self.filename_header = None
self.file_list = []
# md5, sha256 and size are lazy attributes, computed on demand
#self.md5 = None
#self.size = None
self.installed_size = None
self.filename = None
self.file_ext_opk = "ipk"
self.homepage = None
self.oe = None
self.priority = None
self.tags = None
self.fn = fn
self.license = None
self.user_defined_fields = collections.OrderedDict()
if fn:
# see if it is deb format
f = open(fn, "rb")
if relpath:
self.filename = os.path.relpath(fn, relpath)
else:
self.filename = os.path.basename(fn)
## sys.stderr.write(" extracting control.tar.gz from %s\n"% (fn,))
if tarfile.is_tarfile(fn):
tar = tarfile.open(fn, "r", f)
tarStream = tar.extractfile("./control.tar.gz")
else:
ar = arfile.ArFile(f, fn)
tarStream = ar.open("control.tar.gz")
tarf = tarfile.open("control.tar.gz", "r", tarStream)
try:
control = tarf.extractfile("control")
except KeyError:
control = tarf.extractfile("./control")
try:
self.read_control(control, all_fields)
except TypeError as e:
sys.stderr.write("Cannot read control file '%s' - %s\n" % (fn, e))
control.close()
self.scratch_dir = None
self.file_dir = None
self.meta_dir = None
def __getattr__(self, name):
if name == "md5":
self._computeFileMD5()
return self.md5
elif name == "sha256":
self._computeFileSHA256()
return self.sha256
elif name == 'size':
return self._get_file_size()
else:
raise AttributeError(name)
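    # Illustrative note: the first access to e.g. pkg.md5 misses the instance dict, lands in
    # __getattr__ above, and triggers _computeFileMD5(); the result is then stored on the
    # instance, so later accesses bypass __getattr__ entirely. sha256 and size behave the same way.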
def _computeFileMD5(self):
# compute the MD5.
if not self.fn:
self.md5 = 'Unknown'
else:
f = open(self.fn, "rb")
sum = hashlib.md5()
while True:
data = f.read(1024)
if not data: break
sum.update(data)
f.close()
self.md5 = sum.hexdigest()
def _computeFileSHA256(self):
# compute the SHA256.
if not self.fn:
self.sha256 = 'Unknown'
else:
f = open(self.fn, "rb")
sum = hashlib.sha256()
while True:
data = f.read(1024)
if not data: break
sum.update(data)
f.close()
self.sha256 = sum.hexdigest()
def _get_file_size(self):
if not self.fn:
self.size = 0
else:
stat = os.stat(self.fn)
self.size = stat[ST_SIZE]
return int(self.size)
def read_control(self, control, all_fields=None):
import os
line = control.readline()
while 1:
if not line: break
# Decode if stream has byte strings
if not isinstance(line, str):
line = line.decode()
line = line.rstrip()
lineparts = re.match(r'([\w-]*?):\s*(.*)', line)
if lineparts:
name = lineparts.group(1)
name_lowercase = name.lower()
value = lineparts.group(2)
while 1:
line = control.readline().rstrip()
if not line: break
if line[0] != ' ': break
value = value + '\n' + line
if name_lowercase == 'size':
self.size = int(value)
elif name_lowercase == 'md5sum':
self.md5 = value
elif name_lowercase == 'sha256sum':
self.sha256 = value
elif name_lowercase in self.__dict__:
self.__dict__[name_lowercase] = value
elif all_fields:
self.user_defined_fields[name] = value
else:
print("Lost field %s, %s" % (name,value))
pass
if line and line[0] == '\n':
return # consumes one blank line at end of package description
else:
line = control.readline()
pass
return
def _setup_scratch_area(self):
self.scratch_dir = "%s/%sopkg" % (tempfile.gettempdir(),
tempfile.gettempprefix())
self.file_dir = "%s/files" % (self.scratch_dir)
self.meta_dir = "%s/meta" % (self.scratch_dir)
os.mkdir(self.scratch_dir)
os.mkdir(self.file_dir)
os.mkdir(self.meta_dir)
def set_package(self, package):
self.package = package
def get_package(self):
return self.package
def set_version(self, version):
self.version = version
self.parsed_version = parse_version(version)
def get_version(self):
return self.version
def set_architecture(self, architecture):
self.architecture = architecture
def get_architecture(self):
return self.architecture
def set_maintainer(self, maintainer):
self.maintainer = maintainer
def get_maintainer(self):
return self.maintainer
def set_source(self, source):
self.source = source
def get_source(self):
return self.source
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def set_depends(self, depends):
self.depends = depends
def get_depends(self, depends):
return self.depends
def set_provides(self, provides):
self.provides = provides
def get_provides(self, provides):
return self.provides
def set_replaces(self, replaces):
self.replaces = replaces
def get_replaces(self, replaces):
return self.replaces
def set_conflicts(self, conflicts):
self.conflicts = conflicts
def get_conflicts(self, conflicts):
return self.conflicts
def set_suggests(self, suggests):
self.suggests = suggests
def get_suggests(self, suggests):
return self.suggests
def set_section(self, section):
self.section = section
def get_section(self, section):
return self.section
def set_license(self, license):
self.license = license
def get_license(self, license):
return self.license
def get_file_list_dir(self, directory):
def check_output(*popenargs, **kwargs):
"""Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on | |
json.loads(process_info['infos'])
indexes = process_params['indexes']
quality_band = process_params['quality_band']
functions = [process_params['composite_function'], 'IDT']
satellite = process_info['metadata']['platform']['code']
mask = process_info.get('mask', None)
tiles = params['tiles']
start_date = datetime.strptime(params['start_date'], '%Y-%m-%d').strftime('%Y-%m-%d')
end_date = datetime.strptime(params['end_date'], '%Y-%m-%d').strftime('%Y-%m-%d') \
if params.get('end_date') else datetime.now().strftime('%Y-%m-%d')
# verify cube info
cube_infos = Collection.query().filter(
Collection.id == process_info['datacube_id']
).first()
cube_infos_irregular = Collection.query().filter(
Collection.id == process_info['irregular_datacube_id']
).first()
if not cube_infos or not cube_infos_irregular:
return 'Cube not found!', 404
# get bands list
bands = Band.query().filter(
Band.collection_id == cube_infos_irregular.id
).all()
bands_list = []
indexes_list = []
for band in bands:
if band.name.upper() not in [i['common_name'].upper() for i in indexes]:
bands_list.append(band.name)
else:
indexes_available = {
'NDVI': ['NIR', 'RED'],
'EVI': ['NIR', 'RED', 'BLUE']
}
if not indexes_available.get(band.name.upper()):
return 'Index not available', 400
index = dict(
name=band.name,
bands=[
dict(
name=b.name,
common_name=b.common_name
) for b in bands \
if b.common_name.upper() in indexes_available[band.name.upper()]
]
)
if len(index['bands']) != len(indexes_available[band.name.upper()]):
return 'bands {} are needed to create the {} index'.format(
','.join(indexes_available[band.name.upper()]), band.name), 400
indexes_list.append(index)
# get quicklook bands
bands_ql = Quicklook.query().filter(
Quicklook.collection_id == cube_infos_irregular.id
).first()
bands_ql_list = [
list(filter(lambda b: b.id == bands_ql.red, bands))[0].name,
list(filter(lambda b: b.id == bands_ql.green, bands))[0].name,
list(filter(lambda b: b.id == bands_ql.blue, bands))[0].name
]
cub_ref = cube_infos or cube_infos_irregular
# items => old mosaic
# orchestrate
shape = params.get('shape', None)
self.score['items'] = orchestrate(cub_ref, tiles, start_date, end_date, functions, shape)
# prepare merge
crs = cube_infos.grs.crs
formatted_version = format_version(cube_infos.version)
prepare_merge(self, cube_infos['name'], params['collections'], satellite, bands_list,
indexes_list, bands_ql_list, float(bands[0].resolution_x), float(bands[0].resolution_y),
int(bands[0].nodata), crs, quality_band, functions, formatted_version,
params.get('force', False), mask)
return dict(
message='Processing started successfully'
), 201
@classmethod
def create_grs_schema(cls, name, description, projection, meridian, degreesx, degreesy, bbox, srid=SRID_ALBERS_EQUAL_AREA):
"""Create a Brazil Data Cube Grid Schema."""
bbox = bbox.split(',')
bbox_obj = {
"w": float(bbox[0]),
"n": float(bbox[1]),
"e": float(bbox[2]),
"s": float(bbox[3])
}
tile_srs_p4 = "+proj=longlat +ellps=GRS80 +datum=GRS80 +no_defs"
if projection == 'aea':
tile_srs_p4 = "+proj=aea +lat_0=-12 +lon_0={} +lat_1=-2 +lat_2=-22 +x_0=5000000 +y_0=10000000 +ellps=GRS80 +units=m +no_defs".format(meridian)
elif projection == 'sinu':
tile_srs_p4 = "+proj=sinu +lon_0={} +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs".format(meridian)
# Number of tiles and base tile
num_tiles_x = int(360. / degreesx)
num_tiles_y = int(180. / degreesy)
h_base = num_tiles_x / 2
v_base = num_tiles_y / 2
# Tile size in meters (dx,dy) at center of system (argsmeridian,0.)
src_crs = '+proj=longlat +ellps=GRS80 +datum=GRS80 +no_defs'
dst_crs = tile_srs_p4
xs = [(meridian - degreesx / 2), (meridian + degreesx / 2), meridian, meridian, 0.]
ys = [0., 0., -degreesy / 2, degreesy / 2, 0.]
out = transform(src_crs, dst_crs, xs, ys, zs=None)
x1 = out[0][0]
x2 = out[0][1]
y1 = out[1][2]
y2 = out[1][3]
dx = x2 - x1
dy = y2 - y1
# Coordinates of WRS center (0.,0.) - top left coordinate of (h_base,v_base)
x_center = out[0][4]
y_center = out[1][4]
# Border coordinates of WRS grid
x_min = x_center - dx * h_base
y_max = y_center + dy * v_base
# Upper Left is (xl,yu) Bottom Right is (xr,yb)
xs = [bbox_obj['w'], bbox_obj['e'], meridian, meridian]
ys = [0., 0., bbox_obj['n'], bbox_obj['s']]
out = transform(src_crs, dst_crs, xs, ys, zs=None)
xl = out[0][0]
xr = out[0][1]
yu = out[1][2]
yb = out[1][3]
h_min = int((xl - x_min) / dx)
h_max = int((xr - x_min) / dx)
v_min = int((y_max - yu) / dy)
v_max = int((y_max - yb) / dy)
tiles = []
features = []
dst_crs = '+proj=longlat +ellps=GRS80 +datum=GRS80 +no_defs'
src_crs = tile_srs_p4
for ix in range(h_min, h_max+1):
x1 = x_min + ix*dx
x2 = x1 + dx
for iy in range(v_min, v_max+1):
y1 = y_max - iy*dy
y2 = y1 - dy
# Evaluate the bounding box of tile in longlat
xs = [x1, x2, x2, x1]
ys = [y1, y1, y2, y2]
out = transform(src_crs, dst_crs, xs, ys, zs=None)
polygon = from_shape(
Polygon(
[
(x1, y2),
(x2, y2),
(x2, y1),
(x1, y1),
(x1, y2)
]
),
srid=srid
)
# Insert tile
tile_name = '{0:03d}{1:03d}'.format(ix, iy)
tiles.append(dict(
name=tile_name
))
features.append(dict(
tile=tile_name,
geom=polygon
))
with db.session.begin_nested():
crs = CRS.from_proj4(tile_srs_p4)
data = dict(
auth_name='Albers Equal Area',
auth_srid=srid,
srid=srid,
srtext=crs.to_wkt(),
proj4text=tile_srs_p4
)
spatial_index, _ = get_or_create_model(SpatialRefSys, defaults=data, srid=srid)
grs = GridRefSys.create_geometry_table(table_name=name, features=features, srid=srid)
grs.description = description
db.session.add(grs)
[db.session.add(Tile(**tile, grs=grs)) for tile in tiles]
db.session.commit()
return dict(
message='Grid {} created successfully'.format(name)
), 201
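    # Worked sizing example (hypothetical grid parameters): with degreesx = 1.5 and degreesy = 1.0
    # the grid has 360/1.5 = 240 columns and 180/1.0 = 180 rows, so the reference tile
    # (h_base, v_base) = (120, 90) sits at the meridian/equator crossing; the requested bbox is
    # then converted to (h_min..h_max, v_min..v_max) index ranges before the tile polygons are built.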
@classmethod
def list_grs_schemas(cls):
"""Retrieve a list of available Grid Schema on Brazil Data Cube database."""
schemas = GridRefSys.query().all()
return [dict(**Serializer.serialize(schema), crs=schema.crs) for schema in schemas], 200
@classmethod
def get_grs_schema(cls, grs_id):
"""Retrieves a Grid Schema definition with tiles associated."""
schema = GridRefSys.query().filter(GridRefSys.id == grs_id).first()
if schema is None:
return 'GRS {} not found.'.format(grs_id), 404
geom_table = schema.geom_table
tiles = db.session.query(
geom_table.c.tile,
func.ST_AsGeoJSON(func.ST_Transform(geom_table.c.geom, 4326), 6, 3).cast(sqlalchemy.JSON).label('geom_wgs84')
).all()
dump_grs = Serializer.serialize(schema)
dump_grs['tiles'] = [dict(id=t.tile, geom_wgs84=t.geom_wgs84) for t in tiles]
return dump_grs, 200
def list_cubes(self):
"""Retrieve the list of data cubes from Brazil Data Cube database."""
cubes = Collection.query().filter(Collection.collection_type == 'cube').all()
serializer = CollectionForm()
list_cubes = []
for cube in cubes:
cube_dict = serializer.dump(cube)
not_done = 0
sum_acts = 0
error = 0
if cube.composite_function.alias != 'IDT':
activities = self.services.get_activities_by_datacube(cube.name)
not_done = len(list(filter(lambda i: i['mystatus'] == 'NOTDONE', activities)))
error = len(list(filter(lambda i: i['mystatus'] == 'ERROR', activities)))
sum_acts += len(activities)
parts = get_cube_parts(cube.name)
data_cube_identity = '_'.join(parts[:2])
activities = self.services.get_activities_by_datacube(data_cube_identity)
not_done_identity = len(list(filter(lambda i: i['mystatus'] == 'NOTDONE', activities)))
error_identity = len(list(filter(lambda i: i['mystatus'] == 'ERROR', activities)))
sum_acts += len(activities)
cube_dict['status'] = 'Pending'
if sum_acts > 0:
sum_not_done = not_done + not_done_identity
sum_errors = error + error_identity
cube_dict['status'] = 'Error' if sum_errors > 0 else 'Finished' \
if (sum_not_done + sum_errors) == 0 else 'Pending'
list_cubes.append(cube_dict)
return list_cubes, 200
def get_cube(self, cube_id: int):
cube = self.get_cube_or_404(cube_id)
dump_cube = Serializer.serialize(cube)
dump_cube['bands'] = [Serializer.serialize(b) for b in cube.bands]
dump_cube['quicklook'] = [
list(filter(lambda b: b.id == cube.quicklook[0].red, cube.bands))[0].name,
list(filter(lambda b: b.id == cube.quicklook[0].green, cube.bands))[0].name,
list(filter(lambda b: b.id == cube.quicklook[0].blue, cube.bands))[0].name
]
dump_cube['extent'] = None
dump_cube['grid'] = cube.grs.name
dump_cube['composite_function'] = cube.composite_function.name
return dump_cube, 200
@classmethod
def list_tiles_cube(cls, cube_id: int, only_ids=False):
"""Retrieve all tiles (as GeoJSON) that belongs to a data cube."""
features = db.session.query(
Item.tile_id,
Tile,
func.ST_AsGeoJSON(Item.geom, 6, 3).cast(sqlalchemy.JSON).label('geom')
).distinct(Item.tile_id).filter(Item.collection_id == cube_id, Item.tile_id == Tile.id).all()
return [feature.Tile.name if only_ids else feature.geom for feature in features], 200
@classmethod
def list_composite_functions(cls):
"""Retrieve a list of available Composite Functions on Brazil Data Cube database."""
schemas = CompositeFunction.query().all()
return [Serializer.serialize(schema) for schema in schemas], 200
def create_bucket(self, name, requester_pay):
service = self.services
status = service.create_bucket(name, requester_pay)
if not status:
return dict(
message='Bucket {} already exists.'.format(name)
), 409
return dict(
message='Bucket created successfully'
), 201
def list_buckets(self):
"""Retrieve a list of available bucket in aws account."""
buckets = self.services.list_repositories()
return buckets, 200
def check_for_invalid_merges(self, cube_id: str, tile_id: str, start_date: str, end_date: str) -> Tuple[dict, int]:
"""List merge files used in data cube and check for invalid scenes.
Args:
cube_id: Data cube identifier
tile_id: Brazil Data Cube Tile identifier
start_date: Activity start date (period)
end_date: Activity end date (period)
Returns:
List of Images used in period
"""
cube = self.get_cube_or_404(cube_id)
items = self.services.get_merges(cube.name, tile_id, start_date[:10], end_date[:10])
result = validate_merges(items)
return result, 200
def list_cube_items(self, cube_id: str, bbox: str = None, start: str = None,
end: str = None, tiles: str = None, page: int = 1, per_page: int = 10):
"""Retrieve all data cube items done."""
cube = self.get_cube_or_404(cube_id=cube_id)
where = [
Item.collection_id == cube.id,
Tile.id == Item.tile_id
]
# temporal filter
if start:
where.append(Item.start_date >= start)
if end:
where.append(Item.end_date <= end)
# tile(string) filter
if tiles:
tiles = tiles.split(',') if isinstance(tiles, str) else tiles
where.append(
Tile.name.in_(tiles)
)
# spatial filter
if bbox:
xmin, ymin, xmax, ymax = [float(coord) for coord in bbox.split(',')]
where.append(
func.ST_Intersects(
func.ST_SetSRID(Item.geom, 4326), func.ST_MakeEnvelope(xmin, ymin, xmax, ymax, 4326)
)
)
paginator = db.session.query(Item).filter(
*where
).order_by(Item.start_date.desc()).paginate(int(page), int(per_page), error_out=False)
result = []
for item in paginator.items:
obj = Serializer.serialize(item)
obj['geom'] = None
obj['min_convex_hull'] = None
obj['tile_id'] = item.tile.name
if item.assets.get('thumbnail'):
obj['quicklook'] = item.assets['thumbnail']['href']
del obj['assets']
result.append(obj)
return dict(
items=result,
page=page,
per_page=per_page,
total_items=paginator.total,
total_pages=paginator.pages
), 200
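    # Illustrative call (hypothetical instance name and values): listing the first page of items
    # for one tile inside a WGS84 bounding box given as "xmin,ymin,xmax,ymax":
    #   controller.list_cube_items(cube_id='7', bbox='-54.0,-12.5,-53.0,-11.5',
    #                              start='2020-01-01', end='2020-12-31', tiles='011009', page=1)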
def generate_periods(self, schema: str, step: | |
How much to adjust the brightness. Can be
any non-negative number. 0 gives a black image, 1 gives the
original image while 2 increases the brightness by a factor of 2.
Returns:
PIL.Image|np.array|paddle.Tensor: Brightness adjusted image.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import functional as F
fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')
fake_img = Image.fromarray(fake_img)
converted_img = F.adjust_brightness(fake_img, 0.4)
print(converted_img.size)
"""
if not (_is_pil_image(img) or _is_numpy_image(img) or
_is_tensor_image(img)):
raise TypeError(
'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.
format(type(img)))
if _is_pil_image(img):
return F_pil.adjust_brightness(img, brightness_factor)
elif _is_numpy_image(img):
return F_cv2.adjust_brightness(img, brightness_factor)
else:
return F_t.adjust_brightness(img, brightness_factor)
def adjust_contrast(img, contrast_factor):
"""Adjusts contrast of an Image.
Args:
img (PIL.Image|np.array|paddle.Tensor): Image to be adjusted.
contrast_factor (float): How much to adjust the contrast. Can be any
non-negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
PIL.Image|np.array|paddle.Tensor: Contrast adjusted image.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import functional as F
fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')
fake_img = Image.fromarray(fake_img)
converted_img = F.adjust_contrast(fake_img, 0.4)
print(converted_img.size)
"""
if not (_is_pil_image(img) or _is_numpy_image(img) or
_is_tensor_image(img)):
raise TypeError(
'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.
format(type(img)))
if _is_pil_image(img):
return F_pil.adjust_contrast(img, contrast_factor)
elif _is_numpy_image(img):
return F_cv2.adjust_contrast(img, contrast_factor)
else:
return F_t.adjust_contrast(img, contrast_factor)
def adjust_saturation(img, saturation_factor):
"""Adjusts color saturation of an image.
Args:
img (PIL.Image|np.array|paddle.Tensor): Image to be adjusted.
saturation_factor (float): How much to adjust the saturation. 0 will
give a black and white image, 1 will give the original image while
2 will enhance the saturation by a factor of 2.
Returns:
PIL.Image|np.array|paddle.Tensor: Saturation adjusted image.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import functional as F
fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')
fake_img = Image.fromarray(fake_img)
converted_img = F.adjust_saturation(fake_img, 0.4)
print(converted_img.size)
"""
if not (_is_pil_image(img) or _is_numpy_image(img) or
_is_tensor_image(img)):
raise TypeError(
'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.
format(type(img)))
if _is_pil_image(img):
return F_pil.adjust_saturation(img, saturation_factor)
elif _is_numpy_image(img):
return F_cv2.adjust_saturation(img, saturation_factor)
else:
return F_t.adjust_saturation(img, saturation_factor)
def adjust_hue(img, hue_factor):
"""Adjusts hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
Args:
img (PIL.Image|np.array|paddle.Tensor): Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
PIL.Image|np.array|paddle.Tensor: Hue adjusted image.
Examples:
.. code-block:: python
import numpy as np
from PIL import Image
from paddle.vision.transforms import functional as F
fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8')
fake_img = Image.fromarray(fake_img)
converted_img = F.adjust_hue(fake_img, 0.4)
print(converted_img.size)
"""
if not (_is_pil_image(img) or _is_numpy_image(img) or
_is_tensor_image(img)):
raise TypeError(
'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.
format(type(img)))
if _is_pil_image(img):
return F_pil.adjust_hue(img, hue_factor)
elif _is_numpy_image(img):
return F_cv2.adjust_hue(img, hue_factor)
else:
return F_t.adjust_hue(img, hue_factor)
def _get_affine_matrix(center, angle, translate, scale, shear):
# Affine matrix is: M = T * C * RotateScaleShear * C^-1
# The inverse one is: M^-1 = C * RotateScaleShear^-1 * C^-1 * T^-1
rot = math.radians(angle)
sx = math.radians(shear[0])
sy = math.radians(shear[1])
# Rotate and Shear without scaling
a = math.cos(rot - sy) / math.cos(sy)
b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
c = math.sin(rot - sy) / math.cos(sy)
d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)
# Center Translation
cx, cy = center
tx, ty = translate
# Inverted rotation matrix with scale and shear
# det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
matrix = [d, -b, 0.0, -c, a, 0.0]
matrix = [x / scale for x in matrix]
# Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)
# Apply center translation: C * RSS^-1 * C^-1 * T^-1
matrix[2] += cx
matrix[5] += cy
return matrix
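# Sanity check of the math above (illustrative, hypothetical center values): with angle=0,
# translate=(0, 0), scale=1 and shear=(0, 0), the rotate/shear block reduces to a=d=1, b=c=0,
# the translation corrections cancel, and the returned inverse affine matrix is numerically
# the identity [1, 0, 0, 0, 1, 0] for any center (cx, cy).
def _example_identity_affine_matrix(center=(100.0, 50.0)):
    # hypothetical center; any values give the same identity result
    return _get_affine_matrix(center, angle=0.0, translate=(0.0, 0.0),
                              scale=1.0, shear=(0.0, 0.0))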
def affine(img,
angle,
translate,
scale,
shear,
interpolation="nearest",
fill=0,
center=None):
"""Apply affine transformation on the image.
Args:
img (PIL.Image|np.array|paddle.Tensor): Image to be affined.
angle (int|float): The angle of the random rotation in clockwise order.
translate (list[float]): Maximum absolute fraction for horizontal and vertical translations.
scale (float): Scale factor for the image, scale should be positive.
shear (list[float]): Shear angle values which are parallel to the x-axis and y-axis in clockwise order.
interpolation (str, optional): Interpolation method. If omitted, or if the
image has only one channel, it is set to PIL.Image.NEAREST or cv2.INTER_NEAREST
according to the backend.
When using the PIL backend, supported methods are as follows:
- "nearest": Image.NEAREST,
- "bilinear": Image.BILINEAR,
- "bicubic": Image.BICUBIC
When using the cv2 backend, supported methods are as follows:
- "nearest": cv2.INTER_NEAREST,
- "bilinear": cv2.INTER_LINEAR,
- "bicubic": cv2.INTER_CUBIC
fill (int|list|tuple, optional): Pixel fill value for the area outside the transformed
image. If given a number, the same value is used for all bands.
center (2-tuple, optional): Optional center of rotation, (x, y).
Origin is the upper left corner.
Default is the center of the image.
Returns:
PIL.Image|np.array|paddle.Tensor: Affine Transformed image.
Examples:
.. code-block:: python
import paddle
from paddle.vision.transforms import functional as F
fake_img = paddle.randn((3, 256, 300)).astype(paddle.float32)
affined_img = F.affine(fake_img, 45, translate=[0.2, 0.2], scale=0.5, shear=[-10, 10])
print(affined_img.shape)
"""
if not (_is_pil_image(img) or _is_numpy_image(img) or
_is_tensor_image(img)):
raise TypeError(
'img should be PIL Image or Tensor Image or ndarray with dim=[2 or 3]. Got {}'.
format(type(img)))
if not isinstance(angle, (int, float)):
raise TypeError("Argument angle should be int or float")
if not isinstance(translate, (list, tuple)):
raise TypeError("Argument translate should be a sequence")
if len(translate) != 2:
raise ValueError("Argument translate should be a sequence of length 2")
if scale <= 0.0:
raise ValueError("Argument scale should be positive")
if not isinstance(shear, (numbers.Number, (list, tuple))):
raise TypeError(
"Shear should be either a single value or a sequence of two values")
if not isinstance(interpolation, str):
raise TypeError("Argument interpolation should be a string")
if isinstance(angle, int):
angle = float(angle)
if isinstance(translate, tuple):
translate = list(translate)
if isinstance(shear, numbers.Number):
shear = [shear, 0.0]
if isinstance(shear, tuple):
shear = list(shear)
if len(shear) == 1:
shear = [shear[0], shear[0]]
if len(shear) != 2:
raise ValueError(
f"Shear should be a sequence containing two values. Got {shear}")
if center is not None and not isinstance(center, (list, tuple)):
raise TypeError("Argument center should be a sequence")
if _is_pil_image(img):
width, height = img.size
# center = (width * 0.5 + 0.5, height * 0.5 + 0.5)
# it is visually better to estimate the center without 0.5 offset
# otherwise image rotated by 90 degrees is shifted vs output image of F_t.affine
if center is None:
center = [width * 0.5, height * 0.5]
matrix = _get_affine_matrix(center, angle, translate, scale, shear)
return F_pil.affine(img, matrix, interpolation, fill)
if _is_numpy_image(img):
# get affine_matrix in F_cv2.affine() using cv2's functions
width, height = img.shape[0:2]
# center = (width * 0.5 + 0.5, height * 0.5 + 0.5)
# it is visually better to estimate the center without 0.5 offset
# otherwise image rotated by 90 degrees is shifted vs output image of F_t.affine
if center is None:
center = (width * 0.5, | |
<reponame>guiyaocheng/DeepRL-InformationExtraction<filename>code/myserver.py
import zmq, time
import numpy as np
import copy
import sys, json, pdb, pickle, operator, collections
import helper
import predict as predict
from train import load_data
from itertools import izip
import inflect
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
import argparse
from random import shuffle
from operator import itemgetter
import matplotlib.pyplot as plt
from nltk.tokenize import word_tokenize
from classifier import Classifier
import constants
import re
DEBUG = False
ANALYSIS = False
COUNT_ZERO = False
#Global variables
int2tags = constants.int2tags
NUM_ENTITIES = len(int2tags)
NUM_QUERY_TYPES = NUM_ENTITIES + 1
WORD_LIMIT = 1000
CONTEXT_LENGTH = 3
CONTEXT_TYPE = None
STATE_SIZE = 4*NUM_ENTITIES+1 + 2*CONTEXT_LENGTH*NUM_ENTITIES
STOP_ACTION = NUM_ENTITIES
IGNORE_ALL = STOP_ACTION + 1
ACCEPT_ALL = 999 #arbitrary
trained_model = None
tfidf_vectorizer = TfidfVectorizer()
inflect_engine = inflect.engine()
def dd():
return {}
def ddd():
return collections.defaultdict(dd)
#global caching to speed up
# TRAIN_TFIDF_MATRICES = {}
# TRAIN_ENTITIES = collections.defaultdict(dd)
# TRAIN_CONFIDENCES = collections.defaultdict(dd)
# TEST_TFIDF_MATRICES = {}
# TEST_ENTITIES = collections.defaultdict(dd)
# TEST_CONFIDENCES = collections.defaultdict(dd)
TRAIN_COSINE_SIM = collections.defaultdict(dd)
TRAIN_ENTITIES = collections.defaultdict(ddd)
TRAIN_CONFIDENCES = collections.defaultdict(ddd)
TRAIN_CONTEXT = collections.defaultdict(ddd) #final value will be a vector
TEST_COSINE_SIM = collections.defaultdict(dd)
TEST_ENTITIES = collections.defaultdict(ddd)
TEST_CONFIDENCES = collections.defaultdict(ddd)
TEST_CONTEXT = collections.defaultdict(ddd) #final value will be a vector
CORRECT = collections.defaultdict(lambda:0.)
GOLD = collections.defaultdict(lambda:0.)
PRED = collections.defaultdict(lambda:0.)
EVALCONF = collections.defaultdict(lambda:[])
EVALCONF2 = collections.defaultdict(lambda:[])
QUERY = collections.defaultdict(lambda:0.)
ACTION = collections.defaultdict(lambda:0.)
CHANGES = 0
evalMode = False
STAT_POSITIVE, STAT_NEGATIVE = 0, 0 #stat. sign.
CONTEXT = None
def splitBars(w):
return [q.strip() for q in w.split('|')]
#Environment for each episode
class Environment:
def __init__(self, originalArticle, newArticles, goldEntities, indx, args, evalMode):
self.indx = indx
self.originalArticle = originalArticle
self.newArticles = newArticles #extra articles to process
self.goldEntities = goldEntities
self.ignoreDuplicates = args.ignoreDuplicates
self.entity = args.entity
self.aggregate = args.aggregate
self.delayedReward = args.delayedReward
self.shooterLenientEval = args.shooterLenientEval
self.listNum = 0 #start off with first list
self.rlbasicEval = args.rlbasicEval
self.rlqueryEval = args.rlqueryEval
self.shuffledIndxs = [range(len(q)) for q in self.newArticles]
if not evalMode and args.shuffleArticles:
for q in self.shuffledIndxs:
shuffle(q)
self.state = [0 for i in range(STATE_SIZE)]
self.terminal = False
self.bestEntities = collections.defaultdict(lambda:'') #current best entities
self.bestConfidences = collections.defaultdict(lambda:0.)
self.bestEntitySet = None
if self.aggregate == 'majority':
self.bestEntitySet = collections.defaultdict(lambda:[])
self.bestIndex = (0,0)
self.prevListNum = 0
self.prevArticleIndx = 0
# to keep track of extracted values from previousArticle
# start off with list 0 always
###################################
# if 0 in ENTITIES[self.indx][0]:
if len(ENTITIES[self.indx][0])>0:
####
self.prevEntities, self.prevConfidences = ENTITIES[self.indx][0][0], CONFIDENCES[self.indx][0][0]
else:
self.prevEntities, self.prevConfidences = self.extractEntitiesWithConfidences(self.originalArticle)
ENTITIES[self.indx][0][0] = self.prevEntities
CONFIDENCES[self.indx][0][0] = self.prevConfidences
#store the original entities before updating state
self.originalEntities = self.prevEntities
#calculate tf-idf similarities using all the articles related to the original
self.allArticles = [originalArticle] + [item for sublist in self.newArticles for item in sublist]
self.allArticles = [' '.join(q) for q in self.allArticles]
############################################
# if self.indx not in COSINE_SIM:
# # self.tfidf_matrix = TFIDF_MATRICES[0][self.indx]
# self.tfidf_matrix = tfidf_vectorizer.fit_transform(self.allArticles)
# cnt = 0
# for listNum, sublist in enumerate(self.newArticles):
# COSINE_SIM[self.indx][listNum] = cosine_similarity(self.tfidf_matrix[0:1], self.tfidf_matrix[cnt:cnt+len(sublist)])[0]
# pdb.set_trace()
# cnt += len(sublist)
############################################
#update the initial state
self.stepNum = [0 for q in range(NUM_QUERY_TYPES)]
self.updateState(ACCEPT_ALL, 1, self.ignoreDuplicates)
return
# def extractEntitiesWithConfidences(self, article):
# #article is a list of words
# joined_article = ' '.join(article)
# pred, conf_scores, conf_cnts = predict.predictWithConfidences(trained_model, joined_article, False, helper.cities)
#
# for i in range(len(conf_scores)):
# if conf_cnts[i] > 0:
# conf_scores[i] /= conf_cnts[i]
#
# return pred.split(' ### '), conf_scores
#find the article similarity between original and newArticle[i] (=allArticles[i+1])
def articleSim(self, indx, listNum, i):
# return cosine_similarity(self.tfidf_matrix[0:1], self.tfidf_matrix[i+1:i+2])[0][0]
return COSINE_SIM[indx][listNum][i]
# update the state based on the decision from DQN
def updateState(self, action, query, ignoreDuplicates=False):
global CONTEXT, CONTEXT_TYPE
#use query to get next article
articleIndx = None
if self.rlbasicEval:
#ignore the query decision from the agent
listNum = self.listNum
self.listNum += 1
if self.listNum == NUM_QUERY_TYPES: self.listNum = 0
else:
listNum = query-1 #convert from 1-based to 0-based
if self.rlqueryEval:
#set the reconciliation action
action = ACCEPT_ALL
if ignoreDuplicates:
nextArticle = None
while not nextArticle and self.stepNum[listNum] < len(self.newArticles[listNum]):
articleIndx = self.shuffledIndxs[listNum][self.stepNum[listNum]]  # stepNum is per query type, so index it by listNum
if self.articleSim(self.indx, listNum, articleIndx) < 0.95:
nextArticle = self.newArticles[listNum][articleIndx]
else:
self.stepNum[listNum] += 1
else:
#get next article
if self.stepNum[listNum] < len(self.newArticles[listNum]):
articleIndx = self.shuffledIndxs[listNum][self.stepNum[listNum]]
nextArticle = self.newArticles[listNum][articleIndx]
else:
nextArticle = None
if action != STOP_ACTION:
# integrate the values into the current DB state
entities, confidences = self.prevEntities, self.prevConfidences
# all other tags
####################################
# todo: only one relation not one entity
for i in range(NUM_ENTITIES):
if action != ACCEPT_ALL and i != action: continue #only perform update for the entity chosen by agent
self.bestIndex = (self.prevListNum, self.prevArticleIndx) #analysis
if self.aggregate == 'majority':
self.bestEntitySet[i].append((entities[i], confidences[i]))
self.bestEntities[i], self.bestConfidences[i] = self.majorityVote(self.bestEntitySet[i])
else:
if i==0:
#handle shooterName - add to list or directly replace
if not self.bestEntities[i]:
self.bestEntities[i] = entities[i]
self.bestConfidences[i] = confidences[i]
elif self.aggregate == 'always' or confidences[i] > self.bestConfidences[i]:
self.bestEntities[i] = entities[i] #directly replace
# self.bestEntities[i] = self.bestEntities[i] + '|' + entities[i] #add to list
self.bestConfidences[i] = confidences[i]
else:
if not self.bestEntities[i] or self.aggregate == 'always' or confidences[i] > self.bestConfidences[i]:
self.bestEntities[i] = entities[i]
self.bestConfidences[i] = confidences[i]
# print "Changing best Entities"
# print "New entities", self.bestEntities
if DEBUG:
print "entitySet:", self.bestEntitySet
########
if nextArticle and action != STOP_ACTION:
assert(articleIndx != None)
##########
# if (articleIndx+1) in ENTITIES[self.indx][listNum]:
# entities, confidences = ENTITIES[self.indx][listNum][articleIndx+1], CONFIDENCES[self.indx][listNum][articleIndx+1]
entities, confidences = ENTITIES[self.indx][listNum][articleIndx], CONFIDENCES[self.indx][listNum][articleIndx]
# else:
# entities, confidences = self.extractEntitiesWithConfidences(nextArticle)
# ENTITIES[self.indx][listNum][articleIndx+1], CONFIDENCES[self.indx][listNum][articleIndx+1] = entities, confidences
##########
assert(len(entities) == len(confidences))
else:
# print "No next article"
entities, confidences = [""]*NUM_ENTITIES, [0]*NUM_ENTITIES
self.terminal = True
#modify self.state appropriately
# print(self.bestEntities, entities)
#############
if constants.mode == 'Shooter':
matches = map(self.checkEquality, self.bestEntities.values()[1:-1], entities[1:-1]) # map() is a higher-order function; its first argument is the function applied element-wise to the remaining iterables
matches.insert(0, self.checkEqualityShooter(self.bestEntities.values()[0], entities[0]))
matches.append(self.checkEqualityCity(self.bestEntities.values()[-1], entities[-1]))
elif constants.mode == "DS":
matches = map(self.checkEquality, self.bestEntities.values(), entities)
else:
matches = map(self.checkEqualityShooter, self.bestEntities.values(), entities)
#######
# pdb.set_trace()
self.state = [0 for i in range(STATE_SIZE)]
for i in range(NUM_ENTITIES):
self.state[i] = self.bestConfidences[i] #DB state
self.state[NUM_ENTITIES+i] = confidences[i] #IMP: (original) next article state
matchScore = float(matches[i])
if matchScore > 0:
self.state[2*NUM_ENTITIES+i] = 1
else:
self.state[3*NUM_ENTITIES+i] = 1
# self.state[2*NUM_ENTITIES+i] = float(matches[i])*confidences[i] if float(matches[i])>0 else -1*confidences[i]
if nextArticle:
# print self.indx, listNum, articleIndx
# print COSINE_SIM[self.indx][listNum]
self.state[4*NUM_ENTITIES] = self.articleSim(self.indx, listNum, articleIndx)
else:
self.state[4*NUM_ENTITIES] = 0
#selectively mask states
if self.entity != NUM_ENTITIES:
for j in range(NUM_ENTITIES):
if j != self.entity:
self.state[j] = 0
self.state[NUM_ENTITIES+j] = 0
#TODO: mask matches
#add in context information
if nextArticle and CONTEXT_TYPE != 0:
#######################################
j = 4*NUM_ENTITIES+1
self.state[j:j + 2 * CONTEXT_LENGTH] = CONTEXT[self.indx][listNum][articleIndx][0]
j += 2 * CONTEXT_LENGTH
self.state[j:j + 2 * CONTEXT_LENGTH] = CONTEXT[self.indx][listNum][articleIndx][1]
# for q in range(NUM_ENTITIES):
# if self.entity == NUM_ENTITIES or self.entity == q:
# self.state[j:j+2*CONTEXT_LENGTH] = CONTEXT[self.indx][listNum][articleIndx+1][q]
# j += 2*CONTEXT_LENGTH
########################################
# pdb.set_trace()
#update state variables
self.prevEntities = entities
self.prevConfidences = confidences
self.prevListNum = listNum
self.prevArticleIndx = articleIndx
return
# check if two entities are equal. Need to handle city
def checkEquality(self, e1, e2):
# if gold is unknown, then dont count that
return e2!='' and (COUNT_ZERO or e2 != 'zero') and e1.lower() == e2.lower()
def checkEqualityShooter(self, e1, e2):
if e2 == '' or e2=='unknown': return 0.
gold = set(splitBars(e2.lower()))
pred = set(splitBars(e1.lower()))
correct = len(gold.intersection(pred))
prec = float(correct)/len(pred)
rec = float(correct)/len(gold)
if self.shooterLenientEval:
if correct > 0:
return 1.
else:
return 0.
else:
if prec+rec > 0:
f1 = (2*prec*rec)/(prec+rec)
else:
f1 = 0.
return f1
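        # Worked example (hypothetical strings): gold = 'john doe|doe', pred = 'john doe|smith'
        # gives correct = 1, prec = 1/2, rec = 1/2, so the strict F1 score returned is 0.5;
        # with shooterLenientEval enabled the single overlap is enough to return 1.0.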
def checkEqualityCity(self, e1, e2):
return e2!='' and e1.lower() == e2.lower()
def calculateReward(self, oldEntities, newEntities):
if constants.mode == 'Shooter':
rewards = [int(self.checkEquality(newEntities[1], self.goldEntities[1])) - int(self.checkEquality(oldEntities[1], self.goldEntities[1])),
int(self.checkEquality(newEntities[2], self.goldEntities[2])) - int(self.checkEquality(oldEntities[2], self.goldEntities[2]))]
#add in shooter reward
if self.goldEntities[0]:
rewards.insert(0, self.checkEqualityShooter(newEntities[0], self.goldEntities[0]) \
- self.checkEqualityShooter(oldEntities[0], self.goldEntities[0]))
else:
rewards.insert(0, 0.)
# add in city reward
rewards.append(self.checkEqualityCity(newEntities[-1], self.goldEntities[-1]) \
- self.checkEqualityCity(oldEntities[-1], self.goldEntities[-1]))
##########################################
elif constants.mode == 'DS':
rewards = [int(self.checkEquality(newEntities[0], self.goldEntities[0])) - int(self.checkEquality(oldEntities[0], self.goldEntities[0]))]
##########################################
else:
rewards = []
for i in range(len(newEntities)):
if self.goldEntities[i] != 'unknown':
rewards.append(self.checkEqualityShooter(newEntities[i], self.goldEntities[i]) - self.checkEqualityShooter(oldEntities[i], self.goldEntities[i]))
else:
rewards.append(0.)
if self.entity == NUM_ENTITIES:
return sum(rewards)
else:
return rewards[self.entity]
def calculateStatSign(self, oldEntities, newEntities):
if constants.mode == 'Shooter':
rewards = [int(self.checkEquality(newEntities[1], self.goldEntities[1])) - int(self.checkEquality(oldEntities[1], self.goldEntities[1])),
int(self.checkEquality(newEntities[2], self.goldEntities[2])) - int(self.checkEquality(oldEntities[2], self.goldEntities[2]))]
#add in shooter reward
if self.goldEntities[0]:
rewards.insert(0, self.checkEqualityShooter(newEntities[0], self.goldEntities[0]) \
- self.checkEqualityShooter(oldEntities[0], self.goldEntities[0]))
else:
rewards.insert(0, 0.)
# add in city reward
rewards.append(self.checkEqualityCity(newEntities[-1], self.goldEntities[-1]) \
- self.checkEqualityCity(oldEntities[-1], self.goldEntities[-1]))
else:
rewards = []
for i in | |
<filename>notifico/services/hooks/github.py<gh_stars>0
# -*- coding: utf8 -*-
__all__ = ('GithubHook',)
import re
import json
import requests
from flask.ext import wtf
from notifico.services.hooks import HookService
def simplify_payload(payload):
"""
Massage the github webhook payload into something a little more
usable. Idea comes from gith by danheberden.
"""
result = {
'branch': None,
'tag': None,
'pusher': None,
'files': {
'all': [],
'added': [],
'removed': [],
'modified': []
},
'original': payload
}
# Try to find the branch/tag name from `ref`, falling back to `base_ref`.
ref_r = re.compile(r'refs/(heads|tags)/(.*)$')
for ref in (payload.get('ref', ''), payload.get('base_ref', '')):
match = ref_r.match(ref)
if match:
type_, name = match.group(1, 2)
result[{'heads': 'branch', 'tags': 'tag'}[type_]] = name
break
# Github (for whatever reason) doesn't always know the pusher. This field
# is always missing/nil for commits generated by github itself, and for
# web hooks coming from the "Test Hook" button.
if 'pusher' in payload:
result['pusher'] = payload['pusher'].get('name')
# Github returns the string 'none' when a deploy key pushes
if result['pusher'] == 'none':
result['pusher'] = u'A deploy key'
# Summarize file movement over all the commits.
for commit in payload.get('commits', tuple()):
for type_ in ('added', 'removed', 'modified'):
result['files'][type_].extend(commit[type_])
result['files']['all'].extend(commit[type_])
return result
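# Minimal illustration (hypothetical payload fragment, not a full GitHub webhook body):
# simplify_payload() pulls the branch name out of 'ref', keeps the pusher, and flattens
# the per-commit file lists into a single summary.
def _example_simplified_payload():
    payload = {
        'ref': 'refs/heads/master',
        'pusher': {'name': 'octocat'},
        'commits': [{'added': ['README.md'], 'removed': [], 'modified': ['setup.py']}],
    }
    simplified = simplify_payload(payload)
    # simplified['branch'] == 'master', simplified['pusher'] == 'octocat',
    # simplified['files']['all'] == ['README.md', 'setup.py']
    return simplified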
class GithubConfigForm(wtf.Form):
branches = wtf.TextField('Branches', validators=[
wtf.Optional(),
wtf.Length(max=1024)
], description=(
'A comma-separated list of branches to forward, or blank for all.'
' Ex: "master, dev"'
))
use_colors = wtf.BooleanField('Use Colors', validators=[
wtf.Optional()
], default=True, description=(
'If checked, commit messages will include minor mIRC coloring.'
))
show_branch = wtf.BooleanField('Show Branch Name', validators=[
wtf.Optional()
], default=True, description=(
'If checked, commit messages will include the branch name.'
))
show_tags = wtf.BooleanField('Show Tags', validators=[
wtf.Optional()
], default=True, description=(
'If checked, changes to tags will be shown.'
))
prefer_username = wtf.BooleanField('Prefer Usernames', validators=[
wtf.Optional()
], default=True, description=(
'If checked, show github usernames instead of committer name when'
' possible.'
))
full_project_name = wtf.BooleanField('Full Project Name', validators=[
wtf.Optional()
], default=False, description=(
'If checked, show the full github project name (ex: tktech/notifico)'
' instead of the Notifico project name (ex: notifico)'
))
title_only = wtf.BooleanField('Title Only', validators=[
wtf.Optional()
], default=False, description=(
'If checked, only the commit title (the commit message up to'
' the first new line) will be emitted.'
))
distinct_only = wtf.BooleanField('Distinct Commits Only', validators=[
wtf.Optional()
], default=True, description=(
'Commits will only be announced the first time they are seen.'
))
def _create_push_final_summary(j, config):
# The name of the repository.
original = j['original']
full_project_name = config.get('full_project_name', False)
line_limit = config.get('line_limit', 3)
line = []
project_name = original['repository']['name']
if full_project_name:
# The use wants the <username>/<project name> form from
# github, not the Notifico name.
project_name = '{username}/{project_Name}'.format(
username=original['repository']['owner']['name'],
project_Name=project_name
)
line.append(u'{RESET}[{BLUE}{name}{RESET}]'.format(
name=project_name,
**HookService.colors
))
line.append(u'... and {count} more commits.'.format(
count=len(original.get('commits', [])) - line_limit
))
return u' '.join(line)
class GithubHook(HookService):
"""
HookService hook for http://github.com.
"""
SERVICE_NAME = 'Github'
SERVICE_ID = 10
@classmethod
def service_description(cls):
return cls.env().get_template('github_desc.html').render()
@classmethod
def handle_request(cls, user, request, hook):
# Support both json payloads as well as form encoded payloads
if request.headers.get('Content-Type') == 'application/json':
payload = request.get_json()
else:
try:
payload = json.loads(request.form['payload'])
except KeyError:
return
event = request.headers.get('X-GitHub-Event', '')
event_handler = {
'ping': cls._handle_ping,
'push': cls._handle_push,
'issues': cls._handle_issues,
'issue_comment': cls._handle_issue_comment,
'commit_comment': cls._handle_commit_comment,
'create': cls._handle_create,
'delete': cls._handle_delete,
'pull_request': cls._handle_pull_request,
'pull_request_review_comment': (
cls._handle_pull_request_review_comment
),
'gollum': cls._handle_gollum,
'watch': cls._handle_watch,
'release': cls._handle_release,
'fork': cls._handle_fork,
'member': cls._handle_member,
'public': cls._handle_public,
'team_add': cls._handle_team_add,
'status': cls._handle_status,
'deployment': cls._handle_deployment,
'deployment_status': cls._handle_deployment_status
}
if event not in event_handler:
return
return event_handler[event](user, request, hook, payload)
@classmethod
def _handle_ping(cls, user, request, hook, json):
yield u'{RESET}[{BLUE}GitHub{RESET}] {zen}'.format(
zen=json['zen'],
**HookService.colors
)
@classmethod
def _handle_issues(cls, user, request, hook, json):
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} {action} '
'issue {GREEN}#{num}{RESET}: {title} - {PINK}{url}{RESET}'
)
yield fmt_string.format(
name=json['repository']['name'],
who=json['sender']['login'],
action=json['action'],
num=json['issue']['number'],
title=json['issue']['title'],
url=GithubHook.shorten(json['issue']['html_url']),
**HookService.colors
)
@classmethod
def _handle_issue_comment(cls, user, request, hook, json):
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} commented on '
'issue {GREEN}#{num}{RESET}: {title} - {PINK}{url}{RESET}'
)
yield fmt_string.format(
name=json['repository']['name'],
who=json['sender']['login'],
action=json['action'],
num=json['issue']['number'],
title=json['issue']['title'],
url=GithubHook.shorten(json['comment']['html_url']),
**HookService.colors
)
@classmethod
def _handle_commit_comment(cls, user, request, hook, json):
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} commented on '
'commit {GREEN}{commit}{RESET} - {PINK}{url}{RESET}'
)
yield fmt_string.format(
name=json['repository']['name'],
who=json['comment']['user']['login'],
commit=json['comment']['commit_id'],
url=GithubHook.shorten(json['comment']['html_url']),
**HookService.colors
)
@classmethod
def _handle_create(cls, user, request, hook, json):
fmt_string = u' '.join([
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} '
'created {ref_type}',
# null/None if repository was created
u'{GREEN}{ref}{RESET}' if json['ref'] else u'',
u'- {PINK}{url}{RESET}'
])
# URL points to repo, no other url available
yield fmt_string.format(
name=json['repository']['name'],
who=json['sender']['login'],
ref_type=json['ref_type'],
ref=json['ref'],
url=GithubHook.shorten(json['repository']['html_url']),
**HookService.colors
)
@classmethod
def _handle_delete(cls, user, request, hook, json):
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} deleted '
'{ref_type} {GREEN}{ref}{RESET} - {PINK}{url}{RESET}'
)
# URL points to repo, no other url available
yield fmt_string.format(
name=json['repository']['name'],
who=json['sender']['login'],
ref_type=json['ref_type'],
ref=json['ref'],
url=GithubHook.shorten(json['repository']['html_url']),
**HookService.colors
)
@classmethod
def _handle_pull_request(cls, user, request, hook, json):
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} {action} pull '
'request {GREEN}#{num}{RESET}: {title} - {PINK}{url}{RESET}'
)
yield fmt_string.format(
name=json['repository']['name'],
who=json['sender']['login'],
action=json['action'],
num=json['number'],
title=json['pull_request']['title'],
url=GithubHook.shorten(json['pull_request']['html_url']),
**HookService.colors
)
@classmethod
def _handle_pull_request_review_comment(cls, user, request, hook, json):
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} reviewed pull '
'request {GREEN}#{num}{RESET} commit - {PINK}{url}{RESET}'
)
num = json['comment']['pull_request_url'].split('/')[-1]
yield fmt_string.format(
name=json['repository']['name'],
who=json['comment']['user']['login'],
num=num,
url=GithubHook.shorten(json['comment']['html_url']),
**HookService.colors
)
@classmethod
def _handle_gollum(cls, user, request, hook, json):
name = json['repository']['name']
if len(json['pages']) > 1:
# Multiple pages changed
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} '
'updated the Wiki'
)
yield fmt_string.format(
name=name,
who=json['sender']['login'],
**HookService.colors
)
fmt_string_page = (
u'{RESET}[{BLUE}{name}{RESET}] Page {GREEN}{pname}{RESET}'
' {action} - {PINK}{url}{RESET}'
)
for page in json['pages']:
yield fmt_string_page.format(
name=name,
pname=page['page_name'],
action=page['action'],
url=GithubHook.shorten(page['html_url']),
**HookService.colors
)
else:
# Only one page
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} {action} '
'page {GREEN}{pname}{RESET} - {PINK}{url}{RESET}'
)
yield fmt_string.format(
name=name,
who=json['sender']['login'],
pname=json['pages'][0]['page_name'],
action=json['pages'][0]['action'],
url=GithubHook.shorten(json['pages'][0]['html_url']),
**HookService.colors
)
@classmethod
def _handle_watch(cls, user, request, hook, json):
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} starred '
'{GREEN}{name}{RESET} - {PINK}{url}{RESET}'
)
yield fmt_string.format(
name=json['repository']['name'],
who=json['sender']['login'],
url=GithubHook.shorten(json['sender']['html_url']),
**HookService.colors
)
@classmethod
def _handle_release(cls, user, request, hook, json):
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} {action} '
'{GREEN}{tag_name} | {title}{RESET} - {PINK}{url}{RESET}'
)
yield fmt_string.format(
name=json['repository']['name'],
who=json['sender']['login'],
action=json['action'],
tag_name=json['release']['tag_name'],
title=json['release']['name'],
url=GithubHook.shorten(json['release']['html_url']),
**HookService.colors
)
@classmethod
def _handle_fork(cls, user, request, hook, json):
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} forked '
'the repository - {PINK}{url}{RESET}'
)
# URL points to repo, no other url available
yield fmt_string.format(
name=json['repository']['name'],
who=json['forkee']['owner']['login'],
url=GithubHook.shorten(json['forkee']['owner']['html_url']),
**HookService.colors
)
@classmethod
def _handle_member(cls, user, request, hook, json):
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} {action} '
'user {GREEN}{whom}{RESET} - {PINK}{url}{RESET}'
)
yield fmt_string.format(
name=json['repository']['name'],
who=json['sender']['login'],
action=json['action'],
whom=json['member']['login'],
url=GithubHook.shorten(json['member']['html_url']),
**HookService.colors
)
@classmethod
def _handle_public(cls, user, request, hook, json):
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} made the '
'repository public!'
)
yield fmt_string.format(
name=json['repository']['name'],
who=json['sender']['login'],
**HookService.colors
)
@classmethod
def _handle_team_add(cls, user, request, hook, json):
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {ORANGE}{who}{RESET} added the'
' team {GREEN}{tname}{RESET} to the repository!'
)
yield fmt_string.format(
name=json['repository']['name'],
who=json['sender']['login'],
tname=json['team']['name'],
**HookService.colors
)
@classmethod
def _handle_status(cls, user, request, hook, json):
fmt_string = (
u'{RESET}[{BLUE}{name}{RESET}] {status_color}{status}{RESET}. '
'{description} - {PINK}{url}{RESET}'
)
status_color = HookService.colors['GREEN']
if not json['state'].lower() == 'success':
status_color = HookService.colors['RED']
yield fmt_string.format(
name=json['repository']['name'],
status_color=status_color,
status=json['state'].capitalize(),
description=json['description'],
url=json['target_url'],
**HookService.colors
)
@classmethod
def _handle_deployment(cls, user, request, hook, json):
yield ''
@classmethod
def _handle_deployment_status(cls, user, request, hook, json):
yield ''
@classmethod
def _handle_push(cls, user, request, hook, json):
j = simplify_payload(json)
original = j['original']
# Config may not exist for pre-migrate hooks.
config = hook.config or {}
# Should we get rid of mIRC colors before sending?
strip = not config.get('use_colors', True)
# Branch names to filter on.
branches = config.get('branches', None)
# Display tag activity?
show_tags = config.get('show_tags', True)
# Limit the number of lines to display before the summary.
# 3 is the default on github.com's IRC service
line_limit = config.get('line_limit', 3)
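# For reference, a hook config for this handler might look roughly like this
# (hypothetical values, not taken from any real hook):
#   {'use_colors': True, 'branches': 'master, develop',
#    'show_tags': True, 'line_limit': 3}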
if not original['commits']:
if show_tags and j['tag']:
yield cls.message(
cls._create_non_commit_summary(j, config),
strip=strip
)
if j['branch']:
yield cls.message(
cls._create_non_commit_summary(j, config),
strip=strip
)
# No commits, no tags, no new branch. Nothing to do
return
if branches:
# The user wants to filter by branch name.
branches = [b.strip().lower() for b in branches.split(',')]
if j['branch'] and j['branch'].lower() not in branches:
# This isn't a branch the user wants.
return
# A short summarization of the commits in the push.
yield cls.message(cls._create_push_summary(j, config), strip=strip)
# A one-line summary for each commit in the push.
line_iterator = cls._create_commit_summary(j, config)
for i, formatted_commit | |
library. """
rstrings = []
for id, r in self._rates.items():
rstrings.append('{} ({})'.format(r, id))
return '\n'.join(rstrings)
def __add__(self, other):
""" Add two libraries to get a library containing rates from both. """
new_rates = dict(self._rates)  # copy so that adding libraries does not mutate self._rates
for id, r in other._rates.items():
try:
assert id not in new_rates
except:
if r != new_rates[id]:
print('ERROR: rate {} defined differently in libraries {} and {}\n'.format(r, self._library_file, other._library_file))
raise
else:
new_rates[id] = r
new_library = Library(libfile='{} + {}'.format(self._library_file, other._library_file),
rates=new_rates,
read_library=False)
return new_library
def get_rates(self):
""" Return a list of the rates in this library. """
rlist = [r for id, r in self._rates.items()]
return rlist
def get_rate(self, id):
""" Return a rate matching the id provided. """
try:
return self._rates[id]
except:
print("ERROR: rate identifier does not match a rate in this library.")
raise
def linking_nuclei(self, nuclist, with_reverse=True):
"""
Return a Library object containing the rates linking the
nuclei provided in the list of Nucleus objects or nucleus abbreviations 'nuclist'.
If with_reverse is True, then include reverse rates. Otherwise
include only forward rates.
"""
if type(nuclist) == Nucleus or type(nuclist) == str:
nuclist = [nuclist]
else:
try:
nuclist = list(nuclist)
except:
raise
nucleus_list = []
for nuc in nuclist:
if type(nuc) == Nucleus:
nucleus_list.append(nuc)
else:
try:
anuc = Nucleus(nuc)
except:
raise
else:
nucleus_list.append(anuc)
# Get the set of rates for which any Nucleus in nucleus_list
# appears as either reactant or product.
rate_filters = []
for nuc in nucleus_list:
rate_filters.append(RateFilter(reactants=nuc, exact=False))
rate_filters.append(RateFilter(products=nuc, exact=False))
triage_library = self.filter(rate_filters)
# Discard any of this set of rates for which nuclei appear not
# in nucleus_list
filtered_rates = []
for r in triage_library.get_rates():
include = True
for nuc in r.reactants:
if nuc not in nucleus_list:
include = False
break
for nuc in r.products:
if nuc not in nucleus_list:
include = False
break
if not with_reverse and r.reverse:
include = False
if include:
filtered_rates.append(r)
# Return library containing the filtered rates
return Library(rates=filtered_rates)
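# A minimal usage sketch (the library name and nuclei are illustrative, and
# assume the abbreviations parse into Nucleus objects):
#   sub = reaclib_library.linking_nuclei(["p", "he4", "c12"], with_reverse=False)
#   # -> Library containing only forward rates among p, he4 and c12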
def filter(self, filter_spec):
"""
filter_spec should be an iterable of RateFilter objects or a
single RateFilter object. Library.filter returns a Library
containing all rates matching any RateFilter in filter_spec,
or None if nothing matches. If RateFilter.exact, rates must
have exactly the reactants or products passed in as arguments.
"""
if type(filter_spec) == RateFilter:
filter_specifications = [filter_spec]
else:
try:
iter(filter_spec)
except:
raise
else:
filter_specifications = filter_spec
matching_rates = collections.OrderedDict()
for id, r in self._rates.items():
for f in filter_specifications:
if f.matches(r):
matching_rates[id] = r
break
if matching_rates:
return Library(libfile=self._library_file,
rates=matching_rates,
read_library=False)
else:
return None
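# Sketch of how filter() can be combined with RateFilter (names illustrative):
#   f = RateFilter(reactants=["c12", "he4"], exact=True)
#   matches = some_library.filter(f)
#   # -> Library of rates whose reactants are exactly c12 + he4, or None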
class RateFilter(object):
"""RateFilter stores selection rules specifying a rate or group of
rates to assist searching for rates stored in a Library."""
def __init__(self, reactants=None, products=None, exact=True,
reverse=None, min_reactants=None, max_reactants=None,
min_products=None, max_products=None):
self.reactants = []
self.products = []
self.exact = exact
self.reverse = reverse
self.min_reactants = min_reactants
self.min_products = min_products
self.max_reactants = max_reactants
self.max_products = max_products
if reactants:
if type(reactants) == Nucleus or type(reactants) == str:
reactants = [reactants]
self.reactants = [self._cast_nucleus(r) for r in reactants]
if products:
if type(products) == Nucleus or type(products) == str:
products = [products]
self.products = [self._cast_nucleus(r) for r in products]
@staticmethod
def _cast_nucleus(r):
""" Make sure r is of type Nucleus. """
if not type(r) == Nucleus:
try:
rnuc = Nucleus(r)
except:
raise
else:
return rnuc
else:
return r
@staticmethod
def _contents_equal(a, b):
"""
Return True if the contents of a and b exactly match, ignoring ordering.
If either a or b is None, return True only if both a and b are None.
"""
if a and b:
return collections.Counter(a) == collections.Counter(b)
else:
return (not a) and (not b)
@staticmethod
def _compare_nuclides(test, reference, exact=True):
"""
test and reference should be iterables of Nucleus objects.
If an exact match is desired, test and reference should exactly match, ignoring ordering.
Otherwise, return True only if every element of test appears at least one time in reference.
"""
matches = True
if exact:
matches = RateFilter._contents_equal(test, reference)
else:
for nuc in test:
if not (nuc in reference):
matches = False
break
return matches
def matches(self, r):
""" Given a Rate r, see if it matches this RateFilter. """
matches_reactants = True
matches_products = True
matches_reverse = True
matches_min_reactants = True
matches_min_products = True
matches_max_reactants = True
matches_max_products = True
if self.reactants:
matches_reactants = self._compare_nuclides(self.reactants, r.reactants, self.exact)
if self.products:
matches_products = self._compare_nuclides(self.products, r.products, self.exact)
if type(self.reverse) == type(True):
matches_reverse = self.reverse == r.reverse
if type(self.min_reactants) == int:
matches_min_reactants = len(r.reactants) >= self.min_reactants
if type(self.min_products) == int:
matches_min_products = len(r.products) >= self.min_products
if type(self.max_reactants) == int:
matches_max_reactants = len(r.reactants) <= self.max_reactants
if type(self.max_products) == int:
matches_max_products = len(r.products) <= self.max_products
return (matches_reactants and matches_products and matches_reverse and
matches_min_reactants and matches_max_reactants and
matches_min_products and matches_max_products)
def invert(self):
""" Return a RateFilter matching the inverse rate. """
newfilter = RateFilter(reactants=self.products,
products=self.reactants,
exact=self.exact,
reverse=self.reverse,
min_reactants=self.min_products,
max_reactants=self.max_products,
min_products=self.min_reactants,
max_products=self.max_reactants)
return newfilter
class Rate(object):
""" a single Reaclib rate, which can be composed of multiple sets """
def __init__(self, rfile=None, rfile_path=None, chapter=None, original_source=None,
reactants=None, products=None, sets=None, labelprops=None, Q=None):
""" rfile can be either a string specifying the path to a rate file or
an io.StringIO object from which to read rate information. """
self.rfile_path = rfile_path
self.rfile = None
if type(rfile) == str:
self.rfile_path = Library._find_rate_file(rfile)
self.rfile = os.path.basename(rfile)
self.chapter = chapter # the Reaclib chapter for this reaction
self.original_source = original_source # the contents of the original rate file
self.fname = None
if reactants:
self.reactants = reactants
else:
self.reactants = []
if products:
self.products = products
else:
self.products = []
if sets:
self.sets = sets
else:
self.sets = []
self.labelprops = labelprops
self.Q = Q
if type(rfile) == str:
# read in the file, parse the different sets and store them as
# SingleSet objects in sets[]
f = open(self.rfile_path, "r")
elif type(rfile) == io.StringIO:
# Set f to the io.StringIO object
f = rfile
else:
f = None
if f:
self._read_from_file(f)
f.close()
else:
self._set_label_properties()
self._set_rhs_properties()
self._set_screening()
self._set_print_representation()
def __repr__(self):
return self.string
def __hash__(self):
return hash(self.__repr__())
def __eq__(self, other):
""" Determine whether two Rate objects are equal.
They are equal if they contain identical reactants and products and
if they contain the same SingleSet sets and if their chapters are equal."""
x = True
x = x and (self.chapter == other.chapter)
x = x and (self.reactants == other.reactants)
x = x and (self.products == other.products)
x = x and (len(self.sets) == len(other.sets))
for si in self.sets:
scomp = False
for sj in other.sets:
if si == sj:
scomp = True
break
x = x and scomp
return x
def __add__(self, other):
"""Combine the sets of two Rate objects if they describe the same
reaction. Must be Reaclib rates."""
assert(self.reactants == other.reactants)
assert(self.products == other.products)
assert(self.chapter == other.chapter)
assert(type(self.chapter) == int)
assert(self.label == other.label)
assert(self.weak == other.weak)
assert(self.tabular == other.tabular)
assert(self.reverse == other.reverse)
if self.resonant != other.resonant:
self._labelprops_combine_resonance()
new_rate = Rate(chapter=self.chapter,
original_source='\n'.join([self.original_source,
other.original_source]),
reactants=self.reactants,
products=self.products,
sets=self.sets + other.sets,
labelprops=self.labelprops,
Q=self.Q)
return new_rate
def _set_label_properties(self, labelprops=None):
""" Calls _update_resonance_combined and then
_update_label_properties. """
if labelprops:
self.labelprops = labelprops
# Update labelprops based on the Sets in this Rate
# to set the resonance_combined flag properly
self._update_resonance_combined()
self._update_label_properties()
def _update_resonance_combined(self):
""" Checks the Sets in this Rate and updates the
resonance_combined flag as well as
self.labelprops[4] """
sres = [s.resonant for s in self.sets]
if True in sres and False in sres:
self._labelprops_combine_resonance()
else:
self.resonance_combined = False
def _labelprops_combine_resonance(self):
""" Update self.labelprops[4] = 'c'.
Also set the resonance_combined flag. """
llp = list(self.labelprops)
llp[4] = 'c'
self.labelprops = ''.join(llp)
self.resonance_combined = True
def _update_label_properties(self):
""" Set label and flags indicating Rate is resonant,
weak, or reverse. """
assert(type(self.labelprops) == str)
try:
assert(len(self.labelprops) == 6)
except:
assert(self.labelprops == 'tabular')
self.label = 'tabular'
self.resonant = False
self.resonance_combined = False
self.weak = False # The tabular rate might or might not be weak
self.reverse = False
self.tabular = True
else:
self.label = self.labelprops[0:4]
self.resonant = self.labelprops[4] == 'r'
self.weak = self.labelprops[4] == 'w'
| |
y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
anchors = utils.batch_slice(ix, lambda x: tf.gather(anchors, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. [batch, N, (y1, x1, y2, x2)]
height, width = self.config.IMAGE_SHAPE[:2]
window = np.array([0, 0, height, width]).astype(np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to <NAME> paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Normalize dimensions to range of 0 to 1.
normalized_boxes = boxes / np.array([[height, width, height, width]])
# Non-max suppression
def nms(normalized_boxes, scores):
indices = tf.image.non_max_suppression(
normalized_boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(normalized_boxes, indices)
# Pad if needed
padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
proposals = tf.pad(proposals, [(0, padding), (0, 0)])
return proposals
proposals = utils.batch_slice([normalized_boxes, scores], nms,
self.config.IMAGES_PER_GPU)
return proposals
def compute_output_shape(self, input_shape):
return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
"""Implementatin of Log2. TF doesn't have a native implemenation."""
return tf.log(x) / tf.log(2.0)
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [height, width] of the output pooled regions. Usually [7, 7]
- image_shape: [height, width, channels]. Shape of input image in pixels
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- Feature maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, height, width, channels].
The width and height are those specified in the pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, image_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
self.image_shape = tuple(image_shape)
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[1:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = tf.cast(
self.image_shape[0] * self.image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
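# In pixel terms, the lines above compute
#   level = clip(4 + round(log2(sqrt(roi_area_px) / 224)), 2, 5)
# i.e. Equation 1 of the FPN paper with k0 = 4. For example, an ROI covering
# roughly 112x112 pixels gets 4 + round(log2(112 / 224)) = 3, so it pools
# from P3.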
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
# Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
# Stop gradient propagation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
pooled = tf.expand_dims(pooled, 0)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[1][-1], )
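# Typical call sketch (tensor names and shapes are illustrative):
#   pooled = PyramidROIAlign([7, 7], config.IMAGE_SHAPE)(
#       [rois, P2, P3, P4, P5])
#   # -> [batch, num_boxes, 7, 7, channels]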
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
# 1. Tile boxes2 and repeat boxes1. This allows us to compare
# every boxes1 against every boxes2 without loops.
# TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
return overlaps
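# Quick numeric check of the IoU above (plain Python, not part of the graph):
#   box A = [0, 0, 2, 2], box B = [1, 1, 3, 3]  (y1, x1, y2, x2)
#   intersection = 1 * 1 = 1, union = 4 + 4 - 1 = 7, IoU = 1/7 ~= 0.143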
def detection_targets_graph(Inputs, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [NUM_FEATURES,MAX_GT_INSTANCES] int class IDs
relations: [MAX_GT_INSTANCES,MAX_GT_INSTANCES]
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
gt_poses: [MAX_GT_INSTANCES, (tetax,tetay,tetaz,x,y,z)]
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
relations: [TRAIN_ROIS_PER_IMAGE,TRAIN_ROIS_PER_IMAGE]
deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
Class-specific bbox refinements.
masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox
boundaries and resized to neural network output size.
gt_poses: [MAX_GT_INSTANCES, (tetax,tetay,tetaz,x,y,z)]
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
#input
proposals=Inputs[0]
gt_class_ids=Inputs[1:7]
gt_boxes=Inputs[7]
gt_masks=Inputs[8]
gt_poses=Inputs[9]
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove zero padding
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
for i in range(config.NUM_FEATURES-2):
gt_class_ids[i] = tf.boolean_mask(gt_class_ids[i], non_zeros,
name="trim_gt_class_ids"+str(i))
gt_class_ids[5]=tf.gather(gt_class_ids[5], tf.where(non_zeros)[:, 0], axis=0,
name="trim_gt_relations0")
gt_class_ids[5]=tf.gather(gt_class_ids[5], tf.where(non_zeros)[:, 0], axis=1,
name="trim_gt_relations1")
gt_poses=tf.gather(gt_poses, tf.where(non_zeros)[:, 0], axis=0,
name="trim_gt_poses")
gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = tf.where(gt_class_ids[0] < 0)[:, 0]
non_crowd_ix = tf.where(gt_class_ids[0] > 0)[:, 0]
crowd_boxes = tf.gather(gt_boxes, crowd_ix)
crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)
for i in range(config.NUM_FEATURES-2):
gt_class_ids[i] = tf.gather(gt_class_ids[i], non_crowd_ix)
gt_class_ids[5] = tf.gather(gt_class_ids[5], non_crowd_ix, axis=0)
gt_class_ids[5] = tf.gather(gt_class_ids[5], non_crowd_ix, axis=1)
gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
gt_poses = tf.gather(gt_poses, non_crowd_ix)
gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
# Compute overlaps matrix [proposals, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
# Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.where(positive_roi_bool)[:, 0]
# 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(positive_indices)[0]
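# Example of the arithmetic with hypothetical config values: if
# TRAIN_ROIS_PER_IMAGE = 200 and ROI_POSITIVE_RATIO = 0.33, the cap above is
# int(200 * 0.33) = 66 positive ROIs and r below is roughly 3.03.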
# Negative ROIs. Add enough to maintain positive:negative ratio.
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count | |
/ ")
print("\t|")
print("\t|")
if numberOfErrors == 6 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|\\")
print("\t| / \\")
print("\t|")
print("\t|")
print("\nYou lose! GAME OVER\n")
print("The answer was \"" + word + "\"")
loser = True
if not loser :
print("\n\tWord: " + toGuess)
print("\tMisses: " + wrongChars)
if "_" in toGuess and not loser :
guessChar = ""
while not guessChar.isalpha() :
guessChar = input("\n---------------------------------\nEnter your letter: ")
_ = os.system('cls' if os.name=='nt' else 'clear')
if guessChar == "A" or guessChar == "a" :
if word[1] == "A" or word[1] == "a" :
toGuess = toGuess[:1] + "a" + toGuess[2:]
if word[2] == "A" or word[2] == "a" :
toGuess = toGuess[:2] + "a" + toGuess[3:]
if word[1] != "A" and word[1] != "a" and word[2] != "A" and word[2] != "a" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "a" + ", "
if guessChar == "B" or guessChar == "b" :
if word[1] == "B" or word[1] == "b" :
toGuess = toGuess[:1] + "b" + toGuess[2:]
if word[2] == "B" or word[2] == "b" :
toGuess = toGuess[:2] + "b" + toGuess[3:]
if word[1] != "B" and word[1] != "b" and word[2] != "B" and word[2] != "b" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "b" + ", "
if guessChar == "C" or guessChar == "c" :
if word[1] == "C" or word[1] == "c" :
toGuess = toGuess[:1] + "c" + toGuess[2:]
if word[2] == "C" or word[2] == "c" :
toGuess = toGuess[:2] + "c" + toGuess[3:]
if word[1] != "C" and word[1] != "c" and word[2] != "C" and word[2] != "c" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "c" + ", "
if guessChar == "D" or guessChar == "d" :
if word[1] == "D" or word[1] == "d" :
toGuess = toGuess[:1] + "d" + toGuess[2:]
if word[2] == "D" or word[2] == "d" :
toGuess = toGuess[:2] + "d" + toGuess[3:]
if word[1] != "D" and word[1] != "d" and word[2] != "D" and word[2] != "d" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "d" + ", "
if guessChar == "E" or guessChar == "e" :
if word[1] == "E" or word[1] == "e" :
toGuess = toGuess[:1] + "e" + toGuess[2:]
if word[2] == "E" or word[2] == "e" :
toGuess = toGuess[:2] + "e" + toGuess[3:]
if word[1] != "E" and word[1] != "e" and word[2] != "E" and word[2] != "e" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "e" + ", "
if guessChar == "F" or guessChar == "f" :
if word[1] == "F" or word[1] == "f" :
toGuess = toGuess[:1] + "f" + toGuess[2:]
if word[2] == "F" or word[2] == "f" :
toGuess = toGuess[:2] + "f" + toGuess[3:]
if word[1] != "F" and word[1] != "f" and word[2] != "F" and word[2] != "f" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "f" + ", "
if guessChar == "G" or guessChar == "g" :
if word[1] == "G" or word[1] == "g" :
toGuess = toGuess[:1] + "g" + toGuess[2:]
if word[2] == "G" or word[2] == "g" :
toGuess = toGuess[:2] + "g" + toGuess[3:]
if word[1] != "G" and word[1] != "g" and word[2] != "G" and word[2] != "g" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "g" + ", "
if guessChar == "H" or guessChar == "h" :
if word[1] == "H" or word[1] == "h" :
toGuess = toGuess[:1] + "h" + toGuess[2:]
if word[2] == "H" or word[2] == "h" :
toGuess = toGuess[:2] + "h" + toGuess[3:]
if word[1] != "H" and word[1] != "h" and word[2] != "H" and word[2] != "h" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "h" + ", "
if guessChar == "I" or guessChar == "i" :
if word[1] == "I" or word[1] == "i" :
toGuess = toGuess[:1] + "i" + toGuess[2:]
if word[2] == "I" or word[2] == "i" :
toGuess = toGuess[:2] + "i" + toGuess[3:]
if word[1] != "I" and word[1] != "i" and word[2] != "I" and word[2] != "i" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "i" + ", "
if guessChar == "J" or guessChar == "j" :
if word[1] == "J" or word[1] == "j" :
toGuess = toGuess[:1] + "j" + toGuess[2:]
if word[2] == "J" or word[2] == "j" :
toGuess = toGuess[:2] + "j" + toGuess[3:]
if word[1] != "J" and word[1] != "j" and word[2] != "J" and word[2] != "j" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "j" + ", "
if guessChar == "K" or guessChar == "k" :
if word[1] == "K" or word[1] == "k" :
toGuess = toGuess[:1] + "k" + toGuess[2:]
if word[2] == "K" or word[2] == "k" :
toGuess = toGuess[:2] + "k" + toGuess[3:]
if word[1] != "K" and word[1] != "k" and word[2] != "K" and word[2] != "k" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "k" + ", "
if guessChar == "L" or guessChar == "l" :
if word[1] == "L" or word[1] == "l" :
toGuess = toGuess[:1] + "l" + toGuess[2:]
if word[2] == "L" or word[2] == "l" :
toGuess = toGuess[:2] + "l" + toGuess[3:]
if word[1] != "L" and word[1] != "l" and word[2] != "L" and word[2] != "l" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "l" + ", "
if guessChar == "M" or guessChar == "m" :
if word[1] == "M" or word[1] == "m" :
toGuess = toGuess[:1] + "m" + toGuess[2:]
if word[2] == "M" or word[2] == "m" :
toGuess = toGuess[:2] + "m" + toGuess[3:]
if word[1] != "M" and word[1] != "m" and word[2] != "M" and word[2] != "m" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "m" + ", "
if guessChar == "N" or guessChar == "n" :
if word[1] == "N" or word[1] == "n" :
toGuess = toGuess[:1] + "n" + toGuess[2:]
if word[2] == "N" or word[2] == "n" :
toGuess = toGuess[:2] + "n" + toGuess[3:]
if word[1] != "N" and word[1] != "n" and word[2] != "N" and word[2] != "n" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "n" + ", "
if guessChar == "O" or guessChar == "o" :
if word[1] == "O" or word[1] == "o" :
toGuess = toGuess[:1] + "o" + toGuess[2:]
if word[2] == "O" or word[2] == "o" :
toGuess = toGuess[:2] + "o" + toGuess[3:]
if word[1] != "O" and word[1] != "o" and word[2] != "O" and word[2] != "o" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "o" + ", "
if guessChar == "P" or guessChar == "p" :
if word[1] == "P" or word[1] == "p" :
toGuess = toGuess[:1] + "p" + toGuess[2:]
if word[2] == "P" or word[2] == "p" :
toGuess = toGuess[:2] + "p" + toGuess[3:]
if word[1] != "P" and word[1] != "p" and word[2] != "P" and word[2] != "p" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "p" + ", "
if guessChar == "Q" or guessChar == "q" :
if word[1] == "Q" or word[1] == "q" :
toGuess = toGuess[:1] + "q" + toGuess[2:]
if word[2] == "Q" or word[2] == "q" :
toGuess = toGuess[:2] + "q" + toGuess[3:]
if word[1] != "Q" and word[1] != "q" and word[2] != "Q" and word[2] != "q" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "q" + ", "
if guessChar == "R" or guessChar == "r" :
if word[1] == "R" or word[1] == "r" :
toGuess = toGuess[:1] + "r" + toGuess[2:]
if word[2] == "R" or word[2] == "r" :
toGuess = toGuess[:2] + "r" + toGuess[3:]
if word[1] != "R" and word[1] != "r" and word[2] != "R" and word[2] != "r" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "r" + ", "
if guessChar == "S" or guessChar == "s" :
if word[1] == "S" or word[1] == "s" :
toGuess = toGuess[:1] + "s" + toGuess[2:]
if word[2] == "S" or word[2] == "s" :
toGuess = toGuess[:2] + "s" + toGuess[3:]
if word[1] != "S" and word[1] != "s" and word[2] != "S" and word[2] != "s" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "s" + ", "
if guessChar == "T" or guessChar == "t" :
if word[1] == "T" or word[1] == "t" :
toGuess = toGuess[:1] + "t" + toGuess[2:]
if word[2] == "T" or word[2] == | |
<reponame>sapcc/trove
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import os
import re
import six
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
class ConfigurationManager(object):
"""
ConfigurationManager is responsible for management of
datastore configuration.
Its base functionality includes reading and writing configuration files.
It is responsible for validating user inputs and requests.
When supplied an override strategy it allows the user to manage
configuration overrides as well.
"""
# Configuration group names. The names determine the order in which the
# groups get applied. System groups are divided into two camps; pre-user
# and post-user. In general system overrides will get applied over the
# user group, unless specified otherwise (i.e. SYSTEM_POST_USER_GROUP
# will be used).
SYSTEM_PRE_USER_GROUP = '10-system'
USER_GROUP = '20-user'
SYSTEM_POST_USER_GROUP = '50-system'
DEFAULT_STRATEGY_OVERRIDES_SUB_DIR = 'overrides'
DEFAULT_CHANGE_ID = 'common'
def __init__(self, base_config_path, owner, group, codec,
requires_root=False, override_strategy=None):
"""
:param base_config_path Path to the configuration file.
:type base_config_path string
:param owner Owner of the configuration files.
:type owner string
:param group Group of the configuration files.
:type group string
:param codec Codec for reading/writing of the particular
configuration format.
:type codec StreamCodec
:param requires_root Whether the manager requires superuser
privileges.
:type requires_root boolean
:param override_strategy Strategy used to manage configuration
overrides (e.g. ImportOverrideStrategy).
Defaults to OneFileOverrideStrategy
if None. This strategy should be
compatible with virtually any datastore.
It is recommended each datastore defines
its strategy explicitly to avoid upgrade
compatibility issues in case the default
implementation changes in the future.
:type override_strategy ConfigurationOverrideStrategy
"""
self._base_config_path = base_config_path
self._owner = owner
self._group = group
self._codec = codec
self._requires_root = requires_root
self._value_cache = None
if not override_strategy:
# Use OneFile strategy by default. Store the revisions in a
# sub-directory at the location of the configuration file.
revision_dir = guestagent_utils.build_file_path(
os.path.dirname(base_config_path),
self.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
self._override_strategy = OneFileOverrideStrategy(revision_dir)
else:
self._override_strategy = override_strategy
self._override_strategy.configure(
base_config_path, owner, group, codec, requires_root)
def get_value(self, key, default=None):
"""Return the current value at a given key or 'default'.
"""
if self._value_cache is None:
self.refresh_cache()
return self._value_cache.get(key, default)
def parse_configuration(self):
"""Read contents of the configuration file (applying overrides if any)
and parse it into a dict.
:returns: Configuration file as a Python dict.
"""
base_options = operating_system.read_file(
self._base_config_path, codec=self._codec,
as_root=self._requires_root)
updates = self._override_strategy.parse_updates()
guestagent_utils.update_dict(updates, base_options)
return base_options
def save_configuration(self, options):
"""Write given contents to the base configuration file.
Remove all existing overrides (both system and user).
:param options Contents of the configuration file.
:type options string or dict
"""
if isinstance(options, dict):
# Serialize a dict of options for writing.
self.save_configuration(self._codec.serialize(options))
else:
self._override_strategy.remove(self.USER_GROUP)
self._override_strategy.remove(self.SYSTEM_PRE_USER_GROUP)
self._override_strategy.remove(self.SYSTEM_POST_USER_GROUP)
operating_system.write_file(
self._base_config_path, options, as_root=self._requires_root)
operating_system.chown(
self._base_config_path, self._owner, self._group,
as_root=self._requires_root)
operating_system.chmod(
self._base_config_path, FileMode.ADD_READ_ALL,
as_root=self._requires_root)
self.refresh_cache()
def has_system_override(self, change_id):
"""Return whether a given 'system' change exists.
"""
return (self._override_strategy.exists(self.SYSTEM_POST_USER_GROUP,
change_id) or
self._override_strategy.exists(self.SYSTEM_PRE_USER_GROUP,
change_id))
def apply_system_override(self, options, change_id=DEFAULT_CHANGE_ID,
pre_user=False):
"""Apply a 'system' change to the configuration.
System overrides are always applied after all user changes so that
they override any user-defined setting.
:param options Configuration changes.
:type options string or dict
"""
group_name = (
self.SYSTEM_PRE_USER_GROUP if pre_user else
self.SYSTEM_POST_USER_GROUP)
self._apply_override(group_name, change_id, options)
def apply_user_override(self, options, change_id=DEFAULT_CHANGE_ID):
"""Apply a 'user' change to the configuration.
The 'system' values will be re-applied over this override.
:param options Configuration changes.
:type options string or dict
"""
self._apply_override(self.USER_GROUP, change_id, options)
def get_user_override(self, change_id=DEFAULT_CHANGE_ID):
"""Get the user overrides"""
return self._override_strategy.get(self.USER_GROUP, change_id)
def _apply_override(self, group_name, change_id, options):
if not isinstance(options, dict):
# Deserialize the options into a dict if not already.
self._apply_override(
group_name, change_id, self._codec.deserialize(options))
else:
self._override_strategy.apply(group_name, change_id, options)
self.refresh_cache()
def remove_system_override(self, change_id=DEFAULT_CHANGE_ID):
"""Revert a 'system' configuration change.
"""
self._remove_override(self.SYSTEM_POST_USER_GROUP, change_id)
self._remove_override(self.SYSTEM_PRE_USER_GROUP, change_id)
def remove_user_override(self, change_id=DEFAULT_CHANGE_ID):
"""Revert a 'user' configuration change.
"""
self._remove_override(self.USER_GROUP, change_id)
def _remove_override(self, group_name, change_id):
self._override_strategy.remove(group_name, change_id)
self.refresh_cache()
def refresh_cache(self):
self._value_cache = self.parse_configuration()
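# Rough usage sketch for ConfigurationManager (path, ownership and codec are
# illustrative assumptions, not taken from this file):
#   manager = ConfigurationManager('/etc/db/db.cnf', 'dbuser', 'dbgroup',
#                                  some_stream_codec, requires_root=True)
#   manager.apply_user_override({'max_connections': 100})
#   manager.get_value('max_connections')  # -> the overridden value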
@six.add_metaclass(abc.ABCMeta)
class ConfigurationOverrideStrategy(object):
"""ConfigurationOverrideStrategy handles configuration files.
The strategy provides functionality to enumerate, apply and remove
configuration overrides.
"""
@abc.abstractmethod
def configure(self, *args, **kwargs):
"""Configure this strategy.
A strategy needs to be configured before it can be used.
It would typically be configured by the ConfigurationManager.
"""
@abc.abstractmethod
def exists(self, group_name, change_id):
"""Return whether a given revision exists.
"""
@abc.abstractmethod
def apply(self, group_name, change_id, options):
"""Apply given options on the most current configuration revision.
Update if a file with the same id already exists.
:param group_name The group the override belongs to.
:type group_name string
:param change_id The name of the override within the group.
:type change_id string
:param options Configuration changes.
:type options dict
"""
@abc.abstractmethod
def remove(self, group_name, change_id=None):
"""Rollback a given configuration override.
Remove the whole group if 'change_id' is None.
:param group_name The group the override belongs to.
:type group_name string
:param change_id The name of the override within the group.
:type change_id string
"""
@abc.abstractmethod
def get(self, group_name, change_id=None):
"""Return the contents of a given configuration override
:param group_name The group the override belongs to.
:type group_name string
:param change_id The name of the override within the group.
:type change_id string
"""
def parse_updates(self):
"""Return all updates applied to the base revision as a single dict.
Return an empty dict if the base file is always the most current
version of configuration.
:returns: Updates to the base revision as a Python dict.
"""
return {}
class ImportOverrideStrategy(ConfigurationOverrideStrategy):
"""Import strategy keeps overrides in separate files that get imported
into the base configuration file which never changes itself.
An override file is simply deleted when the override is removed.
We keep two sets of override files in a separate directory.
- User overrides - configuration overrides applied by the user via the
Trove API.
- System overrides - 'internal' configuration changes applied by the
guestagent.
The name format of override files is: '<set prefix>-<n>-<group name>.<ext>'
where 'set prefix' is to used to order user/system sets,
'n' is an index used to keep track of the order in which overrides
within their set got applied.
"""
FILE_NAME_PATTERN = r'%s-([0-9]+)-%s\.%s$'
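# For example, with group '20-user', change id 'common' and a 'cnf' revision
# extension, the first revision created by apply() below would typically be
# named '20-user-001-common.cnf' inside the revision directory.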
def __init__(self, revision_dir, revision_ext):
"""
:param revision_dir Path to the directory for import files.
:type revision_dir string
:param revision_ext Extension of revision files.
:type revision_ext string
"""
self._revision_dir = revision_dir
self._revision_ext = revision_ext
def configure(self, base_config_path, owner, group, codec, requires_root):
"""
:param base_config_path Path to the configuration file.
:type base_config_path string
:param owner Owner of the configuration and
revision files.
:type owner string
:param group Group of the configuration and
revision files.
:type group string
:param codec Codec for reading/writing of the particular
configuration format.
:type codec StreamCodec
:param requires_root Whether the strategy requires superuser
privileges.
:type requires_root boolean
"""
self._base_config_path = base_config_path
self._owner = owner
self._group = group
self._codec = codec
self._requires_root = requires_root
def exists(self, group_name, change_id):
return self._find_revision_file(group_name, change_id) is not None
def apply(self, group_name, change_id, options):
self._initialize_import_directory()
revision_file = self._find_revision_file(group_name, change_id)
if revision_file is None:
# Create a new file.
last_revision_index = self._get_last_file_index(group_name)
revision_file = guestagent_utils.build_file_path(
self._revision_dir,
'%s-%03d-%s' % (group_name, last_revision_index + 1,
change_id),
self._revision_ext)
else:
# Update the existing file.
current = operating_system.read_file(
revision_file, codec=self._codec, as_root=self._requires_root)
options = guestagent_utils.update_dict(options, current)
operating_system.write_file(
revision_file, options, codec=self._codec,
as_root=self._requires_root)
operating_system.chown(
revision_file, self._owner, self._group,
as_root=self._requires_root)
operating_system.chmod(
revision_file, FileMode.ADD_READ_ALL, as_root=self._requires_root)
def _initialize_import_directory(self):
"""Lazy-initialize the directory for imported revision files.
"""
if not os.path.exists(self._revision_dir):
operating_system.create_directory(
self._revision_dir, user=self._owner, group=self._group,
force=True, as_root=self._requires_root)
def remove(self, group_name, change_id=None):
removed = set()
if change_id:
# Remove a given file.
revision_file = self._find_revision_file(group_name, change_id)
if revision_file:
removed.add(revision_file)
else:
# Remove the entire group.
removed = self._collect_revision_files(group_name)
for path in removed:
operating_system.remove(path, force=True,
as_root=self._requires_root)
def get(self, group_name, change_id):
revision_file = self._find_revision_file(group_name, change_id)
return operating_system.read_file(revision_file,
| |
<reponame>Astlaan/OpenQL
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
"""
`OpenQL` is a C++/Python framework for high-level quantum programming. The framework provides a compiler for compiling and optimizing quantum code. The compiler produces the intermediate quantum assembly language in cQASM (Common QASM) and the compiled eQASM (executable QASM) for various target platforms. While the eQASM is platform-specific, the quantum assembly code (QASM) is hardware-agnostic and can be simulated on the QX simulator.
"""
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_openql', [dirname(__file__)])
except ImportError:
import _openql
return _openql
if fp is not None:
try:
_mod = imp.load_module('_openql', fp, pathname, description)
finally:
fp.close()
return _mod
_openql = swig_import_helper()
del swig_import_helper
else:
import _openql
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
class SwigPyIterator(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _openql.delete_SwigPyIterator
__del__ = lambda self : None;
def value(self): return _openql.SwigPyIterator_value(self)
def incr(self, n=1): return _openql.SwigPyIterator_incr(self, n)
def decr(self, n=1): return _openql.SwigPyIterator_decr(self, n)
def distance(self, *args, **kwargs): return _openql.SwigPyIterator_distance(self, *args, **kwargs)
def equal(self, *args, **kwargs): return _openql.SwigPyIterator_equal(self, *args, **kwargs)
def copy(self): return _openql.SwigPyIterator_copy(self)
def next(self): return _openql.SwigPyIterator_next(self)
def __next__(self): return _openql.SwigPyIterator___next__(self)
def previous(self): return _openql.SwigPyIterator_previous(self)
def advance(self, *args, **kwargs): return _openql.SwigPyIterator_advance(self, *args, **kwargs)
def __eq__(self, *args, **kwargs): return _openql.SwigPyIterator___eq__(self, *args, **kwargs)
def __ne__(self, *args, **kwargs): return _openql.SwigPyIterator___ne__(self, *args, **kwargs)
def __iadd__(self, *args, **kwargs): return _openql.SwigPyIterator___iadd__(self, *args, **kwargs)
def __isub__(self, *args, **kwargs): return _openql.SwigPyIterator___isub__(self, *args, **kwargs)
def __add__(self, *args, **kwargs): return _openql.SwigPyIterator___add__(self, *args, **kwargs)
def __sub__(self, *args): return _openql.SwigPyIterator___sub__(self, *args)
def __iter__(self): return self
SwigPyIterator_swigregister = _openql.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class vectori(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def iterator(self): return _openql.vectori_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _openql.vectori___nonzero__(self)
def __bool__(self): return _openql.vectori___bool__(self)
def __len__(self): return _openql.vectori___len__(self)
def pop(self): return _openql.vectori_pop(self)
def __getslice__(self, *args, **kwargs): return _openql.vectori___getslice__(self, *args, **kwargs)
def __setslice__(self, *args, **kwargs): return _openql.vectori___setslice__(self, *args, **kwargs)
def __delslice__(self, *args, **kwargs): return _openql.vectori___delslice__(self, *args, **kwargs)
def __delitem__(self, *args): return _openql.vectori___delitem__(self, *args)
def __getitem__(self, *args): return _openql.vectori___getitem__(self, *args)
def __setitem__(self, *args): return _openql.vectori___setitem__(self, *args)
def append(self, *args, **kwargs): return _openql.vectori_append(self, *args, **kwargs)
def empty(self): return _openql.vectori_empty(self)
def size(self): return _openql.vectori_size(self)
def clear(self): return _openql.vectori_clear(self)
def swap(self, *args, **kwargs): return _openql.vectori_swap(self, *args, **kwargs)
def get_allocator(self): return _openql.vectori_get_allocator(self)
def begin(self): return _openql.vectori_begin(self)
def end(self): return _openql.vectori_end(self)
def rbegin(self): return _openql.vectori_rbegin(self)
def rend(self): return _openql.vectori_rend(self)
def pop_back(self): return _openql.vectori_pop_back(self)
def erase(self, *args): return _openql.vectori_erase(self, *args)
def __init__(self, *args):
this = _openql.new_vectori(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args, **kwargs): return _openql.vectori_push_back(self, *args, **kwargs)
def front(self): return _openql.vectori_front(self)
def back(self): return _openql.vectori_back(self)
def assign(self, *args, **kwargs): return _openql.vectori_assign(self, *args, **kwargs)
def resize(self, *args): return _openql.vectori_resize(self, *args)
def insert(self, *args): return _openql.vectori_insert(self, *args)
def reserve(self, *args, **kwargs): return _openql.vectori_reserve(self, *args, **kwargs)
def capacity(self): return _openql.vectori_capacity(self)
__swig_destroy__ = _openql.delete_vectori
__del__ = lambda self : None;
vectori_swigregister = _openql.vectori_swigregister
vectori_swigregister(vectori)
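# Usage sketch: the class above wraps a std::vector of ints, so (assuming the
# usual SWIG sequence constructor) it behaves roughly like:
#   v = vectori([1, 2, 3])
#   v.append(4)
#   len(v)  # -> 4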
class vectorui(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def iterator(self): return _openql.vectorui_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _openql.vectorui___nonzero__(self)
def __bool__(self): return _openql.vectorui___bool__(self)
def __len__(self): return _openql.vectorui___len__(self)
def pop(self): return _openql.vectorui_pop(self)
def __getslice__(self, *args, **kwargs): return _openql.vectorui___getslice__(self, *args, **kwargs)
def __setslice__(self, *args, **kwargs): return _openql.vectorui___setslice__(self, *args, **kwargs)
def __delslice__(self, *args, **kwargs): return _openql.vectorui___delslice__(self, *args, **kwargs)
def __delitem__(self, *args): return _openql.vectorui___delitem__(self, *args)
def __getitem__(self, *args): return _openql.vectorui___getitem__(self, *args)
def __setitem__(self, *args): return _openql.vectorui___setitem__(self, *args)
def append(self, *args, **kwargs): return _openql.vectorui_append(self, *args, **kwargs)
def empty(self): return _openql.vectorui_empty(self)
def size(self): return _openql.vectorui_size(self)
def clear(self): return _openql.vectorui_clear(self)
def swap(self, *args, **kwargs): return _openql.vectorui_swap(self, *args, **kwargs)
def get_allocator(self): return _openql.vectorui_get_allocator(self)
def begin(self): return _openql.vectorui_begin(self)
def end(self): return _openql.vectorui_end(self)
def rbegin(self): return _openql.vectorui_rbegin(self)
def rend(self): return _openql.vectorui_rend(self)
def pop_back(self): return _openql.vectorui_pop_back(self)
def erase(self, *args): return _openql.vectorui_erase(self, *args)
def __init__(self, *args):
this = _openql.new_vectorui(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args, **kwargs): return _openql.vectorui_push_back(self, *args, **kwargs)
def front(self): return _openql.vectorui_front(self)
def back(self): return _openql.vectorui_back(self)
def assign(self, *args, **kwargs): return _openql.vectorui_assign(self, *args, **kwargs)
def resize(self, *args): return _openql.vectorui_resize(self, *args)
def insert(self, *args): return _openql.vectorui_insert(self, *args)
def reserve(self, *args, **kwargs): return _openql.vectorui_reserve(self, *args, **kwargs)
def capacity(self): return _openql.vectorui_capacity(self)
__swig_destroy__ = _openql.delete_vectorui
__del__ = lambda self : None;
vectorui_swigregister = _openql.vectorui_swigregister
vectorui_swigregister(vectorui)
class vectorf(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def iterator(self): return _openql.vectorf_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _openql.vectorf___nonzero__(self)
def __bool__(self): return _openql.vectorf___bool__(self)
def __len__(self): return _openql.vectorf___len__(self)
def pop(self): return _openql.vectorf_pop(self)
def __getslice__(self, *args, **kwargs): return _openql.vectorf___getslice__(self, *args, **kwargs)
def __setslice__(self, *args, **kwargs): return _openql.vectorf___setslice__(self, *args, **kwargs)
def __delslice__(self, *args, **kwargs): return _openql.vectorf___delslice__(self, *args, **kwargs)
def __delitem__(self, *args): return _openql.vectorf___delitem__(self, *args)
def __getitem__(self, *args): return _openql.vectorf___getitem__(self, *args)
def __setitem__(self, *args): return _openql.vectorf___setitem__(self, *args)
def append(self, *args, **kwargs): return _openql.vectorf_append(self, *args, **kwargs)
def empty(self): return _openql.vectorf_empty(self)
def size(self): return _openql.vectorf_size(self)
def clear(self): return _openql.vectorf_clear(self)
def swap(self, *args, **kwargs): return _openql.vectorf_swap(self, *args, **kwargs)
def get_allocator(self): return _openql.vectorf_get_allocator(self)
def begin(self): return _openql.vectorf_begin(self)
def end(self): return _openql.vectorf_end(self)
def rbegin(self): return _openql.vectorf_rbegin(self)
def rend(self): return _openql.vectorf_rend(self)
def pop_back(self): return _openql.vectorf_pop_back(self)
def erase(self, *args): return _openql.vectorf_erase(self, *args)
def __init__(self, *args):
this = _openql.new_vectorf(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args, **kwargs): return _openql.vectorf_push_back(self, *args, **kwargs)
def front(self): return _openql.vectorf_front(self)
def back(self): return _openql.vectorf_back(self)
def assign(self, *args, **kwargs): return _openql.vectorf_assign(self, *args, **kwargs)
def resize(self, *args): return _openql.vectorf_resize(self, *args)
def insert(self, *args): return _openql.vectorf_insert(self, *args)
def reserve(self, *args, **kwargs): return _openql.vectorf_reserve(self, *args, **kwargs)
def capacity(self): return _openql.vectorf_capacity(self)
__swig_destroy__ = _openql.delete_vectorf
__del__ = lambda self : None;
vectorf_swigregister = _openql.vectorf_swigregister
vectorf_swigregister(vectorf)
class vectord(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def iterator(self): return _openql.vectord_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _openql.vectord___nonzero__(self)
def __bool__(self): return _openql.vectord___bool__(self)
def __len__(self): return _openql.vectord___len__(self)
def pop(self): return _openql.vectord_pop(self)
def __getslice__(self, *args, **kwargs): return _openql.vectord___getslice__(self, *args, **kwargs)
def __setslice__(self, *args, **kwargs): return _openql.vectord___setslice__(self, *args, **kwargs)
def __delslice__(self, *args, **kwargs): return _openql.vectord___delslice__(self, *args, **kwargs)
def __delitem__(self, *args): return _openql.vectord___delitem__(self, *args)
def __getitem__(self, *args): return _openql.vectord___getitem__(self, *args)
def __setitem__(self, *args): return _openql.vectord___setitem__(self, *args)
def append(self, *args, **kwargs): return _openql.vectord_append(self, *args, **kwargs)
def empty(self): return _openql.vectord_empty(self)
def size(self): return _openql.vectord_size(self)
def clear(self): return _openql.vectord_clear(self)
def swap(self, *args, **kwargs): return _openql.vectord_swap(self, *args, **kwargs)
def get_allocator(self): return _openql.vectord_get_allocator(self)
def begin(self): return _openql.vectord_begin(self)
def end(self): return _openql.vectord_end(self)
def rbegin(self): return _openql.vectord_rbegin(self)
def rend(self): return _openql.vectord_rend(self)
def pop_back(self): return _openql.vectord_pop_back(self)
def erase(self, *args): return _openql.vectord_erase(self, *args)
def __init__(self, *args):
this = _openql.new_vectord(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args, **kwargs): return _openql.vectord_push_back(self, *args, **kwargs)
def front(self): return _openql.vectord_front(self)
def back(self): return _openql.vectord_back(self)
def assign(self, *args, **kwargs): return _openql.vectord_assign(self, *args, **kwargs)
def resize(self, *args): return _openql.vectord_resize(self, *args)
def insert(self, *args): return _openql.vectord_insert(self, *args)
def reserve(self, *args, **kwargs): return _openql.vectord_reserve(self, *args, **kwargs)
def capacity(self): return _openql.vectord_capacity(self)
__swig_destroy__ = _openql.delete_vectord
__del__ = lambda self : None;
vectord_swigregister = _openql.vectord_swigregister
vectord_swigregister(vectord)
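# Hedged usage sketch (not part of the SWIG-generated wrapper above): the
# wrapped std::vector classes behave like Python sequences, so a vectord can
# be filled and read back through the methods defined above.
#
#   v = vectord()
#   v.push_back(0.5)
#   v.append(1.5)          # SWIG also exposes list-like helpers
#   print(len(v), v[0])    # -> 2 0.5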
def get_version():
"""
Returns OpenQL version
    Parameters
| |
= 0, 0
return BoundingBox2d(
[(x, y), (x + self.total_width, y - self.total_height)]
)
class Cell(Box): # ABC
is_visible = False
def place(self, x: float, y: float):
# Base cells do not render anything, therefore placing the content is
# not necessary
pass
def final_location(self) -> Tuple[float, float]:
# Base cells do not render anything, therefore final location is not
# important
return 0, 0
def render(self, m: Matrix44 = None) -> None:
pass
class Glue(Cell): # ABC
EMPTY: Tuple = tuple()
def __init__(
self, width: float, min_width: float = None, max_width: float = None
):
self._width: float = float(width)
self._min_width = float(min_width) if min_width else self._width
self._max_width: Optional[float] = max_width
def resize(self, width: float):
max_width = self._max_width
if max_width is not None:
width = min(max_width, width)
self._width = max(width, self._min_width)
@property
def can_shrink(self):
return self._min_width < self._width
@property
def can_grow(self):
return self._max_width is None or self._width < self._max_width
@property
def total_width(self) -> float:
return self._width
@property
def total_height(self) -> float:
return 0
def to_space(self) -> "Space":
return Space(self._width, self._min_width, self._max_width)
class Space(Glue):
pass
class NonBreakingSpace(Glue):
pass
class Tabulator(Glue):
pass
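# Hedged usage sketch (not from the original module): Glue widths are elastic,
# clamped between min_width and max_width by resize().
#
#   s = Space(width=4.0, min_width=2.0, max_width=6.0)
#   s.resize(10.0)   # clamped to max_width  -> s.total_width == 6.0
#   s.resize(1.0)    # clamped to min_width  -> s.total_width == 2.0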
class ContentCell(Cell): # ABC
"""Represents visible content like text or fractions.
Supported vertical alignments (IntEnum):
=== =================
int CellAlignment
=== =================
0 BOTTOM
1 CENTER
2 TOP
=== =================
"""
is_visible = True
def __init__(
self,
width: float,
height: float,
valign: CellAlignment = CellAlignment.BOTTOM,
renderer: ContentRenderer = None,
):
self._final_x: Optional[float] = None
self._final_y: Optional[float] = None
self._width = float(width)
self._height = float(height)
self.valign = CellAlignment(valign) # public attribute read/write
self.renderer = renderer
def set_final_location(self, x: float, y: float):
self._final_x = x
self._final_y = y
def final_location(self):
return self._final_x, self._final_y
@property
def total_width(self) -> float:
return self._width
@property
def total_height(self) -> float:
return self._height
def place(self, x: float, y: float):
"""(x, y) is the top/left corner"""
self._final_x = x
self._final_y = y
class Stroke:
# no enum because bit values can be combined: UNDERLINE + OVERLINE
NO_STROKE = 0
UNDERLINE = 1
STRIKE_THROUGH = 2
OVERLINE = 4
CONTINUE = 8 # continue stroke to following text cell
class Text(ContentCell):
"""Represents visible text content.
Supported strokes as bit values (flags), can be combined:
=== =================
int Stroke
=== =================
0 NO_STROKE
1 UNDERLINE
2 STRIKE THROUGH
4 OVERLINE
8 CONTINUE
=== =================
The CONTINUE flag extends the stroke of the current text cell across the
glue cells to the following text cell.
"""
def __init__(
self,
width: float,
height: float,
valign: CellAlignment = CellAlignment.BOTTOM,
stroke: int = Stroke.NO_STROKE,
renderer: ContentRenderer = None,
):
super().__init__(width, height, valign, renderer)
self.stroke = int(stroke) # public attribute read/write
def render(self, m: Matrix44 = None) -> None:
left, top = self.final_location()
height = self.total_height
bottom = top - height
right = left + self.total_width
self.renderer.render( # type: ignore
left=left, bottom=bottom, right=right, top=top, m=m
)
def render_stroke(
self,
extend_left: float = 0,
extend_right: float = 0,
m: Matrix44 = None,
) -> None:
left, top = self.final_location()
left -= extend_left
height = self.total_height
bottom = top - height
right = left + self.total_width + extend_right
renderer = self.renderer
assert renderer is not None
# render underline, strike through, overline
spacing = height / 5 # ???
if self.stroke & Stroke.UNDERLINE:
y = bottom - spacing
renderer.line(left, y, right, y, m)
if self.stroke & Stroke.STRIKE_THROUGH:
y = (top + bottom) / 2
renderer.line(left, y, right, y, m)
if self.stroke & Stroke.OVERLINE:
y = top + spacing
renderer.line(left, y, right, y, m)
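# Hedged usage sketch (not from the original module): Stroke values are plain
# ints, so they combine with bitwise OR. Assuming some ContentRenderer
# instance `renderer`, a text cell with an underline that continues across
# the following glue cells could look like this:
#
#   cell = Text(width=10.0, height=2.5,
#               stroke=Stroke.UNDERLINE | Stroke.CONTINUE,
#               renderer=renderer)
#   cell.place(0.0, 0.0)
#   cell.render_stroke(extend_right=1.5)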
def render_cells(cells: Iterable[Cell], m: Matrix44 = None) -> None:
for cell in cells:
if cell.is_visible:
cell.render(m)
def render_text_strokes(cells: List[Cell], m: Matrix44 = None) -> None:
"""Render text cell strokes across glue cells."""
# Should be called for container with horizontal arranged text cells
    # like HCellGroup to create underline, overline and strike through
# features.
# Can not render strokes across line breaks!
def stroke_extension():
extend = 0
i = index + 1
count = len(cells)
while i < count:
cell = cells[i]
# extend stroke only across adjacent glue cells:
if isinstance(cell, Glue):
extend += cell.total_width
else:
break
i += 1
return extend
for index, cell in enumerate(cells):
if isinstance(cell, Text) and cell.stroke:
extend = stroke_extension() if cell.stroke & Stroke.CONTINUE else 0
cell.render_stroke(extend_right=extend, m=m)
class Fraction(ContentCell):
"""Represents visible fractions.
Supported stacking A/B (IntEnum):
    === ======== =================================
    int Stacking Description
    === ======== =================================
    0   OVER     A over B, without horizontal line
    1   LINE     A over B, horizontal line between
    2   SLANTED  A slanted line B
    === ======== =================================
"""
HEIGHT_SCALE = 1.2
def __init__(
self,
top: ContentCell,
bottom: ContentCell,
stacking: Stacking = Stacking.OVER,
valign: CellAlignment = CellAlignment.BOTTOM,
renderer: ContentRenderer = None,
):
super().__init__(0, 0, valign, renderer)
self._stacking = stacking
self._top_content = top
self._bottom_content = bottom
self._update_size()
def _update_size(self):
top = self._top_content
bottom = self._bottom_content
if self._stacking == Stacking.SLANTED:
self._height = top.total_height + bottom.total_height
self._width = top.total_width + bottom.total_width
else:
self._height = self.HEIGHT_SCALE * (
top.total_height + bottom.total_height
)
self._width = max(top.total_width, bottom.total_width)
def place(self, x: float, y: float):
"""(x, y) is the top/left corner"""
self._final_x = x
self._final_y = y
width = self.total_width
height = self.total_height
top_content = self._top_content
bottom_content = self._bottom_content
if top_content is None or bottom_content is None:
raise ValueError("no content set")
if self._stacking == Stacking.SLANTED:
top_content.place(x, y) # left/top
x += width - bottom_content.total_width
y -= height - bottom_content.total_height
bottom_content.place(x, y) # right/bottom
else:
center = x + width / 2
x = center - top_content.total_width / 2
top_content.place(x, y) # center/top
x = center - bottom_content.total_width / 2
y -= height - bottom_content.total_height
bottom_content.place(x, y) # center/bottom
def render(self, m: Matrix44 = None) -> None:
self._top_content.render(m)
self._bottom_content.render(m)
if self._stacking != Stacking.OVER:
self._render_line(m)
def _render_line(self, m: Matrix44) -> None:
x, y = self.final_location()
tw = self.total_width
th = self.total_height
if self._stacking == Stacking.LINE:
x1 = x
x2 = x + tw
y1 = y2 = y - th / 2
else: # SLANTED
delta = min(tw, th) / 2
cx = x + self._top_content.total_width
cy = y - self._top_content.total_height
x1 = cx - delta
y1 = cy - delta
x2 = cx + delta
y2 = cy + delta
self.renderer.line(x1, y1, x2, y2, m) # type: ignore
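# Hedged usage sketch (not from the original module): a stacked fraction is
# built from two already-sized content cells; Stacking.LINE makes
# _render_line() draw the horizontal divider. `renderer` is assumed to be
# some ContentRenderer instance.
#
#   numerator = Text(width=4.0, height=2.0, renderer=renderer)
#   denominator = Text(width=6.0, height=2.0, renderer=renderer)
#   frac = Fraction(numerator, denominator,
#                   stacking=Stacking.LINE, renderer=renderer)
#   frac.place(0.0, 0.0)
#   frac.render()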
_content = (Text, Fraction)
_glue = (Space, NonBreakingSpace, Tabulator)
_no_break = (Text, NonBreakingSpace)
def normalize_cells(cells: Iterable[Cell]) -> List[Cell]:
def replace_pending_nbsp_by_spaces():
index = len(content) - 1
while index >= 0:
cell = content[index]
if isinstance(cell, NonBreakingSpace):
content[index] = cell.to_space()
index -= 1
else:
return
def is_useless_nbsp():
try:
peek = cells[index + 1]
except IndexError:
return True
if not isinstance(prev, _no_break) or not isinstance(peek, _no_break):
return True
return False
content = []
cells = list(cells)
prev = None
for index, cell in enumerate(cells):
if isinstance(cell, _content):
if isinstance(prev, _content):
raise ValueError("no glue between content cells")
elif isinstance(cell, NonBreakingSpace) and is_useless_nbsp():
cell = cell.to_space()
replace_pending_nbsp_by_spaces()
prev = cell
content.append(cell)
# remove pending glue:
while content and isinstance(content[-1], _glue):
content.pop()
return content
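# Hedged illustration (not from the original module) of the normalization
# rules above, assuming a `renderer` instance and arbitrary sizes:
#
#   cells = [Text(4, 2, renderer=renderer), NonBreakingSpace(1), Space(1)]
#   normalize_cells(cells)
#   # -> the NBSP has no no-break neighbour on its right, so it is converted
#   #    to a plain Space; both trailing glue cells are then dropped, leaving
#   #    only the Text cell.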
class Container(Box):
def __init__(
self,
width: Optional[float],
height: float = None,
margins: Sequence[float] = None,
renderer: ContentRenderer = None,
):
self._final_x: Optional[float] = None
self._final_y: Optional[float] = None
# _content_width is None for: defined by content
self._content_width: Optional[float] = width
# _content_height is None for: defined by content
self._content_height: Optional[float] = height
# margins are always defined
self._margins: Tuple4f = resolve_margins(margins)
# content renderer is optional:
self.renderer: Optional[ContentRenderer] = renderer
def place(self, x: float, y: float):
self._final_x = x
self._final_y = y
self.place_content()
def final_location(self):
if not self.is_placed():
raise ValueError("Container is not placed.")
return self._final_x, self._final_y
def is_placed(self) -> bool:
return self._final_x is not None and self._final_y is not None
@abc.abstractmethod
def __iter__(self) -> Box:
pass
@property
def top_margin(self) -> float:
return self._margins[0]
@property
def right_margin(self) -> float:
return self._margins[1]
@property
def bottom_margin(self) -> float:
return self._margins[2]
@property
def left_margin(self) -> float:
return self._margins[3]
@property
def content_width(self) -> float:
if self._content_width is None:
return 0
else:
return self._content_width
@property
def total_width(self) -> float:
return self.content_width + self.right_margin + self.left_margin
@property
def content_height(self) -> float:
| |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
This is a neural network model of the development of reduplicated canonical
babbling in human infancy.
This is a modification of Izhikevich's (2007 Cerebral Cortex) daspnet.m and of
a previous model described in Warlaumont (2012, 2013 ICDL-EpiRob) and Warlaumont
& Finnegan (2016 PLOS ONE). Code from those papers was written in MATLAB. This
is a rewriting of the 2016 code in Python.
Vocal tract simulation is performed in Praat (so you must have Praat installed
for this to run).
This version currently only supports human reinforcement. In the MATLAB version
automatic salience-based reinforcement using a modified version of Coath et
al.'s (2009) auditory salience algorithms, written in MATLAB, was also an
option.
<NAME>
<EMAIL> or <EMAIL>
http://www.annewarlaumont.org
For updates, see https://github.com/AnneSWarlaumont/BabbleNN
"""
# Commented out for debugging:
# def sim(simid,path,T,reinforcer,muscscale,yoke,plotOn):
"""
Starts or restarts a simulation
simid: a unique identifier for this simulation. Should not contain spaces.
path: path to the directory where your sim data should be saved. No slash
at the end.
T: the length of time the experiment is to run in seconds. This can be
changed to a longer or shorter value when a simulation is restarted
reinforcer: the type of reinforcement. For now, must be 'human'.
muscscale: this scales the activity sent to Praat. 4 is the recommended
value
yoke: indicates whether to run an experiment or a yoked control simulation.
Set to False to run a regular simulation. Set to True to run a
yoked control. There must already have been a simulation of the same
id run, with its data on the path, for the simulation to yoke to.
plotOn: enables plots of several simulation parameters. Set to False to
disable plots and to True to enable.
    Example use: sim('Mortimer','/Users/awarlau/Downloads',7200,'human',4,
False,False)
"""
#Temporary, for debugging:
simid = 'Mortimer'
path = '/Users/awarlau/Downloads'
T = 60 * 30 # simulation length in seconds (here 30 minutes)
reinforcer = 'relhipos' # 'agonist_spike' # 'relhipos' # # 'sumsmoothmusc>0'
thresh = 0
threshinc = 5
temprewhistlen = 20
muscscale = 4
yoke = False
plotOn = True
soutscale = 100
STDPadd = 1000
import os, numpy as np
DAinc = 1 # amount of dopamine given during reward
M = 100 # number of synapses per neuron
Ne = 800 # number of excitatory reservoir neurons
Ni = 200 # number of inhibitory reservoir neurons
N = Ne + Ni # total number of reservoir neurons
Nout = 200 # number of reservoir output neurons
Nmot = Nout # number of motor neurons
a = np.concatenate((0.02 * np.ones((Ne)), 0.1 * np.ones((Ni))))
# time scales of the membrane recovery variable for reservoir neurons
d = np.concatenate((8 * np.ones((Ne)), 2 * np.ones((Ni))))
# membrane recovery variable after-spike shift for reservoir neurons
a_mot = 0.02 * np.ones((Nmot))
# time scales of the membrane recovery variable for motor neurons
d_mot = 8 * np.ones((Nmot))
# membrane recovery variable after-spike shift for motor neurons
post = np.floor(np.concatenate(
(N * np.random.rand(Ne,M), Ne * np.random.rand(Ni,M))))
# Assign the postsynaptic neurons for each reservoir neuron
post_mot = np.repeat(np.arange(Nmot).transpose(),Nout,0)
# all output neurons connect to all motor neurons
s = np.concatenate((np.random.rand(Ne,M),-1 * np.random.rand(Ni,M)))
# synaptic weights within the reservoir
sout = np.random.rand(Nout,Nmot) # synaptic weights from output to motor
sout = soutscale * sout / np.mean(sout) # normalize sout
sd = np.zeros((Nout,Nmot)) # will store the changes to be made to sout
STDP = np.zeros(Nout)
v = -65 * np.ones((N)) # reservoir membrane potentials
v_mot = -65 * np.ones((Nmot)) # motor neuron membrane potentials
u = 0.2 * v # reservoir membrane recovery variables
u_mot = 0.2 * v_mot # motor neuron membrane recovery variables
firings = [] # reservoir neuron firings for the current second
outFirings = [] # output neuron firings for the current second
motFirings = [] # motor neuron firings for the current second
DA = 0 # level of dopamine above baseline
muscsmooth = 100 # spike train data smoothed by 100 ms moving average
sec = 0 # current time in the simulation
rew = [] # track when rewards were received
hist_sumsmoothmusc = [] # keep a record of sumsmoothmusc after each second
# Initialize reward policy variables:
if reinforcer == 'relhipos':
    temprewhist = [False] * temprewhistlen # Keeps track, for up to temprewhistlen previous sounds, of
# when the threshold for reward was exceeded
rewcount = 0
# Absolute path where Praat can be found
praatPathmac = '/Applications/Praat.app/Contents/MacOS/Praat'
# Set data directory names:
wavdir = path + '/' + simid + '_Wav'
firingsdir = path + '/' + simid + '_Firings'
# Create data directories:
if os.path.isdir(wavdir) != True:
os.mkdir(wavdir)
if os.path.isdir(firingsdir) != True:
os.mkdir(firingsdir)
# Begin the simulation!
for sec in range(sec,T):
print('********************************************')
print('Second ' + str(sec+1) + ' of ' + str(T))
# Reset firings
firings = []
outFirings = []
motFirings = []
for t in range(0,1000): # millisecond timesteps
# give random input to reservoir and motor neurons:
I = 13 * (np.random.rand(N))
I_mot = 13 * (np.random.rand(Nmot))
# get the indices of fired neurons:
fired = v >= 30
fired_out = v[0:Nout] >= 30
fired_mot = v_mot >= 30
# reset the voltages for the neurons that fired:
v[fired] = -65
v_mot[fired_mot] = -65
# individual neuron dynamics:
u[fired] = u[fired] + d[fired]
u_mot[fired_mot] = u_mot[fired_mot] + d_mot[fired_mot]
# spike-timing dependent plasticity computations:
STDP[fired_out] = STDPadd # record output neuron (i.e.
# presynaptic neuron)spike times.
for k in range(0,Nmot):
if fired_mot[k]:
sd[:,k] = sd[:,k] + STDP # adjust sd for potentiation-eligible
# synapses
motFirings.append([t,k]) # update records of when motor
# neurons fired
for k in range(0,Nout):
if fired_out[k]:
outFirings.append([t,k]) # update the records of when
# output neurons fired
for k in range(0,N):
if fired[k]:
firings.append([t,k]) # update the records of when
# reservoir neurons fired
# For any presynaptic neuron that fired, calculate the input
# current to add to each of its postsynaptic neurons as
# proportional to the synaptic strength from the presynaptic to
# the postsynaptic neuron:
for k in range(0,len(firings)):
if firings[k][0] > t-1:
for l in range(0,np.size(post,1)):
postnum = int(post[firings[k][1], l])
I[postnum] = I[postnum] + s[firings[k][1], l]
# Calculate the currents to add to the motor neurons:
for k in range(0,len(outFirings)):
if outFirings[k][0] > t:
for l in range(0,np.size(post_mot,1)):
postnum = int(post_mot[outFirings[k][1], l])
I_mot[postnum] = I_mot[postnum] + 2 * sout[outFirings[k][1], l]
# Individual neuronal dynamics computations (for numerical
# stability the time step is 0.5 ms)
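        # These are the standard Izhikevich (2003) simple-model equations:
        #   v' = 0.04*v^2 + 5*v + 140 - u + I
        #   u' = a*(b*v - u), with b = 0.2 here
        # integrated as two 0.5 ms half-steps per 1 ms tick.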
v = v + 0.5 * ((0.04 * v + 5) * v + 140 - u + I)
v = v + 0.5 * ((0.04 * v + 5) * v + 140 - u + I)
v_mot = v_mot + 0.5 * (
(0.04 * v_mot + 5) * v_mot + 140 - u_mot + I_mot)
v_mot = v_mot + 0.5 * (
(0.04 * v_mot + 5) * v_mot + 140 - u_mot + I_mot)
u = u + a * (0.2 * v - u)
u_mot = u_mot + a_mot * (0.2 * v_mot - u_mot)
# Exponential decay of the traces of presynaptic neuron firing
# with tau = 20 ms
STDP = 0.95 * STDP
# Exponential decay of the dopamine concentration over time
DA = DA * 0.995
# Every 10 ms, modify synaptic weights:
if (t + 1) % 10 == 0:
prevsout = sout # for debugging
sout = np.maximum(0, sout + DA * sd)
sout = soutscale * sout / np.mean(sout) # normalize
sd = 0.99 * sd # The eligibility trace decays exponentially
# evaluate the model and maybe give DA:
# initialize second-long records of agonist and antagonist spikes
if t == 0:
numfiredmusc1pos = -1 * np.ones(1000)
numfiredmusc1neg = -1 * np.ones(1000)
smoothmuscpos = -1 * np.ones(1000 - muscsmooth)
smoothmuscneg = -1 * np.ones(1000 - muscsmooth)
smoothmusc = -1 * np.ones(1000 - muscsmooth)
# Find out which of the agonist and antagonist jaw/lip motor
# neurons fired this ms:
numfiredmusc1pos[t] = sum(v_mot[0:int(Nmot/2)] >= 30)
numfiredmusc1neg[t] = sum(v_mot[int(Nmot/2):Nmot] >= 30)
if reinforcer == 'agonist_spike':
if numfiredmusc1pos[t] > 0:
rew.append(sec*1000+t)
if t == 999:
# Create a moving average of the | |
import os
import zipfile
import PIL
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.python.keras.applications.vgg16 import decode_predictions
from tensorflow.python.keras.preprocessing.image import load_img
import numpy as np
import matplotlib.pyplot as plt
from ImageClassificator import ImageClassificator
from OCReader import OCReader
from ReportFoto import ReportFoto
class ImageProcessor:
vgg = None
cnn = None
def run(self):
if self.vgg is None and self.cnn is None:
self.create_processors()
dict_result = {}
categorie = ['book_jacket', 'web_site', 'monitor', 'scoreboard', 'street_sign', 'perfume', 'carton',
'digital_clock'
, 'hair_spray', 'wall_clock']
pino = "photo_downloaded\\"
mypath2 = "C:\\Users\\matti\\git\\ProgettoLube\\ProgettoLube\\WebInspector\\"
paths = [os.path.join("photo_downloaded\\", fn) for fn in next(os.walk("photo_downloaded\\"))[2]]
temp = []
counter = 0
counterLubeCreoERRATI = 0
counterLubeCreoNi = 0
counterLubeCreoOk = 0
counterLubeERRATI = 0
counterLubeNi = 0
counterLubeOk = 0
counterCreoERRATI = 0
counterCreoNi = 0
counterCreoOk = 0
counterCompetitos = 0
counterNotLogo = 0
for x in paths:
temp.append("C:\\Users\\matti\\git\\ProgettoLube\\ProgettoLube\\WebInspector\\" + x)
dict_result['foto_trovate'] = len(temp)
for x in temp:
try:
image = load_img(x,
target_size=(224, 224))
            except PIL.UnidentifiedImageError as e:
                print('error')
                continue  # skip unreadable images instead of reusing the previous one
# plt.imshow(image)
# plt.show()
# im = cv2.resize(cv2.imread(IMAGE_PATH), (224, 224))
            # the predict method expects a tensor of shape (N, 224, 224, 3)
            # so for a single image it must be (1, 224, 224, 3)
# im = np.expand_dims(im, axis=0)
            # another way to proceed
image = np.array(image)
try:
image = np.expand_dims(image, axis=0)
except ValueError:
print('error')
try:
predictions = self.vgg.predict(image)
            except ValueError:
                print('error')
                continue  # without predictions the rest of the loop body cannot run
label = decode_predictions(predictions, top=5)
# retrieve the most likely result, e.g. highest probability
# print(label)
label = label[0][0]
# label = label[0][:]
# print(label)
# print the classification
print('%s (%.2f%%)' % (label[1], label[2] * 100))
for y in categorie:
if label[1] == y:
counter = counter + 1
print("LOGO CORRETTO TROVATO ", x)
predizione = self.cnn.predict(x)
if predizione == 'lube&creo ERRATI':
counterLubeCreoERRATI = counterLubeCreoERRATI + 1
if predizione == 'lube&creo loghi ok ma proporzioni o abbinamenti NON CORRETTI':
counterLubeCreoNi = counterLubeCreoNi + 1
if predizione == 'lube&creo TUTTO OK':
counterLubeCreoOk = counterLubeCreoOk + 1
if predizione == 'creo ERRATI':
counterCreoERRATI = counterCreoERRATI + 1
if predizione == 'creo loghi ok ma proporzioni o abbinamenti NON CORRETTI':
counterCreoNi = counterCreoNi + 1
if predizione == 'creo TUTTO OK':
counterCreoOk = counterCreoOk + 1
if predizione == 'lube loghi ok ma proporzioni o abbinamenti NON CORRETTI':
counterLubeNi = counterLubeNi + 1
if predizione == 'lubeERRATI':
counterLubeERRATI = counterLubeERRATI + 1
if predizione == 'lubeTUTTO OK':
counterLubeOk = counterLubeOk + 1
if predizione == 'NOT LOGO':
counterNotLogo = counterNotLogo + 1
if predizione == 'competitors':
                        ocr_reader = OCReader()
flag = ocr_reader.search_for_competitors(path=x)
if flag:
counterCompetitos = counterCompetitos + 1
print(counter)
dict_result['logo_correctness'] = {
'lube&creo ERRATI': counterLubeCreoERRATI,
'lube&creo loghi ok ma proporzioni o abbinamenti NON CORRETTI': counterLubeCreoNi,
'lube&creo TUTTO OK': counterLubeCreoOk,
'lube ERRATI': counterLubeERRATI,
'lube loghi ok ma proporzioni o abbinamenti NON CORRETTI': counterLubeNi,
'lube TUTTO OK': counterLubeOk,
'creo ERRATI': counterCreoERRATI,
'creo loghi ok ma proporzioni o abbinamenti NON CORRETTI': counterCreoNi,
'creo TUTTO OK': counterCreoOk,
'competitors': counterCompetitos,
'not logo': counterNotLogo
}
return dict_result
def ocr_scan(self,platform):
ocr = OCReader()
dictionary_parole_dentro_immagine = ocr.read_text_two(platform)
return dictionary_parole_dentro_immagine
def create_processors(self):
self.vgg = VGG16(weights='imagenet', include_top=True)
self.vgg.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.0001), loss='binary_crossentropy', metrics=['acc'])
self.cnn = ImageClassificator()
        flag_stampa_trend_training = False  # change this if you want to see the training trend plot
self.cnn.create_model(flag_stampa_trend_training)
def generate_report_foto(self, platform):
x = self.run()
#y = self.ocr_scan(platform)
report_foto = ReportFoto(x, None)
return report_foto
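    # Hedged usage sketch (assumed entry point, not from the original file):
    #
    #   processor = ImageProcessor()
    #   report = processor.generate_report_foto(platform='facebook')
    #
    # 'facebook' is a placeholder platform name; generate_report_foto() only
    # forwards it towards the (currently disabled) OCR step.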
# img_height = 180
# img_width = 180
# base_dir = "C:\\Users\\matti\\OneDrive\\Desktop\\vgg16_logos"
#
# # train_dir = os.path.join(base_dir, 'training')
# # validation_dir = os.path.join(base_dir, 'validation')
# #
# # # Directory with our training cat pictures
# # #train_cats_dir = os.path.join(train_dir, 'cats')
# #
# # # Directory with our training dog pictures
# # #train_dogs_dir = os.path.join(train_dir, 'dogs')
# #
# # # Directory with our validation cat pictures
# # #validation_cats_dir = os.path.join(validation_dir, 'cats')
# #
# # # Directory with our validation dog pictures
# # #validation_dogs_dir = os.path.join(validation_dir, 'dogs')
# #
# # # Add our data-augmentation parameters to ImageDataGenerator
# # train_datagen = ImageDataGenerator(rescale=1. / 255., rotation_range=40, width_shift_range=0.2,
# # height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
# #
# # # Note that the validation data should not be augmented!
# # test_datagen = ImageDataGenerator(rescale=1.0 / 255.)
# #
# # # Flow training images in batches of 20 using train_datagen generator
# # train_generator = train_datagen.flow_from_directory(train_dir, batch_size=20, class_mode='binary',
# # target_size=(224, 224))
# #
# # # Flow validation images in batches of 20 using test_datagen generator
# # validation_generator = test_datagen.flow_from_directory(validation_dir, batch_size=20, class_mode='binary',
# # target_size=(224, 224))
#
# # base_model = VGG16(input_shape=(224, 224, 3), # Shape of our images
# # include_top=False, # Leave out the last fully connected layer
# # weights='imagenet')
#
# # for layer in base_model.layers:
# # layer.trainable = False
#
# # Flatten the output layer to 1 dimension
# # x = layers.Flatten()(base_model.output)
#
# # Add a fully connected layer with 512 hidden units and ReLU activation
# # x = layers.Dense(512, activation='relu')(x)
#
# # Add a dropout rate of 0.5
# # x = layers.Dropout(0.5)(x)
#
# # Add a final sigmoid layer for classification
# # x = layers.Dense(1, activation='sigmoid')(x)
#
# # model = tf.keras.models.Model(base_model.input, x)
# model = VGG16(weights='imagenet', include_top=True)
#
# model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.0001), loss='binary_crossentropy', metrics=['acc'])
#
# # vgghist = model.fit(train_generator, validation_data=validation_generator, steps_per_epoch=100, epochs=10)
#
# # img = keras.preprocessing.image.load_img(
# # 'C:\\Users\\matti\\git\\ProgettoLube\\ProgettoLube\\WebInspector\\images\\logo.png', target_size=(img_height,img_width)
# # )
# # img_array = keras.preprocessing.image.img_to_array(img)
# # img_array = tf.expand_dims(img_array, 0) # Create a batch
#
# ###############################################################################################################
# dict_result = {}
# ic = ImageClassificator()
# flag_stampa_trend_training = False # change this if you want to see the training trend plot
# ic.create_model(flag_stampa_trend_training)
# categorie = ['book_jacket', 'web_site', 'monitor', 'scoreboard', 'street_sign', 'perfume', 'carton', 'digital_clock'
# , 'hair_spray', 'wall_clock']
# pino = "photo_downloaded\\"
# mypath2 = "C:\\Users\\matti\\git\\ProgettoLube\\ProgettoLube\\WebInspector\\"
# paths = [os.path.join("photo_downloaded\\", fn) for fn in next(os.walk("photo_downloaded\\"))[2]]
# temp = []
# counter = 0
# counterErrati = 0
# counterNi = 0
# counterOk = 0
# for x in paths:
# temp.append("C:\\Users\\matti\\git\\ProgettoLube\\ProgettoLube\\WebInspector\\" + x)
# dict_result['foto_trovate'] = len(temp)
# for x in temp:
# try:
# image = load_img(x,
# target_size=(224, 224))
#
# except PIL.UnidentifiedImageError as e:
# print('error')
#
# # plt.imshow(image)
# # plt.show()
#
# # im = cv2.resize(cv2.imread(IMAGE_PATH), (224, 224))
# # the predict method expects a tensor of shape (N, 224, 224, 3)
# # so for a single image it must be (1, 224, 224, 3)
# # im = np.expand_dims(im, axis=0)
#
# # another way to proceed
# image = np.array(image)
# try:
# image = np.expand_dims(image, axis=0)
# except ValueError:
# print('error')
# try:
# predictions = model.predict(image)
# except ValueError:
# print('error')
#
# label = decode_predictions(predictions, top=5)
# # retrieve the most likely result, e.g. highest probability
# # print(label)
# label = label[0][0]
# # label = label[0][:]
# # print(label)
# # print the classification
# print('%s (%.2f%%)' % (label[1], label[2] * 100))
# for y in categorie:
# if label[1] == y:
# counter = counter + 1
# print("LOGO CORRETTO TROVATO ", x)
# predizione = ic.predict(x)
# if predizione == 'lube&creo ERRATI':
# counterErrati = counterErrati + 1
# if predizione == 'lube&creo loghi ok ma proporzioni o abbinamenti NON CORRETTI':
# counterNi = counterNi + 1
# if predizione == 'lube&creo TUTTO OK':
# counterOk = counterOk + 1
#
# print(counter)
# dict_result['logo'] = {
# 'lube&creo ERRATI': str(counterErrati),
# 'lube&creo loghi ok ma proporzioni o abbinamenti NON CORRETTI': str(counterNi),
# 'lube&creo TUTTO OK': str(counterOk)
# }
#
# ######################################################################################
#
# # image2 = load_img('C:\\Users\\matti\\git\\ProgettoLube\\ProgettoLube\\WebInspector\\images\\logo2.png',
# # target_size=(224, 224))
# #
# # plt.imshow(image2)
# # plt.show()
# #
# # # im = cv2.resize(cv2.imread(IMAGE_PATH), (224, 224))
# # # il metodo predict si attende un tensore N, 224, 224, 3
# # # quindi per una sola immagine deve essere 1, 224, 224, 3
# # # im = np.expand_dims(im, | |
import pandas as pd
import numpy as np
import requests
import requests_ftp
import os
import re
import csv
from unidecode import unidecode
def EFO_parent_mapper(Cat_Stud, Cat_Anc_byN):
"""A function to map the parents and traits linkage file to the remaining
    catalogue datasets. It also tidies up a few term strings so that labels
    which would otherwise be very long render more cleanly on the notebook graphs.
"""
with open(os.path.abspath(os.path.join('__file__',
'../..',
'data',
'Catalogue',
'Synthetic',
'Mapped_EFO.csv')), 'w') as fileout:
efo_out = csv.writer(fileout, delimiter=',', lineterminator='\n')
efo_out.writerow(['EFO URI', 'STUDY ACCESSION',
'PUBMEDID', 'ASSOCIATION COUNT'])
for index, row in Cat_Stud.iterrows():
listoftraits = row['MAPPED_TRAIT_URI'].split(',')
for trait in listoftraits:
efo_out.writerow([trait.lower().strip(),
row['STUDY ACCESSION'],
str(row['PUBMEDID']),
str(row['ASSOCIATION COUNT'])])
EFOsPerPaper = pd.read_csv(os.path.abspath(
os.path.join('__file__',
'../..',
'data',
'Catalogue',
'Synthetic',
'Mapped_EFO.csv')), sep=',')
EFO_Parent_Map = pd.read_csv(os.path.abspath(
os.path.join('__file__',
'../..',
'data',
'Catalogue',
'Raw',
'Cat_Map.tsv')), sep='\t')
EFO_Parent_Map['EFO URI'] = EFO_Parent_Map['EFO URI'].str.lower(
).str.strip()
EFO_Parent_Map = EFO_Parent_Map[[
'EFO URI', 'Parent term', 'EFO term']].drop_duplicates()
EFO_Parent_Paper_Merged = pd.merge(
EFOsPerPaper, EFO_Parent_Map, on='EFO URI', how='left')
Mapped = pd.merge(EFO_Parent_Paper_Merged, Cat_Anc_byN,
how='left', on='STUDY ACCESSION')
Mapped = Mapped[~Mapped['N'].isnull(
)]
Mapped['EFO term'] = Mapped['EFO term'].str.replace('measurement', 'meas.')
Mapped['EFO term'] = Mapped['EFO term'].str.title()
Mapped['EFO term'] = Mapped['EFO term'].str.replace(
'Hd Lc', 'HD LC')
Mapped['EFO term'] = Mapped['EFO term'].str.replace(
'Ld Lc', 'LD LC')
Mapped['EFO term'] = Mapped['EFO term'].replace(
'High Density Lipoprotein Cholesterol Meas.', 'HD LC measurement')
Mapped['EFO term'] = Mapped['EFO term'].replace(
'Low Density Lipoprotein Cholesterol Meas.', 'LD LC measurement')
Mapped['EFO term'] = Mapped['EFO term'].str.replace(' Ii ', ' II ')
Mapped['Parent term'] = Mapped['Parent term'].str.replace('measurement',
'meas.')
Mapped['Parent term'] = Mapped['Parent term'].str.replace(' or ', '/')
Mapped['Parent term'] = Mapped['Parent term'].str.title()
with open(os.path.abspath(
os.path.join('__file__',
'../..',
'data',
'Catalogue',
'Synthetic',
'Map_NoComs.csv')), 'w') as fileout:
efo_out = csv.writer(fileout, delimiter=',', lineterminator='\n')
efo_out.writerow(['EFO URI', 'STUDY ACCESSION',
'PUBMEDID', 'ASSOCIATION COUNT'])
for index, row in Cat_Stud.iterrows():
if ',' not in row['MAPPED_TRAIT_URI']:
efo_out.writerow([row['MAPPED_TRAIT_URI'].lower().strip(),
row['STUDY ACCESSION'],
str(row['PUBMEDID']),
str(row['ASSOCIATION COUNT'])])
EFOsPer_NoComs = pd.read_csv(os.path.abspath(
os.path.join('__file__', '../..', 'data',
'Catalogue', 'Synthetic',
'Map_NoComs.csv')), sep=',')
EFO_Parent_Paper_Merged = pd.merge(
EFOsPer_NoComs, EFO_Parent_Map, on='EFO URI', how='left')
Map_NoComs = pd.merge(EFO_Parent_Paper_Merged, Cat_Anc_byN,
how='left', on='STUDY ACCESSION')
Map_NoComs = Map_NoComs[~Map_NoComs['N'].isnull()]
Map_NoComs['EFO term'] = Map_NoComs['EFO term'].str.replace('measurement',
'meas.')
Map_NoComs['EFO term'] = Map_NoComs['EFO term'].str.title()
Map_NoComs['EFO term'] = Map_NoComs['EFO term'].str.replace('Hd Lc',
'HD LC')
Map_NoComs['EFO term'] = Map_NoComs['EFO term'].str.replace('Ld Lc',
'LD LC')
Map_NoComs['EFO term'] = Map_NoComs['EFO term'].replace(
'High Density Lipoprotein Cholesterol Meas.', 'HD LC measurement')
Map_NoComs['EFO term'] = Map_NoComs['EFO term'].replace(
'Low Density Lipoprotein Cholesterol Meas.', 'LD LC measurement')
Map_NoComs['EFO term'] = Map_NoComs['EFO term'].str.replace(' Ii ', ' II ')
Map_NoComs['Parent term'] = Map_NoComs['Parent term'].str.replace(
'measurement', 'meas.')
Map_NoComs['Parent term'] = Map_NoComs['Parent term'].str.replace(
' or ', '/').str.title()
return Mapped, Map_NoComs
def load_gwas_cat():
"""A function which cleans and loads all the main GWAS Catalog files into
    the notebook workspace. It renames a couple of fields and builds one
    groupby().sum() aggregate of sample sizes per study accession for return.
"""
Cat_Stud = pd.read_csv(os.path.abspath(
os.path.join('__file__',
'../..',
'data',
'Catalogue',
'Raw',
'Cat_Stud.tsv')),
header=0, sep='\t', encoding='utf-8',
index_col=False)
Cat_Stud.fillna('N/A', inplace=True)
Cat_Anc = pd.read_csv(os.path.abspath(
os.path.join('__file__', '../..',
'data',
'Catalogue',
'Raw',
'Cat_Anc.tsv')),
header=0, sep='\t', encoding='utf-8',
index_col=False)
Cat_Anc.rename(columns={'BROAD ANCESTRAL CATEGORY': 'BROAD ANCESTRAL',
'NUMBER OF INDIVDUALS': 'N'}, inplace=True)
Cat_Anc = Cat_Anc[~Cat_Anc['BROAD ANCESTRAL'].isnull()]
Cat_Anc.columns = Cat_Anc.columns.str.replace('ACCCESSION', 'ACCESSION')
Cat_Anc_byN = Cat_Anc[['STUDY ACCESSION', 'N',
'DATE']].groupby(by='STUDY ACCESSION').sum()
Cat_Anc_byN = Cat_Anc_byN.reset_index()
Cat_Anc_byN = pd.merge(Cat_Anc_byN, Cat_Stud[[
'STUDY ACCESSION', 'DATE']], how='left', on='STUDY ACCESSION')
cleaner_broad = pd.read_csv(os.path.abspath(
os.path.join('__file__',
'../..',
'data',
'Support',
'dict_replacer_broad.tsv')),
sep='\t', header=0, index_col=False)
Cat_Anc = pd.merge(Cat_Anc, cleaner_broad, how='left',
on='BROAD ANCESTRAL')
Cat_Anc['Dates'] = [pd.to_datetime(d) for d in Cat_Anc['DATE']]
Cat_Anc['N'] = pd.to_numeric(Cat_Anc['N'], errors='coerce')
Cat_Anc = Cat_Anc[Cat_Anc['N'].notnull()]
Cat_Anc['N'] = Cat_Anc['N'].astype(int)
Cat_Anc = Cat_Anc.sort_values(by='Dates')
Cat_Anc['Broader']
Cat_Anc['Broader'] = Cat_Anc['Broader'].str.replace(
'African American or Afro-Caribbean', 'African Am./Caribbean')
Cat_Anc['Broader'] = Cat_Anc['Broader'].str.replace(
'Hispanic or Latin American', 'Hispanic/Latin American')
Cat_Full = pd.read_csv(os.path.abspath(os.path.join('__file__',
'../..',
'data',
'Catalogue',
'Raw',
'Cat_Full.tsv')),
header=0, sep='\t', encoding='utf-8',
index_col=False, low_memory=False)
Cat_Anc.to_csv(os.path.abspath(
os.path.join('__file__',
'../..',
'data',
'Catalogue',
'Synthetic',
'Cat_Anc_withBroader.tsv')),
sep='\t', index=False)
return Cat_Stud, Cat_Anc, Cat_Anc_byN, Cat_Full
def load_pubmed_data():
"""" Load the PUBMED data into the workspace"""
FunderInfo = pd.read_csv(os.path.abspath(
os.path.join('__file__',
'../..',
'data',
'PUBMED',
'Pubmed_FunderInfo.csv')))
FunderInfo = FunderInfo[FunderInfo['Agency'].notnull()]
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Natural Science Foundation of China \(National Science Foundation of China\)',
'National Natural Science Foundation of China')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('Medical Research Council', 'MRC')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Research Foundation of Korea \(KR\)',
'National Research Foundation of Korea')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Human Genome Research Institute',
'NHGRI')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('NHGRI NIH HHS',
'NHGRI')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('NCI NIH HHS',
'NCI')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Cancer Institute',
'NCI')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Institute of Arthritis and Musculoskeletal and Skin Diseases',
'NIAMS NIH HHS')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Institute on Aging',
'NIA NIH HHS')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('NIDA NIH HHS',
'NIDA')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Institute of Diabetes and Digestive and Kidney Diseases',
'NIDDK NIH HHS')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Institutes of Health',
'NIH HHS')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('NIMH NIH HHS',
'NIMH')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Institute of Neurological Disorders and Stroke',
'NINDS')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('NINDS NIH HHS',
'NINDS')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Natural Science Foundation of China (National Science Foundation of China)',
'National Natural Science Foundation of China')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Research Foundation of Korea (KR)',
'National Research Foundation of Korea')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Human Genome Research Institute',
'NHGRI')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('NHGRI NIH HHS',
'NHGRI')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Heart, Lung, and Blood Institute',
'NHLBI')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('NHLBI NIH HHS',
'NHLBI')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Cancer Institute',
'NCI')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('NCI NIH HHS',
'NCI')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Institute of Arthritis and Musculoskeletal and Skin Diseases',
'NIAMS NIH HHS')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Institute on Aging',
'NIA NIH HHS')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Institute of Arthritis and Musculoskeletal and Skin Diseases',
'NIAMS NIH HHS')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('NIDA NIH HHS',
'NIDA')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Institute of Diabetes and Digestive and Kidney Diseases',
'NIDDK NIH HHS')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Institutes of Health',
'NIH HHS')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('NIMH NIH HHS',
'NIMH')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('National Institute of Neurological Disorders and Stroke',
'NINDS')
FunderInfo['Agency'] = FunderInfo['Agency'].\
str.replace('NINDS NIH HHS',
'NINDS')
AbstractInfo = pd.read_csv(os.path.abspath(
os.path.join('__file__',
'../..',
'data',
'PUBMED',
'Pubmed_AbstractInfo.csv')))
AbstractInfo['Abstracts'] = AbstractInfo['Abstract'].apply(
lambda x: re.sub('[^a-zA-Z ]+', '', x)).str.lower()
AbstractCount = pd.DataFrame(AbstractInfo.Abstract.apply(lambda x:
pd.value_counts(x.split(' '))).sum(axis=0),
columns=['num_words'])
AbstractCount.index.name = 'word'
AbstractCount.to_csv(os.path.abspath(
os.path.join('__file__',
'../..',
'data',
'PUBMED',
'Pubmed_AbstractCount.csv')))
AuthorMaster = pd.read_csv(os.path.abspath(
os.path.join('__file__',
'../..',
'data',
'PUBMED',
'Pubmed_AuthorInfo.csv')))
AuthorMaster = AuthorMaster.drop_duplicates(
subset=['PUBMEDID', 'AUTHORNAME'])
for col in ['FORENAME', 'LASTNAME', 'AUTHORNAME']:
AuthorMaster[col] = AuthorMaster[col].apply(unidecode)
return FunderInfo, AbstractInfo, AuthorMaster
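# Hedged usage sketch (assumed workflow, not from the original file): the
# loaders above appear designed to be chained, with load_gwas_cat() feeding
# EFO_parent_mapper().
#
#   Cat_Stud, Cat_Anc, Cat_Anc_byN, Cat_Full = load_gwas_cat()
#   Mapped, Map_NoComs = EFO_parent_mapper(Cat_Stud, Cat_Anc_byN)
#   FunderInfo, AbstractInfo, AuthorMaster = load_pubmed_data()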
def make_timely(variables, yearlist, yearquarterlist, Cat_Stud,
Cat_Anc, Cat_Anc_byN):
""" build the dataframe which plots the longitudinal growth of GWAS over
time (figure 1a)
"""
df_years = pd.DataFrame(columns=variables, index=yearlist)
df_yearquarters = pd.DataFrame(
columns=variables, index=yearquarterlist)
for year_ in range(2007, 2018):
df_years['N ≤ 5,000'][str(year_)] = \
len(Cat_Anc_byN[(Cat_Anc_byN['N'] <= 5000) &
(Cat_Anc_byN['DATE'].str.contains(str(year_)))])
df_years['5,001 ≤ N ≤ 50,000'][str(year_)] = \
len(Cat_Anc_byN[(Cat_Anc_byN['N'] >= 5001) &
(Cat_Anc_byN['N'] <= 50000) &
(Cat_Anc_byN['DATE'].str.contains(str(year_)))])
df_years['50,001 ≤ N ≤ 100,000'][str(year_)] = \
len(Cat_Anc_byN[(Cat_Anc_byN['N'] > 50000) &
(Cat_Anc_byN['N'] <= 100000) &
(Cat_Anc_byN['DATE'].str.contains(str(year_)))])
df_years['100,001 ≤ N'][str(year_)] = \
len(Cat_Anc_byN[(Cat_Anc_byN['N'] > 100000) &
(Cat_Anc_byN['DATE'].str.contains(str(year_)))])
df_years['N'][str(year_)] = Cat_Anc[Cat_Anc['DATE'].str.contains(
str(year_))]['N'].sum()
df_years['Associations'][str(year_)] = \
Cat_Stud[Cat_Stud['DATE'].str.contains(
str(year_))]['ASSOCIATION COUNT'].sum()
df_years['Journals Printing GWAS'][str(year_)] = \
len(Cat_Stud[Cat_Stud['DATE'].str.contains(
str(year_))]['JOURNAL'].unique())
df_years['# Diseases Studied'][str(year_)] = \
len(Cat_Stud[Cat_Stud['DATE'].str.contains(
str(year_))]['DISEASE/TRAIT'].unique())
df_yearquarters['N ≤ 5,000'][str(year_) + 'Q1'] = \
len(Cat_Anc_byN[(Cat_Anc_byN['N'] <= 5000) &
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-01-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-02-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-03-'))])
df_yearquarters['5,001 ≤ N ≤ 50,000'][str(year_) + 'Q1'] = \
len(Cat_Anc_byN[(Cat_Anc_byN['N'] >= 5001) &
(Cat_Anc_byN['N'] <= 50000) &
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-01-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-02-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-03-'))])
df_yearquarters['50,001 ≤ N ≤ 100,000'][str(year_) + 'Q1'] = \
            len(Cat_Anc_byN[(Cat_Anc_byN['N'] > 50000) &
(Cat_Anc_byN['N'] <= 100000) &
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-01-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-02-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-03-'))])
df_yearquarters['100,001 ≤ N'][str(year_) + 'Q1'] = \
len(Cat_Anc_byN[(Cat_Anc_byN['N'] > 100000) &
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-01-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-02-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-03-'))])
df_yearquarters['N ≤ 5,000'][str(year_) + 'Q2'] = \
len(Cat_Anc_byN[(Cat_Anc_byN['N'] <= 5000) &
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-04-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-05-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-06-'))])
df_yearquarters['5,001 ≤ N ≤ 50,000'][str(year_) + 'Q2'] = \
len(Cat_Anc_byN[(Cat_Anc_byN['N'] >= 5001) &
(Cat_Anc_byN['N'] <= 50000) &
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-04-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-05-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-06-'))])
df_yearquarters['50,001 ≤ N ≤ 100,000'][str(year_) + 'Q2'] = \
            len(Cat_Anc_byN[(Cat_Anc_byN['N'] > 50000) &
(Cat_Anc_byN['N'] <= 100000) &
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-04-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) + '-05-')) |
(Cat_Anc_byN['DATE'].str.contains(
str(year_) | |
section [target]
value = self._get_value(target, key)
if value:
return value
# Maybe implement a [default] section in the future?
return default
def dump(self):
output = ''
for config in self.configs:
for section in config:
output += f'\n[{section}]\n'
for key, value in config[section].items():
if value is not None:
output += f'{key}={value}\n'
else:
output += f'{key}\n'
return output
class Raid():
# pylint: disable=invalid-name
def __init__(self, name, remote, config, command, lvsvolume, devices,
mqueue, timeout=5):
self.name = name
self.remote = remote
self.config = config
self.command = command
self.lvsvolume = lvsvolume
self.devices = devices
self.mqueue = mqueue
self.timeout = timeout
# For pylint, these must be defined in __init__
self.drives = {}
self.partitions = {}
self.user = self.config.get_value(remote, 'username')
if self.user is None:
self.user = 'root'
elif isinstance(self.user, list):
self.user = self.user[0]
self._clear_status()
self.ssh = Ssh(self.remote, self.user, timeout=self.timeout)
def INFO(self, message):
self.mqueue.put((self.name, 'I', message))
def FATAL(self, message) -> typing.NoReturn:
self.mqueue.put((self.name, 'F', message))
sys.exit(1)
def _dump(self, result):
for line in result:
self.INFO(line)
def _get_partitions(self):
if len(self.partitions) > 0:
return
result, status = self.ssh.execute('cat /proc/partitions')
if status:
self.partitions = {}
return
for line in result:
# Skip the header
if line.startswith('major') or line == '':
continue
_, _, blocks, name = line.split()
# Skip bogus partitions on the md devices
if re.search(r'md\d+p\d+', name):
continue
if int(blocks) < 2:
# Extended partitions are 1 block long
continue
self.partitions[os.path.join('/dev/', name)] = int(blocks) * 1024
def _get_drives(self):
if len(self.drives) > 0:
return
result, status = self.ssh.execute('lsscsi -bSS')
if status:
self.drives = {}
return
for line in result:
_, dev, size = line.split()
if dev == '-' or size == '-':
continue
blocks, block_size = size.split(',')
size_bytes = int(blocks) * int(block_size)
if size_bytes > 0:
self.drives[dev] = size_bytes
def _clear_status(self):
self.uuid_devs = {}
self.uuid_md = {}
self.mds = set()
self.encrypted = set()
self.partitions = {}
self.drives = {}
self.mapping = {}
self.sizes = {}
self.level = {}
self.provides = {}
self.lvs = set()
self.pvs = set()
self.mounts = {}
self.failed = {}
def _get_status(self):
self._clear_status()
self._get_drives()
self._get_partitions()
DEBUG('determining device status')
for dev in self.partitions:
name = os.path.basename(dev)
# For the non-md devices, determine the UUID of the md of the
# associated md device, if applicable.
result, _ = self.ssh.execute(f'mdadm -Q --examine {dev}')
# Ignore the status so that we always get a --detail on md devices.
for line in result:
if re.search(r'Array UUID : ', line):
uuid = re.sub('.* UUID : ', '', line).strip()
if uuid not in self.uuid_devs:
self.uuid_devs[uuid] = []
self.uuid_devs[uuid].append(name)
if re.search(r'Array Size : ', line):
size = re.sub('.* Size : ', '', line).strip()
size = re.sub(' .*$', '', size)
self.sizes[uuid] = (int(size) * 1024, '')
if re.search(r'Raid Level : ', line):
level = re.sub('.* Level : ', '', line).strip()
self.level[uuid] = level
# For all of the md devices, determine the UUID of the md.
uuid = None
result, status = self.ssh.execute(f'mdadm -Q --detail {dev}')
if status:
continue
for line in result:
if re.search(r'UUID : ', line):
uuid = re.sub('.* UUID : ', '', line).strip()
self.uuid_md[uuid] = name
self.mds.add(name)
if uuid and re.search(r'Failed Devices :', line):
failed = re.sub('.* Devices : ', '', line).strip()
self.failed[uuid] = int(failed)
DEBUG(f'found {len(self.uuid_devs)} UUIDs among'
f' {len(self.partitions)} partitions')
# Determine encryption status. If the header is detached, we can make
# a determination from the uuid without the md device.
DEBUG('determining encryption status')
for uuid in self.uuid_devs:
name = self.uuid_md.get(uuid)
if name is not None:
result, _ = self.ssh.execute(f'cryptsetup luksDump {name}')
for line in result:
if re.search(r'LUKS header information', line):
self.encrypted.add(uuid)
result, _ = self.ssh.execute(
f'cryptsetup luksDump --header "{uuid}.header" 0')
for line in result:
if re.search(r'LUKS header information', line):
self.encrypted.add(uuid)
# Determine volume mapping
DEBUG('determining volume mapping')
result, _ = self.ssh.execute('dmsetup deps -o devname')
for line in result:
if re.search(r'No devices', line):
continue
args = line.split()
volume = None
deps = []
for arg in args:
if volume is None:
volume = re.sub(r':', '', arg)
elif arg[0] == '(':
deps.append(re.sub(r'[()]', '', arg))
self.mapping[volume] = deps
for dep in deps:
self.provides[dep] = volume
# Determine sizes
        DEBUG('determining volume sizes')
for volume in self.mapping:
result, _ = self.ssh.execute(
f'pvs --rows --units b /dev/mapper/{volume}')
for line in result:
if re.search(r'PSize', line):
_, size = line.split()
size = re.sub(r'B', '', size)
self.sizes[volume] = (size, '')
self.pvs.add(volume)
if re.search(r'VG', line):
# Newer versions deactivate the VG on boot. Activate it
# here so that we can get VG status.
_, vg = line.split()
self.ssh.execute(f'vgchange -a y {vg}')
if volume not in self.sizes:
result, _ = self.ssh.execute(
f'lvs --rows --units b /dev/mapper/{volume}')
for line in result:
if re.search(r'LSize', line):
_, size = line.split()
size = re.sub(r'B', '', size)
self.sizes[volume] = (size, '')
self.lvs.add(volume)
# Determine what is mounted
DEBUG('determining mount status')
result, _ = self.ssh.execute(
'df -B1 --output=source,target,fstype,size,used')
for line in result:
if line.startswith('/dev/mapper'):
volume, mountpoint, fstype, size, used = line.split()
volume = re.sub(r'/dev/mapper/', '', volume)
self.mounts[volume] = (mountpoint, fstype)
self.sizes[mountpoint] = (size, used)
def _get_next_mdname(self):
for i in range(0, 10):
name = f'md{i}'
if name not in self.mds:
return name
self.FATAL('More than 10 md devices not supported')
def _get_next_volname(self):
for i in range(0, 10):
name = f'r{i}'
if name not in self.mapping:
return name
self.FATAL('More than 10 volumes not supported')
@staticmethod
def _human(size, metric=False):
if size == '':
return ''
size = int(size)
if metric:
divisor = 1000
units = ['b', 'KB', 'MB', 'GB', 'TB']
else:
divisor = 1024
units = ['b', 'KiB', 'MiB', 'GiB', 'TiB']
unit = 0
while size > divisor and unit < len(units) - 1:
size /= divisor
unit += 1
return f'{size:.2f}{units[unit]}'
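    # Hedged examples (not from the original file) of _human()'s output:
    #   Raid._human(1536)              -> '1.50KiB'
    #   Raid._human(1500, metric=True) -> '1.50KB'
    #   Raid._human('')                -> ''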
def _md5up(self):
# Bring up md devices
for uuid in self.uuid_devs:
if uuid in self.uuid_md:
self.INFO(f'up: {self.uuid_md[uuid]} {uuid}')
continue
name = self._get_next_mdname()
self.INFO(f'starting: {name} {uuid}')
result, _ = self.ssh.execute(f'mdadm -A --uuid {uuid} {name}')
self._dump(result)
def _md5down(self):
for uuid in self.uuid_devs:
name = self.uuid_md.get(uuid)
if name is None:
self.INFO(f'down: {uuid}')
continue
self.INFO(f'stopping: {name} {uuid}')
dev = os.path.join("/dev/", name)
result, _ = self.ssh.execute(f'mdadm -S {dev}')
self._dump(result)
def _md5create(self, partitions, level=6):
name = self._get_next_mdname()
result, status = self.ssh.execute(
f'mdadm -C /dev/{name} --verbose -n {len(partitions)} -l {level}'
f' {" ".join(partitions)}',
prompt='Continue creating array?',
data='YES')
self._dump(result)
if status != 0:
            self.FATAL(f'cannot create {name}')
self.INFO('setting stripe_cache_size')
result, status = self.ssh.execute(
f'echo 32768 > /sys/block/{name}/md/stripe_cache_size')
self._dump(result)
if status != 0:
self.FATAL('could not set stripe_cache_size')
self.INFO('updating /etc/mdadm/mdadm.conf')
result, status = self.ssh.execute('/usr/share/mdadm/mkconf')
if status != 0:
self.FATAL('cannot run /usr/share/mdadm/mkconf')
for line in result:
if line.startswith('ARRAY'):
_, _, meta, uuid, name = line.split()
_, status = self.ssh.execute(
f'echo "ARRAY <ignore> {meta} {uuid} {name}"'
' >> /etc/mdadm/mdadm.conf')
if status != 0:
self.FATAL('could not update /etc/mdadm/mdadm.conf')
self.INFO('updating initramfs')
result, status = self.ssh.execute('update-initramfs -u')
self._dump(result)
if status != 0:
self.FATAL('cannot update initramfs')
return name
def _create_luks_header(self, uuid):
_, status = self.ssh.execute(f'test -f {uuid}.header')
if not status:
self.FATAL(f'{uuid}.header already exists on {self.remote}')
return
self.INFO(f'creating {uuid}.header on {self.remote}')
result, _ = self.ssh.execute(
f'dd if=/dev/zero of={uuid}.header bs=4k count=1024')
self._dump(result)
def _luksformat(self):
for uuid, name in self.uuid_md.items():
luks_key = self._get_luks_key(uuid)
if luks_key != '':
self.FATAL(f'LUKS key already exists for {name} {uuid}')
continue
self._create_luks_header(uuid)
self.INFO(f'creating LUKS key for {name} {uuid}')
for key in ['key0', 'key1', 'key2', 'key3']:
key_remote = self.config.get_value(self.remote, key)
if key_remote is not None and len(key_remote) > 0:
secret = Secret(key_remote[0], socket.gethostname(),
self.user)
partial = secrets.token_hex(64)
secret.put_secret(uuid, partial)
luks_key = self._get_luks_key(uuid)
if luks_key == '':
self.FATAL('unable to store LUKS key, check ~/.urraid')
result, _ = self.ssh.execute(
f'cryptsetup luksFormat /dev/{name} '
f'--header "{uuid}.header" --use-random --batch-mode',
prompt='passphrase', data=luks_key)
self._dump(result)
self.INFO(f'finished formatting {name} {uuid}')
return uuid, name
return None, None
def _lvscreate(self, uuid, name):
self._get_status()
self.INFO(f'creating pvs/lvs state for {name} {uuid}')
if name not in self.provides:
self.FATAL(f'cannot find volume for {name}')
volume = self.provides[name]
result, _ = self.ssh.execute(f'pvcreate /dev/mapper/{volume}')
self._dump(result)
result, _ = self.ssh.execute(f'pvs /dev/mapper/{volume} -o+pe_start')
self._dump(result)
vg, mountpoint = self.lvsvolume.split('-')
result, _ = self.ssh.execute(
f'vgcreate -s 1g {vg} /dev/mapper/{volume}')
self._dump(result)
result, _ = self.ssh.execute(
f'lvcreate -l "100%FREE" -n {mountpoint} {vg}')
self._dump(result)
self.INFO(f'lvs volume {self.lvsvolume} created on {vg}')
| |
import numpy as np
import pytest
from skimage import measure
import autogalaxy as ag
from autoarray.inversion.pixelizations.abstract import AbstractPixelization
from autoarray.inversion.regularization.abstract import AbstractRegularization
from autogalaxy import exc
from autogalaxy.plane import plane
def critical_curve_via_magnification_via_plane_from(plane, grid):
magnification = plane.magnification_2d_from(grid=grid)
inverse_magnification = 1 / magnification
critical_curves_indices = measure.find_contours(inverse_magnification.native, 0)
no_critical_curves = len(critical_curves_indices)
contours = []
critical_curves = []
for jj in np.arange(no_critical_curves):
contours.append(critical_curves_indices[jj])
contour_x, contour_y = contours[jj].T
pixel_coord = np.stack((contour_x, contour_y), axis=-1)
critical_curve = grid.mask.grid_scaled_for_marching_squares_from(
grid_pixels_1d=pixel_coord, shape_native=magnification.sub_shape_native
)
        critical_curve = np.array(critical_curve)
critical_curves.append(critical_curve)
return critical_curves
def caustics_via_magnification_via_plane_from(plane, grid):
caustics = []
critical_curves = critical_curve_via_magnification_via_plane_from(
plane=plane, grid=grid
)
for i in range(len(critical_curves)):
critical_curve = critical_curves[i]
deflections_1d = plane.deflections_yx_2d_from(grid=critical_curve)
caustic = critical_curve - deflections_1d
caustics.append(caustic)
return caustics
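# Hedged usage sketch (assumed, mirroring how the helpers above are typically
# exercised in these tests; the grid construction details are an assumption):
#
#   grid = ag.Grid2D.uniform(shape_native=(100, 100), pixel_scales=0.05)
#   sis = ag.mp.SphIsothermal(centre=(0.0, 0.0), einstein_radius=1.0)
#   plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5, mass_profile=sis)], redshift=None)
#   critical_curves = critical_curve_via_magnification_via_plane_from(plane=plane, grid=grid)
#   caustics = caustics_via_magnification_via_plane_from(plane=plane, grid=grid)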
### Has Attributes ###
def test__has_light_profile():
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_light_profile is False
plane = ag.Plane(
galaxies=[ag.Galaxy(redshift=0.5, light_profile=ag.lp.LightProfile())],
redshift=None,
)
assert plane.has_light_profile is True
plane = ag.Plane(
galaxies=[
ag.Galaxy(redshift=0.5, light_profile=ag.lp.LightProfile()),
ag.Galaxy(redshift=0.5),
],
redshift=None,
)
assert plane.has_light_profile is True
def test__has_mass_profile():
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_mass_profile is False
plane = ag.Plane(
galaxies=[ag.Galaxy(redshift=0.5, mass_profile=ag.mp.MassProfile())],
redshift=None,
)
assert plane.has_mass_profile is True
plane = ag.Plane(
galaxies=[
ag.Galaxy(redshift=0.5, mass_profile=ag.mp.MassProfile()),
ag.Galaxy(redshift=0.5),
],
redshift=None,
)
assert plane.has_mass_profile is True
def test__has_light_profile_linear():
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_light_profile_linear is False
plane = ag.Plane(
galaxies=[
ag.Galaxy(redshift=0.5, light_profile=ag.lp_linear.LightProfileLinear())
],
redshift=None,
)
assert plane.has_light_profile_linear is True
plane = ag.Plane(
galaxies=[
ag.Galaxy(redshift=0.5, light_profile=ag.lp_linear.LightProfileLinear()),
ag.Galaxy(redshift=0.5),
],
redshift=None,
)
assert plane.has_light_profile_linear is True
def test__has_pixelization():
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_pixelization is False
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=AbstractPixelization(),
regularization=AbstractRegularization(),
)
plane = ag.Plane(galaxies=[galaxy_pix], redshift=None)
assert plane.has_pixelization is True
plane = ag.Plane(galaxies=[galaxy_pix, ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_pixelization is True
def test__has_regularization():
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_regularization is False
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=AbstractPixelization(),
regularization=AbstractRegularization(),
)
plane = ag.Plane(galaxies=[galaxy_pix], redshift=None)
assert plane.has_regularization is True
plane = ag.Plane(galaxies=[galaxy_pix, ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_regularization is True
def test__has_hyper_galaxy():
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_hyper_galaxy is False
galaxy = ag.Galaxy(redshift=0.5, hyper_galaxy=ag.HyperGalaxy())
plane = ag.Plane(galaxies=[galaxy], redshift=None)
assert plane.has_hyper_galaxy is True
plane = ag.Plane(galaxies=[galaxy, ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.has_hyper_galaxy is True
### Attribute Lists ###
def test__mass_profile_list():
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.mass_profile_list == []
sis_0 = ag.mp.SphIsothermal(einstein_radius=1.0)
sis_1 = ag.mp.SphIsothermal(einstein_radius=2.0)
sis_2 = ag.mp.SphIsothermal(einstein_radius=3.0)
plane = ag.Plane(
galaxies=[ag.Galaxy(redshift=0.5, mass_profile=sis_0)], redshift=None
)
assert plane.mass_profile_list == [sis_0]
plane = ag.Plane(
galaxies=[
ag.Galaxy(redshift=0.5, mass_profile_0=sis_0, mass_profile_1=sis_1),
ag.Galaxy(redshift=0.5, mass_profile_0=sis_2, mass_profile_1=sis_1),
],
redshift=None,
)
assert plane.mass_profile_list == [sis_0, sis_1, sis_2, sis_1]
def test__pixelization_list():
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=ag.m.MockPixelization(mapper=1),
regularization=ag.m.MockRegularization(),
)
plane = ag.Plane(galaxies=[galaxy_pix], redshift=None)
assert plane.pixelization_list[0].mapper == 1
galaxy_pix_0 = ag.Galaxy(
redshift=0.5,
pixelization=ag.m.MockPixelization(mapper=1),
regularization=ag.m.MockRegularization(),
)
galaxy_pix_1 = ag.Galaxy(
redshift=0.5,
pixelization=ag.m.MockPixelization(mapper=2),
regularization=ag.m.MockRegularization(),
)
plane = ag.Plane(galaxies=[galaxy_pix_0, galaxy_pix_1], redshift=None)
assert plane.pixelization_list[0].mapper == 1
assert plane.pixelization_list[1].mapper == 2
galaxy_no_pix = ag.Galaxy(redshift=0.5)
plane = ag.Plane(galaxies=[galaxy_no_pix], redshift=None)
assert plane.pixelization_list == []
def test__regularization_list():
galaxy_reg = ag.Galaxy(
redshift=0.5,
pixelization=ag.m.MockPixelization(),
regularization=ag.m.MockRegularization(regularization_matrix=1),
)
plane = ag.Plane(galaxies=[galaxy_reg], redshift=None)
assert plane.regularization_list[0].regularization_matrix == 1
galaxy_reg_0 = ag.Galaxy(
redshift=0.5,
pixelization=ag.m.MockPixelization(),
regularization=ag.m.MockRegularization(regularization_matrix=1),
)
galaxy_reg_1 = ag.Galaxy(
redshift=0.5,
pixelization=ag.m.MockPixelization(),
regularization=ag.m.MockRegularization(regularization_matrix=2),
)
plane = ag.Plane(galaxies=[galaxy_reg_0, galaxy_reg_1], redshift=None)
assert plane.regularization_list[0].regularization_matrix == 1
assert plane.regularization_list[1].regularization_matrix == 2
galaxy_no_pix = ag.Galaxy(redshift=0.5)
plane = ag.Plane(galaxies=[galaxy_no_pix], redshift=None)
assert plane.regularization_list == []
def test__hyper_galaxy_image_list():
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=AbstractPixelization(),
regularization=AbstractRegularization(),
)
plane = ag.Plane(galaxies=[galaxy_pix], redshift=None)
assert plane.hyper_galaxies_with_pixelization_image_list[0] is None
galaxy_pix = ag.Galaxy(
redshift=0.5,
pixelization=AbstractPixelization(),
regularization=AbstractRegularization(),
hyper_galaxy_image=1,
)
plane = ag.Plane(galaxies=[galaxy_pix, ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.hyper_galaxies_with_pixelization_image_list[0] == 1
plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
assert plane.hyper_galaxies_with_pixelization_image_list == []
### Light Profile Quantities ###
def test__image_2d_from(sub_grid_2d_7x7, gal_x1_lp):
light_profile = gal_x1_lp.light_profile_list[0]
lp_image = light_profile.image_2d_from(grid=sub_grid_2d_7x7)
# Perform sub gridding average manually
lp_image_pixel_0 = (lp_image[0] + lp_image[1] + lp_image[2] + lp_image[3]) / 4
lp_image_pixel_1 = (lp_image[4] + lp_image[5] + lp_image[6] + lp_image[7]) / 4
plane = ag.Plane(galaxies=[gal_x1_lp], redshift=None)
image = plane.image_2d_from(grid=sub_grid_2d_7x7)
assert (image.binned[0] == lp_image_pixel_0).all()
assert (image.binned[1] == lp_image_pixel_1).all()
assert (image == lp_image).all()
galaxy_image = gal_x1_lp.image_2d_from(grid=sub_grid_2d_7x7)
plane = ag.Plane(galaxies=[gal_x1_lp], redshift=None)
image = plane.image_2d_from(grid=sub_grid_2d_7x7)
assert image == pytest.approx(galaxy_image, 1.0e-4)
# Overwrite one value so intensity in each pixel is different
sub_grid_2d_7x7[5] = np.array([2.0, 2.0])
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=2.0))
g0_image = g0.image_2d_from(grid=sub_grid_2d_7x7)
g1_image = g1.image_2d_from(grid=sub_grid_2d_7x7)
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
image = plane.image_2d_from(grid=sub_grid_2d_7x7)
assert image == pytest.approx(g0_image + g1_image, 1.0e-4)
plane = ag.Plane(galaxies=[], redshift=0.5)
image = plane.image_2d_from(grid=sub_grid_2d_7x7)
assert image.shape_native == (7, 7)
assert (image[0] == 0.0).all()
assert (image[1] == 0.0).all()
def test__image_2d_list_from(sub_grid_2d_7x7):
# Overwrite one value so intensity in each pixel is different
sub_grid_2d_7x7[5] = np.array([2.0, 2.0])
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=2.0))
lp0 = g0.light_profile_list[0]
lp1 = g1.light_profile_list[0]
lp0_image = lp0.image_2d_from(grid=sub_grid_2d_7x7)
lp1_image = lp1.image_2d_from(grid=sub_grid_2d_7x7)
# Perform sub gridding average manually
lp0_image_pixel_0 = (lp0_image[0] + lp0_image[1] + lp0_image[2] + lp0_image[3]) / 4
lp0_image_pixel_1 = (lp0_image[4] + lp0_image[5] + lp0_image[6] + lp0_image[7]) / 4
lp1_image_pixel_0 = (lp1_image[0] + lp1_image[1] + lp1_image[2] + lp1_image[3]) / 4
lp1_image_pixel_1 = (lp1_image[4] + lp1_image[5] + lp1_image[6] + lp1_image[7]) / 4
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
image = plane.image_2d_from(grid=sub_grid_2d_7x7)
assert image.binned[0] == pytest.approx(
lp0_image_pixel_0 + lp1_image_pixel_0, 1.0e-4
)
assert image.binned[1] == pytest.approx(
lp0_image_pixel_1 + lp1_image_pixel_1, 1.0e-4
)
image_of_galaxies = plane.image_2d_list_from(grid=sub_grid_2d_7x7)
assert image_of_galaxies[0].binned[0] == lp0_image_pixel_0
assert image_of_galaxies[0].binned[1] == lp0_image_pixel_1
assert image_of_galaxies[1].binned[0] == lp1_image_pixel_0
assert image_of_galaxies[1].binned[1] == lp1_image_pixel_1
def test__galaxy_image_2d_dict_from(sub_grid_2d_7x7):
g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
g1 = ag.Galaxy(
redshift=0.5,
mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0),
light_profile=ag.lp.EllSersic(intensity=2.0),
)
g2 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=3.0))
g0_image = g0.image_2d_from(grid=sub_grid_2d_7x7)
g1_image = g1.image_2d_from(grid=sub_grid_2d_7x7)
g2_image = g2.image_2d_from(grid=sub_grid_2d_7x7)
plane = ag.Plane(redshift=-0.75, galaxies=[g1, g0, g2])
image_1d_dict = plane.galaxy_image_2d_dict_from(grid=sub_grid_2d_7x7)
assert (image_1d_dict[g0].slim == g0_image).all()
assert (image_1d_dict[g1].slim == g1_image).all()
assert (image_1d_dict[g2].slim == g2_image).all()
image_dict = plane.galaxy_image_2d_dict_from(grid=sub_grid_2d_7x7)
assert (image_dict[g0].native == g0_image.native).all()
assert (image_dict[g1].native == g1_image.native).all()
assert (image_dict[g2].native == g2_image.native).all()
def test__light_profile_snr__signal_to_noise_via_simulator_correct():
background_sky_level = 10.0
exposure_time = 300.0
grid = ag.Grid2D.uniform(shape_native=(3, 3), pixel_scales=1.0)
sersic_0 = ag.lp_snr.EllSersic(
signal_to_noise_ratio=10.0, centre=(1.0, 1.0), effective_radius=0.01
)
sersic_1 = ag.lp_snr.EllSersic(
signal_to_noise_ratio=20.0, centre=(-1.0, -1.0), effective_radius=0.01
)
plane = ag.Plane(
galaxies=[ag.Galaxy(redshift=0.5, light_0=sersic_0, light_1=sersic_1)]
)
simulator = ag.SimulatorImaging(
exposure_time=exposure_time,
noise_seed=1,
background_sky_level=background_sky_level,
)
imaging = simulator.via_plane_from(plane=plane, grid=grid)
assert 9.0 < imaging.signal_to_noise_map.native[0, 2] < 11.0
assert 11.0 < imaging.signal_to_noise_map.native[2, 0] < 21.0
### Mass Profile Quantities ###
def test__convergence_2d_from(sub_grid_2d_7x7):
g0 = ag.Galaxy(redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0))
g1 = ag.Galaxy(redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=2.0))
g0_convergence = g0.convergence_2d_from(grid=sub_grid_2d_7x7)
g1_convergence = g1.convergence_2d_from(grid=sub_grid_2d_7x7)
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
convergence = plane.convergence_2d_from(grid=sub_grid_2d_7x7)
assert convergence == pytest.approx(g0_convergence + g1_convergence, 1.0e-8)
# No galaxies
plane = ag.Plane(galaxies=[], redshift=0.5)
convergence = plane.convergence_2d_from(grid=sub_grid_2d_7x7)
assert convergence.sub_shape_slim == sub_grid_2d_7x7.sub_shape_slim
convergence = plane.convergence_2d_from(grid=sub_grid_2d_7x7)
assert convergence.sub_shape_native == (14, 14)
convergence = plane.convergence_2d_from(grid=sub_grid_2d_7x7)
assert convergence.shape_native == (7, 7)
def test__potential_2d_from(sub_grid_2d_7x7):
g0 = ag.Galaxy(redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0))
g1 = ag.Galaxy(redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=2.0))
g0_potential = g0.potential_2d_from(grid=sub_grid_2d_7x7)
g1_potential = g1.potential_2d_from(grid=sub_grid_2d_7x7)
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
potential = plane.potential_2d_from(grid=sub_grid_2d_7x7)
assert potential == pytest.approx(g0_potential + g1_potential, 1.0e-8)
# No galaxies
plane = ag.Plane(galaxies=[], redshift=0.5)
potential = plane.potential_2d_from(grid=sub_grid_2d_7x7)
assert potential.sub_shape_slim == sub_grid_2d_7x7.sub_shape_slim
potential = plane.potential_2d_from(grid=sub_grid_2d_7x7)
assert potential.sub_shape_native == (14, 14)
potential = plane.potential_2d_from(grid=sub_grid_2d_7x7)
assert potential.shape_native == (7, 7)
def test__deflections_yx_2d_from(sub_grid_2d_7x7):
# Overwrite one value so intensity in each pixel is different
sub_grid_2d_7x7[5] = np.array([2.0, 2.0])
g0 = ag.Galaxy(redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0))
g1 = ag.Galaxy(redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=2.0))
g0_deflections = g0.deflections_yx_2d_from(grid=sub_grid_2d_7x7)
g1_deflections = g1.deflections_yx_2d_from(grid=sub_grid_2d_7x7)
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
deflections = plane.deflections_yx_2d_from(grid=sub_grid_2d_7x7)
assert deflections == pytest.approx(g0_deflections + g1_deflections, 1.0e-4)
# No Galaxies
plane = ag.Plane(redshift=0.5, galaxies=[])
deflections = plane.deflections_yx_2d_from(grid=sub_grid_2d_7x7)
assert deflections.shape_native == (7, 7)
assert (deflections.binned[0, 0] == 0.0).all()
assert (deflections.binned[0, 1] == 0.0).all()
assert (deflections.binned[1, 0] == 0.0).all()
assert (deflections.binned[0] == 0.0).all()
### Hyper Quantities ###
def test__contribution_map_list():
hyper_model_image = ag.Array2D.manual_native([[2.0, 4.0, 10.0]], pixel_scales=1.0)
hyper_galaxy_image = ag.Array2D.manual_native([[1.0, 5.0, 8.0]], pixel_scales=1.0)
hyper_galaxy_0 = ag.HyperGalaxy(contribution_factor=5.0)
hyper_galaxy_1 = ag.HyperGalaxy(contribution_factor=10.0)
contribution_map_0 = hyper_galaxy_0.contribution_map_from(
hyper_model_image=hyper_model_image, hyper_galaxy_image=hyper_galaxy_image
)
contribution_map_1 = hyper_galaxy_1.contribution_map_from(
hyper_model_image=hyper_model_image, hyper_galaxy_image=hyper_galaxy_image
)
galaxy_0 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_0,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
galaxy_1 = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy_1,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_0])
assert (plane.contribution_map_list[0].slim == contribution_map_0).all()
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_1])
assert (plane.contribution_map_list[0].slim == contribution_map_1).all()
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_1, galaxy_0])
assert (plane.contribution_map_list[0].slim == contribution_map_1).all()
assert (plane.contribution_map_list[1].slim == contribution_map_0).all()
assert plane.contribution_map_list[0].slim == pytest.approx(
[0.20833333, 0.89285714, 1.0], 1.0e-2
)
assert plane.contribution_map_list[1].slim == pytest.approx(
[0.25714286, 1.0, 0.96], 1.0e-2
)
assert (sum(plane.contribution_map_list) == plane.contribution_map).all()
hyper_model_image = ag.Array2D.manual_native([[2.0, 4.0, 10.0]], pixel_scales=1.0)
hyper_galaxy_image = ag.Array2D.manual_native([[1.0, 5.0, 8.0]], pixel_scales=1.0)
hyper_galaxy = ag.HyperGalaxy(contribution_factor=5.0)
contribution_map = hyper_galaxy.contribution_map_from(
hyper_model_image=hyper_model_image, hyper_galaxy_image=hyper_galaxy_image
)
galaxy = ag.Galaxy(
redshift=0.5,
hyper_galaxy=hyper_galaxy,
hyper_model_image=hyper_model_image,
hyper_galaxy_image=hyper_galaxy_image,
)
plane = ag.Plane(
redshift=0.5,
galaxies=[galaxy, ag.Galaxy(redshift=0.5), ag.Galaxy(redshift=0.5)],
)
assert (plane.contribution_map_list[0].slim == contribution_map).all()
assert plane.contribution_map_list[1] is None
assert plane.contribution_map_list[2] is None
galaxy_1 = ag.Galaxy(redshift=0.5)
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_0, galaxy_1])
assert (galaxy_0.contribution_map == plane.contribution_map).all()
galaxy_0 = ag.Galaxy(redshift=0.5)
plane = ag.Plane(redshift=0.5, galaxies=[galaxy_0, galaxy_1])
assert plane.contribution_map is None
def test__hyper_noise_map_list_from():
noise_map = ag.Array2D.manual_native(array=[[1.0, 2.0, 3.0]], pixel_scales=1.0)
hyper_galaxy_0
<filename>recipe_engine/internal/engine.py<gh_stars>1-10
# Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import calendar
import copy
import datetime
import json
import logging
import os
import re
import sys
import traceback
from contextlib import contextmanager
import attr
import gevent
import gevent.local
import six
from google.protobuf import json_format as jsonpb
from pympler import summary, tracker
from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2
from PB.go.chromium.org.luci.lucictx import sections as sections_pb2
from PB.recipe_engine import engine_properties as engine_properties_pb2
from PB.recipe_engine import result as result_pb2
from .. import recipe_api
from .. import util
from ..step_data import StepData, ExecutionResult
from ..types import StepPresentation, thaw
from ..types import PerGreenletState, PerGreentletStateRegistry
from ..third_party import luci_context
from .engine_env import merge_envs
from .exceptions import RecipeUsageError, CrashEngine
from .step_runner import Step
from .resource_semaphore import ResourceWaiter
from .global_shutdown import GLOBAL_SHUTDOWN
LOG = logging.getLogger(__name__)
@attr.s(frozen=True, slots=True, repr=False)
class _ActiveStep(object):
"""The object type that we keep in RecipeEngine._step_stack."""
step_data = attr.ib() # type: StepData
step_stream = attr.ib() # type: StepStream
is_parent = attr.ib() # type: bool
children_presentations = attr.ib(factory=list) # type: List[StepPresentation]
greenlets = attr.ib(factory=list) # type: List[gevent.Greenlet]
def close(self):
"""If step_data is set, finalizes its StepPresentation with
self.step_stream, then closes self.step_stream.
"""
gevent.wait(self.greenlets)
if self.step_data:
self.step_data.presentation.finalize(self.step_stream)
self.step_stream.close()
class _MemoryProfiler(object):
"""The memory profiler used in recipe engine that is backed by Pympler.
Note: This class is currently not thread safe. The snapshot operation is not
atomic. The profiler is called before each step execution, so this is fine for
now because steps are executed serially. However, once steps start to execute
in parallel, the implementation needs to be re-evaluated to ensure the
atomicity of the snapshot operation.
"""
def __init__(self, initial_snapshot_name='Bootstrap'):
self._current_snapshot_name = initial_snapshot_name
self._diff_snapshot = False
self._tracker = tracker.SummaryTracker()
def snapshot(self, snapshot_name):
""" Snapshot the memory
Returns [generator of str] - formatted memory snapshot or diff, surrounded by
dividing lines. When this method is called for the first time, the full
snapshot is returned. After that, it only returns the diff with the
previous snapshot.
"""
memsum = self._tracker.create_summary()
last_snapshot_name = self._current_snapshot_name
self._current_snapshot_name = snapshot_name
if self._diff_snapshot:
yield ((
'-------- Diff between current snapshot (%s) and last snapshot (%s) '
'Starts --------') % (snapshot_name, last_snapshot_name))
diff = self._tracker.diff(summary1=memsum)
# TODO(yiwzhang): switch to yield from after moving to python 3
for diff_line in summary.format_(diff):
yield diff_line
yield ((
'-------- Diff between current snapshot (%s) and last snapshot (%s) '
'Ends --------') % (snapshot_name, last_snapshot_name))
else:
# create_summary() won't make the return value latest summary in the
# underlying tracker. Manually moving it forward
self._tracker.s0 = memsum
# Only dump the full snapshot when this method is called for the first
# time. From then onwards, dump diff only
self._diff_snapshot = True
yield '-------- Memory Snapshot (%s) Start --------' % snapshot_name
# TODO(yiwzhang): switch to yield from after moving to python 3
for snapshot_line in summary.format_(memsum):
yield snapshot_line
yield '-------- Memory Snapshot (%s) Ends --------' % snapshot_name
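# A minimal usage sketch (mirroring how RecipeEngine uses this class below;
# `log_stream` stands for any stream with a write_line method): the first call
# yields a full snapshot, subsequent calls yield only the diff against the
# previous snapshot.
#   profiler = _MemoryProfiler()
#   for line in profiler.snapshot('setup_build'):
#       log_stream.write_line(line)
#   for line in profiler.snapshot('after first step'):  # diff vs. 'setup_build'
#       log_stream.write_line(line)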
class RecipeEngine(object):
"""
Knows how to execute steps emitted by a recipe, holds global state such as
step history and build properties. Each recipe module API has a reference to
this object.
Recipe modules that are aware of the engine:
* properties - uses engine.properties.
* step - uses engine.create_step(...), and previous_step_result.
"""
def __init__(self, recipe_deps, step_runner, stream_engine, warning_recorder,
properties, environ, start_dir, initial_luci_context,
num_logical_cores, memory_mb):
"""See run_steps() for parameter meanings."""
self._recipe_deps = recipe_deps
self._step_runner = step_runner
self._stream_engine = stream_engine # type: StreamEngine
self._properties = properties
self._engine_properties = _get_engine_properties(properties)
self._environ = environ.copy()
self._start_dir = start_dir
self._clients = {client.IDENT: client for client in (
recipe_api.ConcurrencyClient(
stream_engine.supports_concurrency,
self.spawn_greenlet),
recipe_api.LUCIContextClient(initial_luci_context),
recipe_api.PathsClient(start_dir),
recipe_api.PropertiesClient(properties),
recipe_api.StepClient(self),
recipe_api.WarningClient(warning_recorder, recipe_deps),
)}
self._resource = ResourceWaiter(num_logical_cores * 1000, memory_mb)
self._memory_profiler = _MemoryProfiler() if (
self._engine_properties.memory_profiler.enable_snapshot) else None
# A greenlet-local store which holds a stack of _ActiveStep objects, holding
# the most recently executed step at each nest level (objects deeper in the
# stack have lower nest levels). When we pop from this stack, we close the
# corresponding step stream.
#
# NOTE: Due to the way that steps are run in the recipe engine, only the tip
# of this stack may be a 'real' step; i.e. anything other than the tip of
# the stack is a parent nesting step.
class StepStack(PerGreenletState):
steps = [_ActiveStep(None, None, True)] # "root" parent
def _get_setter_on_spawn(self):
tip_step = self.steps[-1]
def _inner():
self.steps = [tip_step]
return _inner
self._step_stack_storage = StepStack()
# Map of namespace_tuple -> {step_name: int} to deduplicate `step_name`s
# within a namespace.
self._step_names = {}
@property
def _step_stack(self):
return self._step_stack_storage.steps
@property
def properties(self):
"""Used by recipe_deps._instantiate_api and recipe_deps.Recipe._run_steps"""
return self._properties
@property
def environ(self):
"""Used by recipe_deps._instantiate_api and recipe_deps.Recipe._run_steps"""
return self._environ
def resolve_requirement(self, req):
"""Resolves a requirement or raises ValueError if it cannot be resolved.
Args:
* req (_UnresolvedRequirement): The requirement to resolve.
Returns the resolved requirement.
Raises ValueError if the requirement cannot be satisfied.
"""
# pylint: disable=protected-access
assert isinstance(req, recipe_api._UnresolvedRequirement)
if req._typ == 'client':
return self._clients.get(req._name)
raise ValueError('Unknown requirement type [%s]' % (req._typ,))
def initialize_path_client_HACK(self, root_api):
"""This is a hack; the "PathsClient" currently works to provide a reverse
string->Path lookup by walking down the recipe's `api` object and calling
the various 'root' path methods (like .resource(), etc.).
However, we would like to eventually simplify the 'paths' system, whose
whole complexity exists to facilitate 'pure-data' config.py processing,
which is also going to be deprecated in favor of protos and removal of the
config subsystem.
Args:
* root_api (RecipeScriptApi): The root `api` object which would be passed
to the recipe's RunSteps function.
"""
self._clients['paths']._initialize_with_recipe_api(root_api)
def record_import_warning(self, warning, importer):
"""Records an import warning."""
self._clients['warning'].record_import_warning(warning, importer)
def close_non_parent_step(self):
"""Closes the tip of the _step_stack if it's not a parent nesting step."""
try:
tip_step = self._step_stack[-1]
if tip_step.is_parent:
return
self._step_stack.pop().close()
except:
_log_crash(self._stream_engine, "close_non_parent_step()")
raise CrashEngine("Closing non-parent step failed.")
@property
def active_step(self):
"""Returns the current _ActiveStep.step_data.
May be None if the _ActiveStep is the root _ActiveStep.
"""
return self._step_stack[-1].step_data
def spawn_greenlet(self, func, args, kwargs, greenlet_name):
"""Returns a gevent.Greenlet which has been initialized with the correct
greenlet-local-storage state.
Args:
* greenlet_name (str|None) - If non-None, assign this to the greenlet's
name.
"""
self.close_non_parent_step()
to_run = [pgs._get_setter_on_spawn() for pgs in PerGreentletStateRegistry]
current_step = self._step_stack[-1]
def _runner():
for fn in to_run:
fn()
try:
return func(*args, **kwargs)
finally:
self.close_non_parent_step()
ret = gevent.spawn(_runner)
if greenlet_name is not None:
ret.name = greenlet_name
current_step.greenlets.append(ret)
return ret
def _record_step_name(self, name):
"""Records a step name in the current namespace.
Args:
* name (str) - The name of the step we want to run in the current context.
Side effect:
* calls close_non_parent_step.
* Updates global tracking state for this step name.
Returns Tuple[str] of the step name_tokens that should ACTUALLY run.
"""
self.close_non_parent_step()
try:
namespace = ()
if self.active_step:
namespace = self.active_step.name_tokens
cur_state = self._step_names.setdefault(namespace, {})
cur_count = cur_state.setdefault(name, 0)
dedup_name = name
if cur_count:
dedup_name = name + ' (%d)' % (cur_count + 1)
cur_state[name] += 1
return namespace + (dedup_name,)
except:
_log_crash(self._stream_engine, "_record_step_name(%r)" % (name,))
raise CrashEngine("Getting name tokens for %r failed." % (name,))
def _write_memory_snapshot(self, log_stream, snapshot_name):
"""Snapshot the memory and write the result to the supplied log stream if
the memory snapshot is enabled.
Args:
* log_stream (Stream) - stream that the diff will be written to. A None
stream makes this method a no-op.
* snapshot_name (str) - Name of the snapshot. The name is preserved
along with the snapshot.
TODO(crbug.com/1057844): After luciexe rolls out, instead of writing the
log to arbitrary log stream, it should constantly write to memory_profile
log stream created in setup_build step to consolidate all memory snapshots
in one UI page.
"""
if self._memory_profiler and log_stream:
for line in self._memory_profiler.snapshot(snapshot_name):
log_stream.write_line(line)
@contextmanager
def parent_step(self, name):
"""Opens a parent step with the given name in the current namespace.
Args:
* name (str) - The name of the parent step to open.
Yields a tuple of (StepPresentation, List[StepData]):
* The StepPresentation for this parent step.
* The List of children StepData of this parent step.
"""
name_tokens = self._record_step_name(name)
try:
step_data = StepData(name_tokens, | |
from collections import OrderedDict
import numpy as np
import numexpr as ne
from scipy.spatial import distance
from scipy import sparse
import scipy.sparse.linalg
from . import exact_geodesic
from . import subsurface
from .misc import _memo
class Surface(exact_geodesic.ExactGeodesicMixin, subsurface.SubsurfaceMixin):
"""Represents a single cortical hemisphere surface. Can be the white matter surface,
pial surface, fiducial (mid-cortical) surface, inflated surface, flattened surface,
etc.
Implements some useful functions for dealing with functions across surfaces.
Parameters
----------
pts : 2D ndarray, shape (total_verts, 3)
Location of each vertex in space (mm). Order is x, y, z.
polys : 2D ndarray, shape (total_polys, 3)
Indices of the vertices in each triangle in the surface.
"""
def __init__(self, pts, polys):
self.pts = pts.astype(np.double)
self.polys = polys
self._cache = dict()
self._rlfac_solvers = dict()
self._nLC_solvers = dict()
@property
@_memo
def ppts(self):
"""3D matrix of points in each face: n faces x 3 points per face x 3 coords per point.
"""
return self.pts[self.polys]
@property
@_memo
def connected(self):
"""Sparse matrix of vertex-face associations.
"""
npt = len(self.pts)
npoly = len(self.polys)
return sparse.coo_matrix((np.ones((3*npoly,)), # data
(np.hstack(self.polys.T), # row
np.tile(range(npoly),(1,3)).squeeze())), # col
(npt, npoly)).tocsr() # size
@property
@_memo
def adj(self):
"""Sparse vertex adjacency matrix.
"""
npt = len(self.pts)
npoly = len(self.polys)
adj1 = sparse.coo_matrix((np.ones((npoly,)),
(self.polys[:,0], self.polys[:,1])), (npt,npt))
adj2 = sparse.coo_matrix((np.ones((npoly,)),
(self.polys[:,0], self.polys[:,2])), (npt,npt))
adj3 = sparse.coo_matrix((np.ones((npoly,)),
(self.polys[:,1], self.polys[:,2])), (npt,npt))
alladj = (adj1 + adj2 + adj3).tocsr()
return alladj + alladj.T
@property
@_memo
def face_normals(self):
"""Normal vector for each face.
"""
# Compute normal vector direction
nnfnorms = np.cross(self.ppts[:,1] - self.ppts[:,0],
self.ppts[:,2] - self.ppts[:,0])
# Normalize to norm 1
nfnorms = nnfnorms / np.sqrt((nnfnorms**2).sum(1))[:,np.newaxis]
# Ensure that there are no nans (shouldn't be a problem with well-formed surfaces)
return np.nan_to_num(nfnorms)
@property
@_memo
def vertex_normals(self):
"""Normal vector for each vertex (average of normals for neighboring faces).
"""
# Average adjacent face normals
nnvnorms = np.nan_to_num(self.connected.dot(self.face_normals) / self.connected.sum(1)).A
# Normalize to norm 1
return nnvnorms / np.sqrt((nnvnorms**2).sum(1))[:,np.newaxis]
@property
@_memo
def face_areas(self):
"""Area of each face.
"""
# Compute normal vector (length is face area)
nnfnorms = np.cross(self.ppts[:,1] - self.ppts[:,0],
self.ppts[:,2] - self.ppts[:,0])
# Compute vector length
return np.sqrt((nnfnorms**2).sum(-1)) / 2
@property
@_memo
def cotangent_weights(self):
"""Cotangent of angle opposite each vertex in each face.
"""
ppts = self.ppts
cots1 = ((ppts[:,1]-ppts[:,0]) *
(ppts[:,2]-ppts[:,0])).sum(1) / np.sqrt((np.cross(ppts[:,1]-ppts[:,0],
ppts[:,2]-ppts[:,0])**2).sum(1))
cots2 = ((ppts[:,2]-ppts[:,1]) *
(ppts[:,0]-ppts[:,1])).sum(1) / np.sqrt((np.cross(ppts[:,2]-ppts[:,1],
ppts[:,0]-ppts[:,1])**2).sum(1))
cots3 = ((ppts[:,0]-ppts[:,2]) *
(ppts[:,1]-ppts[:,2])).sum(1) / np.sqrt((np.cross(ppts[:,0]-ppts[:,2],
ppts[:,1]-ppts[:,2])**2).sum(1))
# Sanitize the results: replace any infs/nans (from degenerate triangles) with 0
cots = np.vstack([cots1, cots2, cots3])
cots[np.isinf(cots)] = 0
cots[np.isnan(cots)] = 0
return cots
@property
@_memo
def laplace_operator(self):
"""Laplace-Beltrami operator for this surface. A sparse adjacency matrix with
edge weights determined by the cotangents of the angles opposite each edge.
Returns a 4-tuple (B,D,W,V) where D is the 'lumped mass matrix', W is the weighted
adjacency matrix, and V is a diagonal matrix that normalizes the adjacencies.
The 'stiffness matrix', A, can be computed as V - W.
The full LB operator can be computed as D^{-1} (V - W).
B is the finite element method (FEM) 'mass matrix', which replaces D in FEM analyses.
See 'Discrete Laplace-Beltrami operators for shape analysis and segmentation'
by Reuter et al., 2009 for details.
"""
## Lumped mass matrix
D = self.connected.dot(self.face_areas) / 3.0
## Stiffness matrix
npt = len(self.pts)
cots1, cots2, cots3 = self.cotangent_weights
# W is weighted adjacency matrix
W1 = sparse.coo_matrix((cots1, (self.polys[:,1], self.polys[:,2])), (npt, npt))
W2 = sparse.coo_matrix((cots2, (self.polys[:,2], self.polys[:,0])), (npt, npt))
W3 = sparse.coo_matrix((cots3, (self.polys[:,0], self.polys[:,1])), (npt, npt))
W = (W1 + W1.T + W2 + W2.T + W3 + W3.T).tocsr() / 2.0
# V is sum of each col
V = sparse.dia_matrix((np.array(W.sum(0)).ravel(),[0]), (npt,npt))
# A is stiffness matrix
#A = W - V # negative operator -- more useful in practice
# For FEM:
Be1 = sparse.coo_matrix((self.face_areas, (self.polys[:,1], self.polys[:,2])), (npt, npt))
Be2 = sparse.coo_matrix((self.face_areas, (self.polys[:,2], self.polys[:,0])), (npt, npt))
Be3 = sparse.coo_matrix((self.face_areas, (self.polys[:,0], self.polys[:,1])), (npt, npt))
Bd = self.connected.dot(self.face_areas) / 6
dBd = scipy.sparse.dia_matrix((Bd,[0]), (len(D),len(D)))
B = (Be1 + Be1.T + Be2 + Be2.T + Be3 + Be3.T)/12 + dBd
return B, D, W, V
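# The docstring relations, in code form (a sketch using the values returned
# above; `surf` stands for a Surface instance): the stiffness matrix is
# A = V - W and the lumped Laplace-Beltrami operator is D^{-1} (V - W),
# which is what mean_curvature() applies below.
#   B, D, W, V = surf.laplace_operator
#   npt = len(D)
#   Dinv = sparse.dia_matrix((D**-1, [0]), (npt, npt)).tocsr()
#   L = Dinv.dot(V - W)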
def mean_curvature(self):
"""Compute mean curvature of this surface using the Laplace-Beltrami operator.
Curvature is computed at each vertex. It's probably pretty noisy, and should
be smoothed using smooth().
Negative values of mean curvature mean that the surface is folded inward
(as in a sulcus), positive values of curvature mean that the surface is
folded outward (as on a gyrus).
Returns
-------
curv : 1D ndarray, shape (total_verts,)
The mean curvature at each vertex.
"""
B,D,W,V = self.laplace_operator
npt = len(D)
Dinv = sparse.dia_matrix((D**-1,[0]), (npt,npt)).tocsr() # construct Dinv
L = Dinv.dot((V-W))
curv = (L.dot(self.pts) * self.vertex_normals).sum(1)
return curv
def smooth(self, scalars, factor=1.0, iterations=1):
"""Smooth vertex-wise function given by `scalars` across the surface using
mean curvature flow method (see http://brickisland.net/cs177fa12/?p=302).
Amount of smoothing is controlled by `factor`.
Parameters
----------
scalars : 1D ndarray, shape (total_verts,)
A scalar-valued function across the cortex, such as the curvature
supplied by mean_curvature.
factor : float, optional
Amount of smoothing to perform, larger values smooth more.
iterations : int, optional
Number of times to repeat smoothing, larger values smooths more.
Returns
-------
smscalars : 1D ndarray, shape (total_verts,)
Smoothed scalar values.
"""
if factor == 0.0:
return scalars
B,D,W,V = self.laplace_operator
npt = len(D)
lfac = sparse.dia_matrix((D,[0]), (npt,npt)) - factor * (W-V)
goodrows = np.nonzero(~np.array(lfac.sum(0) == 0).ravel())[0]
lfac_solver = sparse.linalg.dsolve.factorized(lfac[goodrows][:,goodrows])
to_smooth = scalars.copy()
for _ in range(iterations):
from_smooth = lfac_solver((D * to_smooth)[goodrows])
to_smooth[goodrows] = from_smooth
smscalars = np.zeros(scalars.shape)
smscalars[goodrows] = from_smooth
return smscalars
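# A minimal usage sketch (assuming `pts` and `polys` arrays are already
# loaded): compute the noisy per-vertex mean curvature and smooth it as the
# mean_curvature docstring recommends.
#   surf = Surface(pts, polys)
#   curv = surf.mean_curvature()
#   curv_smooth = surf.smooth(curv, factor=2.0, iterations=2)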
@property
@_memo
def avg_edge_length(self):
"""Average length of all edges in the surface.
"""
adj = self.adj
tadj = sparse.triu(adj, 1) # only entries above main diagonal, in coo format
edgelens = np.sqrt(((self.pts[tadj.row] - self.pts[tadj.col])**2).sum(1))
return edgelens.mean()
def surface_gradient(self, scalars, at_verts=True):
"""Gradient of a function with values `scalars` at each vertex on the surface.
If `at_verts`, returns values at each vertex. Otherwise, returns values at each
face.
Parameters
----------
scalars : 1D ndarray, shape (total_verts,)
A scalar-valued function across the cortex.
at_verts : bool, optional
If True (default), values will be returned for each vertex. Otherwise,
values will be returned for each face.
Returns
-------
gradu : 2D ndarray, shape (total_verts,3) or (total_polys,3)
Contains the x-, y-, and z-axis gradients of the given `scalars` at either
each vertex (if `at_verts` is True) or each face.
"""
pu = scalars[self.polys]
fe12, fe23, fe31 = [f.T for f in self._facenorm_cross_edge]
pu1, pu2, pu3 = pu.T
fa = self.face_areas
# numexpr is much faster than doing this using numpy!
#gradu = ((fe12.T * pu[:,2] +
# fe23.T * pu[:,0] +
# fe31.T * pu[:,1]) / (2 * self.face_areas)).T
gradu = np.nan_to_num(ne.evaluate("(fe12 * pu3 + fe23 * pu1 + fe31 * pu2) / (2 * fa)").T)
if at_verts:
return (self.connected.dot(gradu).T / self.connected.sum(1).A.squeeze()).T
return gradu
def create_biharmonic_solver(self, boundary_verts, clip_D=0.1):
r"""Set up biharmonic equation with Dirichlet boundary conditions on the cortical
mesh and precompute Cholesky factorization for solving it. The vertices listed in
`boundary_verts` are considered part of the boundary, and will not be included in
the factorization.
To facilitate Cholesky decomposition (which requires a symmetric matrix), the
squared Laplace-Beltrami operator is separated into left-hand-side (L2) and
right-hand-side (Dinv) parts. If we write the L-B operator as the product of
the stiffness matrix (V-W) and the inverse mass matrix (Dinv), the biharmonic
problem is as follows (with `u` denoting non-boundary vertices)
.. math::
:nowrap:
\begin{eqnarray}
L^2_{u} \phi &=& -\rho_{u} \\
\left[ D^{-1} (V-W) D^{-1} (V-W) \right]_{u} \phi &=& -\rho_{u} \\
\left[ (V-W) D^{-1} (V-W) \right]_{u} \phi &=& -\left[D \rho\right]_{u}
\end{eqnarray}
Parameters
----------
boundary_verts : list or ndarray of length V
Indices of vertices that will be part of the Dirichlet boundary.
Returns
-------
lhs : sparse matrix
Left side of biharmonic problem, (V-W) D^{-1} (V-W)
rhs : sparse matrix, dia
Right side of biharmonic problem, D
Dinv | |
<gh_stars>0
"""
generate_plots_JCAP_2021.py is a Python routine that can be used
to generate the plots of <NAME>, <NAME>, <NAME>, and
<NAME>, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356.
It reads the pickle run variables that can be generated by the routines
initialize_JCAP_2021.py, initialize_PRR_2021.py, and initialize_PRD_2020.py
The function run() executes the code.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
# get working directory, where the runs and routines should be stored
dir0 = os.getcwd() + '/'
HOME = dir0 + '..'
os.chdir(HOME)
from dirs import read_dirs as rd
import plot_sets
import run as r
import interferometry as inte
import cosmoGW
import spectra
os.chdir(dir0)
def run():
os.chdir(HOME)
# import dictionary with the names identifying
# the runs and pointing to the corresponding directory
dirs = {}
dirs = rd('JCAP_2021_ini', dirs)
dirs = rd('JCAP_2021_dri', dirs)
R = [s for s in dirs]
# read the runs stored in the pickle variables
runs = r.load_runs(R, dir0, dirs, quiet=False)
dirs_PRR = {}
dirs_PRR = rd('PRR_2021_K', dirs_PRR)
dirs_PRR = rd('PRR_2021_M', dirs_PRR)
R_PRR = [s for s in dirs_PRR]
# read the runs stored in the pickle variables
dir0_PRR = dir0 + '../PRR_2011_05556/'
runs_PRR = r.load_runs(R_PRR, dir0_PRR, dirs_PRR, quiet=False)
dirs_PRD = {}
dirs_PRD = rd('PRD_2020_ini', dirs_PRD)
dirs_PRD = rd('PRD_2020_hel', dirs_PRD)
dirs_PRD = rd('PRD_2020_noh', dirs_PRD)
dirs_PRD = rd('PRD_2020_ac', dirs_PRD)
R_PRD = [s for s in dirs_PRD]
# read the runs stored in the pickle variables
dir0_PRD = dir0 + '../PRD_1903_08585/'
runs_PRD = r.load_runs(R_PRD, dir0_PRD, dirs_PRD, quiet=False)
os.chdir(dir0)
return runs, runs_PRR, runs_PRD
def generate_table(runs, save=True, print_tex=False):
"""
Function that generates the Table I of <NAME>, <NAME>, <NAME>,
and <NAME>, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356, containing the
parameters of the simulations and the characteristic values of magnetic and
GW results.
Arguments:
runs -- variable that contains the memory project runs with the
stored spectra
save -- option to save the table in tableI.csv
(default True)
"""
import pandas as pd
types = []
sigmas = []
EMmaxs = []
OmGWstat = []
PMs = []
PGWs = []
ks = []
ns = []
etas = []
for i in runs:
run = runs.get(i)
types.append(run.type)
sigmas.append(run.sig)
EMmaxs.append(run.EMmax)
OmGWstat.append(run.GWstat)
PMs.append(run.PM)
PGWs.append(run.PGW)
ks.append(run.k)
ns.append(run.n)
etas.append(run.eta)
types = np.array(types)
sigmas = np.array(sigmas)
EMmaxs = np.array(EMmaxs)
OmGWstat = np.array(OmGWstat)
PMs = np.array(PMs)
PGWs = np.array(PGWs)
ks = np.array(ks)
ns = np.array(ns)
etas = np.array(etas)
df = pd.DataFrame({'Type': types, 'sigma': sigmas, 'EEM^max': EMmaxs,
'Omega_GW^stat': OmGWstat, 'PPM': PMs,
'PPGW': PGWs, 'kf': ks, 'n': ns, 'eta': etas})
if save: df.to_csv('tableI.csv')
if print_tex:
for i in range(0, len(types)):
exp_EM = np.floor(np.log10(EMmaxs[i]))
bas_EM = EMmaxs[i]/10**exp_EM
exp_EGW = np.floor(np.log10(OmGWstat[i]))
bas_EGW = OmGWstat[i]/10**exp_EGW
exp_eta = np.floor(np.log10(etas[i]))
bas_eta = etas[i]/10**exp_eta
if sigmas[i] == '-0.01' or sigmas[i] == '-1':
PM_s = '$\!\!\!%.3f$'%PMs[i]
sig_s = '$\!\!\!%s$'%sigmas[i]
else:
PM_s = '\ $%.2f$'%PMs[i]
sig_s = '\ $%s$'%sigmas[i]
if sigmas[i] == '-1': PGW_s = '$\!\!%.2f$'%PGWs[i]
else: PGW_s = '\ $%.2f$'%PGWs[i]
print(types[i], '&', sig_s, '&',
"$%.2f \\times 10^{%i}$"%(bas_EM, exp_EM), '&',
"$%.2f \\times 10^{%i}$"%(bas_EGW, exp_EGW), '&',
PM_s, '&', PGW_s, '&', ks[i], '&', ns[i], '&',
"$%.0f \\times 10^{%i}$"%(bas_eta, exp_eta), '\\\\')
return df
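# A minimal usage sketch: read the runs and build Table I, also printing the
# LaTeX rows (assumes the pickle run variables described in the module
# docstring have already been generated).
#   runs, runs_PRR, runs_PRD = run()
#   df = generate_table(runs, save=True, print_tex=True)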
def plot_st(k, sp, hel=False, yks=False, N=5, Ay=1., Ay2=[],
Ax=[], inds_show=[], inds_show_txt=[], fb=False, min_sp=[],
max_sp=[], min_sp_pos=[], max_sp_pos=[], min_sp_neg=[],
max_sp_neg=[], yv=False, color='black', marg=1.02, diff0 = 0.1):
"""
Function to plot the spectrum selected, with options to plot positive
and negative values in different colors for helical spectra.
Arguments:
k -- array of wave numbers
sp -- array of spectral values
hel -- option to separate positive and negative values for spectral
functions (red shows positive and blue shows negative)
(default False)
yks -- option to plot power law fits above (or below) spectra
(default False)
N -- number of power law fits to be used (default 5)
Ay -- factor multiplied to the power law fits for visualization
(default 1)
Ax, Ay2 -- factors multiplied to the x, y positions of the text k^{a/b}
of the power law fits. It should be given as an array of
length N (default values are 1s)
inds_show -- indices of the discretized spectra in power law fits where
we show the power law fit (default all of them)
inds_show_txt -- indices of the discretized spectra in power law fits
where we show the text k^{a/b} (default all of them)
fb -- option to use filled plot between maximum and minimum of
spectra (default False)
min_sp, max_sp -- if fb is selected we need to provide the minimum
and maximum of the spectral functions
min_sp_pos, min_sp_neg, max_sp_pos, max_sp_neg -- if fb and hel are
selected, we need to provide the maximum and minimum of
the spectral functions (both for the positive and negative
values)
yv -- option to plot vertical lines over the oscillations at the
data points (default False)
color -- color of the spectra lines of the plot (default 'black'),
this option does not change the color of positive and negative
values of helical spectra
marg -- factor that indicates the length of the power law fits shown,
in the interval (k0*marg, k1/marg) to show that the power laws
are discretized
diff0 -- margin allowed to approximate the power law exponent to
a fraction for the text shown k^{a/b} (default 0.1)
"""
if len(Ay2) == 0: Ay2 = [1.]*N
if len(Ax) == 0: Ax = [1.]*N
if hel:
k_pos, k_neg, sp_pos, sp_neg, color = spectra.red_blue_func(k, sp)
plt.plot(k_pos, sp_pos, '.', color='red')
plt.plot(k_neg, abs(sp_neg), '.', color='blue')
plt.plot(k, abs(sp), color='black', ls='dotted')
# fb option to plot shaded regions between minimum and maximum
# of oscillations of the helical GW energy density spectra
if fb or yv:
if len(min_sp_pos)*len(max_sp_pos)*len(min_sp_neg)* \
len(max_sp_neg) == 0:
print('\n with the arguments hel=True and fb=True or yv=True',
' you need to provide min_sp_pos, max_sp_pos',
' min_sp_neg, and max_sp_neg')
else:
if fb:
plt.fill_between(k, min_sp_pos, max_sp_pos, alpha=0.1,
color='red')
plt.fill_between(k, min_sp_neg, max_sp_neg, alpha=0.1,
color='blue')
if yv:
for i in range(0, len(k)):
plt.vlines(k[i], min_sp_neg[i], max_sp_neg[i],
color='blue', ls='dashed', lw=0.6)
plt.vlines(k[i], min_sp_pos[i], max_sp_pos[i],
color='red', ls='dashed', lw=0.6)
else:
plt.plot(k, sp, color=color, lw=2)
# fb option to plot shaded regions between minimum and maximum
# of oscillations of the helical GW energy density spectra
if fb or yv:
if len(min_sp)*len(max_sp) == 0:
print('\n with the arguments hel=False and fb=True or yv=True',
' you need to provide min_sp and max_sp_pos')
else:
if fb:
plt.fill_between(k, min_sp, max_sp, alpha=0.1, color=color)
if yv:
for i in range(0, len(k)):
plt.vlines(k[i], min_sp[i], max_sp[i], color=color,
ls='dashed', lw=0.6)
if yks:
ks, sp_ks, aks, km, sp_m, kps, c = spectra.compute_yks(k, abs(sp), N)
show = np.zeros(N)
show_txt = np.zeros(N)
if len(inds_show) > N:
print('the indices selected in inds_show cannot be more than',
' the number of discretized power laws N')
inds_show = np.array(range(0, N))
if len(inds_show) == 0: show += 1
else: show[inds_show] = 1
if len(inds_show_txt) > N: inds_show_txt = np.array(range(0, N))
if len(inds_show_txt) == 0: show_txt += 1
else: show_txt[inds_show_txt] = 1
kps = np.logspace(np.log10(ks[0]), np.log10(ks[-1]), N + 1)
for i in range(0, N):
str_exp = '$k^{%.0f}$'%aks[i]
for j in range(0, 6):
str_exp, diff = spectra.str_exp(str_exp, aks[i],
j + 1, diff=diff0)
if diff < diff0: break
else: diff0 = diff
if show[i]:
kss = np.logspace(np.log10(kps[i]*marg),
np.log10(kps[i + 1]/marg))
plt.plot(kss, kss**aks[i]*10**c[i]*Ay,
color='black', lw=.6)
if show_txt[i]:
txt = plt.text(km[i]*Ax[i], sp_m[i]*Ay*Ay2[i],
str_exp, size=30)
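# A minimal usage sketch for plot_st (hypothetical array names `k` and `EGW`):
# plot a GW spectrum in black with five discretized power-law fits drawn
# above it.
#   plt.figure()
#   plot_st(k, EGW, hel=False, yks=True, N=5, Ay=2., color='black')
#   plt.xscale('log'); plt.yscale('log')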
def plot_EM_EGW(run, save=True):
"""
Function that plots the magnetic energy and helicity spectra at the time
of maximum magnetic energy density.
It also plots the GW energy density and helicity spectra, averaging over
times after the GW energy has entered a stationary oscillatory stage (this
needs to be previously computed and stored in the run variable, see
initialize_JCAP_2021.py).
It corresponds to figures 1-3 of <NAME>, <NAME>, <NAME>,
and <NAME>, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356.
Arguments:
run -- variable run with spectral information
save -- option to save the figure in plots/'name_run'EM_EGW.pdf'
(default True)
"""
# auxiliary functions used in plot_EM_EGW
def init_Ay2_Ax(N, Ay, Ax):
return N, N*[Ay], N*[Ax]
# chose indices to show power law fits and text with k^(a/b) for magnetic
# spectrum EM(k)
def indices_EM(nm, Ax, Ay):
if 'i' in | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import math
import numbers
import copy
import tensorflow as tf
import numpy as np
from tensorflow.python.ops import variables as tf_variables
from trident.backend.common import get_session, addindent, enforce_singleton, unpack_singleton, get_time_suffix, get_class, \
format_time, get_terminal_size, snake2camel, camel2snake,Signature,epsilon
from trident.backend import dtype
from trident.backend.tensorspec import TensorSpec,TensorShape
from trident.backend.tensorflow_backend import Layer, Sequential, Parameter
from trident.backend.tensorflow_ops import *
from trident.layers.tensorflow_initializers import *
__all__ = ['InstanceNorm','InstanceNorm2d','InstanceNorm3d','AdaptiveInstanceNorm','BatchNorm','BatchNorm2d','BatchNorm3d','GroupNorm','GroupNorm2d','GroupNorm3d','LayerNorm','LayerNorm2d','LayerNorm3d','L2Norm','PixelNorm','EvoNormB0','EvoNormS0','get_normalization']
_session = get_session()
_epsilon = _session.epsilon
def instance_std(x, eps=1e-5):
# Per-instance standard deviation over the spatial axes of a channels-last tensor.
reduce_axes = list(range(1, len(x.shape) - 1))
_, var = tf.nn.moments(x, axes=reduce_axes, keepdims=True)
return tf.sqrt(var + eps)
def group_std(x, groups, eps = 1e-5):
# Per-group standard deviation for a channels-last tensor (N, ..., C): the
# channel axis is split into `groups` groups and the std is taken over the
# spatial axes and the channels within each group; the input is returned
# normalized by that std, with its original shape.
spaceshape = tuple(x.shape[1:-1])
N = x.shape[0]
C = x.shape[-1]
x1 = tf.reshape(x, (N,) + spaceshape + (groups, C // groups))
reduce_axes = list(range(1, len(x.shape) - 1)) + [len(x.shape)]
_, var = tf.nn.moments(x1, axes=reduce_axes, keepdims=True)
return tf.reshape(x1 / tf.sqrt(var + eps), (N,) + spaceshape + (C,))
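# A minimal usage sketch for the helpers above (assumes a 4-D channels-last
# activation tensor; the tensor values are made up):
#   x = tf.random.normal((2, 8, 8, 32))
#   y = group_std(x, groups=8)   # same shape as x, normalized per group
#   s = instance_std(x)          # per-channel std, shape (2, 1, 1, 32)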
class BatchNorm(Layer):
"""Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs
with additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set
to 1 and the elements of :math:`\beta` are set to 0.
Also by default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial Batch Normalization.
Shape:
- Input: :math:`(N, H, W, C)`
- Output: :math:`(N, H, W, C)` (same shape as input)
References:
.. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`:
https://arxiv.org/abs/1502.03167
"""
def __init__(self, momentum=0.1, affine=True, track_running_stats=True, axis=-1,renorm=False,eps=1e-8,name=None, **kwargs):
"""
Args:
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``
Examples:
>>> bn=BatchNorm2d(affine=False)
>>> input = to_tensor(np.random.standard_normal((2, 128, 128, 64)))
>>> print(int_shape(bn(input)))
(2, 128, 128, 64)
"""
super().__init__(name=name)
if isinstance(axis, (list, tuple)):
self.axis = axis[:]
elif isinstance(axis, int):
self.axis = [axis]
else:
raise TypeError('Expected an int or a list/tuple of ints for the '
'argument \'axis\', but received: %r' % axis)
self.eps = eps
self.momentum = momentum
self.affine = affine
self.track_running_stats = track_running_stats
self.register_parameter("weight",None) # gamma//scale
self.register_parameter("bias",None) # beta/ offset
self.renorm=renorm
def reset_running_stats(self):
if self.track_running_stats:
self.register_buffer("running_mean", zeros(shape=[self.input_filters]))
self.register_buffer("running_var", ones(shape=[self.input_filters]))
self.register_buffer("num_batches_tracked", to_tensor(0, dtype=tf.int64), persistent=False)
if self.affine :
self.register_parameter("weight",tf.Variable(tf.ones(shape=[self.input_filters]), trainable=True, name='weight')) # gamma//scale
self.register_parameter("bias",tf.Variable(tf.zeros(shape=[self.input_filters]), trainable=True, name='bias')) # beta/ offset
def assign_moving_average(self, variable, value, momentum, inputs_size):
with tf.name_scope('AssignMovingAvg') as scope:
decay = to_tensor(1.0 - momentum,device=self.get_root().device,dtype=variable.dtype.base_dtype)
update_delta = (variable - tf.cast(value, variable.dtype)) * decay
if inputs_size is not None:
update_delta = where(inputs_size > 0, update_delta, zeros_like(update_delta))
variable = tf.math.subtract(variable, update_delta, name=scope)
return variable
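# The update above is algebraically the rule quoted in the class docstring:
#   new = var - (var - value) * (1 - momentum)
#       = (1 - momentum) * var + momentum * value
# e.g. with momentum=0.1, a running mean of 0.0 and a batch mean of 1.0, the
# new running mean is 0.1.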
def build(self, input_shape:TensorShape):
if not self._built:
self.input_filters= input_shape[self.filter_index]
ndims = len(input_shape)
# Convert axis to list and resolve negatives
if isinstance(self.axis, int):
self.axis = [self.axis]
elif isinstance(self.axis, tuple):
self.axis = list(self.axis)
for idx, x in enumerate(self.axis):
if x < 0:
self.axis[idx] = ndims + x
# Validate axes
for x in self.axis:
if x < 0 or x >= ndims:
raise ValueError('Invalid axis: %d' % x)
if len(self.axis) != len(set(self.axis)):
raise ValueError('Duplicate axis: {}'.format(tuple(self.axis)))
param_shape = [input_shape[dim] for dim in self.axis]
if self.affine:
self.weight=tf.Variable(tf.ones(shape=param_shape),trainable=True, name='weight') #gamma//scale
self.bias=tf.Variable(tf.zeros(shape=param_shape),trainable=True, name='bias') #beta/ offset
if self.track_running_stats:
self.register_buffer('running_mean', zeros(shape=param_shape))
self.register_buffer('running_var', ones(shape=param_shape))
self.register_buffer('num_batches_tracked', to_tensor(0, dtype=tf.int64), persistent=False)
self._built = True
def forward(self, x, **kwargs) :
input_shape = x.shape
ndims = len(input_shape)
reduction_axes = [i for i in range(ndims) if i not in self.axis]
# Broadcasting only necessary for norm when the axis is not just
# the last dimension
broadcast_shape = [1] * ndims
for dim in self.axis:
broadcast_shape[dim] = input_shape.dims[dim].value
def _broadcast(v):
if (v is not None and len(v.shape) != ndims and self.axis != [ndims - 1]):
return reshape(v, broadcast_shape)
return v
scale, offset = _broadcast(self.weight), _broadcast(self.bias)
mean, variance = moments(x, axis=reduction_axes, keepdims=True)
running_mean = self.running_mean
running_var = self.running_var
if not self.training:
mean, variance = self.running_mean, self.running_var
new_mean, new_variance = mean, variance
def _do_update(var, value):
"""Compute the updates for mean and variance."""
return self.assign_moving_average(var, value, self.momentum, self.input_shape[0])
def mean_update():
"""Update the moving mean."""
true_branch = lambda: _do_update(self.running_mean, new_mean)
false_branch = lambda: self.running_mean
return true_branch() if self.training else false_branch()
def variance_update():
"""Update the moving variance."""
true_branch = lambda: _do_update(self.running_var, new_variance)
false_branch = lambda: self.running_var
return true_branch() if self.training else false_branch()
self.running_mean = mean_update()
self.running_var = variance_update()
# Normalize with the batch statistics while training and with the tracked
# running statistics in evaluation mode (mean/variance were swapped above).
return tf.nn.batch_normalization(x, mean, variance, offset, scale, self.eps)
def extra_repr(self):
return '{input_filters}, eps={eps}, momentum={momentum}, affine={affine}, ' \
'track_running_stats={track_running_stats}'.format(**self.__dict__)
BatchNorm2d=BatchNorm
BatchNorm3d=BatchNorm
class GroupNorm(Layer):
"""Applies Group Normalization over a mini-batch of inputs as described in the paper `Group Normalization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The input channels are separated into :attr:`num_groups` groups, each containing
``num_channels / num_groups`` channels. The mean and standard-deviation are calculated
separately over the each group. :math:`\gamma` and :math:`\beta` are learnable
per-channel affine transform parameter vectors of size :attr:`num_channels` if
:attr:`affine` is ``True``.
This layer uses statistics computed from input data in both training and
evaluation modes.
Shape:
- Input: :math:`(N, *, C)` where :math:`C=\text{num_channels}`
- Output: :math:`(N, *, C)` (same shape as input)
References:
.. _`Group Normalization`: https://arxiv.org/abs/1803.08494
"""
def __init__(self, num_groups=16,affine=True,axis=-1, eps=1e-5, **kwargs):
"""
Args:
num_groups (int): number of groups to separate the channels into
eps: a value added to the denominator for numerical stability. Default: 1e-5
affine: a boolean value that when set to ``True``, this module
has learnable per-channel affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Examples:
>>> gn=GroupNorm(affine=False)
>>> input = to_tensor(np.random.standard_normal((2, 128, 128, 64)))
>>> print(int_shape(gn(input)))
(2, 128, 128, 64)
"""
super().__init__()
self.affine=affine
self.num_groups = num_groups
self.eps = eps
self.axis=axis
def build(self, input_shape:TensorShape):
if self._built == False :
assert self.input_filters % self.num_groups == 0, 'number of groups {} must divide number of channels {}'.format(self.num_groups, self.input_filters)
if self.affine:
self.weight = tf.Variable(ones((self.input_filters)))
self.bias = tf.Variable(zeros((self.input_filters)))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self._built = True
def forward(self, x, **kwargs) :
# Prepare broadcasting shape: split the channel axis into
# (num_groups, channels_per_group).
origin_shape = list(int_shape(x))
group_shape = list(int_shape(x))
last_dim = group_shape[self.axis]
group_shape[self.axis] = last_dim // self.num_groups
group_shape.insert(self.axis, self.num_groups)
x = reshape(x, group_shape)
# Statistics are taken over the spatial axes and the channels within each
# group (every axis except the batch axis and the groups axis).
reduce_axes = [i for i in range(1, len(group_shape)) if i != len(group_shape) - 2]
x_mean, x_variance = moments(x, axis=reduce_axes, keepdims=True)
x = (x - x_mean) / sqrt(x_variance + self.eps)
x = reshape(x, origin_shape)
if self.affine:
x = x * self.weight + self.bias
return x
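# Shape sketch for the forward pass above (channels-last input, num_groups=16):
#   (2, 128, 128, 64) -> reshape -> (2, 128, 128, 16, 4)
#   statistics per (batch, group) -> normalize -> reshape back to (2, 128, 128, 64)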
GroupNorm2d=GroupNorm
GroupNorm3d=GroupNorm
class InstanceNorm(GroupNorm):
"""Applies Instance Normalization
`Instance Normalization: The Missing Ingredient for Fast Stylization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
Instance Normalization is an specific case of ```GroupNormalization```since
it normalizes all features of one channel. The Groupsize is equal to the
channel size. Empirically, its accuracy is more stable than batch norm in a
wide range of small batch sizes, if learning rate is adjusted linearly
with batch sizes.
By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.
If :attr:`track_running_stats` is | |
# Convert temperature values from degrees Celsius to Fahrenheit
temp_rc_df['degF_rc'] = (temp_rc_df['value'] * 9 / 5) + 32
temp_rc_df
# validTime Column split to date and time for Temperature
date_temp_rc = temp_rc_df['validTime'].str.split('T', n=1, expand=True)
time_temp_rc = date_temp_rc[1].str.split('+', n=1, expand=True)
time_temp_rc
temp_rc_df['date_temp_rc'] = date_temp_rc[0]
temp_rc_df['time_temp_rc'] = time_temp_rc[0]
# Combine date and time with a space in between the two
temp_rc_df['date_time_temp_rc'] = temp_rc_df['date_temp_rc'] + ' ' + temp_rc_df['time_temp_rc']
# Convert the above to date time format so it can be recognized by PostgreSQL and js
# temp_rc_df['date_time_temp_rc'] = pd.to_datetime(temp_rc_df['date_time_temp_rc'])
# Pull all the data for today + 3 days
time_delta_temp_rc = datetime.datetime.strptime(temp_rc_df['date_temp_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
temp_rc_df['times_temp_rc'] = time_delta_temp_rc.strftime("%Y-%m-%d")
temp_rc_df = temp_rc_df.loc[temp_rc_df['date_temp_rc'] < temp_rc_df['times_temp_rc']]
temp_rc_df
temp_rc_df.dtypes
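# --- Optional refactor sketch (hypothetical, not used by this script) ---
# Each series below repeats the same steps: split 'validTime' into date and
# time, join them with a space, and keep only rows within today + 3 days.
# A helper like the following could replace those repeated blocks; the generic
# column names are illustrative assumptions.
def split_valid_time_and_filter(df, days_ahead=4):
    date_part = df['validTime'].str.split('T', n=1, expand=True)
    time_part = date_part[1].str.split('+', n=1, expand=True)
    df['date'] = date_part[0]
    df['time'] = time_part[0]
    df['date_time'] = df['date'] + ' ' + df['time']
    cutoff = datetime.datetime.strptime(df['date'][0], "%Y-%m-%d") + timedelta(days=days_ahead)
    return df.loc[df['date'] < cutoff.strftime("%Y-%m-%d")]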
# =================== Wind Speed Data ======================
wind_speed_rc = []
for i in data_rc_forecast["properties"]["windSpeed"]["values"]:
wind_speed_rc.append(i)
windSpeed_rc_df = pd.DataFrame(wind_speed_rc)
windSpeed_rc_df
# Converting KM/hour to Miles/hour
windSpeed_rc_df['miles/hour_rc'] = windSpeed_rc_df['value'] * 0.621371
windSpeed_rc_df
# validTime Column split to date and time for wind Speed
date_ws_rc = windSpeed_rc_df['validTime'].str.split('T', n=1, expand=True)
time_ws_rc = date_ws_rc[1].str.split('+', n=1, expand=True)
time_ws_rc
windSpeed_rc_df['date_ws_rc'] = date_ws_rc[0]
windSpeed_rc_df['time_ws_rc'] = time_ws_rc[0]
# Combine date and time with a space in between the two
windSpeed_rc_df['date_time_ws_rc'] = windSpeed_rc_df['date_ws_rc'] + ' ' + windSpeed_rc_df['time_ws_rc']
# Convert the above to date time format so it can be recognized by PostgreSQL and js
# windSpeed_rc_df['date_time_ws_rc'] = pd.to_datetime(windSpeed_rc_df['date_time_ws_rc'])
# Pull all the data for today + 3 days
time_delta_ws = datetime.datetime.strptime(windSpeed_rc_df['date_ws_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
windSpeed_rc_df['times_ws_rc'] = time_delta_ws.strftime("%Y-%m-%d")
windSpeed_rc_df = windSpeed_rc_df.loc[windSpeed_rc_df['date_ws_rc'] < windSpeed_rc_df['times_ws_rc']]
windSpeed_rc_df
# windSpeed_rc_df.dtypes
# =================== Wind Gust Data ======================
wind_gust_rc = []
for i in data_rc_forecast["properties"]["windGust"]["values"]:
wind_gust_rc.append(i)
wind_gust_rc_df = pd.DataFrame(wind_gust_rc)
wind_gust_rc_df
# Converting KM/hour to Miles/hour
wind_gust_rc_df['m/h_rc'] = wind_gust_rc_df['value'] * 0.621371
wind_gust_rc_df
# # validTime Column split to date and time for wind Gusts
date_wg_rc = wind_gust_rc_df['validTime'].str.split('T', n=1, expand=True)
time_wg_rc = date_wg_rc[1].str.split('+', n=1, expand=True)
time_wg_rc
wind_gust_rc_df['date_wg_rc'] = date_wg_rc[0]
wind_gust_rc_df['time_wg_rc'] = time_wg_rc[0]
# Combine date and time with a space in between the two
wind_gust_rc_df['date_time_wg_rc'] = wind_gust_rc_df['date_wg_rc'] + ' ' + wind_gust_rc_df['time_wg_rc']
# Convert the above to date time format so it can be recognized by PostgreSQL and js
# wind_gust_rc_df['date_time_wg_rc'] = pd.to_datetime(wind_gust_rc_df['date_time_wg_rc'])
wind_gust_rc_df
# Pull all the data for today + 3 days
time_delta_wg = datetime.datetime.strptime(wind_gust_rc_df['date_wg_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
wind_gust_rc_df['times_wg_rc'] = time_delta_wg.strftime("%Y-%m-%d")
wind_gust_rc_df = wind_gust_rc_df.loc[wind_gust_rc_df['date_wg_rc'] < wind_gust_rc_df['times_wg_rc']]
wind_gust_rc_df
# wind_gust_rc_df.dtypes
# =================== Probability of Precipitation ======================
prob_precip_rc = []
for i in data_rc_forecast["properties"]["probabilityOfPrecipitation"]["values"]:
prob_precip_rc.append(i)
prob_precip_rc_df = pd.DataFrame(prob_precip_rc)
prob_precip_rc_df
# # validTime Column split to date and time for Probability Precipitation
date_pp_rc = prob_precip_rc_df['validTime'].str.split('T', n=1, expand=True)
time_pp_rc = date_pp_rc[1].str.split('+', n=1, expand=True)
time_pp_rc
prob_precip_rc_df['date_pp_rc'] = date_pp_rc[0]
prob_precip_rc_df['time_pp_rc'] = time_pp_rc[0]
# Combine date and time with a space in between the two
prob_precip_rc_df['date_time_pp_rc'] = prob_precip_rc_df['date_pp_rc'] + ' ' + prob_precip_rc_df['time_pp_rc']
# Convert the above to date time format so it can be recognized by PostgreSQL and js
# prob_precip_rc_df['date_time_pp_rc'] = pd.to_datetime(prob_precip_rc_df['date_time_pp_rc'])
prob_precip_rc_df
# Pull all the data for today + 3 days
time_delta_pp = datetime.datetime.strptime(prob_precip_rc_df['date_pp_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
prob_precip_rc_df['times_pp_rc'] = time_delta_pp.strftime("%Y-%m-%d")
prob_precip_rc_df = prob_precip_rc_df.loc[prob_precip_rc_df['date_pp_rc'] < prob_precip_rc_df['times_pp_rc']]
prob_precip_rc_df
# prob_precip_rc_df.dtypes
# =================== Quantity of Precipitation ======================
qty_precip_rc = []
for i in data_rc_forecast["properties"]["quantitativePrecipitation"]["values"]:
qty_precip_rc.append(i)
qty_precip_rc_df = pd.DataFrame(qty_precip_rc)
qty_precip_rc_df
# # validTime Column split to date and time for quantity Precipitation
date_qp_rc = qty_precip_rc_df['validTime'].str.split('T', n=1, expand=True)
time_qp_rc = date_qp_rc[1].str.split('+', n=1, expand=True)
time_qp_rc
qty_precip_rc_df['date_qp_rc'] = date_qp_rc[0]
qty_precip_rc_df['time_qp_rc'] = time_qp_rc[0]
# Combine date and time with a space in between the two
qty_precip_rc_df['date_time_qp_rc'] = qty_precip_rc_df['date_qp_rc'] + ' ' + qty_precip_rc_df['time_qp_rc']
# Convert the above to date time format so it can be recognized by PostgreSQL and js
# qty_precip_rc_df['date_time_qp_rc'] = pd.to_datetime(qty_precip_rc_df['date_time_qp_rc'])
qty_precip_rc_df
# Pull all the data for today + 3 days
time_delta_qp = datetime.datetime.strptime(qty_precip_rc_df['date_qp_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
qty_precip_rc_df['times_qp_rc'] = time_delta_qp.strftime("%Y-%m-%d")
qty_precip_rc_df = qty_precip_rc_df.loc[qty_precip_rc_df['date_qp_rc'] < qty_precip_rc_df['times_qp_rc']]
qty_precip_rc_df
# qty_precip_rc_df.dtypes
# =================== Create DataFrame with all the above data for Rose Canyon Campground ======================
rc_grid_df = pd.DataFrame({"id":2,
"campground": "Rose Canyon",
"forecasted_temperature_degF": temp_rc_df['degF_rc'],
"forecastTime_temperature": temp_rc_df['date_time_temp_rc'],
"forecasted_windSpeed_miles_per_h": windSpeed_rc_df['miles/hour_rc'],
"forecastTime_windSpeed": windSpeed_rc_df['date_time_ws_rc'],
"forecasted_windGust_miles_per_h": wind_gust_rc_df['m/h_rc'],
"forecastTime_windGust": wind_gust_rc_df['date_time_wg_rc'],
"forecasted_probabilityOfPrecipitation": prob_precip_rc_df['value'],
"forecastTime_probabilityOfPrecipitation": prob_precip_rc_df['date_time_pp_rc'],
"forecasted_quantityOfPrecipitation_mm": qty_precip_rc_df['value'],
"forecastTime_quantityOfPrecipitation": qty_precip_rc_df['date_time_qp_rc'],
})
rc_grid_df
# rc_grid_df.dtypes
# %%
# --------------------------------------------------------------------
# SPENCER CANYON CAMPGROUND
# --------------------------------------------------------------------
# -------------------------------------------
# Pull Grid Data URL From Metadata url
# -------------------------------------------
sc_url = "https://api.weather.gov/points/32.4186,-110.7383"
response_sc = requests.get(sc_url)
data_sc = response_sc.json()
data_sc
grid_data_sc = data_sc["properties"]["forecastGridData"]
grid_data_sc
# %%
# ------------------------------------------------------------------------
# Pull latitude, Longitude and Elevation data for Spencer Canyon Campground
# ------------------------------------------------------------------------
sc_forcast_url = grid_data_sc
response_sc_forecast = requests.get(sc_forcast_url)
data_sc_forecast = response_sc_forecast.json()
data_sc_forecast
lat_sc = data_sc_forecast["geometry"]["coordinates"][0][0][1]
lat_sc
lng_sc = data_sc_forecast["geometry"]["coordinates"][0][0][0]
lng_sc
elevation_sc = data_sc_forecast["properties"]["elevation"]["value"]
elevation_sc
# ---------------------------------------------------------------------------------
# Create a Dataframe with Latitude, Longitude Elevation and all other related URL
# ---------------------------------------------------------------------------------
sc_df = pd.DataFrame({"id": 3,
"campground": "Spencer Canyon",
"lat": [lat_sc],
"lon": [lng_sc],
"elevation": [elevation_sc],
"nws_meta_url": [sc_url],
"nws_grid_url": [grid_data_sc],
"forest_url":"https://www.fs.usda.gov/recarea/coronado/recreation/camping-cabins/recarea/?recid=25710&actid=29",
"campsite_url": "https://www.fs.usda.gov/Internet/FSE_MEDIA/fseprd746608.jpg",
# "nws_meta_json":[data_sc],
# "nws_grid_json": [data_sc_forecast],
"fire_danger": "Very High",
"map_code": '<iframe src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3368.0814680369876!2d-110.74302428360251!3d32.41697578108229!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x86d61515ca1f56fd%3A0x242e26b2f2f72242!2sSpencer%20Canyon%20Campground!5e0!3m2!1sen!2sus!4v1626560995515!5m2!1sen!2sus" width="600" height="450" style="border:0;" allowfullscreen="" loading="lazy"></iframe>'
})
sc_df
# %%
# -------------------------------------------------------------------------------------------------
# Pull Temperature, Wind Speed, Wind Gust, Probability of Precipitation, Quantity of Precipitation
# data along with the date and time for each.
# -------------------------------------------------------------------------------------------------
# =================== Temperature Data ======================
temp_sc = []
for i in data_sc_forecast["properties"]["temperature"]["values"]:
temp_sc.append(i)
temp_sc_df = pd.DataFrame(temp_sc)
temp_sc_df
# Temperature conversion to Degree Fahrenheit
temp_sc_df['degF_sc'] = (temp_sc_df['value'] * 9 / 5) + 32
temp_sc_df
# validTime Column split to date and time for Temperature
date_temp_sc = temp_sc_df['validTime'].str.split('T', n=1, expand=True)
time_temp_sc = date_temp_sc[1].str.split('+', n=1, expand=True)
time_temp_sc
temp_sc_df['date_temp_sc'] = date_temp_sc[0]
temp_sc_df['time_temp_sc'] = time_temp_sc[0]
# Combine date and time with a space in between the two
temp_sc_df['date_time_temp_sc'] = temp_sc_df['date_temp_sc'] + ' ' + temp_sc_df['time_temp_sc']
# Convert the above to date time format so it can be recognized by PostgreSQL and js
# temp_sc_df['date_time_temp_sc'] = pd.to_datetime(temp_sc_df['date_time_temp_sc'])
# Pull all the data for today + 3 days
time_delta_temp_sc = datetime.datetime.strptime(temp_sc_df['date_temp_sc'][0],"%Y-%m-%d") + timedelta(days = 4)
temp_sc_df['times_temp_sc'] = time_delta_temp_sc.strftime("%Y-%m-%d")
temp_sc_df = temp_sc_df.loc[temp_sc_df['date_temp_sc'] < temp_sc_df['times_temp_sc']]
temp_sc_df
# temp_sc_df.dtypes
# =================== Wind Speed Data ======================
wind_speed_sc = []
for i in data_sc_forecast["properties"]["windSpeed"]["values"]:
wind_speed_sc.append(i)
windSpeed_sc_df = pd.DataFrame(wind_speed_sc)
windSpeed_sc_df
# Converting KM/hour to Miles/hour
windSpeed_sc_df['miles/hour_sc'] = windSpeed_sc_df['value'] * 0.621371
windSpeed_sc_df
# validTime Column split to date and time for wind Speed
date_ws_sc = windSpeed_sc_df['validTime'].str.split('T', n=1, expand=True)
time_ws_sc = date_ws_sc[1].str.split('+', n=1, expand=True)
time_ws_sc
windSpeed_sc_df['date_ws_sc'] = date_ws_sc[0]
windSpeed_sc_df['time_ws_sc'] = time_ws_sc[0]
# Combine date and time with a space in between the two
windSpeed_sc_df['date_time_ws_sc'] = windSpeed_sc_df['date_ws_sc'] + ' ' + windSpeed_sc_df['time_ws_sc']
# Convert the above to date time format so it can be recognized by PostgreSQL and js
# windSpeed_sc_df['date_time_ws_sc'] = pd.to_datetime(windSpeed_sc_df['date_time_ws_sc'])
# Pull all the data for today + 3 days
time_delta_ws = datetime.datetime.strptime(windSpeed_sc_df['date_ws_sc'][0],"%Y-%m-%d") + timedelta(days = 4)
windSpeed_sc_df['times_ws_sc'] = time_delta_ws.strftime("%Y-%m-%d")
windSpeed_sc_df = windSpeed_sc_df.loc[windSpeed_sc_df['date_ws_sc'] < windSpeed_sc_df['times_ws_sc']]
windSpeed_sc_df
# windSpeed_sc_df.dtypes
# =================== Wind Gust Data ======================
wind_gust_sc = []
for i in data_sc_forecast["properties"]["windGust"]["values"]:
wind_gust_sc.append(i)
wind_gust_sc_df = pd.DataFrame(wind_gust_sc)
wind_gust_sc_df
# Converting KM/hour to Miles/hour
wind_gust_sc_df['m/h_sc'] = wind_gust_sc_df['value'] * 0.621371
wind_gust_sc_df
# # validTime Column split to date and time for wind Gusts
date_wg_sc = wind_gust_sc_df['validTime'].str.split('T', n=1, expand=True)
time_wg_sc = date_wg_sc[1].str.split('+', n=1, expand=True)
time_wg_sc
wind_gust_sc_df['date_wg_sc'] = date_wg_sc[0]
wind_gust_sc_df['time_wg_sc'] = time_wg_sc[0]
# Combine date and time with a space in between the two
wind_gust_sc_df['date_time_wg_sc'] = wind_gust_sc_df['date_wg_sc'] + ' ' + wind_gust_sc_df['time_wg_sc']
# Convert the above to date time format so it can be recognized by PostgreSQL and js
# wind_gust_sc_df['date_time_wg_sc'] = pd.to_datetime(wind_gust_sc_df['date_time_wg_sc'])
wind_gust_sc_df
# Pull all the data for today + 3 days
time_delta_wg = datetime.datetime.strptime(wind_gust_sc_df['date_wg_sc'][0],"%Y-%m-%d") + timedelta(days = 4)
wind_gust_sc_df['times_wg_sc'] = time_delta_wg.strftime("%Y-%m-%d")
wind_gust_sc_df = wind_gust_sc_df.loc[wind_gust_sc_df['date_wg_sc'] < wind_gust_sc_df['times_wg_sc']]
wind_gust_sc_df
# wind_gust_sc_df.dtypes
# =================== Probability of Precipitation Data ======================
prob_precip_sc = []
for i in data_sc_forecast["properties"]["probabilityOfPrecipitation"]["values"]:
prob_precip_sc.append(i)
prob_precip_sc_df = pd.DataFrame(prob_precip_sc)
prob_precip_sc_df
# # validTime Column split to date and time for Probability Precipitation
date_pp_sc = prob_precip_sc_df['validTime'].str.split('T', n=1, expand=True)
time_pp_sc = date_pp_sc[1].str.split('+', n=1, expand=True)
time_pp_sc
prob_precip_sc_df['date_pp_sc'] = date_pp_sc[0]
prob_precip_sc_df['time_pp_sc'] = time_pp_sc[0]
# Combine date and time with a space in between the two
prob_precip_sc_df['date_time_pp_sc'] = prob_precip_sc_df['date_pp_sc'] + ' ' + prob_precip_sc_df['time_pp_sc']
# Convert the above to date time | |
<filename>bioslds/hdf.py<gh_stars>0
""" Define some functions to facilitate serializing to HDF files. """
import h5py
import numbers
import numpy as np
from types import SimpleNamespace
from typing import Any, Union
def write_dict_hierarchy(group: h5py.Group, d: dict, scalars_as_attribs: bool = True):
""" Write a nested dictionary structure to an HDF file.
This turns entries that are `dict`s into HDF groups. All other entries need to be
numbers, numeric arrays, or lists.
This function is adapted from https://stackoverflow.com/a/44077610.
Parameters
----------
group
HDF group where to save the data.
d
The data to save.
scalars_as_attribs
Single numbers are stored as attributes.
"""
for key, value in d.items():
if isinstance(value, dict):
sub_group = group.create_group(key)
write_dict_hierarchy(sub_group, value)
else:
is_seq = hasattr(value, "__len__")
is_str = isinstance(value, str)
if is_str:
value = np.string_(value)
if (is_seq and not is_str) or not scalars_as_attribs:
group.create_dataset(key, data=np.atleast_1d(value))
else:
group.attrs.create(key, value)
def read_dict_hierarchy(group: h5py.Group) -> dict:
""" Recurse through an HDF's group structure, and return it as a nested dictionary.
This is the converse to `write_dict_hierarchy`. The roundtrip is not perfect: all
sequences are returned as Numpy arrays.
The group's attributes are also stored in the dictionary. If an attribute name
conflicts with a dataset's name, it is prefixed by "attr_". If this prefixed version
of the name also conflicts, it is ignored.
Parameters
----------
group
HDF group from where to read.
Returns
-------
d : dict
A nested dictionary with the contents of the HDF group.
"""
d = {}
for key in group.keys():
value = group[key]
if not isinstance(value, h5py.Group):
value = value[()]
if np.issubdtype(value.dtype, np.string_) and len(value) == 1:
value = value[0].decode()
d[key] = value
else:
d[key] = read_dict_hierarchy(value)
for key in group.attrs.keys():
value = group.attrs[key]
if isinstance(value, bytes):
value = value.decode()
if key not in d:
d[key] = value
else:
d["attr_" + key] = value
return d
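def _example_dict_roundtrip(path="example_dict.h5"):
    """ Hypothetical usage sketch (not part of the original module): write a nested
    dictionary with `write_dict_hierarchy` and read it back with
    `read_dict_hierarchy`. The file name and the sample data are assumptions for
    the example; note that the roundtrip returns sequences as Numpy arrays, as
    documented above. """
    data = {"rate": 0.5, "label": "test", "nested": {"values": [1, 2, 3]}}
    with h5py.File(path, "w") as f:
        write_dict_hierarchy(f, data)
    with h5py.File(path, "r") as f:
        recovered = read_dict_hierarchy(f)
    return recovered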
def write_object_hierarchy(group: h5py.Group, obj: Any):
""" Write an object with all its sub-objects to an HDF file.
This first skips all attributes that start with an underscore or that are callable.
It then writes as datasets all attributes that are either numbers, strings, or
numeric Numpy arrays (including boolean). Scalars -- numbers and strings -- are
stored as `h5py` attributes. Non-numeric Numpy arrays are stored as lists (see
below). Everything that is not a scalar is stored as either a dataset or a group.
The function recursively goes through object attributes, writing them to file in a
hierarchical fashion.
The following types are treated in a special way:
1. Instances of `tuple` or `list`. Each item is stored using a name of the form
"_idx", and a special attribute called "_special_type" is set to "tuple" or
"list", respectively. A special attribute called "_len" is set to the number of
elements.
2. Instances of `dict`. This is stored as a list of tuples containing the
dictionary's items (that is, `list(d.items())`). The "_special_type" is set to
"dict".
3. Sequences, identified either by an iterable interface (i.e., has an `__iter__`
method) or by a random-access interface based on `__getitem__`. These are stored
as lists.
In each of the cases above, any additional non-callable attributes that do not start
with an underscore are also stored. There is a way to avoid storing all of a
sequence's elements: if the object has an attribute `hdf_skip_contents` that
evaluates to `True`. In this case, only the non-callable attributes that do not
start with an underscore are stored.
In all cases, the string representation of the object's Python `type` is stored as a
string attribute called "_type".
Because of the dynamic way in which Python processes attribute access, it is
entirely possible that accessing an attribute is a non-trivial operation that could
even potentially fail. For example, trying to access the `cffi` attribute of an
`np.BitGenerator` can raise `ImportError`. For this reason, in this function we
catch any exceptions raised while accessing an attribute, and silently ignore the
attributes that fail to be accessed.
Parameters
----------
group
HDF group where to save the data.
obj
The object to save.
"""
# store dictionaries as lists of (key, value) pairs
if isinstance(obj, dict):
write_object_hierarchy(group, list(obj.items()))
# but ensure that the type indicates that this was a dict
group.attrs.create("_special_type", "dict")
group.attrs.create("_type", str(type(obj)))
return
# store the object's type
group.attrs.create("_type", str(type(obj)))
# get the non-private attributes
attrib_names = [_ for _ in dir(obj) if not _.startswith("_")]
# ...but add some special and dummy attributes for sequences
is_seq = hasattr(obj, "__getitem__")
is_iter = hasattr(obj, "__iter__")
has_skip = hasattr(obj, "hdf_skip_contents") and obj.hdf_skip_contents
elem_list = None
if (is_seq or is_iter) and not has_skip:
# store a special type, and the sequence length
special_type = "list"
if isinstance(obj, tuple):
special_type = "tuple"
elif isinstance(obj, set):
special_type = "set"
group.attrs.create("_special_type", special_type)
if not is_seq or not hasattr(obj, "__len__"):
elem_list = [_ for _ in obj]
if elem_list is None:
n = len(obj)
else:
n = len(elem_list)
group.attrs.create("_len", n)
# add attributes for each element
attrib_names.extend(f"_{_}" for _ in range(n))
for attrib_name in attrib_names:
if attrib_name.startswith("_"):
# handle the special attributes for sequences
idx = int(attrib_name[1:])
crt_obj = obj if is_seq else elem_list
# noinspection PyBroadException
try:
attrib = crt_obj[idx]
except Exception:
# bail out if getitem fails for any reason
continue
else:
# otherwise get attribute value
# noinspection PyBroadException
try:
attrib = getattr(obj, attrib_name)
except Exception:
# bail out if getattr fails for any reason
continue
# skip callable attributes
if callable(attrib):
continue
# store single numbers or strings as attributes
if isinstance(attrib, numbers.Number) or isinstance(attrib, str):
group.attrs.create(attrib_name, attrib)
else:
# store numeric Numpy arrays (including boolean) as datasets
is_array = isinstance(attrib, np.ndarray)
is_number_array = is_array and np.issubdtype(attrib.dtype, np.number)
is_bool_array = is_array and np.issubdtype(attrib.dtype, np.bool_)
if is_number_array or is_bool_array:
group.create_dataset(attrib_name, data=np.atleast_1d(attrib))
else:
# store everything else as a sub-object
# in particular, store non-numeric Numpy arrays as lists
if is_array:
attrib = list(attrib)
sub_group = group.create_group(attrib_name)
write_object_hierarchy(sub_group, attrib)
class ExtendableList(list):
""" A list that we can add attributes to. """
pass
class ExtendableTuple(tuple):
""" A tuple that we can add attributes to. """
pass
class ExtendableSet(set):
""" A set that we can add attributes to. """
pass
class ExtendableDict(dict):
""" A dict that we can add attributes to. """
pass
def read_namespace_hierarchy(
group: h5py.Group,
) -> Union[tuple, list, set, dict, SimpleNamespace]:
""" Recurse through an HDF's group structure, and return it as a nested namespace.
This acts as a converse to `write_object_hierarchy`. While it does not attempt to
create instances of the appropriate objects beyond a few special cases (see below),
it returns a hierarchy that can be accessed in the same way as the original object
did before saving to HDF.
The group's attributes, not only its datasets, are also stored in the returned
namespace. If an attribute name conflicts with a dataset's name, it is prefixed by
"attr_". If this prefixed version of the name also conflicts, it is ignored.
The function treats `tuple`s, `list`s, `set`s, and `dict`s in a special way. It
looks for an attribute called "_special_type" in every group. If this exists and is
equal to "tuple", "list", "set", or "dict", it attempts to load that group as the
respective Python type. Dictionaries are assumed to be saved as lists of tuples, so
the function attempts to read an object with `special_type == "dict"` as a list and
then cast that into a `dict`.
For reading a sequence to work, an attribute called "_len" must exist, indicating
the number of elements in the sequence. Then for each index `idx` from 0 to this
length (excluding the length), the function looks for either a dataset or an
attribute called "_idx", and assigns this as the corresponding element in the list.
Missing elements are replaced by `None`, except for a `set`, where the numbering has
no effect.
If an object has `_special_type == "dict"` but is not stored as a list of tuples
with length 2, the list itself is returned instead.
The objects | |
"USER AD_histogram> "
for l in lines:
if pattern in l:
l = l.split(pattern)[1]
return l.split(",")
return False
# generic stuff
def cliOpts( cli_arguments, options):
# options, var = getopt.getopt(argv[1:], 'c:l:f:')
# cli_arguments = argv[1:]
# options = 'c:l:f:'
# XXX incomplete XXX
opts = {}
try:
options, extra = getopt.getopt(cli_arguments, options)
for o,a in options:
opts[o] = a
return opts
except getopt.GetoptError, err:
return str(err)
def checkDiskSpace(path, human=False):
from platform import uname
from sys import exc_info
system_info = uname()
platform = system_info[0]
if platform == 'Windows':
import ctypes
free_bytes = ctypes.c_ulonglong(0)
format_path = (u'%s\\') % path
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(format_path), None, None, ctypes.pointer(free_bytes))
available = free_bytes.value
try:
available = float(available)
except:
print "checkDiskSpace[%s]: ERROR => [%s]" % (platform, exc_info()[1])
available = -1
elif platform == "Linux" or platform == "Darwin":
disk = os.statvfs(path)
capacity = disk.f_bsize * disk.f_blocks
available = disk.f_bsize * disk.f_bavail
used = disk.f_bsize * (disk.f_blocks - disk.f_bavail)
else:
print "checkDiskSpace[%s]: ERROR unrecognized platform '%s'" % platform
return -1
if not human:
return available
else:
factor = [ 1024, 1048576, # mb
1073741824, # gb
1099511627776, # tb
]
suffix = [ 'Kb', 'Mb', 'Gb', 'Tb' ]
text = "-1 Kb"
for i in range(len(factor)):
fac = factor[i]
suf = suffix[i]
curr = available/float(fac)
if curr < 1:
try:
fac = factor[i-1]
suf = suffix[i-1]
curr = available/float(factor[i-1])
except:
pass
#print "%s" % exc_info()[1]
break
text = "%2.3f %s" % (curr, suf)
#print "checkDiskSpace>", text
return text
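# Hypothetical usage of checkDiskSpace (the numbers are illustrative only):
# checkDiskSpace('/data') -> free space in bytes, e.g. 52613349376
# checkDiskSpace('/data', human=True) -> readable string, e.g. "49.003 Gb"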
def stripDlgTime(line, type = 'real'):
locations ={ "real" : 0,
"cpu" : 1,
"sys" : 2}
h = 0
m = 0
s = 0
line = line.split(",")[ locations[type] ]
line = line.split("=")[1]
try:
h,reminder = line.split("h", 1)
except:
reminder = line
try:
m,reminder = reminder.split("m", 1)
except:
pass
s = reminder.split("s",1)[0]
h = float(h)*3600
m = float(m)*60
s = float(s)
return h+m+s
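# Hypothetical example (the DLG timing line format is assumed from the parsing above):
# stripDlgTime("Real= 1h 02m 03.00s, CPU= 0h 59m 30.00s, System= 0h 00m 01.00s", 'cpu')
# returns 3570.0 seconds (0*3600 + 59*60 + 30.0)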
def whichFile(filename):
""" provide same functionalities as the Unix command
'which', returning the full path of a given
filename/command
"""
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, filename)
if is_exe(exe_file):
return exe_file
return False
# if CheckExe(exe_file):
# AutoGridBin.set(os.path.normpath(exe_file))
# AutoGridExecButton.config(text = "Change AutoGrid executable", fg = 'black')
# TheCheck()
# return True
def removeEmptyLines(lines):
data = []
for l in lines:
if l.strip():
data.append(l)
return data
def getLines(filename, doStrip = False, removeEmpty=False):
""" """
f = open(filename, 'r')
lines = f.readlines()
f.close()
if doStrip:
lines = map(strip,lines)
if removeEmpty:
lines = removeEmptyLines(lines)
return lines
def makeDir(path, name):
""" create dir for the generation process"""
print "path", path
print "name",name
dirname = path + os.sep + name
try:
os.makedirs(dirname)
return dirname
except:
print "makeDir> ERROR creating dir [%s]: %s" % (dirname, exc_info()[1])
return False
def writeList(filename, inlist, mode = 'w', addNewLine = False):
if addNewLine: nl = "\n"
else: nl = ""
fp = open(filename, mode)
for i in inlist:
fp.write(str(i)+nl)
fp.close()
def readString(filename):
f = open(filename, 'r')
string = f.read()
f.close()
return string
def percent(value, total):
if total==0 or value ==0: return 0
return (float(value)/float(total))*100
def gaussian(x, ymax = 1, center=0, spread=0.7):
return ymax * e **( -((float(x)-center)**2/ (2*spread**2) ) )
def truncateName(s,
lmax, # max chars before start truncating
ellipses='...', # separator
lpad=20, # left padding chars (before ellipses)
rpad=30): # right padding chars (after ellipses)
if len(s)<=lmax:return s
else: return s[:lpad]+ellipses+s[-rpad:]
"""
#### REMOVED WITH THE NEXT ONE... TESTING THAT THEN REMOVE THIS
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
def pathToList(path, recursive = False, pattern = "", extension = ""):
""- given a path, returns all files matching a given pattern ("_OUT")
or an extension (i.e. ".dlg").
- if no pattern nor extension are specified, all files will be returned
- if (recursive == True), it scans also subfolders
Examples:
pattern = "_OUT", "_HIV_", "_VS.pdbqt"
extension = ".pdbqt", ".dlg"
NOTE: Extension has priority on Pattern!
""
def _matchpattern(pattern, text):
print "PATT, TEXT", pattern,text
return re.search(pattern, text)
if not recursive:
#matching_files = glob(os.path.join(path, "*"+extension))
#matching_files = filter(lambda x: pattern in x, matching_files)
matching_files = glob(os.path.join(path, pattern+"*"+extension))
else:
matching_files = []
for root, subFolders, files in os.walk(path):
for fname in files:
if _matchpattern(pattern, fname):
if extension:
name, ext = os.path.splitext(fname)
if ext == extension:
matching_files.append(os.path.join(root,fname))
else:
matching_files.append(os.path.join(root,fname))
return matching_files
"""
def pathToList(path, pattern="", recursive=False, extension = None):
""" find all files matching pattern in path, (OPT: recursively) """
result = []
if recursive:
for dirpath, dirnames, filenames in os.walk(path):
result.extend(os.path.join(dirpath,f) for f in fnmatch.filter(filenames,pattern))
else:
result = glob(os.path.join(path, pattern))
return result
def checkPdbqtList(filelist):
"""
check if the file is a valid AutoDock(Vina) file (mode):
- 'lig' : formatted ligand file
- 'rec' : target structure file
- 'flex': formatted flexible residue(s)
return dictionary { lig, rec, flex, error}
"""
lig = []
rec = []
flex = []
error = []
for f in filelist:
found = None
try:
data = getLines(f)
for l in data:
if l.startswith('BEGIN_RES'):
found = 'flex'
flex.append(f)
break
elif l.startswith('ROOT'):
found = 'lig'
lig.append(f)
break
elif l.startswith('ATOM') or l.startswith('HETATM'):
found = 'rec'
rec.append(f)
# if no keywords have been found so far
# a PDBQT can be only a rec file
break
except:
found = 'file_error [%s]: %s' % (f, exc_info()[1] )
error.append([f, found])
return {'lig' : lig, 'rec': rec, 'flex':flex, 'error': error}
def filetoname(filename, stripString=''):
""" usual string to strip could be '_rigid'
from a rigid PDBQT receptor file
"""
#print "filetoname>", filename
basename = os.path.basename(filename)
name = os.path.splitext(basename)[0]
return name.replace(stripString, '')
def xfindLigandsInDlg(dlg_list, # XXX DISABLED! XXX
checkSuccess=True,
variable=None,
widget=None,
handbrake=None,
mode='percent'):
"""
INFO parses the DLG in dlg_list to identify ligands;
multi-dlg for the same ligand are collated.
RETURN ligands dictionary:
ligands[x] = [dlg1,dlg2...]
list of problematic/unsuccessful dlg files (if found)
OPTIONS An optional Tk widget and a variable to be updated can be provided.
Update mode can be 'percent' or 'value'
handbrake : a global variable that can be checked to halt the process
"""
doUpdate = bool(variable and widget)
if widget and not variable:
    print "requested to update a widget, but None provided (widget,var)"
ligands = {}
problematic = []
c=0
t=len(dlg_list)
qs = QuickStop()
for f in dlg_list:
if handbrake: return ligands,problematic
c+=1
if doUpdate:
if mode=='percent':
x=int(percent(c,t))
elif mode=='value':
x=c
variable.set(x)
widget.update()
try:
lines = getLines(f)
for l in lines:
if l.startswith("DPF> move"):
l = l.split("DPF> move", 1)[1].split(".pdbqt")[0]
if checkSuccess and not "Successful" in lines[-5]:
raise qs
if not l in ligands:
ligands[l] = [f]
else:
ligands[l].append(f)
break
except QuickStop:
if DEBUG: print "[debug] problem reading file:",f
problematic.append(f)
return ligands, problematic
# geometry stuff
def quickdist(f,s,sq = False):
""" works with coordinates/vectors"""
d=(f[0]-s[0])**2 + (f[1]-s[1])**2 + (f[2]-s[2])**2
if sq: return sqrt(d)
else: return d
def dist(f, s, sq=False):
""" works with PDB(QT) lines"""
if sq:
#print (f[30:38], f[38:46], f[46:54])
#print (s[30:38], s[38:46], s[46:54])
return sqrt((float(f[30:38])-float(s[30:38]))**2 +\
(float(f[38:46])-float(s[38:46]))**2 +\
(float(f[46:54])-float(s[46:54]))**2 )
else: return (float(f[30:38])-float(s[30:38]))**2 +\
(float(f[38:46])-float(s[38:46]))**2 +\
(float(f[46:54])-float(s[46:54]))**2
# graph stuff
def getDistMatrixOLD(data, square=0):
# generates the NxN distance matrix from data
#print "DISTMATRIX1"
#import time
#t0 = time.time()
mtx = []
for i in range(len(data)):
a1 = data[i]
row = []
for j in range(len(data)):
if not i==j:
a2 = data[j]
#print a1, a2
#row.append(func( a1[1], a2[1])
# quickdist gives squared dist!
row.append(quickdist(a1[1], a2[1], sq=square)) # CHANGED Monday, April 23 2012 => problematic
#row.append(quickdist(a1, a2, sq=square)) # CHANGED Monday, April 23 2012 => problematic
# XXX File "/entropia/local/rc3/MGLToolsPckgs/AutoDockTools/piStackingAndRingDetection.py", line 399, in findLigandRings
#row.append( dist(a1, a2, sq=square) ) # XXX WORKING WITHVS
else:
row.append(0)
mtx.append(row)
#print time.time() - t0
return mtx
def getDistMatrix(data, square=0):
# XXX update to use np.array pre-initialized
# to save 50% of time skipping calculating b,a
# if a,b already calculated
#print "DISTMATRIX2"
#import time
#t0 = time.time()
mtx = -ones( (len(data), len(data)))
for i in range(len(data)):
a1 = data[i]
for j in range(len(data)):
a2 = data[j]
if i == j:
mtx[i,j] = 0.
else:
if not mtx[j,i] == -1:
mtx[i,j] = mtx[j,i]
else:
mtx[i,j] = quickdist(a1[1], a2[1], sq=square)
#print time.time() - t0
return mtx
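# Hypothetical vectorized alternative (not used by the callers in this module):
# builds the same matrix with NumPy broadcasting instead of the double Python
# loop. Like getDistMatrix, it assumes each entry of `data` stores its
# coordinates at index 1 and that square=True means real distances while
# square=False means squared distances.
def getDistMatrixNumpy(data, square=0):
    import numpy as _np
    coords = _np.array([d[1] for d in data], dtype=float)
    diff = coords[:, None, :] - coords[None, :, :]
    mtx = (diff ** 2).sum(axis=-1)
    if square:
        mtx = _np.sqrt(mtx)
    return mtx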
def makeGraph(nodes_list, distance_matrix, cutoff, exclusion_list = []):
# generates the connection graph | |
""" Module for risk-exploiting landmark identification scenario
- Scenario is derived from ergo_spread_small.py which is, in turn, derived from simple_spread.py
- N-agents must distribute themselves to cover N-landmarks
- Reward is based on the distance from each landmark to the closest agent (identical_reward case)
- One of the landmarks is actually a hazard that can cause agents in its vicinity to be terminated. The hazardous
landmark is unknown until one agent moves within its vicinity
- In contrast to ergo_spread_small, this scenario makes several parameters come from user inputs instead of
hardcoded. These user-defined inputs include
- number of agents
- number of hazards (0,1)
- whether rewards are shared or "local"
- whether observations are direct per entity or histogram based
- Hazard failure risk
- Collision failure risk
"""
import numpy as np
from random import shuffle
from multiagent.scenario import BaseScenario
from particle_environments.mager.world import MortalAgent, HazardousWorld, RiskRewardLandmark
from particle_environments.mager.observation import format_observation
from particle_environments.common import is_collision, distance, delta_pos, delta_vel
from particle_environments.common import RadialPolynomialRewardFunction2D as RadialReward
from particle_environments.common import RadialBernoulliRiskFunction2D as RadialRisk
from rl_algorithms.scenariolearning import ScenarioHeuristicAgentTrainer
from particle_environments.mager.observation import format_observation, agent_histogram_observation, landmark_histogram_observation
from particle_environments.common import truncate_or_pad
# Scenario Parameters
_MAX_COMMUNICATION_DISTANCE = np.inf
# _AGENT_SIZE = 0.15
_LANDMARK_SIZE = 0.05
_AGENT_OBSERVATION_LEN = 5
_LANDMARK_OBSERVATION_LEN = 3
_N_RADIAL_BINS = 4
_N_ANGULAR_BINS = 8
_MAX_HISTOGRAM_OBSERVATION_DISTANCE = 1.0
_N_OBSERVED_HAZARDS = 1
_N_OBSERVED_TERMINATIONS = 5
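# --- Hypothetical usage sketch (not part of the original scenario module) ---
# Shows how the constructor arguments described in the module docstring might be
# combined to build a world with the Scenario class defined below; the concrete
# values are illustrative assumptions only.
def _example_build_world():
    scenario = Scenario(num_agents=3, num_hazards=1, identical_rewards=True,
                        observation_type="histogram", hazard_risk=0.5,
                        collision_risk=0.0)
    world = scenario.make_world()
    return scenario, world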
class Scenario(BaseScenario):
def __init__(self, *, num_agents, num_hazards, identical_rewards, observation_type, hazard_risk=0.5, collision_risk=0.0):
'''
Args:
- num_agents [int] number of agents in scenario
- num_hazards [int] number of hazards landmarks in the scenario
- identical_rewards [bool] true if all agents receieve exact same reward, false if rewards are "local" to agents
- observation_type [str] "direct" if observation directly of each entity, "histogram" if bin entities in spacial grid
- hazard_risk [float] max probability of failure caused by hazard landmark
- collision_risk [float] probability of failure caused by collision
'''
# check inputs
assert isinstance(num_agents, int)
assert isinstance(num_hazards, int); assert (num_hazards == 0 or num_hazards == 1)
assert isinstance(identical_rewards, bool)
assert (observation_type == "direct" or observation_type == "histogram")
assert (hazard_risk >= 0.0 and hazard_risk <= 1.0)
assert (collision_risk >= 0.0 and collision_risk <= 1.0)
# set member vars
self.num_agents = num_agents
self.num_hazards = num_hazards
self.identical_rewards = identical_rewards
self.observation_type = observation_type
self.hazard_risk = hazard_risk
self.collision_risk = collision_risk
# create list of landmarks
# Note: RadialReward function is not directly used for calculating reward in this scenario, thus peak value of 0.0.
# non-zero radius used for numerical reasons
landmarks = []
for i in range(self.num_agents):
landmarks.append(RiskRewardLandmark(risk_fn=None, reward_fn=RadialReward(1.0, 0.0)))
for i in range(self.num_hazards):
landmarks.append(RiskRewardLandmark(risk_fn=RadialRisk(_LANDMARK_SIZE, self.hazard_risk), reward_fn=RadialReward(1.0, 0.0)))
self.scenario_landmarks = landmarks
self.n_landmarks = len(self.scenario_landmarks)
def make_world(self):
world = HazardousWorld(collision_termination_probability=self.collision_risk)
# observation-based communication
world.dim_c = 0
world.max_communication_distance = _MAX_COMMUNICATION_DISTANCE
# collaborative rewards
world.collaborative = True
world.systemic_rewards = False
world.identical_rewards = self.identical_rewards
# add landmarks to world
world.landmarks = []
for lm in self.scenario_landmarks:
world.landmarks.append(lm)
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark_%d' % i
landmark.collide = False
landmark.movable = False
landmark.size = _LANDMARK_SIZE
# properties for landmarks
if isinstance(landmark, RiskRewardLandmark) and landmark.is_hazard:
#TODO: make colors heatmap of risk probability over all bounds
landmark.color = np.array([landmark.risk_fn.get_failure_probability(0,0) + .1, 0, 0])
else:
landmark.color = np.array([0.25, 0.25, 0.25])
# make initial conditions
self.reset_world(world)
return world
def reset_world(self, world):
# find agent size as function of number of agents baselined off simple_spread 3-agent case
agent_size = 0.15*np.sqrt(3.0/float(self.num_agents))
# random properties for agents
# add agents
world.agents = [MortalAgent() for i in range(self.num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent_%d' % i
agent.collide = True
agent.silent = True
agent.terminated = False
agent.size = agent_size
agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
agent.color = np.array([0.35, 0.35, 0.85])
agent.previous_observation = None
# shuffle landmarks to make sure hazard is not in same index
shuffle(world.landmarks)
for i, landmark in enumerate(world.landmarks):
# rename landmarks to preserve label ordering in joint state (see mager/environment.py:get_joint_state)
landmark.name = 'landmark_%d' % i
landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
landmark.hazard_tag = 0.0
landmark.color = np.array([0.25, 0.25, 0.25])
def benchmark_data(self, agent, world):
collisions = 0
occupied_landmarks = 0
min_dists = 0
for l in world.landmarks:
dists = [np.linalg.norm(a.state.p_pos - l.state.p_pos) for a in world.agents]
min_dists += min(dists)
if min(dists) < 0.1:
occupied_landmarks += 1
if agent.collide:
for a in world.agents:
if is_collision(a, agent):
collisions += 1
return (self.reward(agent, world), collisions, min_dists, occupied_landmarks)
def done_callback(self, agent, world):
''' indicate a terminated agent as done (still being decided)
Notes:
- Even though a terminated agent cannot take actions, it may be more appropriate
to NOT mark the agent as done, in order to keep collecting data for training
purposes
'''
# if agent.terminated:
# return True
# else:
# return False
return False
def reward(self, agent, world):
if self.identical_rewards and world.identical_rewards:
return self._identical_reward(agent, world)
elif not self.identical_rewards and not world.identical_rewards:
return self._local_reward(agent, world)
else:
raise Exception(
"Inconsistent reward options: self.identical_rewards={} world.identical_rewards={}".format(
self.identical_rewards, world.identical_rewards))
def _identical_reward(self, agent, world):
''' use this function if all agents recieve identical rewards '''
# Agents are rewarded based on minimum agent distance to each landmark, penalized for collisions
assert self.identical_rewards == True
assert world.identical_rewards == True
rew = 0
for lm in [l for l in world.landmarks if not l.is_hazard]:
# dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
dists = [distance(ag,lm) for ag in world.agents]
rew -= min(dists)
if agent.collide:
for a in world.agents:
if is_collision(a, agent):
rew -= 1
return rew
def _local_reward(self, agent, world):
''' use this function if agents recieve separate "local" rewards, not identical '''
raise NotImplementedError()
def observation(self, agent, world):
''' call observation function based on type of observation function '''
if self.observation_type == "direct":
return self._direct_observation(agent, world)
elif self.observation_type == "histogram":
return self._histogram_observation(agent, world)
else:
raise Exception("Unrecognized observation type: {}".format(self.observation_type))
def _histogram_observation(self, agent, world):
''' observation in histogram format of number of entities in spacial bins '''
# get histogram of landmark observations (marking hazardous landmarks as needed)
landmark_histogram_2d, observed_hazards_2d = landmark_histogram_observation(
agent, world.landmarks, _MAX_HISTOGRAM_OBSERVATION_DISTANCE, _N_RADIAL_BINS, _N_ANGULAR_BINS)
# get histogram of agent observations
agent_histogram_2d, observed_terminations_2d = agent_histogram_observation(
agent, world.agents, _MAX_HISTOGRAM_OBSERVATION_DISTANCE, _N_RADIAL_BINS, _N_ANGULAR_BINS)
# flatten landmark and agent histograms to 1d list
landmark_histogram = [val for sublist in landmark_histogram_2d for val in sublist]
agent_histogram = [val for sublist in agent_histogram_2d for val in sublist]
# flatten, truncate/pad observed hazards and terminations to fixed length
observed_hazards = [val for sublist in observed_hazards_2d for val in sublist]
observed_hazards = truncate_or_pad(observed_hazards, 2*_N_OBSERVED_HAZARDS)
observed_terminations = [val for sublist in observed_terminations_2d for val in sublist]
observed_terminations = truncate_or_pad(observed_terminations, 2*_N_OBSERVED_TERMINATIONS)
# package new observation
new_obs = np.asarray([agent.terminated] + agent.state.p_vel.tolist() + agent.state.p_pos.tolist() + landmark_histogram + observed_hazards + agent_histogram + observed_terminations)
# append previous observation for velocity estimation
if agent.previous_observation is None:
agent.previous_observation = 0.0*new_obs
obs = np.append(new_obs, agent.previous_observation)
agent.previous_observation = new_obs
return obs
def _direct_observation(self, agent, world):
''' observation where each entity's state has its own component of the observation vector '''
# get positions of all entities in this agent's reference frame
def observe_agents(other_agent):
''' fill in information communicated/observed between agents
'''
# check if node is terminated
is_terminated = 0
if isinstance(other_agent, MortalAgent) and other_agent.terminated:
is_terminated = 1
dx = dy = dvx = dvy = 0.
if not is_terminated:
dx, dy = delta_pos(other_agent, agent)
dvx, dvy = delta_vel(other_agent, agent)
ag_obs = [is_terminated, dx, dy, dvx, dvy]
assert(len(ag_obs) == _AGENT_OBSERVATION_LEN)
return ag_obs
def observe_landmarks(landmark):
''' fill in information observed about landmarks
'''
ld_obs = delta_pos(landmark, agent).tolist()
# check if within observation range and is observable
d = distance(landmark, agent)
if d > world.max_communication_distance:
ld_obs = [0.0]*len(ld_obs)
# check if landmark is giving reward or hazard warning
if d < landmark.size:
if landmark.is_hazard:
landmark.hazard_tag = 1.0
landmark.color = np.array([1.1, 0, 0])
ld_obs += [landmark.hazard_tag]
assert(len(ld_obs) == _LANDMARK_OBSERVATION_LEN)
return ld_obs
landmark_positions = format_observation(observe = observe_landmarks,
objects = world.landmarks,
num_observations = len(world.landmarks),
observation_size = _LANDMARK_OBSERVATION_LEN)
agent_observations = format_observation(observe = observe_agents,
objects = [a for a in world.agents if (a is not agent)],
num_observations = self.num_agents-1,
observation_size = _AGENT_OBSERVATION_LEN)
new_obs = np.asarray([agent.terminated] + agent.state.p_vel.tolist() + agent.state.p_pos.tolist() + landmark_positions | |
= to_chain((df, x, y), meta)
chain.name = name
chain_dfs.append(chain)
if not folder:
chain_coll.extend(chain_dfs)
else:
folders = [(i, list(c.keys())[0]) for i, c in
enumerate(chain_coll, 0) if
isinstance(c, dict)]
if folder in [f[1] for f in folders]:
pos = [f[0] for f in folders
if f[1] == folder][0]
chain_coll[pos][folder].extend(chain_dfs)
else:
chain_coll.append({folder: chain_dfs})
except:
failed.append(name)
return chain_coll
chain_coll = []
chains = mine_mtd(mtd_doc, paint, chain_coll)
self.__chains = chains
return self
def from_cmt(self, crunch_tabbook, ignore=None, cell_items='c',
array_summaries=True):
"""
Convert a Crunch multitable document (tabbook) into a collection of
quantipy.Chain representations.
Parameters
----------
crunch_tabbook : ``Tabbook`` object instance
Text
ignore : bool, default False
Text
cell_items : {'c', 'p', 'cp'}, default 'c'
Whether the converted views carry counts ('c'), percentages ('p') or both ('cp').
array_summaries : bool, default True
If True, array (grid) variables also get their array summary views converted.
Returns
-------
self : quantipy.ChainManager
Will consist of Quantipy representations of the Crunch table
document.
"""
def cubegroups_to_chain_defs(cubegroups, ci, arr_sum):
"""
Convert CubeGroup DataFrame to a Chain.dataframe.
"""
chain_dfs = []
# DataFrame edits to get basic Chain.dataframe rep.
for idx, cubegroup in enumerate(cubegroups):
cubegroup_df = cubegroup.dataframe
array = cubegroup.is_array
# split arrays into separate dfs / convert to summary df...
if array:
ai_aliases = cubegroup.subref_aliases
array_elements = []
dfs = []
if array_summaries:
arr_sum_df = cubegroup_df.copy().unstack()['All']
arr_sum_df.is_summary = True
x_label = arr_sum_df.index.get_level_values(0).tolist()[0]
x_name = cubegroup.rowdim.alias
dfs.append((arr_sum_df, x_label, x_name))
array_elements = cubegroup_df.index.levels[1].values.tolist()
ai_df = cubegroup_df.copy()
idx = cubegroup_df.index.droplevel(0)
ai_df.index = idx
for array_element, alias in zip(array_elements, ai_aliases):
dfs.append((ai_df.loc[[array_element], :].copy(),
array_element, alias))
else:
x_label = cubegroup_df.index.get_level_values(0).tolist()[0]
x_name = cubegroup.rowdim.alias
dfs = [(cubegroup_df, x_label, x_name)]
# Apply QP-style DataFrame conventions (indexing, names, etc.)
for cgdf, x_var_label, x_var_name in dfs:
is_summary = hasattr(cgdf, 'is_summary')
if is_summary:
cgdf = cgdf.T
y_var_names = ['@']
x_names = ['Question', 'Values']
y_names = ['Array', 'Questions']
else:
y_var_names = cubegroup.colvars
x_names = ['Question', 'Values']
y_names = ['Question', 'Values']
cgdf.index = cgdf.index.droplevel(0)
# Compute percentages?
if cell_items == 'p': _calc_pct(cgdf)
# Build x-axis multiindex / rearrange "Base" row
idx_vals = cgdf.index.values.tolist()
cgdf = cgdf.reindex([idx_vals[-1]] + idx_vals[:-1])
idx_vals = cgdf.index.values.tolist()
mi_vals = [[x_var_label], self._native_stat_names(idx_vals)]
row_mi = pd.MultiIndex.from_product(mi_vals, names=x_names)
cgdf.index = row_mi
# Build y-axis multiindex
y_vals = [('Total', 'Total') if y[0] == 'All'
else y for y in cgdf.columns.tolist()]
col_mi = pd.MultiIndex.from_tuples(y_vals, names=y_names)
cgdf.columns = col_mi
if is_summary:
cgdf = cgdf.T
chain_dfs.append((cgdf, x_var_name, y_var_names, cubegroup._meta))
return chain_dfs
def _calc_pct(df):
df.iloc[:-1, :] = df.iloc[:-1, :].div(df.iloc[-1, :]) * 100
return None
def to_chain(basic_chain_defintion, add_chain_meta):
"""
"""
new_chain = Chain(None, basic_chain_defintion[1])
new_chain.source = 'Crunch multitable'
new_chain.stack = None
new_chain.painted = True
new_chain._meta = add_chain_meta
new_chain._frame = basic_chain_defintion[0]
new_chain._x_keys = [basic_chain_defintion[1]]
new_chain._y_keys = basic_chain_defintion[2]
new_chain._given_views = None
new_chain._grp_text_map = []
new_chain._text_map = None
new_chain._pad_id = None
new_chain._array_style = None
new_chain._has_rules = False
new_chain.double_base = False
new_chain.sig_test_letters = None
new_chain.totalize = True
new_chain._meta['var_meta'] = basic_chain_defintion[-1]
new_chain._extract_base_descriptions()
new_chain._views = OrderedDict()
for vk in new_chain._views_per_rows():
if not vk in new_chain._views:
new_chain._views[vk] = new_chain._views_per_rows().count(vk)
return new_chain
# self.name = name OK!
# self._meta = Crunch meta OK!
# self._x_keys = None OK!
# self._y_keys = None OK!
# self._frame = None OK!
# self.totalize = False OK! -> But is True!
# self.stack = stack OK! -> N/A
# self._has_rules = None OK! -> N/A
# self.double_base = False OK! -> N/A
# self.sig_test_letters = None OK! -> N/A
# self._pad_id = None OK! -> N/A
# self._given_views = None OK! -> N/A
# self._grp_text_map = [] OK! -> N/A
# self._text_map = None OK! -> N/A
# self.grouping = None ?
# self._group_style = None ?
# self._transl = qp.core.view.View._metric_name_map() * with CMT/MTD
self.source = 'Crunch multitable'
cubegroups = crunch_tabbook.cube_groups
meta = {'display_settings': crunch_tabbook.display_settings,
'weight': crunch_tabbook.weight}
if cell_items == 'c':
meta['display_settings']['countsOrPercents'] = 'counts'
elif cell_items == 'p':
meta['display_settings']['countsOrPercents'] = 'percent'
chain_defs = cubegroups_to_chain_defs(cubegroups, cell_items,
array_summaries)
self.__chains = [to_chain(c_def, meta) for c_def in chain_defs]
return self
# ------------------------------------------------------------------------
def from_cluster(self, clusters):
"""
Create an OrderedDict of ``Cluster`` names storing new ``Chain``\s.
Parameters
----------
clusters : cluster-like ([dict of] quantipy.Cluster)
Text ...
Returns
-------
new_chain_dict : OrderedDict
Text ...
"""
self.source = 'native (old qp.Cluster of qp.Chain)'
qp.set_option('new_chains', True)
def check_cell_items(views):
c = any('counts' in view.split('|')[-1] for view in views)
p = any('c%' in view.split('|')[-1] for view in views)
cp = c and p
if cp:
cell_items = 'counts_colpct'
else:
cell_items = 'counts' if c else 'colpct'
return cell_items
def check_sigtest(views):
"""
"""
levels = []
sigs = [v.split('|')[1] for v in views if v.split('|')[1].startswith('t.')]
for sig in sigs:
l = '0.{}'.format(sig.split('.')[-1])
if not l in levels: levels.append(l)
return levels
def mine_chain_structure(clusters):
cluster_defs = []
for cluster_def_name, cluster in list(clusters.items()):
for name in cluster:
if isinstance(list(cluster[name].items())[0][1], pd.DataFrame):
cluster_def = {'name': name,
'oe': True,
'df': list(cluster[name].items())[0][1],
'filter': chain.filter,
'data_key': chain.data_key}
else:
xs, views, weight = [], [], []
for chain_name, chain in list(cluster[name].items()):
for v in chain.views:
w = v.split('|')[-2]
if w not in weight: weight.append(w)
if v not in views: views.append(v)
xs.append(chain.source_name)
ys = chain.content_of_axis
cluster_def = {'name': '{}-{}'.format(cluster_def_name, name),
'filter': chain.filter,
'data_key': chain.data_key,
'xs': xs,
'ys': ys,
'views': views,
'weight': weight[-1],
'bases': 'both' if len(weight) == 2 else 'auto',
'cell_items': check_cell_items(views),
'tests': check_sigtest(views)}
cluster_defs.append(cluster_def)
return cluster_defs
from quantipy.core.view_generators.view_specs import ViewManager
cluster_specs = mine_chain_structure(clusters)
for cluster_spec in cluster_specs:
oe = cluster_spec.get('oe', False)
if not oe:
vm = ViewManager(self.stack)
vm.get_views(cell_items=cluster_spec['cell_items'],
weight=cluster_spec['weight'],
bases=cluster_spec['bases'],
stats= ['mean', 'stddev', 'median', 'min', 'max'],
tests=cluster_spec['tests'])
self.get(data_key=cluster_spec['data_key'],
filter_key=cluster_spec['filter'],
x_keys = cluster_spec['xs'],
y_keys = cluster_spec['ys'],
views=vm.views,
orient='x',
prioritize=True)
else:
meta = [cluster_spec['data_key'], cluster_spec['filter']]
df, name = cluster_spec['df'], cluster_spec['name']
self.add(df, meta_from=meta, name=name)
return None
@staticmethod
def _force_list(obj):
if isinstance(obj, (list, tuple)):
return obj
return [obj]
def _check_keys(self, data_key, keys):
""" Checks given keys exist in meta['columns']
"""
keys = self._force_list(keys)
meta = self.stack[data_key].meta
valid = list(meta['columns'].keys()) + list(meta['masks'].keys())
invalid = ['"%s"' % _ for _ in keys if _ not in valid and _ != _TOTAL]
if invalid:
raise ValueError("Keys %s do not exist in meta['columns'] or "
"meta['masks']." % ", ".join(invalid))
return keys
def add(self, structure, meta_from=None, meta=None, name=None):
""" Add a pandas.DataFrame as a Chain.
Parameters
----------
structure : ``pandas.Dataframe``
The dataframe to add to the ChainManger
meta_from : list, list-like, str, default None
The location of the meta in the stack. Either a list-like object with data key and
filter key or a str as the data key
meta : quantipy meta (dict)
External meta used to paint the frame
name : ``str``, default None
The name to give the resulting chain. If not passed, the name will become
the concatenated column names, delimited by a period
Returns
-------
appended : ``quantipy.ChainManager``
"""
name = name or '.'.join(structure.columns.tolist())
chain = Chain(self.stack, name, structure=structure)
chain._frame = chain.structure
chain._index = chain._frame.index
chain._columns = chain._frame.columns
chain._frame_values = chain._frame.values
if meta_from:
if isinstance(meta_from, str):
chain._meta = self.stack[meta_from].meta
else:
data_key, filter_key = meta_from
chain._meta = self.stack[data_key][filter_key].meta
elif meta:
chain._meta = meta
self.__chains.append(chain)
return self
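# Hypothetical usage sketch for `add` (the names are illustrative only):
#     chain_manager.add(open_ends_df, meta_from=['mydata', 'no_filter'],
#                       name='open ends')
# pulls the meta from stack['mydata']['no_filter'] and appends the resulting
# structure Chain to the manager.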
def get(self, data_key, filter_key, x_keys, y_keys, views, orient='x',
rules=True, rules_weight=None, prioritize=True, folder=None):
"""
TODO: Full doc string
Get a (list of) Chain instance(s) in either 'x' or 'y' orientation.
Chain.dfs will be concatenated along the provided 'orient'-axis.
"""
# TODO: VERIFY data_key
# TODO: VERIFY filter_key
# TODO: Add verbose arg to get()
x_keys = self._check_keys(data_key, x_keys)
y_keys = self._check_keys(data_key, y_keys)
if folder and not isinstance(folder, str):
err = "'folder' must be a name provided as string!"
raise ValueError(err)
if orient == 'x':
it, keys = x_keys, y_keys
else:
it, keys = y_keys, x_keys
for key in it:
x_key, y_key = (key, keys) if orient == 'x' else (keys, key)
chain = Chain(self.stack, key)
chain = chain.get(data_key, filter_key, self._force_list(x_key),
self._force_list(y_key), views, rules=rules,
rules_weight=rules_weight, prioritize=prioritize,
orient=orient)
folders = self.folder_names
if folder in folders:
idx = self._idx_from_name(folder)
self.__chains[idx][folder].append(chain)
else:
if folder:
self.__chains.append({folder: [chain]})
else:
self.__chains.append(chain)
return None
def paint_all(self, *args, **kwargs):
"""
Apply labels, sig. testing conversion and other post-processing to the
``Chain.dataframe`` property.
Use this to prepare a ``Chain`` for further usage in an Excel or PowerPoint build.
Parameters
----------
text_key : str, default meta['lib']['default text']
The language version of any variable metadata applied.
| |
:
code, lower, upper, frac, interpolation, interpolationQualifier = self.getBoundingSubFunctions( energy )
if( code is None ) :
raise Exception( 'No distribution' )
elif( code == '' ) :
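# unit-base style interpolation: rescale each bounding spectrum's mean outgoing
# energy onto the interpolated [EpMidMin, EpMidMax] domain before mixing by frac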
EpLowerMin, EpLowerMax = lower.domainMin, lower.domainMax
EpUpperMin, EpUpperMax = upper.domainMin, upper.domainMax
EpMidMin = ( 1 - frac ) * EpLowerMin + frac * EpUpperMin
EpMidMax = ( 1 - frac ) * EpLowerMax + frac * EpUpperMax
EpLower = ( lower.integrateWithWeight_x( ) - EpLowerMin ) / ( EpLowerMax - EpLowerMin ) * \
( EpMidMax - EpMidMin ) + EpMidMin
EpUpper = ( upper.integrateWithWeight_x( ) - EpUpperMin ) / ( EpUpperMax - EpUpperMin ) * \
( EpMidMax - EpMidMin ) + EpMidMin
return( ( 1 - frac ) * EpLower + frac * EpUpper )
else :
return( lower.integrateWithWeight_x( ) )
def check( self, info ) :
from fudge import warning
warnings = []
if self.interpolation == standardsModule.interpolation.flatToken:
warnings.append( warning.flatIncidentEnergyInterpolation( ) )
for idx in range(len(self)):
integral = self[idx].integrate()
if abs(integral - 1.0) > info['normTolerance']:
warnings.append( warning.unnormalizedDistribution( PQUModule.PQU( self[idx].outerDomainValue, self.axes[-1].unit ), idx, integral, self[idx] ) )
if( self[idx].rangeMin < 0.0 ) :
warnings.append( warning.negativeProbability( PQUModule.PQU( self[idx].outerDomainValue, self.axes[-1].unit ),
value=self[idx].rangeMin, obj=self[idx] ) )
return warnings
def sqrtEp_AverageAtE( self, E ) :
return( self.energySpectrumAtEnergy( E ).integrateWithWeight_sqrt_x( ) )
def toPointwise_withLinearXYs( self, **kwargs ) :
return( multiD_XYsModule.XYs2d.toPointwise_withLinearXYs( self, cls = XYs2d, **kwargs ) )
def to_xs_pdf_cdf1d( self, style, tempInfo, indent ) :
linear = self
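# If any outgoing-energy distribution is not lin-lin (or flat) interpolated,
# linearize the whole 2d function first, then rebuild each 1d spectrum in
# (x, pdf, cdf) form.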
for xys in self :
if( isinstance( xys, XYs1d ) ) :
if( xys.interpolation not in [ standardsModule.interpolation.linlinToken, standardsModule.interpolation.flatToken ] ) :
linear = self.toPointwise_withLinearXYs( accuracy = XYsModule.defaultAccuracy, upperEps = 1e-8 )
break
else :
linear = self.toPointwise_withLinearXYs( accuracy = XYsModule.defaultAccuracy, upperEps = 1e-8 )
break
subform = XYs2d( axes = self.axes, interpolation = self.interpolation,
interpolationQualifier = self.interpolationQualifier )
for xys in linear : subform.append( xs_pdf_cdf1d.fromXYs( xys, xys.outerDomainValue ) )
return( subform )
@staticmethod
def allowedSubElements( ) :
return( ( XYs1d, regions1d, xs_pdf_cdf1d ) )
class regions2d( subform, regionsModule.regions2d ) :
def __init__( self, **kwargs ):
regionsModule.regions2d.__init__( self, **kwargs )
subform.__init__( self )
def check( self, info ) :
from fudge import warning
warnings = []
for idx, region in enumerate( self ):
regionWarnings = region.check( info )
if regionWarnings:
warnings.append( warning.context("Region %d:" % idx, regionWarnings) )
return warnings
def toPointwise_withLinearXYs( self, **kwargs ) :
return( regionsModule.regions2d.toPointwise_withLinearXYs( self, cls = XYs2d, **kwargs ) )
def to_xs_pdf_cdf1d( self, style, tempInfo, indent ) :
_regions2d = regions2d( axes = self.axes )
for region in self : _regions2d.append( region.to_xs_pdf_cdf1d( style, tempInfo, indent ) )
return( _regions2d )
@staticmethod
def allowedSubElements( ) :
return( ( XYs2d, ) )
class energyFunctionalData( ancestryModule.ancestry ) :
ancestryMembers = ( 'data', )
def __init__( self, data ) :
ancestryModule.ancestry.__init__( self )
self.data = data
self.data.setAncestor( self )
def convertUnits( self, unitMap ) :
"See documentation for reactionSuite.convertUnits."
self.data.convertUnits( unitMap )
def copy( self ):
return self.__class__( self.data.copy( ) )
__copy__ = copy
def toXMLList( self, indent = '', **kwargs ) :
xml = ['%s<%s>' % (indent, self.moniker)]
xml += self.data.toXMLList( indent + ' ', **kwargs )
xml[-1] += '</%s>' % self.moniker
return xml
@classmethod
def parseXMLNode( cls, element, xPath, linkData ):
xPath.append( element.tag )
subClass = {
XYs1d.moniker : XYsModule.XYs1d,
regions1d.moniker : regionsModule.regions1d,
xs_pdf_cdf1d.moniker : xs_pdf_cdfModule.xs_pdf_cdf1d
}.get( element[0].tag )
if( subClass is None ) : raise Exception( "encountered unknown energy functional subform: %s" % element[0].tag )
EFD = cls( subClass.parseXMLNode( element[0], xPath, linkData ) )
xPath.pop()
return EFD
class a( energyFunctionalData ) :
moniker = 'a'
class b( energyFunctionalData ) :
moniker = 'b'
class theta( energyFunctionalData ) :
moniker = 'theta'
class g( energyFunctionalData ) :
moniker = 'g'
class T_M( energyFunctionalData ) :
moniker = 'T_M'
class functionalBase( subform ) :
ancestryMembers = ( 'parameter1', 'parameter2' )
def __init__( self, LF, U, parameter1, parameter2 = None ) :
subform.__init__( self )
if( U is not None ) :
if( not( isinstance( U, physicalQuantityModule.U ) ) ) : raise TypeError( 'Invalid U type' )
self.U = U
self.LF = LF
self.parameter1 = parameter1
self.parameter1.setAncestor( self )
self.parameter2 = parameter2
if( parameter2 is not None ) : self.parameter2.setAncestor( self )
def convertUnits( self, unitMap ) :
"See documentation for reactionSuite.convertUnits."
if( self.U is not None ) : self.U.convertUnits( unitMap )
self.parameter1.convertUnits( unitMap )
if( self.parameter2 is not None ) : self.parameter2.convertUnits( unitMap )
def copy( self ):
U = self.U
if( U is not None ) : U = self.U.copy( )
if( self.parameter2 is None ) :
return self.__class__( U, self.parameter1.copy( ) )
else :
return self.__class__( U, self.parameter1.copy( ), self.parameter2.copy( ) )
__copy__ = copy
def check( self, info ):
from fudge import warning
warnings = []
if( ( self.domainMin - self.U.value ) < 0 ) :
warnings.append( warning.energyDistributionBadU( self ) )
return( warnings )
@property
def domainMin( self ) :
return( self.parameter1.data.domainMin )
@property
def domainMax( self ) :
return( self.parameter1.data.domainMax )
def getEnergyArray( self, EMin = None, EMax = None ) :
if( isinstance( self.parameter1.data, regionsModule.regions1d ) ) :
Es = []
for region in self.parameter1.data :
Es = Es[:-1] + [ E for E, p in region ]
else :
Es = [ E for E, p in self.parameter1.data ]
if( EMin is not None ) :
if( EMin < ( 1.0 - 1e-15 ) * Es[0] ) : Es.insert( 0, EMin )
if( EMax is not None ) :
if( EMax > Es[-1] ) : Es.append( EMax )
return( Es )
def toXMLList( self, indent = '', **kwargs ) :
"""Returns the xml string representation of self."""
indent2 = indent + kwargs.get( 'incrementalIndent', ' ' )
xmlString = [ self.XMLStartTagString( indent = indent ) ]
if( self.LF == 12 ) :
xmlString += self.EFL.toXMLList( indent2, **kwargs )
xmlString += self.EFH.toXMLList( indent2, **kwargs )
else :
xmlString += self.U.toXMLList( indent2, **kwargs )
xmlString += self.parameter1.toXMLList( indent2, **kwargs )
if( not( self.parameter2 is None ) ) : xmlString += self.parameter2.toXMLList( indent2, **kwargs )
xmlString[-1] += '</%s>' % self.moniker
return( xmlString )
class generalEvaporationSpectrum( functionalBase ) :
moniker = 'generalEvaporation'
def __init__( self, U, thetas, gs ) :
functionalBase.__init__( self, 5, U, thetas, gs )
def convertUnits( self, unitMap ) :
"""
Overriding method from base class due to a processing error
(axes for 'g' parameter not copied during processing).
"""
#FIXME: This method should be removed once processed libraries are replaced.
if( self.U is not None ) : self.U.convertUnits( unitMap )
self.parameter1.convertUnits( unitMap )
# skip 'g': it's unitless and may not have axes defined
#if( self.parameter2 is not None ) : self.parameter2.convertUnits( unitMap )
def averageEp( self, E ) :
return( self.parameter1.data.evaluate( E ) * self.parameter2.data.integrateWithWeight_x( ) )
def sqrtEp_AverageAtE( self, E ) :
return( math.sqrt( self.parameter1.data.evaluate( E ) ) * self.parameter2.data ).integrateWithWeight_sqrt_x( )
def isLinear( self, qualifierOk = False, flatIsOk = False ) :
"""
Returns the results of isLinear called on the axes of g(E'|E).
"""
return( self.parameter2.axes.isLinear( qualifierOk = qualifierOk, flatIsOk = flatIsOk ) )
def to_xs_pdf_cdf1d( self, style, tempInfo, indent ) :
_gs = g( xs_pdf_cdf1d.fromXYs( self.parameter2.data ) )
_gs.data.axes = self.parameter2.data.axes.copy()
_form = generalEvaporationSpectrum( self.U, thetas = self.parameter1.copy( ), gs = _gs )
return( _form )
def toPointwise_withLinearXYs( self, **kwargs ) :
pwl = XYs2d( axes = defaultAxes( self.parameter1.data.domainUnit ) )
thetas = self.parameter1.data.toPointwise_withLinearXYs( **kwargs )
gs = self.parameter2.data.toPointwise_withLinearXYs( **kwargs )
for E_in, theta in thetas :
data = [ [ theta * x, y / theta ] for x, y in gs ]
data = XYs1d( data, outerDomainValue = E_in )
data.normalize( insitu = True )
pwl.append( data )
return( pwl )
@staticmethod
def parseXMLNode( element, xPath, linkData ) :
"""Translate <generalEvaporation> element from xml."""
xPath.append( element.tag )
theta_ = theta.parseXMLNode( element.find(theta.moniker), xPath, linkData )
g_ = g.parseXMLNode( element.find(g.moniker), xPath, linkData )
U = physicalQuantityModule.U.parseXMLNode( element.find( 'U' ), xPath, linkData )
GES = generalEvaporationSpectrum( U, theta_, g_ )
xPath.pop()
return GES
class simpleMaxwellianFissionSpectrum1d : # FIXME,
colN, means, scatters):
"""
Creates lists of ids of centrals and satellite galaxies corresponding to the density given in the n(z).
:param colN: name of the column you construct the catalog with
:param means: means of the Gaussians, array the same length of the redshift bin
:param scatters: scatters of the Gaussians, array the same length of the redshift bin
"""
ids = []
for ii in range(len(self.zmin)):
print "gets all halos for ", self.zmin[ii], "<z<", self.zmax[ii], "with col5 to mock ", self.nGal[ii], " galaxies."
IDhz, QTY, nn, bb = get_distrib_QTY( hdu, colN, self.zmin[ii], self.zmax[ii] )
ids.append( self.select_Gaussian( means[ii], scatters[ii], self.nGal[ii], IDhz, QTY ) )
self.idSel = n.hstack(( ids ))
self.NhaloMock = len((self.idSel).nonzero()[0])
def get_distrib_QTY_cen(self, colN, z1, z2):
"""Computes the cumulative histogram of a column for central halos in the range z1, z2.
:param colN: name of the column you want to take the histogram.
:param z1: minimum redshift
:param z2: maximum redshift
"""
zsel = self.slice_Z(z1, z2) & (self.cen)
IDhz = self.IDh[zsel] # all ids in this redshift bin
QTY = self.hdu[1].data[colN][zsel] # all QTY in this redshift bin
nn,bb,pp = p.hist(QTY,cumulative = True,bins = len(QTY)/100)
p.clf()
return IDhz,QTY,nn,bb
def get_distrib_QTY_sat(self, colN, z1, z2):
"""Computes the cumulative histogram of a column for satellite halos in the range z1, z2.
:param colN: name of the column you want to take the histogram.
:param z1: minimum redshift
:param z2: maximum redshift
"""
zsel = self.slice_Z(z1, z2) & (self.sat)
IDhz = self.IDh[zsel] # all ids in this redshift bin
QTY = self.hdu[1].data[colN][zsel] # all QTY in this redshift bin
nn,bb,pp = p.hist(QTY,cumulative = True,bins = len(QTY)/100)
p.clf()
return IDhz,QTY,nn,bb
def select_GaussianFsat(self,meanQTY,scatterQTY,fsat, nGal_perbin, IDhz_c, QTY_c, IDhz_s, QTY_s ):
"""
Extracts the ids of halos to create a mock with a Gaussian distribution.
:param colN: name of the column you wish to work on for the sham.
:param meanQTY: mean of the distribution
:param scatterQTY: scatter of the distribution
:param fsat: fraction of satellite in this bin
:param nGal_perbin: total number of galaxies in this bins to mock
:param IDhz_c: IDs of the central halos in this bin
:param QTY_c: column to do the match on, mass, velocity, ... for the central halos
:param IDhz_s: IDs of the satellite halos in this bin
:param QTY_s: column to do the match on, mass, velocity, ... for the satellite halos
"""
nSat = int(nGal_perbin*fsat)
print "satellites",nGal_perbin,nSat,fsat,meanQTY,scatterQTY
# constructs the QTY intervals around the distribution
expected_cdf = lambda x : st.norm.cdf(x, loc = meanQTY, scale = scatterQTY)
interval = [ meanQTY - 9 * scatterQTY , meanQTY + 9 * scatterQTY]
xs = n.arange(interval[0],interval[1],(interval[1]-interval[0])/1000.)
out = expected_cdf(xs)
expected_cdf_inv = interp1d(out,xs)
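# The +/- 9 sigma interval is split at the 1%, 10%, 20%, ..., 90% quantiles of
# the target Gaussian; halos are then drawn bin by bin from these quantile
# intervals so the selected sample follows the requested distribution while
# matching the wanted number of galaxies.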
boundaries = n.hstack((expected_cdf_inv(0.01),expected_cdf_inv(n.arange(0.1,0.91,0.1)), interval[1]))
# gets the number of halos to select the SAT
expected_cdf_s = lambda x : nSat * st.norm.cdf(x, loc = meanQTY, scale = scatterQTY)
Up_s = expected_cdf_s(boundaries[1:])
Low_s = n.hstack(( 0., expected_cdf_s(boundaries[1:])[:-1] ))
N2select_s = Up_s-Low_s
# select in mass in the box
qsels_s = n.array([ (QTY_s>boundaries[ii])&(QTY_s<= boundaries[ii+1]) for ii in range(len(boundaries)-1) ])
IDhzqAll_s = n.array([ IDhz_s[qs] for qs in qsels_s ])
# random downsample to the N2select in each bin
i = 0
ids_selected_s = []
for arr2 in IDhzqAll_s:
random.shuffle(arr2)
#print len(arr2),int(N2select_s[i])
ids_selected_s.append(arr2[:int(N2select_s[i])])
i+= 1
id_s = n.hstack((n.array(ids_selected_s)))
nSatReal = len(id_s)
nCen = nGal_perbin-nSatReal
print "centrals", nGal_perbin,nSat,nCen,fsat,meanQTY,scatterQTY
# gets the number of halos to select the CEN, compatible with the sat fraction to get the right density.
print "centrals"
expected_cdf_c = lambda x : nCen * st.norm.cdf(x, loc = meanQTY, scale = scatterQTY)
Up_c = expected_cdf_c(boundaries[1:])
Low_c = n.hstack(( 0., expected_cdf_c(boundaries[1:])[:-1] ))
N2select_c = Up_c-Low_c
# select in mass in the box
qsels_c = n.array([ (QTY_c>boundaries[ii])&(QTY_c<= boundaries[ii+1]) for ii in range(len(boundaries)-1) ])
IDhzqAll_c = n.array([ IDhz_c[qs] for qs in qsels_c ])
# random downsample to the N2select in each bin
i = 0
ids_selected_c = []
for arr in IDhzqAll_c:
random.shuffle(arr)
#print len(arr),int(N2select_c[i])
ids_selected_c.append(arr[:int(N2select_c[i])])
i+= 1
id_c = n.hstack((n.array(ids_selected_c)))
ids_selected = n.hstack((id_c,id_s ))
print len(id_c),len(id_s),len(ids_selected)
return ids_selected
def make_GaussianFsat_catalog(self, colN, means, scatters, fsats):
"""
Creates lists of ids of centrals and satellite galaxies corresponding to the density given in the n(z).
:param colN: name of the column you construct the catalog with
:param means: means of the Gaussians, array the same length of the redshift bin
:param scatters: scatters of the Gaussians, array the same length of the redshift bin
:param fsats: fractions of satellite, array the same length of the redshift bin
"""
ids = []
for ii in range(len(self.zmin)):
print "gets all halos for ",self.zmin[ii],"<z<",self.zmax[ii], "with col5 to mock ", self.nGal[ii], " galaxies."
IDhz_c,QTY_c,nn_c,bb_c = self.get_distrib_QTY_cen( colN, z1=self.zmin[ii], z2=self.zmax[ii])
IDhz_s,QTY_s,nn_s,bb_s = self.get_distrib_QTY_sat( colN, z1=self.zmin[ii], z2=self.zmax[ii])
ids.append( self.select_GaussianFsat( means[ii], scatters[ii], fsats[ii], self.nGal[ii], IDhz_c, QTY_c, IDhz_s, QTY_s ) )
self.idSel = n.hstack(( ids ))
self.NhaloMock = len((self.idSel).nonzero()[0])
def select_LogNorm(self, meanQTY, scatterQTY, nGal_perbin,IDhz, QTY, nn,bb):
"""
Creates lists of ids of centrals and satellite galaxies corresponding to the density given in the n(z) and to a log-normal distribution.
For every redshift bin, it gets the distribution of the column of interest and matches it to the density of galaxies in the given n(z).
Then it provides a column of ids extracted from the lightcone.
:param colN: name of the column you wish to work on for the sham.
:param meanQTY: mean of the distribution
:param scatterQTY: scatter of the distribution
:param nGal_perbin: total number of galaxies in this bins to mock
:param IDhz: IDs of the halos in this bin
:param QTY: array of the column to do the match on, mass, velocity, ...
"""
# constructs the QTY intervals around the distribution
expected_cdf = lambda x : st.lognorm.cdf(x, meanQTY, scatterQTY)
interval = [ meanQTY - 9 * scatterQTY , meanQTY + 9 * scatterQTY]
xs = n.arange(interval[0],interval[1],(interval[1]-interval[0])/1000.)
out = expected_cdf(xs)
expected_cdf_inv = interp1d(out,xs)
boundaries = n.hstack((expected_cdf_inv(0.01),expected_cdf_inv(n.arange(0.1,0.91,0.1)), interval[1]))
# gets the number of halos to select
expected_cdf_tot = lambda x : nGal_perbin * st.lognorm.cdf(x, meanQTY, scatterQTY)
Up = expected_cdf_tot(boundaries[1:])
Low = n.hstack(( 0., expected_cdf_tot(boundaries[1:])[:-1] ))
N2select = Up-Low
#print N2select,Up,Low
# select in mass in the box
qsels = n.array([ (QTY>boundaries[ii])&(QTY<= boundaries[ii+1]) for ii in range(len(boundaries)-1) ])
IDhzqAll = n.array([ IDhz[qs] for qs in qsels ])
# random downsample to the N2select in each bin
i = 0
ids_selected = []
for arr in IDhzqAll:
random.shuffle(arr)
ids_selected.append(arr[:int(N2select[i])])
i+= 1
ids_selected = n.hstack(( n.array(ids_selected) ))
return ids_selected
def make_LogNorm_catalog(self, colN, means, scatters):
"""
Creates lists of ids of centrals and satellite galaxies corresponding to the density given in the n(z).
:param colN: name of the column you construct the catalog with
:param means: means of the Gaussians, array the same length of the redshift bin
:param scatters: scatters of the Gaussians, array the same length of the redshift bin
"""
ids = []
for ii in range(len(self.zmin)):
print "gets all halos for ", self.zmin[ii], "<z<", self.zmax[ii], "with col5 to mock ", self.nGal[ii], " galaxies."
IDhz, QTY, nn, bb = get_distrib_QTY( hdu, colN, self.zmin[ii], self.zmax[ii] )
ids.append( self.select_LogNorm( means[ii], scatters[ii], self.nGal[ii], IDhz, QTY ) )
self.idSel = n.hstack(( ids ))
self.NhaloMock = len((self.idSel).nonzero()[0])
def create_random_catalog(self, factor = 5., dz=0.025 ):
"""Writes a random catalog"""
self.nRandom = int(self.NhaloMock * factor )
raR = n.random.uniform(n.min(self.raMock), n.max(self.raMock), self.nRandom )
decR = n.random.uniform(n.min(self.decMock), n.max(self.decMock), self.nRandom )
z1=n.arange(n.min(self.zMock)-0.1, n.max(self.zMock)+0.1, dz)
nn,bb,pp=p.hist(self.zMock, bins=z1)
nz=interp1d((z1[1:]+z1[:-1])/2.,factor*nn)
zs=n.arange(n.min(self.zMock), n.max(self.zMock), dz)
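# Random redshifts are drawn uniformly within each dz slice, with counts set by
# the mock's n(z) interpolated and scaled by `factor`, so the random catalog
# traces the same redshift distribution as the mock before the final downsample.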
rdsz=[]
for i in range(len(zs)-1):
inter=n.random.uniform(low=zs[i], high=zs[i+1], size=int(2* nz( zs[i]+dz/2. )))
rdsz.append(inter)
rds=n.hstack((rdsz))
n.random.shuffle(rds)
selRDS=(n.random.rand(len(raR))<float(self.nRandom)/len(raR))
RR=rds[:len(raR[selRDS])]
print "N final",len(raR[selRDS])
outPutFileName = join( self.mockOutput_dir, self.mockName + "_random.cat" )
n.savetxt(outPutFileName,n.transpose([raR[selRDS],decR[selRDS],RR]),fmt='%.8f %.8f %.5f')
raR,decR,RR=0,0,0
def writeClusteringParamFile(self,type,decade=""):
""" Writes the clustering commands that command the CUTE code, see Alonso et al. 2012 https://arxiv.org/abs/1210.1833
:param type: monopole or angular or ...
:param decade: string suffix that is appended if you study different scales (decades) _d1, _d2, _d3 are used for the angular clustering."""
f=open(join( self.mockOutput_dir, self.mockName +".param2PCF_"+type+decade),'a')
f.write("data_filename= | |
and word[4] != "c" and word[5] != "C" and word[5] != "c" and word[6] != "C" and word[6] != "c" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "c" + ", "
if guessChar == "D" or guessChar == "d" :
if word[1] == "D" or word[1] == "d" :
toGuess = toGuess[:1] + "d" + toGuess[2:]
if word[2] == "D" or word[2] == "d" :
toGuess = toGuess[:2] + "d" + toGuess[3:]
if word[3] == "D" or word[3] == "d" :
toGuess = toGuess[:3] + "d" + toGuess[4:]
if word[4] == "D" or word[4] == "d" :
toGuess = toGuess[:4] + "d" + toGuess[5:]
if word[5] == "D" or word[5] == "d" :
toGuess = toGuess[:5] + "d" + toGuess[6:]
if word[6] == "D" or word[6] == "d" :
toGuess = toGuess[:6] + "d" + toGuess[7:]
if word[1] != "D" and word[1] != "d" and word[2] != "D" and word[2] != "d" and word[3] != "D" and word[3] != "d" and word[4] != "D" and word[4] != "d" and word[5] != "D" and word[5] != "d" and word[6] != "D" and word[6] != "d" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "d" + ", "
if guessChar == "E" or guessChar == "e" :
if word[1] == "E" or word[1] == "e" :
toGuess = toGuess[:1] + "e" + toGuess[2:]
if word[2] == "E" or word[2] == "e" :
toGuess = toGuess[:2] + "e" + toGuess[3:]
if word[3] == "E" or word[3] == "e" :
toGuess = toGuess[:3] + "e" + toGuess[4:]
if word[4] == "E" or word[4] == "e" :
toGuess = toGuess[:4] + "e" + toGuess[5:]
if word[5] == "E" or word[5] == "e" :
toGuess = toGuess[:5] + "e" + toGuess[6:]
if word[6] == "E" or word[6] == "e" :
toGuess = toGuess[:6] + "e" + toGuess[7:]
if word[1] != "E" and word[1] != "e" and word[2] != "E" and word[2] != "e" and word[3] != "E" and word[3] != "e" and word[4] != "E" and word[4] != "e" and word[5] != "E" and word[5] != "e" and word[6] != "E" and word[6] != "e" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "e" + ", "
if guessChar == "F" or guessChar == "f" :
if word[1] == "F" or word[1] == "f" :
toGuess = toGuess[:1] + "f" + toGuess[2:]
if word[2] == "F" or word[2] == "f" :
toGuess = toGuess[:2] + "f" + toGuess[3:]
if word[3] == "F" or word[3] == "f" :
toGuess = toGuess[:3] + "f" + toGuess[4:]
if word[4] == "F" or word[4] == "f" :
toGuess = toGuess[:4] + "f" + toGuess[5:]
if word[5] == "F" or word[5] == "f" :
toGuess = toGuess[:5] + "f" + toGuess[6:]
if word[6] == "F" or word[6] == "f" :
toGuess = toGuess[:6] + "f" + toGuess[7:]
if word[1] != "F" and word[1] != "f" and word[2] != "F" and word[2] != "f" and word[3] != "F" and word[3] != "f" and word[4] != "F" and word[4] != "f" and word[5] != "F" and word[5] != "f" and word[6] != "F" and word[6] != "f" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "f" + ", "
if guessChar == "G" or guessChar == "g" :
if word[1] == "G" or word[1] == "g" :
toGuess = toGuess[:1] + "g" + toGuess[2:]
if word[2] == "G" or word[2] == "g" :
toGuess = toGuess[:2] + "g" + toGuess[3:]
if word[3] == "G" or word[3] == "g" :
toGuess = toGuess[:3] + "g" + toGuess[4:]
if word[4] == "G" or word[4] == "g" :
toGuess = toGuess[:4] + "g" + toGuess[5:]
if word[5] == "G" or word[5] == "g" :
toGuess = toGuess[:5] + "g" + toGuess[6:]
if word[6] == "G" or word[6] == "g" :
toGuess = toGuess[:6] + "g" + toGuess[7:]
if word[1] != "G" and word[1] != "g" and word[2] != "G" and word[2] != "g" and word[3] != "G" and word[3] != "g" and word[4] != "G" and word[4] != "g" and word[5] != "G" and word[5] != "g" and word[6] != "G" and word[6] != "g" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "g" + ", "
if guessChar == "H" or guessChar == "h" :
if word[1] == "H" or word[1] == "h" :
toGuess = toGuess[:1] + "h" + toGuess[2:]
if word[2] == "H" or word[2] == "h" :
toGuess = toGuess[:2] + "h" + toGuess[3:]
if word[3] == "H" or word[3] == "h" :
toGuess = toGuess[:3] + "h" + toGuess[4:]
if word[4] == "H" or word[4] == "h" :
toGuess = toGuess[:4] + "h" + toGuess[5:]
if word[5] == "H" or word[5] == "h" :
toGuess = toGuess[:5] + "h" + toGuess[6:]
if word[6] == "H" or word[6] == "h" :
toGuess = toGuess[:6] + "h" + toGuess[7:]
if word[1] != "H" and word[1] != "h" and word[2] != "H" and word[2] != "h" and word[3] != "H" and word[3] != "h" and word[4] != "H" and word[4] != "h" and word[5] != "H" and word[5] != "h" and word[6] != "H" and word[6] != "h" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "h" + ", "
if guessChar == "I" or guessChar == "i" :
if word[1] == "I" or word[1] == "i" :
toGuess = toGuess[:1] + "i" + toGuess[2:]
if word[2] == "I" or word[2] == "i" :
toGuess = toGuess[:2] + "i" + toGuess[3:]
if word[3] == "I" or word[3] == "i" :
toGuess = toGuess[:3] + "i" + toGuess[4:]
if word[4] == "I" or word[4] == "i" :
toGuess = toGuess[:4] + "i" + toGuess[5:]
if word[5] == "I" or word[5] == "i" :
toGuess = toGuess[:5] + "i" + toGuess[6:]
if word[6] == "I" or word[6] == "i" :
toGuess = toGuess[:6] + "i" + toGuess[7:]
if word[1] != "I" and word[1] != "i" and word[2] != "I" and word[2] != "i" and word[3] != "I" and word[3] != "i" and word[4] != "I" and word[4] != "i" and word[5] != "I" and word[5] != "i" and word[6] != "I" and word[6] != "i" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "i" + ", "
if guessChar == "J" or guessChar == "j" :
if word[1] == "J" or word[1] == "j" :
toGuess = toGuess[:1] + "j" + toGuess[2:]
if word[2] == "J" or word[2] == "j" :
toGuess = toGuess[:2] + "j" + toGuess[3:]
if word[3] == "J" or word[3] == "j" :
toGuess = toGuess[:3] + "j" + toGuess[4:]
if word[4] == "J" or word[4] == "j" :
toGuess = toGuess[:4] + "j" + toGuess[5:]
if word[5] == "J" or word[5] == "j" :
toGuess = toGuess[:5] + "j" + toGuess[6:]
if word[6] == "J" or word[6] == "j" :
toGuess = toGuess[:6] + "j" + toGuess[7:]
if word[1] != "J" and word[1] != "j" and word[2] != "J" and word[2] != "j" and word[3] != "J" and word[3] != "j" and word[4] != "J" and word[4] != "j" and word[5] != "J" and word[5] != "j" and word[6] != "J" and word[6] != "j" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "j" + ", "
if guessChar == "K" or guessChar == "k" :
if word[1] == "K" or word[1] == "k" :
toGuess = toGuess[:1] + "k" + toGuess[2:]
if word[2] == "K" or word[2] == "k" :
toGuess = toGuess[:2] + "k" + toGuess[3:]
if word[3] == "K" or word[3] == "k" :
toGuess = toGuess[:3] + "k" + toGuess[4:]
if word[4] == "K" or word[4] == "k" :
toGuess = toGuess[:4] + "k" + toGuess[5:]
if word[5] == "K" or word[5] == "k" :
toGuess = toGuess[:5] + "k" + toGuess[6:]
if word[6] == "K" or word[6] == "k" :
toGuess = toGuess[:6] + "k" + toGuess[7:]
if word[1] != "K" and word[1] != "k" and word[2] != "K" and word[2] != "k" and word[3] != "K" and word[3] != "k" and word[4] != "K" and word[4] != "k" and word[5] != "K" and word[5] != "k" and word[6] != "K" and word[6] != "k" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "k" + ", "
if guessChar == "L" or guessChar == "l" :
if word[1] == "L" or word[1] == "l" :
toGuess = toGuess[:1] + "l" + toGuess[2:]
if word[2] == "L" or word[2] == "l" :
toGuess = toGuess[:2] + "l" + toGuess[3:]
if word[3] == "L" or | |
id=None, name=None, uptime_secs=None, executors=None, status=None, errors=None, component_debug=None, sched_status=None, owner=None, replication_count=None, requested_memonheap=None, requested_memoffheap=None, requested_cpu=None, assigned_memonheap=None, assigned_memoffheap=None, assigned_cpu=None,):
self.id = id
self.name = name
self.uptime_secs = uptime_secs
self.executors = executors
self.status = status
self.errors = errors
self.component_debug = component_debug
self.sched_status = sched_status
self.owner = owner
self.replication_count = replication_count
self.requested_memonheap = requested_memonheap
self.requested_memoffheap = requested_memoffheap
self.requested_cpu = requested_cpu
self.assigned_memonheap = assigned_memonheap
self.assigned_memoffheap = assigned_memoffheap
self.assigned_cpu = assigned_cpu
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.uptime_secs = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.executors = []
(_etype286, _size283) = iprot.readListBegin()
for _i287 in xrange(_size283):
_elem288 = ExecutorSummary()
_elem288.read(iprot)
self.executors.append(_elem288)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.status = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.MAP:
self.errors = {}
(_ktype290, _vtype291, _size289 ) = iprot.readMapBegin()
for _i293 in xrange(_size289):
_key294 = iprot.readString().decode('utf-8')
_val295 = []
(_etype299, _size296) = iprot.readListBegin()
for _i300 in xrange(_size296):
_elem301 = ErrorInfo()
_elem301.read(iprot)
_val295.append(_elem301)
iprot.readListEnd()
self.errors[_key294] = _val295
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.MAP:
self.component_debug = {}
(_ktype303, _vtype304, _size302 ) = iprot.readMapBegin()
for _i306 in xrange(_size302):
_key307 = iprot.readString().decode('utf-8')
_val308 = DebugOptions()
_val308.read(iprot)
self.component_debug[_key307] = _val308
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 513:
if ftype == TType.STRING:
self.sched_status = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 514:
if ftype == TType.STRING:
self.owner = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 515:
if ftype == TType.I32:
self.replication_count = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 521:
if ftype == TType.DOUBLE:
self.requested_memonheap = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 522:
if ftype == TType.DOUBLE:
self.requested_memoffheap = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 523:
if ftype == TType.DOUBLE:
self.requested_cpu = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 524:
if ftype == TType.DOUBLE:
self.assigned_memonheap = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 525:
if ftype == TType.DOUBLE:
self.assigned_memoffheap = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 526:
if ftype == TType.DOUBLE:
self.assigned_cpu = iprot.readDouble()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TopologyInfo')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id.encode('utf-8'))
oprot.writeFieldEnd()
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 2)
oprot.writeString(self.name.encode('utf-8'))
oprot.writeFieldEnd()
if self.uptime_secs is not None:
oprot.writeFieldBegin('uptime_secs', TType.I32, 3)
oprot.writeI32(self.uptime_secs)
oprot.writeFieldEnd()
if self.executors is not None:
oprot.writeFieldBegin('executors', TType.LIST, 4)
oprot.writeListBegin(TType.STRUCT, len(self.executors))
for iter309 in self.executors:
iter309.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRING, 5)
oprot.writeString(self.status.encode('utf-8'))
oprot.writeFieldEnd()
if self.errors is not None:
oprot.writeFieldBegin('errors', TType.MAP, 6)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.errors))
for kiter310,viter311 in self.errors.items():
oprot.writeString(kiter310.encode('utf-8'))
oprot.writeListBegin(TType.STRUCT, len(viter311))
for iter312 in viter311:
iter312.write(oprot)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.component_debug is not None:
oprot.writeFieldBegin('component_debug', TType.MAP, 7)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.component_debug))
for kiter313,viter314 in self.component_debug.items():
oprot.writeString(kiter313.encode('utf-8'))
viter314.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.sched_status is not None:
oprot.writeFieldBegin('sched_status', TType.STRING, 513)
oprot.writeString(self.sched_status.encode('utf-8'))
oprot.writeFieldEnd()
if self.owner is not None:
oprot.writeFieldBegin('owner', TType.STRING, 514)
oprot.writeString(self.owner.encode('utf-8'))
oprot.writeFieldEnd()
if self.replication_count is not None:
oprot.writeFieldBegin('replication_count', TType.I32, 515)
oprot.writeI32(self.replication_count)
oprot.writeFieldEnd()
if self.requested_memonheap is not None:
oprot.writeFieldBegin('requested_memonheap', TType.DOUBLE, 521)
oprot.writeDouble(self.requested_memonheap)
oprot.writeFieldEnd()
if self.requested_memoffheap is not None:
oprot.writeFieldBegin('requested_memoffheap', TType.DOUBLE, 522)
oprot.writeDouble(self.requested_memoffheap)
oprot.writeFieldEnd()
if self.requested_cpu is not None:
oprot.writeFieldBegin('requested_cpu', TType.DOUBLE, 523)
oprot.writeDouble(self.requested_cpu)
oprot.writeFieldEnd()
if self.assigned_memonheap is not None:
oprot.writeFieldBegin('assigned_memonheap', TType.DOUBLE, 524)
oprot.writeDouble(self.assigned_memonheap)
oprot.writeFieldEnd()
if self.assigned_memoffheap is not None:
oprot.writeFieldBegin('assigned_memoffheap', TType.DOUBLE, 525)
oprot.writeDouble(self.assigned_memoffheap)
oprot.writeFieldEnd()
if self.assigned_cpu is not None:
oprot.writeFieldBegin('assigned_cpu', TType.DOUBLE, 526)
oprot.writeDouble(self.assigned_cpu)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.id is None:
raise TProtocol.TProtocolException(message='Required field id is unset!')
if self.name is None:
raise TProtocol.TProtocolException(message='Required field name is unset!')
if self.uptime_secs is None:
raise TProtocol.TProtocolException(message='Required field uptime_secs is unset!')
if self.executors is None:
raise TProtocol.TProtocolException(message='Required field executors is unset!')
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
if self.errors is None:
raise TProtocol.TProtocolException(message='Required field errors is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.id)
value = (value * 31) ^ hash(self.name)
value = (value * 31) ^ hash(self.uptime_secs)
value = (value * 31) ^ hash(self.executors)
value = (value * 31) ^ hash(self.status)
value = (value * 31) ^ hash(self.errors)
value = (value * 31) ^ hash(self.component_debug)
value = (value * 31) ^ hash(self.sched_status)
value = (value * 31) ^ hash(self.owner)
value = (value * 31) ^ hash(self.replication_count)
value = (value * 31) ^ hash(self.requested_memonheap)
value = (value * 31) ^ hash(self.requested_memoffheap)
value = (value * 31) ^ hash(self.requested_cpu)
value = (value * 31) ^ hash(self.assigned_memonheap)
value = (value * 31) ^ hash(self.assigned_memoffheap)
value = (value * 31) ^ hash(self.assigned_cpu)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
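# Illustrative round-trip of the generated struct (hypothetical values; assumes
# the standard Thrift Python runtime already imported at the top of this file):
#
#   buf = TTransport.TMemoryBuffer()
#   info = TopologyInfo(id=u'topo-1', name=u'wordcount', uptime_secs=42,
#                       executors=[], status=u'ACTIVE', errors={})
#   info.write(TBinaryProtocol.TBinaryProtocol(buf))
#   decoded = TopologyInfo()
#   decoded.read(TBinaryProtocol.TBinaryProtocol(
#       TTransport.TMemoryBuffer(buf.getvalue())))
#   decoded.validate()  # passes: all required fields were set above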
class CommonAggregateStats:
"""
Attributes:
- num_executors
- num_tasks
- emitted
- transferred
- acked
- failed
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'num_executors', None, None, ), # 1
(2, TType.I32, 'num_tasks', None, None, ), # 2
(3, TType.I64, 'emitted', None, None, ), # 3
(4, TType.I64, 'transferred', None, None, ), # 4
(5, TType.I64, 'acked', None, None, ), # 5
(6, TType.I64, 'failed', None, None, ), # 6
)
def __init__(self, num_executors=None, num_tasks=None, emitted=None, transferred=None, acked=None, failed=None,):
self.num_executors = num_executors
self.num_tasks = num_tasks
self.emitted = emitted
self.transferred = transferred
self.acked = acked
self.failed = failed
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.num_executors = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.num_tasks = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.emitted = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.transferred = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.acked = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I64:
self.failed = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('CommonAggregateStats')
if self.num_executors is not None:
oprot.writeFieldBegin('num_executors', TType.I32, 1)
oprot.writeI32(self.num_executors)
oprot.writeFieldEnd()
if self.num_tasks is not None:
oprot.writeFieldBegin('num_tasks', TType.I32, 2)
oprot.writeI32(self.num_tasks)
oprot.writeFieldEnd()
if self.emitted is not None:
oprot.writeFieldBegin('emitted', TType.I64, 3)
oprot.writeI64(self.emitted)
oprot.writeFieldEnd()
if self.transferred is not None:
oprot.writeFieldBegin('transferred', TType.I64, 4)
oprot.writeI64(self.transferred)
oprot.writeFieldEnd()
if self.acked is not None:
oprot.writeFieldBegin('acked', TType.I64, 5)
oprot.writeI64(self.acked)
oprot.writeFieldEnd()
if self.failed is not None:
oprot.writeFieldBegin('failed', TType.I64, 6)
oprot.writeI64(self.failed)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.num_executors)
value = (value * 31) ^ hash(self.num_tasks)
value = (value * 31) ^ hash(self.emitted)
value = (value * 31) ^ hash(self.transferred)
value = (value * 31) ^ hash(self.acked)
value = (value * 31) ^ hash(self.failed)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class SpoutAggregateStats:
"""
Attributes:
- complete_latency_ms
"""
thrift_spec = (
None, # 0
(1, TType.DOUBLE, 'complete_latency_ms', None, None, ), # 1
)
def __init__(self, complete_latency_ms=None,):
self.complete_latency_ms = complete_latency_ms
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype ==
API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
KMIP = None
"""
Key Management Interoperability Protocol (KMIP) based key management
server. This class attribute was added in vSphere API 7.0.0.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`Type` instance.
"""
Enum.__init__(string)
Type._set_values([
Type('KMIP'),
])
Type._set_binding_type(type.EnumType(
'com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.providers.key_server_create_spec.type',
Type))
KeyServerCreateSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.providers.key_server_create_spec', {
'type': type.ReferenceType(__name__, 'Providers.KeyServerCreateSpec.Type'),
'description': type.OptionalType(type.StringType()),
'proxy_server': type.OptionalType(type.ReferenceType('com.vmware.vcenter.trusted_infrastructure_client', 'NetworkAddress')),
'connection_timeout': type.OptionalType(type.IntegerType()),
'kmip_server': type.OptionalType(type.ReferenceType(__name__, 'Providers.KmipServerCreateSpec')),
},
KeyServerCreateSpec,
False,
None))
class CreateSpec(VapiStruct):
"""
The ``Providers.CreateSpec`` class contains attributes that describe the
desired configuration for a new Key Provider. This class was added in
vSphere API 7.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
provider=None,
master_key_id=None,
key_server=None,
):
"""
:type provider: :class:`str`
:param provider: Name of the provider.
A unique string chosen by the client.. This attribute was added in
vSphere API 7.0.0.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.Provider``.
When methods return a value of this class as a return value, the
attribute will be an identifier for the resource type:
``com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.Provider``.
:type master_key_id: :class:`str`
:param master_key_id: Master key ID created for the provider.
A unique Key ID.. This attribute was added in vSphere API 7.0.0.
:type key_server: :class:`Providers.KeyServerCreateSpec`
:param key_server: Key server associated with this Provider. This attribute was added
in vSphere API 7.0.0.
"""
self.provider = provider
self.master_key_id = master_key_id
self.key_server = key_server
VapiStruct.__init__(self)
CreateSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.providers.create_spec', {
'provider': type.IdType(resource_types='com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.Provider'),
'master_key_id': type.StringType(),
'key_server': type.ReferenceType(__name__, 'Providers.KeyServerCreateSpec'),
},
CreateSpec,
False,
None))
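# Illustrative construction of a create spec (hypothetical values; the
# KmipServerCreateSpec arguments are elided because they are defined earlier
# in this module):
#
#   spec = Providers.CreateSpec(
#       provider='my-kmip-provider',
#       master_key_id='master-key-1',
#       key_server=Providers.KeyServerCreateSpec(
#           type=Providers.KeyServerCreateSpec.Type.KMIP,
#           kmip_server=Providers.KmipServerCreateSpec(...)))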
class KmipServerUpdateSpec(VapiStruct):
"""
The ``Providers.KmipServerUpdateSpec`` class contains attributes that
describe new configuration for KMIP based key server. This class was added
in vSphere API 7.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
servers=None,
username=None,
):
"""
:type servers: :class:`list` of :class:`Providers.Server` or ``None``
:param servers: List of KMIP compliant key servers.
Key servers must be configured for active-active replication. If
the server port is None, a default value for KMIP's port will be
used.
. This attribute was added in vSphere API 7.0.0.
If None, server configuration will remain unchanged.
:type username: :class:`str` or ``None``
:param username: Username for authentication.
. This attribute was added in vSphere API 7.0.0.
If None, username will remain unchanged.
"""
self.servers = servers
self.username = username
VapiStruct.__init__(self)
KmipServerUpdateSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.providers.kmip_server_update_spec', {
'servers': type.OptionalType(type.ListType(type.ReferenceType(__name__, 'Providers.Server'))),
'username': type.OptionalType(type.StringType()),
},
KmipServerUpdateSpec,
False,
None))
class KeyServerUpdateSpec(VapiStruct):
"""
The ``Providers.KeyServerUpdateSpec`` class contains attributes that
describe new configuration for an existing key server. This class was added
in vSphere API 7.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_validator_list = [
UnionValidator(
'type',
{
'KMIP' : [('kmip_server', False)],
}
),
]
def __init__(self,
type=None,
description=None,
proxy_server=None,
connection_timeout=None,
kmip_server=None,
):
"""
:type type: :class:`Providers.KeyServerUpdateSpec.Type` or ``None``
:param type: Type of the key server.
. This attribute was added in vSphere API 7.0.0.
If None, key server type and configuration information will remain
unchanged. In this case all key server configuration information
fields (e.g KMIP) should be unset.
:type description: :class:`str` or ``None``
:param description: Description of the key server.
. This attribute was added in vSphere API 7.0.0.
If None, description will remain unchanged.
:type proxy_server: :class:`com.vmware.vcenter.trusted_infrastructure_client.NetworkAddress` or ``None``
:param proxy_server: Proxy server configuration.
. This attribute was added in vSphere API 7.0.0.
If None, proxy server configuration will remain unchanged.
:type connection_timeout: :class:`long` or ``None``
:param connection_timeout: Connection timeout in seconds.
. This attribute was added in vSphere API 7.0.0.
If None, connection timeout will remain unchanged.
:type kmip_server: :class:`Providers.KmipServerUpdateSpec` or ``None``
:param kmip_server: Configuration information for KMIP based key server.
. This attribute was added in vSphere API 7.0.0.
If None, kmip server configuration will remain unchanged.
"""
self.type = type
self.description = description
self.proxy_server = proxy_server
self.connection_timeout = connection_timeout
self.kmip_server = kmip_server
VapiStruct.__init__(self)
class Type(Enum):
"""
The ``Providers.KeyServerUpdateSpec.Type`` class list the key server types.
This enumeration was added in vSphere API 7.0.0.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
KMIP = None
"""
Key Management Interoperability Protocol (KMIP) based key management
server. This class attribute was added in vSphere API 7.0.0.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`Type` instance.
"""
Enum.__init__(string)
Type._set_values([
Type('KMIP'),
])
Type._set_binding_type(type.EnumType(
'com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.providers.key_server_update_spec.type',
Type))
KeyServerUpdateSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.providers.key_server_update_spec', {
'type': type.OptionalType(type.ReferenceType(__name__, 'Providers.KeyServerUpdateSpec.Type')),
'description': type.OptionalType(type.StringType()),
'proxy_server': type.OptionalType(type.ReferenceType('com.vmware.vcenter.trusted_infrastructure_client', 'NetworkAddress')),
'connection_timeout': type.OptionalType(type.IntegerType()),
'kmip_server': type.OptionalType(type.ReferenceType(__name__, 'Providers.KmipServerUpdateSpec')),
},
KeyServerUpdateSpec,
False,
None))
class UpdateSpec(VapiStruct):
"""
The ``Providers.UpdateSpec`` class contains attributes that describe the
new configuration for an existing provider. This class was added in vSphere
API 7.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
master_key_id=None,
key_server=None,
):
"""
:type master_key_id: :class:`str` or ``None``
:param master_key_id: Master key identifier created for the provider.
A unique Key identifier.
. This attribute was added in vSphere API 7.0.0.
If None, masterKeyId will remain unchanged.
:type key_server: :class:`Providers.KeyServerUpdateSpec` or ``None``
:param key_server: Key server associated with this provider.
. This attribute was added in vSphere API 7.0.0.
If None, key server configuration will remain unchanged.
"""
self.master_key_id = master_key_id
self.key_server = key_server
VapiStruct.__init__(self)
UpdateSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.providers.update_spec', {
'master_key_id': type.OptionalType(type.StringType()),
'key_server': type.OptionalType(type.ReferenceType(__name__, 'Providers.KeyServerUpdateSpec')),
},
UpdateSpec,
False,
None))
class Summary(VapiStruct):
"""
The ``Providers.Summary`` class contains attributes that summarize a
provider. This class was added in vSphere API 7.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
provider=None,
health=None,
):
"""
:type provider: :class:`str`
:param provider: Name of the provider.
A unique string chosen by the client.. This attribute was added in
vSphere API 7.0.0.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.Provider``.
When methods return a value of this class as a return value, the
attribute will be an identifier for the resource type:
``com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.Provider``.
:type health: :class:`Providers.Health`
:param health: Health of the provider in the cluster. This attribute was added in
vSphere API 7.0.0.
"""
self.provider = provider
self.health = health
VapiStruct.__init__(self)
Summary._set_binding_type(type.StructType(
'com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.providers.summary', {
'provider': type.IdType(resource_types='com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.Provider'),
'health': type.ReferenceType(__name__, 'Providers.Health'),
},
Summary,
False,
None))
class KmipServerInfo(VapiStruct):
"""
The ``Providers.KmipServerInfo`` class contains attributes that describe
the current configuration of a KMIP based key server. This class was added
in vSphere API 7.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
servers=None,
username=None,
):
"""
:type servers: :class:`list` of :class:`Providers.Server`
:param servers: List of KMIP compliant key servers. This attribute was added in
vSphere API 7.0.0.
:type username: :class:`str` or ``None``
:param username: Username for authentication.
. This attribute was added in vSphere API 7.0.0.
If None, username will not be set.
"""
self.servers = servers
self.username = username
VapiStruct.__init__(self)
KmipServerInfo._set_binding_type(type.StructType(
'com.vmware.vcenter.trusted_infrastructure.trust_authority_clusters.kms.providers.kmip_server_info', {
'servers': type.ListType(type.ReferenceType(__name__, 'Providers.Server')),
'username': type.OptionalType(type.StringType()),
},
KmipServerInfo,
False,
None))
class KeyServerInfo(VapiStruct):
"""
The ``Providers.KeyServerInfo`` class contains attributes that describe the
current configuration of a key server. This class was added in vSphere API
7.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_validator_list = [
UnionValidator(
'type',
{
'KMIP' : [('kmip_server', True)],
}
),
]
def
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import sys
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
from scipy.optimize import fmin_ncg
import os.path
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops import array_ops
from keras import backend as K
from tensorflow.contrib.learn.python.learn.datasets import base
from .hessians import hessian_vector_product
from .dataset import DataSet
def variable(name, shape, initializer):
dtype = tf.float32
var = tf.get_variable(
name,
shape,
initializer=initializer,
dtype=dtype)
return var
def variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float32
var = variable(
name,
shape,
initializer=tf.truncated_normal_initializer(
stddev=stddev,
dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def normalize_vector(v):
"""
Takes in a vector in list form, concatenates it to form a single vector,
normalizes it to unit length, then returns it in list form together with its norm.
"""
norm_val = np.linalg.norm(np.concatenate(v))
norm_v = [a/norm_val for a in v]
return norm_v, norm_val
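# Example (illustrative, not part of the original file): for a parameter list
# of two arrays [array([3., 0., 0.]), array([0., 4.])] the joint L2 norm is 5,
# so normalize_vector returns ([array([0.6, 0., 0.]), array([0., 0.8])], 5.0).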
class GenericNeuralNet(object):
"""
Multi-class classification.
"""
def __init__(self, **kwargs):
np.random.seed(0)
tf.set_random_seed(0)
self.batch_size = kwargs.pop('batch_size')
self.data_sets = kwargs.pop('data_sets')
self.train_dir = kwargs.pop('train_dir', 'output')
log_dir = kwargs.pop('log_dir', 'log')
self.model_name = kwargs.pop('model_name')
self.num_classes = kwargs.pop('num_classes')
self.initial_learning_rate = kwargs.pop('initial_learning_rate')
self.decay_epochs = kwargs.pop('decay_epochs')
if 'keep_probs' in kwargs: self.keep_probs = kwargs.pop('keep_probs')
else: self.keep_probs = None
if 'mini_batch' in kwargs: self.mini_batch = kwargs.pop('mini_batch')
else: self.mini_batch = True
if 'damping' in kwargs: self.damping = kwargs.pop('damping')
else: self.damping = 0.0
if not os.path.exists(self.train_dir):
os.makedirs(self.train_dir)
# Initialize session
config = tf.ConfigProto()
self.sess = tf.Session(config=config)
K.set_session(self.sess)
# Setup input
self.input_placeholder, self.labels_placeholder = self.placeholder_inputs()
self.num_train_examples = self.data_sets.train.labels.shape[0]
self.num_test_examples = self.data_sets.test.labels.shape[0]
# Setup inference and training
if self.keep_probs is not None:
self.keep_probs_placeholder = tf.placeholder(tf.float32, shape=(2))
self.logits = self.inference(self.input_placeholder, self.keep_probs_placeholder)
elif hasattr(self, 'inference_needs_labels'):
self.logits = self.inference(self.input_placeholder, self.labels_placeholder)
else:
self.logits = self.inference(self.input_placeholder)
self.total_loss, self.loss_no_reg, self.indiv_loss_no_reg = self.loss(
self.logits,
self.labels_placeholder)
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.learning_rate = tf.Variable(self.initial_learning_rate, name='learning_rate', trainable=False)
self.learning_rate_placeholder = tf.placeholder(tf.float32)
self.update_learning_rate_op = tf.assign(self.learning_rate, self.learning_rate_placeholder)
self.train_op = self.get_train_op(self.total_loss, self.global_step, self.learning_rate)
self.train_sgd_op = self.get_train_sgd_op(self.total_loss, self.global_step, self.learning_rate)
self.accuracy_op = self.get_accuracy_op(self.logits, self.labels_placeholder)
self.preds = self.predictions(self.logits)
# Setup misc
self.saver = tf.train.Saver()
# Setup gradients and Hessians
self.params = self.get_all_params()
self.grad_total_loss_op = tf.gradients(self.total_loss, self.params)
self.grad_loss_no_reg_op = tf.gradients(self.loss_no_reg, self.params)
self.v_placeholder = [tf.placeholder(tf.float32, shape=a.get_shape()) for a in self.params]
self.u_placeholder = [tf.placeholder(tf.float32, shape=a.get_shape()) for a in self.params]
self.hessian_vector = hessian_vector_product(self.total_loss, self.params, self.v_placeholder)
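# hessian_vector_product builds H*v with two gradient passes instead of
# materializing the full Hessian, which keeps the implicit Hessian-vector
# products used below tractable for large parameter counts.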
self.grad_loss_wrt_input_op = tf.gradients(self.total_loss, self.input_placeholder)
# Because tf.gradients auto accumulates, we probably don't need the add_n (or even reduce_sum)
self.influence_op = tf.add_n(
[tf.reduce_sum(tf.multiply(a, array_ops.stop_gradient(b))) for a, b in zip(self.grad_total_loss_op, self.v_placeholder)])
self.grad_influence_wrt_input_op = tf.gradients(self.influence_op, self.input_placeholder)
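# influence_op is the inner product between the total-loss gradient and the
# vector fed into v_placeholder (typically an inverse-HVP), and
# grad_influence_wrt_input_op differentiates that scalar w.r.t. the inputs.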
self.checkpoint_file = os.path.join(self.train_dir, "%s-checkpoint" % self.model_name)
self.all_train_feed_dict = self.fill_feed_dict_with_all_ex(self.data_sets.train)
self.all_test_feed_dict = self.fill_feed_dict_with_all_ex(self.data_sets.test)
init = tf.global_variables_initializer()
self.sess.run(init)
self.vec_to_list = self.get_vec_to_list_fn()
self.adversarial_loss, self.indiv_adversarial_loss = self.adversarial_loss(self.logits, self.labels_placeholder)
if self.adversarial_loss is not None:
self.grad_adversarial_loss_op = tf.gradients(self.adversarial_loss, self.params)
def get_vec_to_list_fn(self):
params_val = self.sess.run(self.params)
self.num_params = len(np.concatenate(params_val))
print('Total number of parameters: %s' % self.num_params)
def vec_to_list(v):
return_list = []
cur_pos = 0
for p in params_val:
return_list.append(v[cur_pos : cur_pos+len(p)])
cur_pos += len(p)
assert cur_pos == len(v)
return return_list
return vec_to_list
def reset_datasets(self):
for data_set in self.data_sets:
if data_set is not None:
data_set.reset_batch()
def fill_feed_dict_with_all_ex(self, data_set):
feed_dict = {
self.input_placeholder: data_set.x,
self.labels_placeholder: data_set.labels
}
return feed_dict
def fill_feed_dict_with_all_but_one_ex(self, data_set, idx_to_remove):
num_examples = data_set.x.shape[0]
idx = np.array([True] * num_examples, dtype=bool)
idx[idx_to_remove] = False
feed_dict = {
self.input_placeholder: data_set.x[idx, :],
self.labels_placeholder: data_set.labels[idx]
}
return feed_dict
def fill_feed_dict_with_batch(self, data_set, batch_size=0):
if batch_size is None:
return self.fill_feed_dict_with_all_ex(data_set)
elif batch_size == 0:
batch_size = self.batch_size
input_feed, labels_feed = data_set.next_batch(batch_size)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
}
return feed_dict
def fill_feed_dict_with_some_ex(self, data_set, target_indices):
input_feed = data_set.x[target_indices, :].reshape(len(target_indices), -1)
labels_feed = data_set.labels[target_indices].reshape(-1)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
}
return feed_dict
def fill_feed_dict_with_one_ex(self, data_set, target_idx):
input_feed = data_set.x[target_idx, :].reshape(1, -1)
labels_feed = data_set.labels[target_idx].reshape(-1)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
}
return feed_dict
def fill_feed_dict_manual(self, X, Y):
X = np.array(X)
Y = np.array(Y)
input_feed = X.reshape(len(Y), -1)
labels_feed = Y.reshape(-1)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
}
return feed_dict
def minibatch_mean_eval(self, ops, data_set):
num_examples = data_set.num_examples
assert num_examples % self.batch_size == 0
num_iter = int(num_examples / self.batch_size)
self.reset_datasets()
ret = []
for i in xrange(num_iter):
feed_dict = self.fill_feed_dict_with_batch(data_set)
ret_temp = self.sess.run(ops, feed_dict=feed_dict)
if len(ret)==0:
for b in ret_temp:
if isinstance(b, list):
ret.append([c / float(num_iter) for c in b])
else:
ret.append([b / float(num_iter)])
else:
for counter, b in enumerate(ret_temp):
if isinstance(b, list):
ret[counter] = [a + (c / float(num_iter)) for (a, c) in zip(ret[counter], b)]
else:
ret[counter] += (b / float(num_iter))
return ret
def print_model_eval(self):
params_val = self.sess.run(self.params)
if self.mini_batch == True:
grad_loss_val, loss_no_reg_val, loss_val, train_acc_val = self.minibatch_mean_eval(
[self.grad_total_loss_op, self.loss_no_reg, self.total_loss, self.accuracy_op],
self.data_sets.train)
test_loss_val, test_acc_val = self.minibatch_mean_eval(
[self.loss_no_reg, self.accuracy_op],
self.data_sets.test)
else:
grad_loss_val, loss_no_reg_val, loss_val, train_acc_val = self.sess.run(
[self.grad_total_loss_op, self.loss_no_reg, self.total_loss, self.accuracy_op],
feed_dict=self.all_train_feed_dict)
test_loss_val, test_acc_val = self.sess.run(
[self.loss_no_reg, self.accuracy_op],
feed_dict=self.all_test_feed_dict)
print('Train loss (w reg) on all data: %s' % loss_val)
print('Train loss (w/o reg) on all data: %s' % loss_no_reg_val)
print('Test loss (w/o reg) on all data: %s' % test_loss_val)
print('Train acc on all data: %s' % train_acc_val)
print('Test acc on all data: %s' % test_acc_val)
print('Norm of the mean of gradients: %s' % np.linalg.norm(np.concatenate(grad_loss_val)))
print('Norm of the params: %s' % np.linalg.norm(np.concatenate(params_val)))
def retrain(self, num_steps, feed_dict):
for step in xrange(num_steps):
self.sess.run(self.train_op, feed_dict=feed_dict)
def update_learning_rate(self, step):
assert self.num_train_examples % self.batch_size == 0
num_steps_in_epoch = self.num_train_examples / self.batch_size
epoch = step // num_steps_in_epoch
multiplier = 1
if epoch < self.decay_epochs[0]:
multiplier = 1
elif epoch < self.decay_epochs[1]:
multiplier = 0.1
else:
multiplier = 0.01
self.sess.run(
self.update_learning_rate_op,
feed_dict={self.learning_rate_placeholder: multiplier * self.initial_learning_rate})
def train(self, num_steps,
iter_to_switch_to_batch=20000,
iter_to_switch_to_sgd=40000,
save_checkpoints=True, verbose=True):
"""
Trains a model for a specified number of steps.
"""
if verbose: print('Training for %s steps' % num_steps)
sess = self.sess
for step in xrange(num_steps):
self.update_learning_rate(step)
start_time = time.time()
if step < iter_to_switch_to_batch:
feed_dict = self.fill_feed_dict_with_batch(self.data_sets.train)
_, loss_val = sess.run([self.train_op, self.total_loss], feed_dict=feed_dict)
elif step < iter_to_switch_to_sgd:
feed_dict = self.all_train_feed_dict
_, loss_val = sess.run([self.train_op, self.total_loss], feed_dict=feed_dict)
else:
feed_dict = self.all_train_feed_dict
_, loss_val = sess.run([self.train_sgd_op, self.total_loss], feed_dict=feed_dict)
duration = time.time() - start_time
if verbose:
if step % 1000 == 0:
# Print status to stdout.
print('Step %d: loss = %.8f (%.3f sec)' % (step, loss_val, duration))
# Save a checkpoint and evaluate the model periodically.
if (step + 1) % 100000 == 0 or (step + 1) == num_steps:
if save_checkpoints: self.saver.save(sess, self.checkpoint_file, global_step=step)
if verbose: self.print_model_eval()
def load_checkpoint(self, iter_to_load, do_checks=True):
checkpoint_to_load = "%s-%s" % (self.checkpoint_file, iter_to_load)
self.saver.restore(self.sess, checkpoint_to_load)
if do_checks:
print('Model %s loaded. Sanity checks ---' % checkpoint_to_load)
self.print_model_eval()
def get_train_op(self, total_loss, global_step, learning_rate):
"""
Return train_op
"""
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(total_loss, global_step=global_step)
return train_op
def get_train_sgd_op(self, total_loss, global_step, learning_rate=0.001):
"""
Return train_sgd_op
"""
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(total_loss, global_step=global_step)
return train_op
def get_accuracy_op(self, logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size], with values in the
range [0, NUM_CLASSES).
Returns:
A scalar tensor with the fraction of examples in the batch that were
predicted correctly (top-1 accuracy).
"""
correct = tf.nn.in_top_k(logits, labels, 1)
return tf.reduce_sum(tf.cast(correct, tf.int32)) / tf.shape(labels)[0]
def loss(self, logits, labels):
labels = tf.one_hot(labels, depth=self.num_classes)
# correct_prob = tf.reduce_sum(tf.multiply(labels, tf.nn.softmax(logits)), reduction_indices=1)
cross_entropy = - tf.reduce_sum(tf.multiply(labels, tf.nn.log_softmax(logits)), reduction_indices=1)
indiv_loss_no_reg = cross_entropy
loss_no_reg = tf.reduce_mean(cross_entropy, name='xentropy_mean')
tf.add_to_collection('losses', loss_no_reg)
total_loss | |
### QMIX algorithm
# paper: QMIX: Monotonic Value Function Factorisation for Deep Multi-Agent Reinforcement Learning
# reference: https://github.com/AI4Finance-Foundation/ElegantRL/blob/e980158e89cdc3c80be9c0770790a84dc6db8efd/elegantrl/agents/AgentQMix.py
from numpy.core.function_base import _logspace_dispatcher
from pettingzoo.butterfly import cooperative_pong_v3 # cannot use ram
from pettingzoo.atari import entombed_cooperative_v2
import numpy as np
from common.wrappers import Dict2TupleWrapper
import supersuit
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical
import random
from os import path
import pickle
import argparse
GPU = True
device_idx = 0
if GPU:
device = torch.device("cuda:" + str(device_idx) if torch.cuda.is_available() else "cpu")
else:
device = torch.device("cpu")
print(device)
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=False)
args = parser.parse_args()
def wrap_env(env, obs_type='ram'):
env = env.parallel_env(obs_type=obs_type)
env_agents = env.unwrapped.agents
if obs_type == 'rgb_image':
env = supersuit.max_observation_v0(env, 2) # as per openai baselines' MaxAndSkip wrapper, maxes over the last 2 frames to deal with frame flickering
env = supersuit.sticky_actions_v0(env, repeat_action_probability=0.25) # repeat_action_probability is set to 0.25 to introduce non-determinism to the system
env = supersuit.frame_skip_v0(env, 4) # skip frames for faster processing and less control; to be compatible with gym, use frame_skip(env, (2,5))
env = supersuit.resize_v0(env, 84, 84) # downscale observation for faster processing
env = supersuit.frame_stack_v1(env, 4) # allow agent to see everything on the screen despite Atari's flickering screen problem
else:
env = supersuit.frame_skip_v0(env, 4) # RAM version also need frame skip, essential for boxing-v1, etc
# normalize the observation of Atari for both image or RAM
env = supersuit.dtype_v0(env, 'float32') # need to transform uint8 to float first for normalizing observation: https://github.com/PettingZoo-Team/SuperSuit
env = supersuit.normalize_obs_v0(env, env_min=0, env_max=1) # normalize the observation to (0,1)
env.observation_space = list(env.observation_spaces.values())[0]
env.action_space = list(env.action_spaces.values())[0]
env.agents = env_agents
env = Dict2TupleWrapper(env)
return env
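# --- Hedged usage sketch (added illustration, not from the original source) ---
# wrap_env() above expects the pettingzoo *module* object (it calls
# .parallel_env() itself); obs_type='rgb_image' enables the max/skip/resize/
# stack pipeline, while 'ram' only frame-skips and normalizes. Defined but not
# called here, since constructing the Atari env requires the ROMs to be installed.
def _wrap_env_demo():
    demo_env = wrap_env(entombed_cooperative_v2, obs_type='rgb_image')
    return demo_env.observation_space, demo_env.action_space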
class ReplayBufferGRU:
"""
Replay buffer for agent with GRU network additionally storing previous action,
initial input hidden state and output hidden state of GRU.
And each sample contains the whole episode instead of a single step.
'hidden_in' and 'hidden_out' are only the initial hidden state for each episode, for GRU initialization.
"""
def __init__(self, capacity):
self.capacity = capacity
self.buffer = []
self.position = 0
def push(self, hidden_in, hidden_out, state, action, last_action, reward, next_state):
if len(self.buffer) < self.capacity:
self.buffer.append(None)
self.buffer[self.position] = (
hidden_in, hidden_out, state, action, last_action, reward, next_state)
self.position = int((self.position + 1) %
self.capacity) # as a ring buffer
def sample(self, batch_size):
s_lst, a_lst, la_lst, r_lst, ns_lst, hi_lst, ho_lst = [], [], [], [], [], [], []
batch = random.sample(self.buffer, batch_size)
min_seq_len = float('inf')
for sample in batch:
h_in, h_out, state, action, last_action, reward, next_state = sample
min_seq_len = min(len(state), min_seq_len)
hi_lst.append(h_in) # h_in: (1, batch_size=1, n_agents, hidden_size)
ho_lst.append(h_out)
hi_lst = torch.cat(hi_lst, dim=-3).detach() # cat along the batch dim
ho_lst = torch.cat(ho_lst, dim=-3).detach()
# strip sequence length
for sample in batch:
h_in, h_out, state, action, last_action, reward, next_state = sample
sample_len = len(state)
start_idx = int((sample_len - min_seq_len)/2)
end_idx = start_idx+min_seq_len
s_lst.append(state[start_idx:end_idx])
a_lst.append(action[start_idx:end_idx])
la_lst.append(last_action[start_idx:end_idx])
r_lst.append(reward[start_idx:end_idx])
ns_lst.append(next_state[start_idx:end_idx])
# print("s_lst.shape: {}".format(np.array(s_lst).shape))
# print("a_lst.shape: {}".format(np.array(a_lst).shape))
# print("la_lst.shape: {}".format(np.array(la_lst).shape))
# print("r_lst.shape: {}".format(np.array(r_lst).shape))
# print("ns_lst.shape: {}".format(np.array(ns_lst).shape))
return hi_lst, ho_lst, s_lst, a_lst, la_lst, r_lst, ns_lst
def __len__(
self): # cannot work in multiprocessing case, len(replay_buffer) is not available in proxy of manager!
return len(self.buffer)
def get_length(self):
return len(self.buffer)
def dump_buffer(self):
# Saving the objects (note: self.save2file is not set in __init__ and must be assigned externally before calling dump_buffer):
with open(self.save2file, 'wb') as f: # Python 3: open(..., 'wb')
pickle.dump([self.buffer, self.position], f)
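# --- Hedged usage sketch (added illustration, not from the original source) ---
# Shows the data layout ReplayBufferGRU expects: hidden states are
# (1, batch=1, n_agents, hidden_dim) tensors, while state/action/reward/... are
# per-time-step lists; sample() concatenates the hidden states along the batch
# dim and center-crops every episode to the shortest episode length in the batch.
def _replay_buffer_demo(n_agents=2, hidden_dim=4):
    buf = ReplayBufferGRU(capacity=10)
    for ep_len in (5, 7):
        h0 = torch.zeros(1, 1, n_agents, hidden_dim)
        h1 = torch.zeros(1, 1, n_agents, hidden_dim)
        states = [np.zeros((n_agents, 3)) for _ in range(ep_len)]
        actions = [np.zeros((n_agents, 1), dtype=np.int64) for _ in range(ep_len)]
        rewards = [0.0] * ep_len
        buf.push(h0, h1, states, actions, actions, rewards, states)
    hi, ho, s, a, la, r, ns = buf.sample(batch_size=2)
    assert hi.shape == (1, 2, n_agents, hidden_dim)  # hidden states batched along dim -3
    assert len(s[0]) == len(s[1]) == 5               # episodes cropped to the shortest length
    return hi.shape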
class RNNAgent(nn.Module):
'''
@brief:
evaluate Q value given a state and the action
'''
def __init__(self, num_inputs, action_shape, num_actions, hidden_size):
super(RNNAgent, self).__init__()
self.num_inputs = num_inputs
self.action_shape = action_shape
self.num_actions = num_actions
self.linear1 = nn.Linear(num_inputs+action_shape*num_actions, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.rnn = nn.GRU(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, hidden_size)
self.linear4 = nn.Linear(hidden_size, action_shape*num_actions)
def forward(self, state, action, hidden_in):
'''
@params:
state: [#batch, #sequence, #agent, #n_feature]
action: [#batch, #sequence, #agent, action_shape]
@return:
qs: [#batch, #sequence, #agent, action_shape, num_actions]
'''
# to [#sequence, #batch, #agent, #n_feature]
bs, seq_len, n_agents, _= state.shape
state = state.permute(1, 0, 2, 3)
action = action.permute(1, 0, 2, 3)
action = F.one_hot(action, num_classes=self.num_actions)
action = action.view(seq_len, bs, n_agents, -1) # [#sequence, #batch, #agent, action_shape*num_actions]
x = torch.cat([state, action], -1) # the dim 0 is number of samples
x = x.view(seq_len, bs*n_agents, -1) # change x to [#sequence, #batch*#agent, -1] to meet rnn's input requirement
hidden_in = hidden_in.view(1, bs*n_agents, -1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x, hidden = self.rnn(x, hidden_in)
x = F.relu(self.linear3(x))
x = self.linear4(x) # [#sequence, #batch, #agents, #action_shape*#actions]
# [#sequence, #batch, #agent, #head * #action]
x = x.view(seq_len, bs, n_agents, self.action_shape, self.num_actions)
hidden = hidden.view(1, bs, n_agents, -1)
# categorical over the discretized actions
qs = F.softmax(x, dim=-1)
qs = qs.permute(1, 0, 2, 3, 4) # permute back [#batch, #sequence, #agents, #action_shape, #actions]
return qs, hidden
def get_action(self, state, last_action, hidden_in, deterministic=False):
'''
@brief:
for each distributed agent, generate action for one step given input data
@params:
state: [n_agents, n_feature]
last_action: [n_agents, action_shape]
'''
state = torch.FloatTensor(state).unsqueeze(0).unsqueeze(0).to(device) # add #batch and #sequence: [#batch, #sequence, n_agents, n_feature]
last_action = torch.LongTensor(
last_action).unsqueeze(0).unsqueeze(0).to(device) # add #sequence and #batch: [#batch, #sequence, n_agents, action_shape]
hidden_in = hidden_in.unsqueeze(1) # add #batch: [#batch, n_agents, hidden_dim]
agent_outs, hidden_out = self.forward(state, last_action, hidden_in) # agents_out: [#batch, #sequence, n_agents, action_shape, action_dim]; hidden_out same as hidden_in
dist = Categorical(agent_outs)
if deterministic:
action = np.argmax(agent_outs.detach().cpu().numpy(), axis=-1)
else:
action = dist.sample().squeeze(0).squeeze(0).detach().cpu().numpy() # squeeze the added #batch and #sequence dimension
return action, hidden_out # [n_agents, action_shape]
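# --- Hedged shape sketch (added illustration, not from the original source) ---
# RNNAgent.forward() consumes [#batch, #sequence, #agent, #feature] observations
# plus the previous discrete actions and returns per-action-dimension categorical
# Q-values; the GRU hidden state is carried per agent. Dummy sizes are arbitrary.
def _rnn_agent_shape_demo(batch=2, seq=4, n_agents=2, feat=8, action_shape=1, n_actions=6, hidden=32):
    agent = RNNAgent(feat, action_shape, n_actions, hidden)
    state = torch.rand(batch, seq, n_agents, feat)
    action = torch.randint(0, n_actions, (batch, seq, n_agents, action_shape))
    h0 = torch.zeros(1, batch, n_agents, hidden)
    qs, h1 = agent(state, action, h0)
    assert qs.shape == (batch, seq, n_agents, action_shape, n_actions)
    assert h1.shape == (1, batch, n_agents, hidden)
    return qs.shape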
class QMix(nn.Module):
def __init__(self, state_dim, n_agents, action_shape, embed_dim=64, hypernet_embed=128, abs=True):
"""
Critic network class for Qmix. Outputs centralized value function predictions given independent q value.
:param args: (argparse) arguments containing relevant model information.
"""
super(QMix, self).__init__()
self.n_agents = n_agents
self.state_dim = state_dim*n_agents*action_shape # #features*n_agents
self.action_shape = action_shape
self.embed_dim = embed_dim
self.hypernet_embed = hypernet_embed
self.abs = abs
self.hyper_w_1 = nn.Sequential(nn.Linear(self.state_dim, self.hypernet_embed),
nn.ReLU(inplace=True),
nn.Linear(self.hypernet_embed, self.action_shape * self.embed_dim * self.n_agents))
self.hyper_w_final = nn.Sequential(nn.Linear(self.state_dim, self.hypernet_embed),
nn.ReLU(inplace=True),
nn.Linear(self.hypernet_embed, self.embed_dim))
# State dependent bias for hidden layer
self.hyper_b_1 = nn.Linear(
self.state_dim, self.embed_dim)
# V(s) instead of a bias for the last layers
self.V = nn.Sequential(nn.Linear(self.state_dim, self.embed_dim),
nn.ReLU(inplace=True),
nn.Linear(self.embed_dim, 1))
def forward(self, agent_qs, states):
"""
Compute actions from the given inputs.
@params:
agent_qs: [#batch, #sequence, #agent, #action_shape]
states: [#batch, #sequence, #agent, #features*action_shape]
:param agent_qs: q value inputs into network [batch_size, #agent, action_shape]
:param states: state observation.
:return q_tot: (torch.Tensor) return q-total .
"""
bs = agent_qs.size(0)
states = states.reshape(-1, self.state_dim) # [#batch*#sequence, action_shape*#features*#agent]
agent_qs = agent_qs.reshape(-1, 1, self.n_agents*self.action_shape) # [#batch*#sequence, 1, #agent*#action_shape]
# First layer
w1 = self.hyper_w_1(states).abs() if self.abs else self.hyper_w_1(states) # [#batch*#sequence, action_shape*self.embed_dim*#agent]
b1 = self.hyper_b_1(states) # [#batch*#sequence, self.embed_dim]
w1 = w1.view(-1, self.n_agents*self.action_shape, self.embed_dim) # [#batch*#sequence, #agent*action_shape, self.embed_dim]
b1 = b1.view(-1, 1, self.embed_dim) # [#batch*#sequence, 1, self.embed_dim]
hidden = F.elu(torch.bmm(agent_qs, w1) + b1) # [#batch*#sequence, 1, self.embed_dim]
# Second layer
w_final = self.hyper_w_final(states).abs() if self.abs else self.hyper_w_final(states) # [#batch*#sequence, self.embed_dim]
w_final = w_final.view(-1, self.embed_dim, 1) # [#batch*#sequence, self.embed_dim, 1]
# State-dependent bias
v = self.V(states).view(-1, 1, 1) # [#batch*#sequence, 1, 1]
# Compute final output
y = torch.bmm(hidden, w_final) + v
# Reshape and return
q_tot = y.view(bs, -1, 1) # [#batch, #sequence, 1]
return q_tot
def k(self, states):
bs = states.size(0)
w1 = torch.abs(self.hyper_w_1(states))
w_final = torch.abs(self.hyper_w_final(states))
w1 = w1.view(-1, self.n_agents, self.embed_dim*self.action_shape)
w_final = w_final.view(-1, self.embed_dim*self.action_shape, 1)
k = torch.bmm(w1, w_final).view(bs, -1, self.n_agents)
k = k / torch.sum(k, dim=2, keepdim=True)
return k
def b(self, states):
bs = states.size(0)
w_final = torch.abs(self.hyper_w_final(states))
w_final = w_final.view(-1, self.embed_dim*self.action_shape, 1)
b1 = self.hyper_b_1(states)
b1 = b1.view(-1, 1, self.embed_dim*self.action_shape)
v = self.V(states).view(-1, 1, 1)
b = torch.bmm(b1, w_final) + v
return b
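# --- Hedged shape sketch (added illustration, not from the original source) ---
# QMix.forward() mixes the per-agent Q values into a single q_tot using
# state-conditioned, non-negative (abs) hypernetwork weights, which is what
# enforces monotonicity. The dummy dimensions below are arbitrary.
def _qmix_shape_demo(batch=3, seq=6, n_agents=2, feat=5, action_shape=1):
    mixer = QMix(state_dim=feat, n_agents=n_agents, action_shape=action_shape)
    agent_qs = torch.rand(batch, seq, n_agents, action_shape)
    states = torch.rand(batch, seq, n_agents, feat * action_shape)
    q_tot = mixer(agent_qs, states)
    assert q_tot.shape == (batch, seq, 1)
    return q_tot.shape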
class QMix_Trainer():
def __init__(self, replay_buffer, n_agents, state_dim, action_shape, action_dim, hidden_dim, hypernet_dim, target_update_interval, lr=0.001, logger=None):
self.replay_buffer = replay_buffer
self.action_dim = action_dim
self.action_shape = action_shape
self.n_agents = n_agents
self.target_update_interval = target_update_interval
self.agent = RNNAgent(state_dim, action_shape,
action_dim, hidden_dim).to(device)
self.target_agent = RNNAgent(
state_dim, action_shape, action_dim, hidden_dim).to(device)
self.mixer = QMix(state_dim, n_agents, action_shape,
hidden_dim, hypernet_dim).to(device)
self.target_mixer = QMix(state_dim, n_agents, action_shape,
hidden_dim, hypernet_dim).to(device)
self._update_targets()
self.update_cnt = 0
self.criterion = nn.MSELoss()
self.optimizer = optim.Adam(
list(self.agent.parameters())+list(self.mixer.parameters()), lr=lr)
def sample_action(self):
probs = torch.FloatTensor(
np.ones(self.action_dim)/self.action_dim).to(device)
dist = Categorical(probs)
action = dist.sample((self.n_agents, self.action_shape))
return action.type(torch.FloatTensor).numpy()
def get_action(self, state, | |
<filename>phantom_analysis/dicom_util.py
# functions for interacting with dicom files
import os
import numpy as np
from collections import defaultdict
import logging
import time
from datetime import datetime
try:
import pydicom as dicom
logging.info("using pydicom")
except:
import dicom
logging.info("using dicom")
class InvalidDataException(Exception):
pass
class ImageCoordinateSystem(object):
"""
Defines image LPS (left, posterior, superior) position from DICOM attributes and a slice's x y pixels.
"""
def __init__(self, pixel_spacing_cm, spacing_between_slices_cm, image_orientation_patient,
pixel_x0_left_cm, pixel_y0_posterior_cm, min_superior_cm, max_superior_cm):
"""
Args:
pixel_spacing_cm: float from DICOM "Pixel Spacing" attribute
spacing_between_slices_cm: float from DICOM "Spacing Between Slices" attribute
pixel_x0_left_cm: float from DICOM "Image Position (Patient)" attribute
pixel_y0_posterior_cm: float from DICOM "Image Position (Patient)" attribute
min_superior_cm: float from DICOM "Image Position (Patient)" attribute
"""
self.pixel_spacing_cm = pixel_spacing_cm
self.spacing_between_slices_cm = spacing_between_slices_cm
self.pixel_x0_left_cm = pixel_x0_left_cm
self.pixel_y0_posterior_cm = pixel_y0_posterior_cm
self.min_superior_cm = min_superior_cm
self.max_superior_cm = max_superior_cm
negative_axial_image_orientation_patient = [-1.0, -0.0, 0.0, -0.0, -1.0, 0.0]
if image_orientation_patient != negative_axial_image_orientation_patient:
# we may need to add support for other orientations later, but deferring for now
raise InvalidDataException("Only negative axial image orientation patient supported: expected %s but got %s" % (
negative_axial_image_orientation_patient, image_orientation_patient))
self.image_orientation = image_orientation_patient
# return an array of coordinate system values for unit testing and printing
def cs_to_array(self):
return [
self.pixel_spacing_cm,
self.spacing_between_slices_cm,
self.pixel_x0_left_cm,
self.pixel_y0_posterior_cm,
self.min_superior_cm,
self.max_superior_cm,
self.image_orientation
]
def lps_cm(self, pixel_x, pixel_y, slice):
"""
Returns: (left, posterior, superior) position in centimeters relative to DICOM "Image Position (Patient)"
From DICOM tag "Image Position (Patient)", we get the top left coordinate cm of x=0 and y=0.
The image has left as low x and right as high x, so we subtract the pixel x to get the left coordinate.
A positive center_left_cm value means "left" and a negative value means "right".
The image has posterior as low y and anterior as high y, so we subtract the pixel y to get the posterior
coordinate. A positive center_posterior_cm value means "anterior" and a negative value means "posterior".
This is consistent with how slicer and UCSF uses the LPS coordinate system. We may need to account for the
"Image Orientation (Patient)" tag if this logic breaks with future data sets.
e.g. assuming:
* 256x256 pixel image per slice
* "Pixel Spacing" is "1.5625 1.5625"
* "Image Position (Patient)" is 206.919, 151.819, 76
then:
* x value 0 (center of first pixel) becomes 206.919 mm or 20.6919 cm
* x value 255 (center of last pixel) becomes 206.919 - 255*1.5625 = -191.5185 mm = -19.15185 cm
* x value 128 (center of middle pixel) becomes 206.919 - 128*1.5625 = 6.919 mm = 0.6919 cm
* y value 0 (center of first pixel) becomes 151.819 mm or 15.1819 cm
* y value 255 (center of last pixel) becomes 151.819 - 255*1.5625 = -246.6185 mm = -24.66185 cm
* y value 128 (center of middle pixel) becomes 151.819 - 128*1.5625 = -48.181 mm = -4.8181 cm
"""
return (
self.pixel_x0_left_cm - (pixel_x * self.pixel_spacing_cm), # left
self.pixel_y0_posterior_cm - (pixel_y * self.pixel_spacing_cm), # posterior
self.min_superior_cm + (slice * self.spacing_between_slices_cm) # superior
)
# get coronal center coordinates and radius in cm
def coronal_circle_to_cm(self, coronal_x, coronal_y, radius_pixels):
return [
self.pixel_x0_left_cm - (coronal_x * self.pixel_spacing_cm), # left
self.max_superior_cm - (coronal_y * self.spacing_between_slices_cm), # superior
radius_pixels * max(self.pixel_spacing_cm, self.spacing_between_slices_cm)
]
# get coronal center in pixels (as ints) for creating label map
def coronal_center_to_pixels(self, coronal_x_cm, coronal_y_cm):
return (
int(np.rint((self.pixel_x0_left_cm - coronal_x_cm) / self.pixel_spacing_cm)),
int(np.rint((self.max_superior_cm - coronal_y_cm) / self.spacing_between_slices_cm)),
)
# get coronal z value in cm
def coronal_z_to_cm(self, coronal_slice):
return self.pixel_y0_posterior_cm - (coronal_slice * self.pixel_spacing_cm)
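# --- Hedged check (added illustration, not from the original source) ---
# Numerically reproduces the worked example in the lps_cm() docstring above,
# using the quoted pixel spacing (1.5625 mm = 0.15625 cm) and image position
# (206.919 mm, 151.819 mm). The slice spacing and superior bounds here are
# made-up values, only needed to construct the object.
def _lps_cm_docstring_check():
    cs = ImageCoordinateSystem(
        pixel_spacing_cm=0.15625, spacing_between_slices_cm=0.4,
        image_orientation_patient=[-1.0, -0.0, 0.0, -0.0, -1.0, 0.0],
        pixel_x0_left_cm=20.6919, pixel_y0_posterior_cm=15.1819,
        min_superior_cm=7.6, max_superior_cm=10.0)
    left, posterior, superior = cs.lps_cm(pixel_x=128, pixel_y=128, slice=0)
    assert abs(left - 0.6919) < 1e-6          # matches the docstring's 6.919 mm
    assert abs(posterior - (-4.8181)) < 1e-6  # matches the docstring's -48.181 mm
    return left, posterior, superior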
def get_datetime(dcm):
""" Returns datetime object from a DICOM DA datatype and an optional DICOM TM datatype.
If TM is None or cannot be parsed, attempts to parse just the DA.
If fails to parse, logs warning.
If da cannot be parsed, returns None (doesn't raise exception).
See http://northstar-www.dartmouth.edu/doc/idl/html_6.2/Value_Representations.html for
more details
"""
da = dcm.StudyDate
tm = dcm.SeriesTime or dcm.StudyTime
date_format = '%Y%m%d'
fmts = []
try:
fmts = [date_format + ' %H%M%S.%f',
date_format + ' %H:%M:%S.%f',
date_format + ' %H%M',
date_format + ' %H%M%S']
if tm is not None:
for fmt in fmts:
try:
return datetime.strptime("%s %s" % (da, tm), fmt)
except ValueError:
pass
return datetime.strptime(da, date_format)
except ValueError:
return None
# get the b_value for a dicom object
def get_b_value(dcm):
manufacturer = dcm.Manufacturer.upper()
if manufacturer in ('PHILIPS MEDICAL SYSTEMS', 'PHILIPS HEALTHCARE'):
key = (<KEY>
elif manufacturer == 'SIEMENS':
key = (<KEY>
elif manufacturer == 'GE MEDICAL SYSTEMS':
key = (<KEY>
else:
raise InvalidDataException("Unknown location of bvalue for manufacturer %s" % manufacturer)
if manufacturer != 'GE MEDICAL SYSTEMS':
return int(dcm[key].value)
# GE can be wacky
bvalue_data_element = dcm[key]
# GE bvalue type is either "OB" (Other Byte String) or "IS" (Integer String)
# for bvalue 1000, an example string is '1000\\8\\0\\0'
# for bvalue 1000, an example "integer string" is ['1000001000', '8', '0', '0']
if bvalue_data_element.VR == 'OB':
bvalue = int(dcm[key].value.split('\\')[0])
else:
bvalue = int(dcm[key].value[0])
if bvalue >= 10 ** 9:
# e.g. a GE bvalue may be "1000000900"
# "DW-MRI vendor-specific tags.xlsx" suggests subtracting 10^6, but 10^9 seems to work better
bvalue -= 10 ** 9
return bvalue
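# --- Hedged illustration (added, not from the original source) ---
# Mirrors the GE-specific decoding rules documented in get_b_value() above,
# using the example encodings quoted in its comments. _parse_ge_bvalue is a
# hypothetical standalone helper for illustration only; the real code reads
# the value from dcm[key] directly.
def _parse_ge_bvalue(raw_value, vr):
    if vr == 'OB':                     # "Other Byte String", e.g. '1000\\8\\0\\0'
        bvalue = int(raw_value.split('\\')[0])
    else:                              # "Integer String", e.g. ['1000001000', '8', '0', '0']
        bvalue = int(raw_value[0])
    if bvalue >= 10 ** 9:              # strip the 10^9 offset GE sometimes adds
        bvalue -= 10 ** 9
    return bvalue
assert _parse_ge_bvalue('1000\\8\\0\\0', 'OB') == 1000
assert _parse_ge_bvalue(['1000001000', '8', '0', '0'], 'IS') == 1000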
# get all bvalues for a set of dicom objects
def get_all_bvalues(dicom_array):
bvalues = set()
for dcm in dicom_array:
bvalues.add(get_b_value(dcm))
return np.array(sorted(list(bvalues)))
# get the repetition time for a dicom object
def repetition_time(dcm):
key = (0x0018, 0x0080)
return int(dcm[key].value) if key in dcm else None
# get the spacing between slices for a dicom object
def spacing_between_slices_cm(dcm):
# SpacingBetweenSlices is in mm as a "Decimal String (DS)"
return float(dcm.SpacingBetweenSlices)/10
# get the pixel spacing for a dicom object
def pixel_spacing_cm(dcm):
# PixelSpacing is in mm as a "Decimal String" in row column order
if len(dcm.PixelSpacing) != 2 or dcm.PixelSpacing[0] != dcm.PixelSpacing[1]:
# we can probably support unequal row/column spacing, but if this doesn't occur in practice lets not bother
raise InvalidDataException("Expected equal row and column pixel spacing but got %s" % dcm.PixelSpacing)
return float(dcm.PixelSpacing[0])/10
# get the patient position for a dicom object
def image_position_patient_cm(dcm):
return [float(p)/10 for p in dcm.ImagePositionPatient]
# get the image orientation for a dicom object
def image_orientation_patient(dcm):
return [float(p) for p in dcm.ImageOrientationPatient]
def iop_string(iop):
return {
(1, 0, 0, 0, 0, -1): "Coronal",
(1, 0, 0, 0, 1, 0): "Axial",
(-1, 0, 0, 0, -1, 0): "Axial (Negative)",
(0, 1, 0, 0, 0, -1): "Sagital"
}.get(tuple(iop))
# get all the flip angles (in degrees) for a set of dicom objects
def get_all_flipangles(dicom_array):
flip_angles = set()
for dcm in dicom_array:
flip_angles.add(int(dcm.FlipAngle))
return np.array(sorted(list(flip_angles)))
# get the dwi array for a set of dicoms based on bvalue or flip angle
def get_dwi_array(dicom_array, sorting_param):
if sorting_param not in ("FLIP", "BVALUE"):
raise InvalidDataException("Expected a valid parameter (FLIP or BVALUE), got: {}".format(sorting_param))
# order dicoms by position then bvalue/flip angle
stack_pos_to_param_to_dcm = defaultdict(lambda: defaultdict(list))
for dcm in dicom_array:
stack_pos_to_param_to_dcm[dcm.InStackPositionNumber][dcm.FlipAngle if sorting_param == "FLIP" else get_b_value(dcm)].append(dcm)
dwi_array = []
params = None
for stack_pos in sorted(stack_pos_to_param_to_dcm.keys()):
dwi_for_stack_pos = []
if params is None:
params = sorted(stack_pos_to_param_to_dcm[stack_pos].keys())
else:
if params != sorted(stack_pos_to_param_to_dcm[stack_pos].keys()):
raise InvalidDataException("Inconsistent secondary parameters: expected %s, got %s" % params, sorted(stack_pos_to_param_to_dcm[stack_pos].keys()))
for param in params:
pixel_arrays = [dcm.pixel_array for dcm in stack_pos_to_param_to_dcm[stack_pos][param]]
avg_array = np.mean(pixel_arrays, axis=0, dtype=pixel_arrays[0].dtype)
dwi_for_stack_pos.append(np.transpose(avg_array)) # transpose will be undone at the end of the for loop
dwi_array.append(np.transpose(dwi_for_stack_pos)) # this ensures z, x, y, then b/flip indexing order
dwi_array = np.array(dwi_array)
return dwi_array
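# --- Hedged illustration (added, not from the original source) ---
# Sketches the indexing convention produced by get_dwi_array(): output is
# ordered (slice z, x, y, flip angle / b-value), with duplicate acquisitions
# averaged. _FakeDicom is a minimal stand-in for a pydicom dataset, used only
# for this sketch.
class _FakeDicom(object):
    def __init__(self, stack_pos, flip_angle, pixel_array):
        self.InStackPositionNumber = stack_pos
        self.FlipAngle = flip_angle
        self.pixel_array = pixel_array
def _dwi_array_demo():
    dcms = [_FakeDicom(z, fa, np.full((4, 6), fa + z, dtype=np.float32))
            for z in (1, 2) for fa in (5, 15)]
    dwi = get_dwi_array(dcms, "FLIP")
    assert dwi.shape == (2, 4, 6, 2)  # (z, x, y, flip angle)
    return dwi.shape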
def get_summary_info(dcm):
return {'study_date': get_datetime(dcm).isoformat(),
'series': dcm.SeriesNumber,
'series_description': dcm.SeriesDescription,
'position': ", ".join([str(10*x) for x in image_position_patient_cm(dcm)]),
'bvalue': get_b_value(dcm),
'flip_angle': dcm.FlipAngle}
# takes a directory and returns the information about the dicom dataset
# what type of dataset: T1, ADC, or thermometry
# the dwi array for the dataset
# flip angles (in degrees and radians) or bvalues
# repetition time for T1 datasets
# TODO: do we want to return bvalues/flip angles and rep time for all cases and some would just have one value? would this be useful for errors or calibration later?
def read_dicomdir(directory):
before = time.time()
dicoms = []
dicom_summaries = {}
between_slices_cm = None
pixel_cm = None
pixel_x0_left_cm = None
pixel_y0_posterior_cm = None
orientation = None
min_superior_cm, max_superior_cm = | |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 16 10:05:19 2018
@author: trevor
To determine the daily station statistics (unused)
#bin_size = 24
#bin_layout = np.arange(humid.size)//bin_size
#avg_humid = np.bincount(bin_layout,humid)/np.bincount(bin_layout)
convert function developed by <NAME>
https://stackoverflow.com/questions/1254454/fastest-way-to-convert-a-dicts-keys-values-from-unicode-to-str/1254499#1254499
"""
import codecs # Needs to be removed. Obsolete.
import collections
import os
from datetime import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import unicodecsv as csv
def convert(data):
try:
if isinstance(data, basestring):
try:
return str(data)
except Exception as e:
print e
return data.encode('ascii', 'ignore')
elif isinstance(data, collections.Mapping):
return dict(map(convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert, data))
else:
return data
except UnicodeEncodeError:
return ''
def period(data_frame):
"""
Determines the earliest and latest date/times and returns the length
of month in total number of days, accounting for leap years.
"""
dates = np.array(data_frame['Date/Time'], dtype='datetime64[h]')
min_date = np.datetime64(min(dates), dtype='datetime64[D]')
max_date = np.datetime64(max(dates), dtype='datetime64[D]')
month = data_frame['Month'][0]
period = np.arange(min_date, max_date + 1, dtype='datetime64[D]')
return month, period
def humid(data_frame):
"""
Summarizes the indicators for Relative Humidity.
"""
key = 'Rel Hum (%)'
month, days = period(data_frame)
humid_raw = data_frame[key]
for count, factor in enumerate(humid_raw): # Handling NoneTypes
try:
factor = float(factor)
except (TypeError, ValueError):
humid_raw[count] = 'NaN'
humid_hourly = np.array(humid_raw, dtype=np.float)
humid_hourly = np.ma.masked_where(np.isnan(humid_hourly), humid_hourly)
if type(np.nansum(humid_hourly)) != np.float64:
return False, key, month
humid_daily = humid_hourly.reshape(len(humid_hourly) / 24, 24)
min_humid = np.amin(humid_daily, axis=1)
max_humid = np.amax(humid_daily, axis=1)
y1 = min_humid
y2 = max_humid
hu_min, hu_max = y1.min(), y2.max()
title = 'Relative Humidity'
y1_label = '%'
y2_label = None
y1_title = 'Monthly Min Humidity: {0:.3}'.format(hu_min)
y2_title = 'Monthly Max Humidity: {0:.3}'.format(hu_max)
return days, y1, y2, month, title, y1_label, y2_label, y1_title, y2_title
def windchill(dataframe):
"""
Summarizes the indicators for Wind Chill Factor.
"""
key = 'Wind Chill'
month, days = period(dataframe)
wcf_raw = dataframe[key]
for count, factor in enumerate(wcf_raw): # Handling those annoying NoneTypes
try:
float(factor)
except (TypeError, ValueError):
wcf_raw[count] = 'NaN'
wcf_hourly = np.array(wcf_raw, dtype=np.float)
wcf_hourly = np.ma.masked_where(np.isnan(wcf_hourly), wcf_hourly)
if type(np.nansum(wcf_hourly)) != np.float64:
return False, key, month
wcf_daily = wcf_hourly.reshape(len(wcf_hourly) / 24, 24)
min_wcf = np.amin(wcf_daily, axis=1)
max_wcf = np.amax(wcf_daily, axis=1)
y1 = min_wcf
y2 = max_wcf
minmean_wcf, maxmean_wcf = y1.mean(), y2.mean()
title = 'Wind Chill Factor'
y1_label = 'Deg C'
y2_label = None
y1_title = 'Avg Min Wind Chill: {0:.3} deg C'.format(minmean_wcf)
y2_title = 'Avg Max Wind Chill: {0:.3} deg C'.format(maxmean_wcf)
return days, y1, y2, month, title, y1_label, y2_label, y1_title, y2_title
def collect_that():
"""
Collects all files starting with "eng-" and ending with ".csv"
located in the same directory as the script. Need to consider
making a method allowing for users to feed in a directory location
if they want to scan a filesystem.
"""
print 'Scanning directories:\n'
ec_stations = [ec for ec in os.listdir('.')
if ec.startswith('eng-hourly')]
ec_stations.sort()
if len(ec_stations) >= 1:
return ec_stations
else:
raise Exception("No stations were collected. Verify CSV locations.")
def place_that(name):
"""
When given a filename will dump station location headers
to console and return a dictionary with raw unicode keys
and values for station name and location variables.
"""
try:
location = str(name)
with codecs.open(location, 'rb') as f:
dialect = csv.Sniffer().sniff(f.read(1024))
f.seek(0)
verifier = csv.reader(f, dialect)
for count, row in enumerate(verifier): # Read and format metadata
if count > 6:
break
f.seek(0)
names = ('Station Name',
'Province',
'Latitude',
'Longitude',
'Elevation',
'Climate Identifier',
'WMO Identifier',
'TC Identifier')
datum = {}
for name in names:
datum[name] = []
for count, row in enumerate(verifier):
if count == 0: # Special handling to deal with UTF-8 BOM
key = 'Station Name'
field = convert(row[1])
datum[key] = field
continue
try:
if row[0] in names:
key = convert(row[0])
field = convert(row[1])
datum[key] = field
except Exception as e:
print e
continue
return datum
except ValueError:
raise Exception("Invalid station CSV. \
Verify that CSVs hold Environment Canada station data.")
pass
def grab_that(station):
"""
A method that extracts climate data from CSV and converts it to a
dictionary object.
"""
with codecs.open(station, 'rb', ) as f:
# Tries to figure out CSV formatting to address encoding issues.
dialect = csv.Sniffer().sniff(f.read(1024))
f.seek(0)
lines = csv.reader(f, dialect)
for i in range(16): # Skips the metadata
next(lines)
names, datum = [], {}
for column in lines:
for name in column:
names.append(name)
datum[name] = []
break
reader = csv.DictReader(f, fieldnames=names, delimiter=',', quotechar='"')
for row in reader:
for column, value in row.iteritems():
value = convert(value)
datum.setdefault(column, []).append(value)
return datum
def match_locations(locations):
"""
A method to match locations that appear multiple times in the same list
and return year-ordered lists that can then be plotted sequentially
"""
ident = 'Climate Identifier'
yr = 'Year'
mon = 'Month'
matches = []
order_months = [[]]
processed_stations = []
try:
for i, station1 in enumerate(locations):
if (station1[ident], station1[yr][0]) in processed_stations:
continue
matches.append([])
matches[-1].append(station1)
order_months[-1].append(int(station1[mon][0]))
for station2 in locations[i + 1:]:
if station1[ident] == station2[ident] \
and int(station1[yr][0]) == int(station2[yr][0]) \
and int(station1[mon][0]) != int(station2[mon][0]):
matches[-1].append(station2)
order_months[-1].append(int(station2[mon][0]))
processed_stations.append((station1[ident], station1[yr][0]))
return matches
except ValueError:
raise Exception("Verify that CSV has valid dates and formatted properly")
def calc_that(match, plot):
"""
A method that converts a unicode dictionary of climate data to ASCII and
proceeds to calculate daily variables derived from it.
"""
location = match.copy()
# Depending on the plot type being calculated, only return the variables needed
if plot == 0:
return humid(location)
elif plot == 1:
return windchill(location)
else:
return "You need more plot styles"
def data_unpacker(matches, make_plots=True):
"""
Unpacks the matches and match data to return continuous data
that will be appended to CSVs. If make_plots == True, will create a
series of subplots.
"""
csv_list = []
for match in matches:
csv_meta = ('Station Name',
'Province',
'Latitude',
'Longitude',
'Elevation',
'Climate Identifier',
'WMO Identifier',
'TC Identifier')
csv_data = {'Date': [], 'Min Rel Humid (%)': [], 'Max Rel Humid (%)': [],
'Min WCF (deg C)': [], 'Max WCF (deg C)': []}
for keys in csv_meta:
csv_meta = {keys: match[0][keys]}
csv_data.update(csv_meta)
if len(match) > 1:
print '\nMulti-Month Set Found; Matched as follows:'
for iterable, station in enumerate(match):
print(station['Station Name'] + ' ID:' + station['Climate Identifier']
+ ' for Month ' + station['Month'][0] + ' in ' + station['Year'][0])
# Begin sub-plotting processes
for plot in range(2):
if make_plots:
f, axarr = plt.subplots(len(match), 1, sharex=True)
for subplot, station in enumerate(match):
analysis = calc_that(station, plot)
if make_plots:
plot_maker(analysis, axarr, subplot, plot)
length = len(period(station)[1])
empty = np.ma.masked_array(np.zeros((length,)), mask=np.ones((length,)))
# Grab formatted data as it is iterated over
if plot == 0:
csv_data['Date'].extend(period(station)[1])
if analysis[0] is False:
csv_data['Min Rel Humid (%)'].extend(empty)
csv_data['Max Rel Humid (%)'].extend(empty)
else:
csv_data['Min Rel Humid (%)'].extend(analysis[1])
csv_data['Max Rel Humid (%)'].extend(analysis[2])
if plot == 1:
if analysis[0] is False:
csv_data['Min WCF (deg C)'].extend(empty)
csv_data['Max WCF (deg C)'].extend(empty)
else:
csv_data['Min WCF (deg C)'].extend(analysis[1])
csv_data['Max WCF (deg C)'].extend(analysis[2])
if make_plots:
f.subplots_adjust(hspace=0.5)
f.text(0.5, 0.04, 'Day in Month', ha='center', va='center')
stationplace = match[0]['Station Name'] + ' Station for Year ' + match[0]['Year'][0]
f.text(0.5, 0.96, stationplace, ha='center', va='center')
elif len(match) == 1:
print '\nSingle Month Station Found:'
print(match[0]['Station Name'] + ' ID:' + match[0]['Climate Identifier']
+ ' for Month ' + match[0]['Month'][0] + ' in ' + match[0]['Year'][0])
# Begin plotting processes
if make_plots:
f, axarr = plt.subplots(3, 1, sharex=True)
for plot in range(2):
analysis = calc_that(match[0], plot)
if make_plots:
plot_maker(analysis, axarr, plot, plot)
length = len(period(match[0])[1])
empty = np.ma.masked_array(np.zeros((length,)), mask=np.ones((length,)))
# Grab formatted data as it is iterated over
if plot == 0:
csv_data['Date'].extend(period(match[0])[1])
if analysis[0] is False:
csv_data['Min Rel Humid (%)'].extend(empty)
csv_data['Max Rel Humid (%)'].extend(empty)
else:
csv_data['Min Rel Humid (%)'].extend(analysis[1])
csv_data['Max Rel Humid (%)'].extend(analysis[2])
if plot == 1:
if analysis[0] is False:
csv_data['Min WCF (deg C)'].extend(empty)
csv_data['Max WCF (deg C)'].extend(empty)
else:
csv_data['Min WCF (deg C)'].extend(analysis[1])
csv_data['Max WCF (deg C)'].extend(analysis[2])
if make_plots:
f.subplots_adjust(hspace=0.25)
f.text(0.5, 0.04, 'Day in Month', ha='center', va='center')
stationplace = match[0]['Station Name'] + ' Station for Year ' + match[0]['Year'][0]
f.text(0.5, 0.96, stationplace, ha='center', va='center')
else:
print "This should never happen."
csv_list.append(csv_data)
if make_plots:
plt.show()
return csv_list
def plot_maker(analysis, axarr, subplot, plot):
"""
Using plot names, axes, year and titles, creates a sublot | |
. 1 1 2
. 2 3
1 3 4
sage: S.is_ribbon()
False
sage: S=SkewTableau([[None, None, 1, 2],[None, None, 3],[1]])
sage: S.pp()
. . 1 2
. . 3
1
sage: S.is_ribbon()
False
sage: S=SkewTableau([[None, None, None, None],[None, None, 3],[1, 2, 4]])
sage: S.pp()
. . . .
. . 3
1 2 4
sage: S.is_ribbon()
True
sage: S=SkewTableau([[None, None, None, None],[None, None, 3],[None, 2, 4]])
sage: S.pp()
. . . .
. . 3
. 2 4
sage: S.is_ribbon()
True
sage: S=SkewTableau([[None, None],[None]])
sage: S.pp()
. .
.
sage: S.is_ribbon()
True
"""
lam = list(self.outer_shape())
mu = list(self.inner_shape())
l_out = len(lam)
l_in = len(mu)
mu += [0]*(l_out-l_in)
if l_out == 0:
return True
else:
# Find the least u for which lam[u]>mu[u], if it exists.
# If it does not exist then u will equal l_out.
u = 0
u_test = True
while u_test:
if u >= l_out or lam[u] > mu[u]:
u_test = False
else:
u += 1
# Find the least v strictly greater than u for which
# lam[v] != mu[v-1]+1
v = u + 1
v_test = True
while v_test:
if v >= l_out or lam[v] != mu[v-1] + 1:
v_test = False
else:
v += 1
# Check if lam[i]==mu[i] for all i >= v
for i in range(v, l_out):
if lam[i] != mu[i]:
return False
return True
def to_ribbon(self, check_input=True):
"""
Return ``self`` as a ribbon-shaped tableau
(:class:`~sage.combinat.ribbon_shaped_tableau.RibbonShapedTableau`),
provided that the shape of ``self`` is a ribbon.
INPUT:
- ``check_input`` -- (default: ``True``) whether or not to check
that ``self`` indeed has ribbon shape
EXAMPLES::
sage: SkewTableau([[None,1],[2,3]]).to_ribbon()
[[None, 1], [2, 3]]
"""
if check_input and not self.is_ribbon():
raise ValueError("self must be a ribbon")
from sage.combinat.ribbon_shaped_tableau import RibbonShapedTableau
r = [[i for i in row if i is not None] for row in self]
return RibbonShapedTableau(r)
def filling(self):
"""
Return a list of the non-empty entries in ``self``.
EXAMPLES::
sage: t = SkewTableau([[None,1],[2,3]])
sage: t.filling()
[[1], [2, 3]]
"""
return [[i for i in row if i is not None] for row in self]
def cells_by_content(self, c):
"""
Return the coordinates of the cells in ``self`` with content ``c``.
EXAMPLES::
sage: s = SkewTableau([[None,1,2],[3,4,5],[6]])
sage: s.cells_by_content(0)
[(1, 1)]
sage: s.cells_by_content(1)
[(0, 1), (1, 2)]
sage: s.cells_by_content(2)
[(0, 2)]
sage: s.cells_by_content(-1)
[(1, 0)]
sage: s.cells_by_content(-2)
[(2, 0)]
"""
if len(self) == 0:
return []
if c >= 0:
if c >= len(self[0]):
return []
i,j = 0,c
else:
c = -c
if c >= len(self):
return []
i,j = c,0
res = []
while True:
if self[i][j] is not None:
res.append((i,j))
i,j = i+1, j+1
if i >= len(self) or j >= len(self[i]):
break
return res
def entries_by_content(self, c):
"""
Return the entries in ``self`` with content ``c``.
EXAMPLES::
sage: s = SkewTableau([[None,1,2],[3,4,5],[6]])
sage: s.entries_by_content(0)
[4]
sage: s.entries_by_content(1)
[1, 5]
sage: s.entries_by_content(2)
[2]
sage: s.entries_by_content(-1)
[3]
sage: s.entries_by_content(-2)
[6]
"""
return [self[i][j] for i,j in self.cells_by_content(c)]
def cells(self):
"""
Return the cells in ``self``.
EXAMPLES::
sage: s = SkewTableau([[None,1,2],[3],[6]])
sage: s.cells()
[(0, 1), (0, 2), (1, 0), (2, 0)]
"""
res = []
for i in range(len(self)):
for j in range(len(self[i])):
if self[i][j] is not None:
res.append( (i,j) )
return res
def cells_containing(self, i):
r"""
Return the list of cells in which the letter ``i`` appears in the
tableau ``self``. The list is ordered with cells appearing from
left to right.
Cells are given as pairs of coordinates `(a, b)`, where both
rows and columns are counted from `0` (so `a = 0` means the cell
lies in the leftmost column of the tableau, etc.).
EXAMPLES::
sage: t = SkewTableau([[None,None,3],[None,3,5],[4,5]])
sage: t.cells_containing(5)
[(2, 1), (1, 2)]
sage: t.cells_containing(4)
[(2, 0)]
sage: t.cells_containing(2)
[]
sage: t = SkewTableau([[None,None,None,None],[None,4,5],[None,5,6],[None,9],[None]])
sage: t.cells_containing(2)
[]
sage: t.cells_containing(4)
[(1, 1)]
sage: t.cells_containing(5)
[(2, 1), (1, 2)]
sage: SkewTableau([]).cells_containing(3)
[]
sage: SkewTableau([[None,None],[None]]).cells_containing(3)
[]
"""
cell_list = []
for r in range(len(self)-1, -1, -1):
rth_row = self[r]
for c,val in enumerate(rth_row):
if val == i:
cell_list.append((r,c))
return cell_list
def is_k_tableau(self, k):
r"""
Checks whether ``self`` is a valid skew weak `k`-tableau.
EXAMPLES::
sage: t = SkewTableau([[None,2,3],[2,3],[3]])
sage: t.is_k_tableau(3)
True
sage: t = SkewTableau([[None,1,3],[2,2],[3]])
sage: t.is_k_tableau(3)
False
"""
shapes = self.to_chain()
kshapes = [ la.k_conjugate(k) for la in shapes ]
return all( kshapes[i+1].contains(kshapes[i]) for i in range(len(shapes)-1) )
def _label_skew(list_of_cells, sk):
"""
Return a filled-in standard skew tableau given an
ordered list ``list_of_cells`` of the coordinates to fill in
(as pairs) and an empty shape ``sk``.
EXAMPLES::
sage: import sage.combinat.skew_tableau as skew_tableau
sage: l = [(0, 0), (1, 1), (1, 0), (0, 1)]
sage: empty = [[None,None],[None,None]]
sage: skew_tableau._label_skew(l, empty)
[[1, 4], [3, 2]]
"""
i = 1
skew = [list(row) for row in sk]
for row, column in list_of_cells:
skew[row][column] = i
i += 1
return skew
class SkewTableaux(UniqueRepresentation, Parent):
r"""
Class of all skew tableaux.
"""
def __init__(self, category=None):
"""
Initialize ``self``.
EXAMPLES::
sage: S = SkewTableaux()
sage: TestSuite(S).run()
"""
if category is None:
Parent.__init__(self, category=Sets())
else:
Parent.__init__(self, category=category)
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: SkewTableaux()
Skew tableaux
"""
return "Skew tableaux"
def _element_constructor_(self, st):
"""
Construct an element of ``self``.
EXAMPLES::
sage: S = SkewTableaux()
sage: elt = S([[None,1],[2,3]]); elt
[[None, 1], [2, 3]]
sage: elt.parent() is S
True
"""
return self.element_class(self, st)
Element = SkewTableau
options = Tableaux.options
def __contains__(self, x):
"""
Checks if ``x`` is a skew tableau.
EXAMPLES::
sage: T = SkewTableau([[None, None, 1], [3], [4]])
sage: T in SkewTableaux()
True
sage: [[None,1],[2,3]] in SkewTableaux()
True
"""
if isinstance(x, SkewTableau):
return True
try:
self.element_class(self, x)
except Exception:
return False
return True
def from_expr(self, expr):
"""
Return a :class:`SkewTableau` from a MuPAD-Combinat expr for a skew
tableau. The first list in ``expr`` is the inner shape of the skew
tableau. The second list are the entries in the rows of the skew
tableau from bottom to top.
Provided primarily for compatibility with MuPAD-Combinat.
EXAMPLES::
sage: SkewTableaux().from_expr([[1,1],[[5],[3,4],[1,2]]])
[[None, 1, 2], [None, 3, 4], [5]]
"""
skp = []
outer = expr[1]
inner = expr[0]+[0]*(len(outer)-len(expr[0]))
for i in range(len(outer)):
skp.append( [None]*(inner[i]) + outer[-(i+1)] )
return self.element_class(self, skp)
def from_chain(self, chain):
"""
Return the tableau corresponding to the chain of partitions.
EXAMPLES::
sage: SkewTableaux().from_chain([[1,1],[2,1],[3,1],[3,2],[3,3],[3,3,1]])
[[None, 1, 2], [None, 3, 4], [5]]
"""
shape = chain[-1]
T = [[None for _ in range(r)] for r in shape]
for i in range(1,len(chain)):
la = chain[i]
mu = chain[i-1]
mu += [0]*(len(la) - len(mu))
for r in range(len(la)):
for c in range(mu[r], la[r]):
T[r][c] = i
return self.element_class(self, T)
def from_shape_and_word(self, shape, word):
"""
Return the skew tableau corresponding to the skew partition ``shape``
and the word ``word`` obtained from the row reading.
EXAMPLES::
sage: t = SkewTableau([[None, 1, 3], [None, 2], [4]])
sage: shape = t.shape()
sage: word = t.to_word()
sage: SkewTableaux().from_shape_and_word(shape, word)
[[None, 1, 3], [None, 2], [4]]
"""
st = [ [None]*row_length for row_length in shape[0] ]
w_count = 0
for i in reversed(range(len(shape[0]))):
for j in range(shape[0][i]):
if i >= len(shape[1]) or j >= shape[1][i]:
st[i][j] = word[w_count]
w_count += 1
return self.element_class(self, st)
class StandardSkewTableaux(SkewTableaux):
"""
Standard skew tableaux.
EXAMPLES::
sage: S = StandardSkewTableaux(); S
Standard skew tableaux
sage: S.cardinality()
+Infinity
::
sage: S = StandardSkewTableaux(2); S
Standard skew tableaux of size 2
sage: S.cardinality()
4
::
sage: StandardSkewTableaux([[3, 2, 1], [1, 1]]).list()
[[[None, 2, 3], [None, 4], [1]],
[[None, 1, 2], [None, 3], [4]],
[[None, 1, 2], [None, 4], [3]],
[[None, 1, 3], [None, 4], [2]],
[[None, 1, 4], [None, 3], [2]],
[[None, 1, 4], [None, 2], [3]],
[[None, 1, 3], [None, 2], [4]],
[[None, 2, 4], [None, 3], [1]]]
"""
@staticmethod
def __classcall_private__(cls, skp=None):
"""
Return the class of standard skew tableaux of skew shape ``skp``.
EXAMPLES::
sage: SST1 = StandardSkewTableaux([[3, 2, 1], [1, 1]])
sage: SST2 = StandardSkewTableaux(SkewPartition([[3, 2, 1], [1, 1]]))
sage: SST1 is | |
<gh_stars>100-1000
# This messy code computes vertex colors based on the distance reconstruction <-> GT mesh
from source.base import parula_colormap
from source.base import utils_mp
import numpy as np
import trimesh
import trimesh.proximity
def get_normalization_target(distances: list, cut_percentil=0.9):
dist_concat = np.concatenate(distances, axis=0)
dist_concat_sorted = np.sort(dist_concat)
if cut_percentil is not None and cut_percentil < 1.0:
percentil_id = int(dist_concat_sorted.shape[0] * cut_percentil)
return dist_concat_sorted[percentil_id]
else:
return dist_concat_sorted[-1]
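# --- Hedged usage sketch (added, not from the original source) ---
# get_normalization_target() concatenates the per-mesh distance arrays and
# returns the value at the given percentile of the sorted distances;
# visualize_mesh_with_distances() then uses it as the colormap's upper bound
# so a few outliers do not wash out the visualization.
def _normalization_target_demo():
    dists = [np.linspace(0.0, 1.0, 50), np.linspace(0.0, 2.0, 50)]
    target = get_normalization_target(dists, cut_percentil=0.9)
    assert 0.0 < target <= 2.0
    return target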
def get_closest_distance_batched(query_pts: np.ndarray, mesh: trimesh.Trimesh, batch_size=1000):
import multiprocessing
num_of_cpu = multiprocessing.cpu_count()
# process batches because trimesh's signed_distance very inefficient on memory
# 3k queries on a mesh with 27k vertices and 55k faces takes around 8 GB of RAM
# dists_ms = np.zeros((query_pts.shape[0],))
pts_ids = np.arange(query_pts.shape[0])
pts_ids_split = np.array_split(pts_ids, max(1, int(query_pts.shape[0] / batch_size)))
params = []
for pts_ids_batch in pts_ids_split:
# dists_ms[pts_ids_batch] = trimesh.proximity.closest_point(mesh, query_pts[pts_ids_batch])[1]
params.append((mesh, query_pts[pts_ids_batch]))
dist_list = utils_mp.start_process_pool(trimesh.proximity.closest_point, params, num_of_cpu)
dists = np.concatenate([d[1] for d in dist_list])
print('got distances for {} vertices'.format(query_pts.shape[0]))
return dists
def visualize_mesh_with_distances(mesh_file: str, mesh: trimesh.Trimesh,
dist_per_vertex: np.ndarray, normalize_to: float, cut_percentil=0.9):
dist_per_vertex_normalized = dist_per_vertex / normalize_to
# use parula colormap: dist=0 -> blue, dist=0.5 -> green, dist=1.0 -> yellow
parulas_indices = (dist_per_vertex_normalized * (parula_colormap.parula_cm.shape[0] - 1)).astype(np.int32)
dist_greater_than_norm_target = parulas_indices >= parula_colormap.parula_cm.shape[0]
parulas_indices[dist_greater_than_norm_target] = parula_colormap.parula_cm.shape[0] - 1
dist_colors_rgb = [parula_colormap.parula_cm[parula_indices] for parula_indices in parulas_indices]
file_out_vis = mesh_file + '_vis.ply'
mesh_vis = trimesh.Trimesh(vertices=mesh.vertices, faces=mesh.faces, vertex_colors=dist_colors_rgb)
mesh_vis.export(file_out_vis)
file_out_stats = mesh_file + '_stats.txt'
with open(file_out_stats, 'w+') as stats_file:
stats_file.write(
'Distance from reconstructed mesh vertex to nearest sample on GT mesh, '
'Min={}, Max={}, Mean={}, normalized to {}, cut percentil {}'.format(
np.min(dist_per_vertex), np.max(dist_per_vertex), np.mean(dist_per_vertex),
normalize_to, cut_percentil)
)
def make_distance_comparison(in_file_rec_meshes: list, in_file_gt_mesh, cut_percentil=0.9, batch_size=1000):
import trimesh.proximity
meshes_rec = [trimesh.load(in_file_rec_mesh) for in_file_rec_mesh in in_file_rec_meshes]
if type(in_file_gt_mesh) == str:
mesh_gt = trimesh.load(in_file_gt_mesh)
elif type(in_file_gt_mesh) == list:
mesh_gt = [trimesh.load(in_file_gt_mesh) for in_file_gt_mesh in in_file_gt_mesh]
else:
raise ValueError('Not implemented!')
# vertices_rec_dists = [trimesh.proximity.closest_point(mesh_gt, mesh_rec.vertices)[1] for mesh_rec in meshes_rec]
if type(in_file_gt_mesh) == str:
vertices_rec_dists = [get_closest_distance_batched(mesh_rec.vertices, mesh_gt, batch_size)
for mesh_rec in meshes_rec]
elif type(in_file_gt_mesh) == list:
vertices_rec_dists = [get_closest_distance_batched(mesh_rec.vertices, mesh_gt[mi], batch_size)
for mi, mesh_rec in enumerate(meshes_rec)]
else:
raise ValueError('Not implemented!')
normalize_to = get_normalization_target(vertices_rec_dists, cut_percentil=cut_percentil)
for fi, f in enumerate(in_file_rec_meshes):
visualize_mesh_with_distances(
f, meshes_rec[fi], dist_per_vertex=vertices_rec_dists[fi],
normalize_to=normalize_to, cut_percentil=cut_percentil)
def main(in_file_rec_meshes: list, in_file_gt_mesh, cut_percentile=0.9, batch_size=1000):
print('Visualize distances of {} to {}'.format(in_file_rec_meshes, in_file_gt_mesh))
make_distance_comparison(
in_file_rec_meshes=in_file_rec_meshes,
in_file_gt_mesh=in_file_gt_mesh,
cut_percentil=cut_percentile,
batch_size=batch_size
)
if __name__ == "__main__":
# # holes close-up
# mesh_name = '00011827_73c6505f827541168d5410e4_trimesh_096.ply'
# in_dirs_rec_meshes = [
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/holes/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/holes/spsr+pcpnet/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/holes/spsr+gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/holes/deepsdf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/holes/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# ]
# in_dirs_gt_meshes = '/home/perler/Nextcloud/point2surf results/figures/features_close_up/holes/gt/' + mesh_name
# main(in_dirs_rec_meshes, in_dirs_gt_meshes, cut_percentile=0.9)
# # flat areas close-up
# mesh_name = '00019114_87f2e2e15b2746ffa4a2fd9a_trimesh_003.ply'
# in_dirs_rec_meshes = [
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/flats/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/flats/spsr+pcpnet/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/flats/spsr+gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/flats/deepsdf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/flats/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# ]
# in_dirs_gt_meshes = '/home/perler/Nextcloud/point2surf results/figures/features_close_up/flats/gt/' + mesh_name
# main(in_dirs_rec_meshes, in_dirs_gt_meshes, cut_percentile=0.9)
# # denoising close-up
# #mesh_name = '00993706_f8bc5c196ab9685d0182bbed_trimesh_001.ply'
# mesh_name = 'Armadillo.ply'
# in_dirs_rec_meshes = [
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/denoising/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/denoising/spsr+pcpnet/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/denoising/spsr+gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/denoising/deepsdf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/features_close_up/denoising/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# ]
# in_dirs_gt_meshes = '/home/perler/Nextcloud/point2surf results/figures/features_close_up/denoising/gt/' + mesh_name
# main(in_dirs_rec_meshes, in_dirs_gt_meshes, cut_percentile=0.9)
# ## denoising (luckily same rotation everywhere)
# #mesh_name = 'flower.ply'
# #in_dirs_rec_meshes = [
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/point2surf/' + mesh_name,
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/spsr+pcpnet/' + mesh_name,
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/spsr+gt/' + mesh_name,
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/deepsdf/' + mesh_name,
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# # #
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/noisefree/point2surf/' + mesh_name,
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/noisefree/spsr+pcpnet/' + mesh_name,
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/noisefree/spsr+gt/' + mesh_name,
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/noisefree/deepsdf/' + mesh_name,
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/noisefree/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# # #
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/original/point2surf/' + mesh_name,
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/original/spsr+pcpnet/' + mesh_name,
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/original/spsr+gt/' + mesh_name,
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/original/deepsdf/' + mesh_name,
# # '/home/perler/Nextcloud/point2surf results/figures/noise comparison/original/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# #]
# #in_dirs_gt_meshes = '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/gt/' + mesh_name
# #main(in_dirs_rec_meshes, in_dirs_gt_meshes, cut_percentile=0.9)
# # denoising (different rotation unfortunately)
# mesh_name = '00010429_fc56088abf10474bba06f659_trimesh_004.ply'
# in_dirs_rec_meshes = [
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/deepsdf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/spsr+pcpnet/' + mesh_name,
# #
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/noisefree/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/noisefree/deepsdf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/noisefree/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/noisefree/spsr+pcpnet/' + mesh_name,
# #
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/original/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/original/deepsdf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/original/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/original/spsr+pcpnet/' + mesh_name,
# ]
# in_dirs_gt_meshes = [
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/extra_noisy/gt/' + mesh_name,
# #
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/noisefree/gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/noisefree/gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/noisefree/gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/noisefree/gt/' + mesh_name,
# #
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/original/gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/original/gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/original/gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/noise comparison/original/gt/' + mesh_name,
# ]
# main(in_dirs_rec_meshes, in_dirs_gt_meshes, cut_percentile=0.9)
# # qualitative abc original
# mesh_name = '00010218_4769314c71814669ba5d3512_trimesh_013.ply'
# in_dirs_rec_meshes = [
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_original/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_original/spsr+pcpnet/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_original/spsr+gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_original/deepsdf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_original/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# ]
# in_dirs_gt_meshes = '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_original/gt/' + mesh_name
# main(in_dirs_rec_meshes, in_dirs_gt_meshes, cut_percentile=0.9)
# # qualitative abc noisefree
# mesh_name = '00994034_9299b4c10539bb6b50b162d7_trimesh_000.ply'
# in_dirs_rec_meshes = [
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_noisefree/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_noisefree/spsr+pcpnet/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_noisefree/spsr+gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_noisefree/deepsdf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_noisefree/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# ]
# in_dirs_gt_meshes = '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_noisefree/gt/' + mesh_name
# main(in_dirs_rec_meshes, in_dirs_gt_meshes, cut_percentile=0.9)
# # qualitative abc extra noisy
# mesh_name = '00993692_494894597fe7b39310a44a99_trimesh_000.ply'
# in_dirs_rec_meshes = [
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_extra_noisy/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_extra_noisy/spsr+pcpnet/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_extra_noisy/spsr+gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_extra_noisy/deepsdf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_extra_noisy/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# ]
# in_dirs_gt_meshes = '/home/perler/Nextcloud/point2surf results/figures/qualitative results/abc_extra_noisy/gt/' + mesh_name
# main(in_dirs_rec_meshes, in_dirs_gt_meshes, cut_percentile=0.9)
# # qualitative custom_dense
# mesh_name = 'horse.ply'
# in_dirs_rec_meshes = [
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_dense/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_dense/spsr+pcpnet/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_dense/spsr+gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_dense/deepsdf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_dense/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# ]
# in_dirs_gt_meshes = '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_dense/gt/' + mesh_name
# main(in_dirs_rec_meshes, in_dirs_gt_meshes, cut_percentile=0.9)
# # qualitative custom_extra_noisy
# mesh_name = 'hand.ply'
# in_dirs_rec_meshes = [
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_extra_noisy/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_extra_noisy/spsr+pcpnet/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_extra_noisy/spsr+gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_extra_noisy/deepsdf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_extra_noisy/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# ]
# in_dirs_gt_meshes = '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_extra_noisy/gt/' + mesh_name
# main(in_dirs_rec_meshes, in_dirs_gt_meshes, cut_percentile=0.9, batch_size=200)
# # qualitative custom_noisefree
# mesh_name = 'happy.ply'
# in_dirs_rec_meshes = [
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_noisefree/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_noisefree/spsr+pcpnet/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_noisefree/spsr+gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_noisefree/deepsdf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_noisefree/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# ]
# in_dirs_gt_meshes = '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_noisefree/gt/' + mesh_name
# main(in_dirs_rec_meshes, in_dirs_gt_meshes, cut_percentile=0.9)
# # qualitative custom_original
# mesh_name = 'galera.ply'
# in_dirs_rec_meshes = [
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_original/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_original/spsr+pcpnet/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_original/spsr+gt/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_original/deepsdf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_original/atlasnet/' + mesh_name[:-4] + '.xyz.npy.ply',
# ]
# in_dirs_gt_meshes = '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_original/gt/' + mesh_name
# main(in_dirs_rec_meshes, in_dirs_gt_meshes, cut_percentile=0.9)
# # qualitative custom_sparse
# mesh_name = 'angel.ply'
# in_dirs_rec_meshes = [
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_sparse/point2surf/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative results/custom_sparse/spsr+pcpnet/' + mesh_name,
# '/home/perler/Nextcloud/point2surf results/figures/qualitative | |
csrf_token = output_text.split(
'name="csrf_token" type="hidden" value="'
)[1].split('">')[0]
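# Note: this scrapes the hidden CSRF field straight out of the rendered settings
# page; later tests in this class use the self.get_csrf() helper for the same purpose.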
data = {"email": "<EMAIL>"}
output = self.app.post(
"/settings/email/add", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("<strong>Add new email</strong>", output_text)
self.assertEqual(output_text.count("<EMAIL>"), 1)
# New email
data = {"csrf_token": csrf_token, "email": "<EMAIL>"}
output = self.app.post(
"/settings/email/add", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>pingou's settings - Pagure</title>", output_text
)
self.assertIn("Email pending validation", output_text)
self.assertEqual(output_text.count("<EMAIL>"), 4)
self.assertEqual(output_text.count("<EMAIL>"), 5)
self.assertEqual(output_text.count("<EMAIL>"), 2)
# Email already pending
output = self.app.post(
"/settings/email/add", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
'<div class="card-header">\n '
"<strong>Add new email</strong>",
output_text,
)
self.assertIn(
"This email is already " "pending confirmation", output_text
)
# User already has this email
data = {"csrf_token": csrf_token, "email": "<EMAIL>"}
output = self.app.post(
"/settings/email/add", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertTrue("<strong>Add new email</strong>" in output_text)
self.assertTrue(
"Invalid value, can't be any of: <EMAIL>, "
"<EMAIL>. " in output_text
or "Invalid value, can't be any of: <EMAIL>, "
"<EMAIL>. " in output_text
)
self.assertEqual(output_text.count("<EMAIL>"), 6)
self.assertEqual(output_text.count("<EMAIL>"), 5)
self.assertEqual(output_text.count("<EMAIL>"), 0)
# Email registered by someone else
data = {"csrf_token": csrf_token, "email": "<EMAIL>"}
output = self.app.post(
"/settings/email/add", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertTrue("<strong>Add new email</strong>" in output_text)
self.assertIn(
"Invalid value, can't be any of: <EMAIL>. ",
output_text,
)
ast.return_value = True
output = self.app.post("/settings/email/add", data=data)
self.assertEqual(output.status_code, 302)
@patch("pagure.lib.notify.send_email")
@patch("pagure.ui.app.admin_session_timedout")
def test_set_default_email(self, ast, send_email):
""" Test the set_default_email endpoint. """
send_email.return_value = True
ast.return_value = False
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
output = self.app.post("/settings/email/default")
self.assertEqual(output.status_code, 404)
self.assertTrue(
"<h2>Page not found (404)</h2>"
in output.get_data(as_text=True)
)
user.username = "pingou"
with tests.user_set(self.app.application, user):
output = self.app.get("/settings/")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>pingou's settings - Pagure</title>", output_text
)
csrf_token = self.get_csrf(output=output)
data = {"email": "<EMAIL>"}
output = self.app.post(
"/settings/email/default", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>pingou's settings - Pagure</title>", output_text
)
self.assertEqual(output_text.count("<EMAIL>"), 4)
# Set invalid default email
data = {"csrf_token": csrf_token, "email": "<EMAIL>"}
output = self.app.post(
"/settings/email/default", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>pingou's settings - Pagure</title>", output_text
)
self.assertEqual(output_text.count("<EMAIL>"), 4)
self.assertIn(
"You do not have the "
"email: <EMAIL>, nothing to set",
output_text,
)
# Set default email
data = {"csrf_token": csrf_token, "email": "<EMAIL>"}
output = self.app.post(
"/settings/email/default", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>pingou's settings - Pagure</title>", output_text
)
self.assertEqual(output_text.count("<EMAIL>"), 4)
self.assertIn(
"Default email set to: " "<EMAIL>", output_text
)
ast.return_value = True
output = self.app.post("/settings/email/default", data=data)
self.assertEqual(output.status_code, 302)
@patch("pagure.lib.notify.send_email")
@patch("pagure.ui.app.admin_session_timedout")
def test_reconfirm_email(self, ast, send_email):
""" Test the reconfirm_email endpoint. """
send_email.return_value = True
ast.return_value = False
# Add a pending email to pingou
userobj = pagure.lib.query.search_user(self.session, username="pingou")
self.assertEqual(len(userobj.emails), 2)
email_pend = pagure.lib.model.UserEmailPending(
user_id=userobj.id, email="<EMAIL>.o", token="abcdef"
)
self.session.add(email_pend)
self.session.commit()
user = tests.FakeUser()
with tests.user_set(self.app.application, user):
output = self.app.post("/settings/email/resend")
self.assertEqual(output.status_code, 404)
self.assertTrue(
"<h2>Page not found (404)</h2>"
in output.get_data(as_text=True)
)
user.username = "pingou"
with tests.user_set(self.app.application, user):
output = self.app.get("/settings/")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>pingou's settings - Pagure</title>", output_text
)
csrf_token = self.get_csrf(output=output)
data = {"email": "<EMAIL>"}
output = self.app.post(
"/settings/email/resend", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>pingou's settings - Pagure</title>", output_text
)
self.assertEqual(output_text.count("<EMAIL>"), 4)
# Try to re-send the confirmation for an address that is already confirmed
data = {"csrf_token": csrf_token, "email": "<EMAIL>"}
output = self.app.post(
"/settings/email/resend", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>pingou's settings - Pagure</title>", output_text
)
self.assertEqual(output_text.count("<EMAIL>"), 4)
self.assertIn(
"This email address has " "already been confirmed", output_text
)
# Re-send the confirmation email for the pending (non-validated) address
data = {"csrf_token": csrf_token, "email": "<EMAIL>"}
output = self.app.post(
"/settings/email/resend", data=data, follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>pingou's settings - Pagure</title>", output_text
)
self.assertEqual(output_text.count("<EMAIL>"), 4)
self.assertIn("Confirmation email re-sent", output_text)
ast.return_value = True
output = self.app.post("/settings/email/resend", data=data)
self.assertEqual(output.status_code, 302)
@patch("pagure.ui.app.admin_session_timedout")
def test_confirm_email(self, ast):
""" Test the confirm_email endpoint. """
output = self.app.get("/settings/email/confirm/foobar")
self.assertEqual(output.status_code, 302)
ast.return_value = False
# Add a pending email to pingou
userobj = pagure.lib.query.search_user(self.session, username="pingou")
self.assertEqual(len(userobj.emails), 2)
email_pend = pagure.lib.model.UserEmailPending(
user_id=userobj.id, email="<EMAIL>", token="abcdef"
)
self.session.add(email_pend)
self.session.commit()
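# The pending email added above carries the token "abcdef"; the requests below
# first try a bogus token, then hit /settings/email/confirm/abcdef to validate it.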
user = tests.FakeUser()
user.username = "pingou"
with tests.user_set(self.app.application, user):
# Wrong token
output = self.app.get(
"/settings/email/confirm/foobar", follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>pingou's settings - Pagure</title>", output_text
)
self.assertIn("No email associated with this token.", output_text)
# Confirm email
output = self.app.get(
"/settings/email/confirm/abcdef", follow_redirects=True
)
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn(
"<title>pingou's settings - Pagure</title>", output_text
)
self.assertIn("Email validated", output_text)
userobj = pagure.lib.query.search_user(self.session, username="pingou")
self.assertEqual(len(userobj.emails), 3)
ast.return_value = True
output = self.app.get("/settings/email/confirm/foobar")
self.assertEqual(output.status_code, 302)
def test_view_my_requests_no_user(self):
"""Test the view_user_requests endpoint."""
output = self.app.get("/user/somenonexistentuser/requests")
self.assertEqual(output.status_code, 404)
@patch("pagure.lib.git.update_git", MagicMock(return_value=True))
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_view_my_requests(self):
"""Test the view_user_requests endpoint. """
# Create the PR
tests.create_projects(self.session)
repo = pagure.lib.query._get_project(self.session, "test")
req = pagure.lib.query.new_pull_request(
session=self.session,
repo_from=repo,
branch_from="dev",
repo_to=repo,
branch_to="master",
title="test pull-request #1",
user="pingou",
)
self.session.commit()
self.assertEqual(req.id, 1)
self.assertEqual(req.title, "test pull-request #1")
output = self.app.get("/user/pingou/requests")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("test pull-request #1", output_text)
self.assertEqual(output_text.count('pr-status pr-status-open"'), 1)
# Add a PR in a fork
item = pagure.lib.model.Project(
user_id=1, # pingou
name="test_fork",
description="test project #1",
is_fork=True,
parent_id=1,
hook_token="<PASSWORD>",
)
self.session.add(item)
repo = pagure.lib.query._get_project(
self.session, "test_fork", user="pingou"
)
req = pagure.lib.query.new_pull_request(
session=self.session,
repo_from=repo,
branch_from="dev",
repo_to=repo,
branch_to="master",
title="tést pull-request #2",
user="pingou",
)
self.session.commit()
self.assertEqual(req.id, 1)
self.assertEqual(req.title, "tést pull-request #2")
output = self.app.get("/user/pingou/requests")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("test pull-request #1", output_text)
self.assertIn("tést pull-request #2", output_text)
self.assertEqual(output_text.count('pr-status pr-status-open"'), 2)
@patch("pagure.lib.git.update_git", MagicMock(return_value=True))
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_view_my_requests_pr_in_another_project(self):
"""Test the view_user_requests endpoint when the user opened a PR
in another project."""
# Pingou creates the PR on test
tests.create_projects(self.session)
repo = pagure.lib.query._get_project(self.session, "test")
req = pagure.lib.query.new_pull_request(
session=self.session,
repo_from=repo,
branch_from="dev",
repo_to=repo,
branch_to="master",
title="test pull-request #1",
user="pingou",
)
self.session.commit()
self.assertEqual(req.id, 1)
self.assertEqual(req.title, "test pull-request #1")
# foo creates the PR on test
repo = pagure.lib.query._get_project(self.session, "test")
req = pagure.lib.query.new_pull_request(
session=self.session,
repo_from=repo,
branch_from="dev",
repo_to=repo,
branch_to="master",
title="test pull-request #2",
user="foo",
)
self.session.commit()
self.assertEqual(req.id, 2)
self.assertEqual(req.title, "test pull-request #2")
# Check pingou's PR list
output = self.app.get("/user/pingou/requests?type=filed")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("test pull-request #1", output_text)
self.assertNotIn("test pull-request #2", output_text)
self.assertEqual(output_text.count('pr-status pr-status-open"'), 1)
output = self.app.get("/user/pingou/requests?type=actionable")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertNotIn("test pull-request #1", output_text)
self.assertIn("test pull-request #2", output_text)
self.assertEqual(output_text.count('pr-status pr-status-open"'), 1)
# Check foo's PR list
output = self.app.get("/user/foo/requests")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertNotIn("test pull-request #1", output_text)
self.assertIn("test pull-request #2", output_text)
self.assertEqual(output_text.count('pr-status pr-status-open"'), 1)
@patch("pagure.lib.git.update_git", MagicMock(return_value=True))
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_view_my_requests_against_another_project(self):
"""Test the view_user_requests endpoint when there is a PR opened
by me against a project I do not have rights on."""
# Create the PR
tests.create_projects(self.session)
repo = pagure.lib.query._get_project(self.session, "test")
req = pagure.lib.query.new_pull_request(
session=self.session,
repo_from=repo,
branch_from="dev",
repo_to=repo,
branch_to="master",
title="test pull-request #1",
user="foo",
)
self.session.commit()
self.assertEqual(req.id, 1)
self.assertEqual(req.title, "test pull-request #1")
output = self.app.get("/user/foo/requests")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("test pull-request #1", output_text)
self.assertEqual(output_text.count('pr-status pr-status-open"'), 1)
def test_view_my_issues_no_user(self):
"""Test the view_user_issues endpoint with a missing user."""
output = self.app.get("/user/somenonexistentuser/issues")
self.assertEqual(output.status_code, 404)
@patch("pagure.lib.git.update_git", MagicMock(return_value=True))
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_view_my_issues(self):
"""Test the view_user_issues endpoint when the user exists."""
# Create the issue
tests.create_projects(self.session)
repo = pagure.lib.query._get_project(self.session, "test")
msg = pagure.lib.query.new_issue(
session=self.session,
repo=repo,
title="Test issue #1",
content="We should work on this for the second time",
user="pingou",
status="Open",
)
self.session.commit()
self.assertEqual(msg.title, "Test issue #1")
output = self.app.get("/user/pingou/issues")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("Test issue #1", output_text)
self.assertEqual(
output_text.count("issue-status issue-status-open"), 1
)
# Add an issue in a fork
item = pagure.lib.model.Project(
user_id=2, # foo
name="test_fork",
description="test project #1",
is_fork=True,
parent_id=1,
hook_token="<PASSWORD>",
)
self.session.add(item)
repo = pagure.lib.query._get_project(
self.session, "test_fork", user="foo"
)
msg = pagure.lib.query.new_issue(
session=self.session,
repo=repo,
title="Test issue #2",
content="We should work on this for the second time",
user="pingou",
status="Open",
)
self.session.commit()
self.assertEqual(msg.title, "Test issue #2")
# Test the assigned issue table. Create issue then set the assignee
msg = pagure.lib.query.new_issue(
session=self.session,
repo=repo,
title="Test issue #3",
content="This issue created by foo, but assigned to pingou",
user="foo",
status="Open",
)
self.session.commit()
self.assertEqual(msg.title, "Test issue #3")
msg = pagure.lib.query.add_issue_assignee(
session=self.session, issue=msg, assignee="pingou", user="foo"
)
self.session.commit()
self.assertEqual(msg, "Issue assigned to pingou")
output = self.app.get("/user/pingou/issues")
self.assertEqual(output.status_code, 200)
output_text = output.get_data(as_text=True)
self.assertIn("Test issue #1", output_text)
self.assertIn("Test issue #2", output_text)
self.assertIn("Test issue #3", output_text)
self.assertEqual(
output_text.count("issue-status issue-status-open"), 3
)
@patch("pagure.lib.git.update_git", MagicMock(return_value=True))
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_view_my_issues_disabled(self):
"""Test the view_user_issues endpoint when the project disabled issue
| |
"""
pyrad.proc.process_echoclass
===============================
Functions for echo classification and filtering
.. autosummary::
:toctree: generated/
process_echo_id
process_birds_id
process_clt_to_echo_id
process_hydro_mf_to_hydro
process_echo_filter
process_cdf
process_filter_snr
process_filter_vel_diff
process_filter_visibility
process_outlier_filter
process_hydroclass
process_melting_layer
process_zdr_column
"""
from copy import deepcopy
from warnings import warn
import numpy as np
import pyart
from ..io.io_aux import get_datatype_fields, get_fieldname_pyart
def process_echo_id(procstatus, dscfg, radar_list=None):
"""
identifies echoes as 0: No data, 1: Noise, 2: Clutter,
3: Precipitation
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZ':
refl_field = get_fieldname_pyart(datatype)
if datatype == 'dBuZ':
refl_field = get_fieldname_pyart(datatype)
if datatype == 'ZDR':
zdr_field = get_fieldname_pyart(datatype)
if datatype == 'ZDRu':
zdr_field = get_fieldname_pyart(datatype)
if datatype == 'RhoHV':
rhv_field = get_fieldname_pyart(datatype)
if datatype == 'uPhiDP':
phi_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl_field not in radar.fields) or
(zdr_field not in radar.fields) or
(rhv_field not in radar.fields) or
(phi_field not in radar.fields)):
warn('Unable to create radar_echo_id dataset. Missing data')
return None, None
echo_id = np.ma.zeros((radar.nrays, radar.ngates), dtype=np.uint8)+3
# look for clutter
gatefilter = pyart.filters.moment_and_texture_based_gate_filter(
radar, zdr_field=zdr_field, rhv_field=rhv_field, phi_field=phi_field,
refl_field=refl_field, textzdr_field=None, textrhv_field=None,
textphi_field=None, textrefl_field=None, wind_size=7,
max_textphi=20., max_textrhv=0.3, max_textzdr=2.85,
max_textrefl=8., min_rhv=0.6)
is_clutter = gatefilter.gate_excluded == 1
echo_id[is_clutter] = 2
# look for noise
is_noise = radar.fields[refl_field]['data'].data == (
pyart.config.get_fillvalue())
echo_id[is_noise] = 1
id_field = pyart.config.get_metadata('radar_echo_id')
id_field['data'] = echo_id
id_field.update({'_FillValue': 0})
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('radar_echo_id', id_field)
return new_dataset, ind_rad
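# Minimal usage sketch (illustrative only; the exact datatype descriptor strings
# depend on the pyrad configuration conventions, here assumed to look like
# 'RADAR001:<datatype>'):
#
#   dscfg = {'datatype': ['RADAR001:dBZ', 'RADAR001:ZDR',
#                         'RADAR001:RhoHV', 'RADAR001:uPhiDP']}
#   new_dataset, ind_rad = process_echo_id(1, dscfg, radar_list=[radar])
#   echo_id = new_dataset['radar_out'].fields['radar_echo_id']['data']
#   # echo_id holds 1 (noise), 2 (clutter) or 3 (precipitation) per gate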
def process_birds_id(procstatus, dscfg, radar_list=None):
"""
identifies echoes as 0: No data, 1: Noise, 2: Clutter,
3: Birds
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZ':
refl_field = get_fieldname_pyart(datatype)
if datatype == 'dBuZ':
refl_field = get_fieldname_pyart(datatype)
if datatype == 'ZDR':
zdr_field = get_fieldname_pyart(datatype)
if datatype == 'ZDRu':
zdr_field = get_fieldname_pyart(datatype)
if datatype == 'RhoHV':
rhv_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl_field not in radar.fields) or
(zdr_field not in radar.fields) or
(rhv_field not in radar.fields)):
warn('Unable to create radar_echo_id dataset. Missing data')
return None, None
# user defined parameters
max_zdr = dscfg.get('max_zdr', 3.)
max_rhv = dscfg.get('max_rhv', 0.9)
max_refl = dscfg.get('max_refl', 20.)
rmin = dscfg.get('rmin', 2000.)
rmax = dscfg.get('rmax', 25000.)
elmin = dscfg.get('elmin', 1.5)
elmax = dscfg.get('elmax', 85.)
echo_id = np.zeros((radar.nrays, radar.ngates), dtype=np.uint8)+3
# look for clutter
gatefilter = pyart.filters.birds_gate_filter(
radar, zdr_field=zdr_field, rhv_field=rhv_field,
refl_field=refl_field, max_zdr=max_zdr, max_rhv=max_rhv,
max_refl=max_refl, rmin=rmin, rmax=rmax, elmin=elmin, elmax=elmax)
is_clutter = gatefilter.gate_excluded == 1
echo_id[is_clutter] = 2
# look for noise
is_noise = radar.fields[refl_field]['data'].data == (
pyart.config.get_fillvalue())
echo_id[is_noise] = 1
id_field = pyart.config.get_metadata('radar_echo_id')
id_field['data'] = echo_id
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('radar_echo_id', id_field)
return new_dataset, ind_rad
def process_clt_to_echo_id(procstatus, dscfg, radar_list=None):
"""
Converts clutter exit code from rad4alp into pyrad echo ID
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'CLT':
clt_field = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if clt_field not in radar.fields:
warn('rad4alp clutter exit code not present. Unable to obtain echoID')
return None, None
echo_id = np.zeros((radar.nrays, radar.ngates), dtype=np.uint8)+3
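# every gate starts as 3 (precipitation) and is downgraded to 1 (noise) or
# 2 (clutter) below according to the rad4alp clutter exit code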
clt = radar.fields[clt_field]['data']
echo_id[clt == 1] = 1
echo_id[clt >= 100] = 2
id_field = pyart.config.get_metadata('radar_echo_id')
id_field['data'] = echo_id
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('radar_echo_id', id_field)
return new_dataset, ind_rad
def process_hydro_mf_to_hydro(procstatus, dscfg, radar_list=None):
"""
Converts the hydrometeor classification from Météo France to
that of MeteoSwiss
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'hydroMF':
field = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if field not in radar.fields:
warn('hydroMF not present. Unable to obtain hydro')
return None, None
hydro = np.zeros((radar.nrays, radar.ngates), dtype=np.uint8)
hydroMF = radar.fields[field]['data']
# BRUIT, ZH_MQT, SOL, INSECTES, OISEAUX, MER_CHAFF, PARASITES,
# ROND_CENTRAL, TYPE_INCONNU, SIMPLE_POLAR are classified as NC
hydro[hydroMF < 8] = 1
hydro[hydroMF == 30] = 1
hydro[hydroMF == 31] = 1
# PRECIP_INDIFFERENCIEE, PLUIE, PRECIP are classified as RN
hydro[hydroMF == 8] = 6
hydro[hydroMF == 9] = 6
hydro[hydroMF == 32] = 6
hydro[hydroMF == 10] = 8  # NEIGE_MOUILLEE is WS
hydro[hydroMF == 11] = 2  # NEIGE_SECHE is AG
hydro[hydroMF == 12] = 3  # GLACE is CR
hydro[hydroMF == 13] = 5  # PETITE_GRELE is RP
# MOYENNE_GRELE, GROSSE_GRELE is IH/HDG
hydro[hydroMF == 14] = 10
hydro[hydroMF == 15] = 10
# Light rain (LR), vertically oriented ice (VI) and melting hail (MH) have
# no equivalent in the Météo France classification
hydro_field = pyart.config.get_metadata('radar_echo_classification')
hydro_field['data'] = hydro
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(
'radar_echo_classification', hydro_field)
return new_dataset, ind_rad
def process_echo_filter(procstatus, dscfg, radar_list=None):
"""
Masks all echo types that are not of the class specified in
keyword echo_type
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
echo_type : int or list of ints
The type of echoes to keep: 1 noise, 2 clutter, 3 precipitation.
Default 3
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
echoid_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'echoID':
echoid_field = get_fieldname_pyart(datatype)
break
if echoid_field is None:
warn('echoID field required to filter data')
return None, None
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if echoid_field not in radar.fields:
warn('Unable to filter data. Missing echo ID field')
return None, None
echo_type = dscfg.get('echo_type', 3)
mask = np.ma.isin(
radar.fields[echoid_field]['data'], echo_type, invert=True)
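# np.ma.isin with invert=True marks every gate whose echo ID is NOT one of the
# requested echo_type values; those gates get masked out of the copied fields below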
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'echoID':
continue
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
warn('Unable to filter '+field_name+' according to echo ID. ' +
'No valid input fields')
continue
radar_field = deepcopy(radar.fields[field_name])
radar_field['data'] = np.ma.masked_where(
mask, radar_field['data'])
if field_name.startswith('corrected_'):
new_field_name = field_name
elif field_name.startswith('uncorrected_'):
new_field_name = field_name.replace(
'uncorrected_', 'corrected_', 1)
else:
new_field_name = 'corrected_'+field_name
new_dataset['radar_out'].add_field(new_field_name, radar_field)
if not new_dataset['radar_out'].fields:
return None, None
return new_dataset, ind_rad
def process_cdf(procstatus, dscfg, radar_list=None):
"""
Collects the fields necessary | |
import os
import glob
import json
from collections import OrderedDict
from colorama import Fore
from toolset.databases import databases
from toolset.utils.output_helper import log
class Metadata:
supported_dbs = []
for name in databases:
supported_dbs.append((name, '...'))
def __init__(self, benchmarker=None):
self.benchmarker = benchmarker
def gather_languages(self):
'''
Gathers all the known languages in the suite via the folder names
beneath FWROOT.
'''
lang_dir = os.path.join(self.benchmarker.config.lang_root)
langs = []
for dir in glob.glob(os.path.join(lang_dir, "*")):
langs.append(dir.replace(lang_dir, "")[1:])
return langs
def gather_language_tests(self, language):
'''
Gathers all the test names from a known language
'''
try:
dir = os.path.join(self.benchmarker.config.lang_root, language)
tests = map(lambda x: os.path.join(language, x), os.listdir(dir))
return filter(lambda x: os.path.isdir(
os.path.join(self.benchmarker.config.lang_root, x)), tests)
except Exception:
raise Exception(
"Unable to locate language directory: {!s}".format(language))
def get_framework_config(self, test_dir):
'''
Gets a framework's benchmark_config from the given
test directory
'''
dir_config_files = glob.glob("{!s}/{!s}/benchmark_config.json".format(
self.benchmarker.config.lang_root, test_dir))
if len(dir_config_files):
return dir_config_files[0]
else:
raise Exception(
"Unable to locate tests in test-dir: {!s}".format(test_dir))
def gather_tests(self, include=None, exclude=None):
'''
Given test names as strings, returns a list of FrameworkTest objects.
For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
variables for checking the test directory, the test database os, and
other useful items.
With no arguments, every test in this framework will be returned.
With include, only tests with this exact name will be returned.
With exclude, all tests but those excluded will be returned.
'''
# Help callers out a bit
include = include or []
exclude = exclude or []
# Search for configuration files
config_files = []
if self.benchmarker.config.test_lang:
self.benchmarker.config.test_dir = []
for lang in self.benchmarker.config.test_lang:
self.benchmarker.config.test_dir.extend(
self.gather_language_tests(lang))
if self.benchmarker.config.test_dir:
for test_dir in self.benchmarker.config.test_dir:
config_files.append(self.get_framework_config(test_dir))
else:
config_files.extend(
glob.glob("{!s}/*/*/benchmark_config.json".format(
self.benchmarker.config.lang_root)))
tests = []
for config_file_name in config_files:
config = None
with open(config_file_name, 'r') as config_file:
try:
config = json.load(config_file)
except ValueError:
log("Error loading config: {!s}".format(config_file_name),
color=Fore.RED)
raise Exception("Error loading config file")
# Find all tests in the config file
config_tests = self.parse_config(config,
os.path.dirname(config_file_name))
# Filter
for test in config_tests:
if hasattr(test, "tags"):
if "broken" in test.tags:
continue
if self.benchmarker.config.tag:
for t in self.benchmarker.config.tag:
if t in test.tags and test.name not in exclude:
tests.append(test)
break
if len(include) > 0:
if test.name in include:
tests.append(test)
elif test.name not in exclude and not self.benchmarker.config.tag:
tests.append(test)
# Ensure we were able to locate everything that was
# explicitly included
if len(include):
names = {test.name for test in tests}
if len(set(include) - set(names)):
missing = list(set(include) - set(names))
raise Exception("Unable to locate tests %s" % missing)
tests = list(set(tests))
tests.sort(key=lambda x: x.name)
return tests
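# e.g. gather_tests(include=['aspnet-mysql-raw']) returns only that test, while
# gather_tests(exclude=['aspnet-mysql-raw']) returns every other (non-broken) test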
def tests_to_run(self):
'''
Gathers all tests for current benchmark run.
'''
return self.gather_tests(self.benchmarker.config.test,
self.benchmarker.config.exclude)
def gather_frameworks(self, include=None, exclude=None):
'''
Return a dictionary mapping frameworks->[test1,test2,test3]
for quickly grabbing all tests in a grouped manner.
Args have the same meaning as gather_tests
'''
tests = self.gather_tests(include, exclude)
frameworks = dict()
for test in tests:
if test.framework not in frameworks:
frameworks[test.framework] = []
frameworks[test.framework].append(test)
return frameworks
def has_file(self, test_dir, filename):
'''
Returns True if the file exists in the test dir
'''
path = test_dir
if not self.benchmarker.config.lang_root in path:
path = os.path.join(self.benchmarker.config.lang_root, path)
return os.path.isfile("{!s}/{!s}".format(path, filename))
@staticmethod
def test_order(type_name):
"""
This sort ordering is set up specifically to return the length
of the test name. There were SO many problems involved with
'plaintext' being run first (rather, just not last) that we
needed to ensure that it was run last for every framework.
"""
return len(type_name)
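# For example, with a hypothetical subset of test types the longest name sorts last:
#   sorted(['plaintext', 'json', 'db', 'query'], key=Metadata.test_order)
#   -> ['db', 'json', 'query', 'plaintext']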
def parse_config(self, config, directory):
"""
Parses a config file into a list of FrameworkTest objects
"""
from toolset.benchmark.framework_test import FrameworkTest
tests = []
# The config object can specify multiple tests
# Loop over them and parse each into a FrameworkTest
for test in config['tests']:
tests_to_run = [name for (name, keys) in test.iteritems()]
if "default" not in tests_to_run:
log("Framework %s does not define a default test in benchmark_config.json"
% config['framework'],
color=Fore.YELLOW)
# Check that each test configuration is acceptable
# Throw exceptions if a field is missing, or how to improve the field
for test_name, test_keys in test.iteritems():
# Validates and normalizes the benchmark_config entry
test_keys = Metadata.validate_test(test_name, test_keys,
config['framework'], directory)
# Map test type to a parsed FrameworkTestType object
runTests = dict()
# TODO: remove self.benchmarker.config.types
for type_name, type_obj in self.benchmarker.config.types.iteritems():
try:
# Makes a FrameWorkTestType object using some of the keys in config
# e.g. JsonTestType uses "json_url"
runTests[type_name] = type_obj.copy().parse(test_keys)
except AttributeError:
# This is quite common - most tests don't support all types
# Quietly log it and move on (debug logging is on in Travis and this causes
# ~1500 lines of debug output, so I'm totally ignoring it for now)
# log("Missing arguments for test type %s for framework test %s" % (type_name, test_name))
pass
# We need to sort by test_type to run
sortedTestKeys = sorted(
runTests.keys(), key=Metadata.test_order)
sortedRunTests = OrderedDict()
for sortedTestKey in sortedTestKeys:
sortedRunTests[sortedTestKey] = runTests[sortedTestKey]
# Prefix all test names with framework except 'default' test
# Done at the end so we may still refer to the primary test as `default` in benchmark config error messages
if test_name == 'default':
test_name = config['framework']
else:
test_name = "%s-%s" % (config['framework'], test_name)
# By passing the entire set of keys, each FrameworkTest will have a member for each key
tests.append(
FrameworkTest(test_name, directory, self.benchmarker,
sortedRunTests, test_keys))
return tests
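# Illustrative benchmark_config.json fragment of the shape this parser expects
# (framework name and URL values are made up for the example):
#
#   {
#     "framework": "myframework",
#     "tests": [{
#       "default":  {"json_url": "/json", "plaintext_url": "/plaintext", ...},
#       "postgres": {"db_url": "/db", "query_url": "/queries?queries=", ...}
#     }]
#   }
#
# would produce FrameworkTest objects named "myframework" and "myframework-postgres".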
def to_jsonable(self):
'''
Returns an array suitable for jsonification
'''
all_tests = self.gather_tests()
return map(lambda test: {
"project_name": test.project_name,
"name": test.name,
"approach": test.approach,
"classification": test.classification,
"database": test.database,
"framework": test.framework,
"language": test.language,
"orm": test.orm,
"platform": test.platform,
"webserver": test.webserver,
"os": test.os,
"database_os": test.database_os,
"display_name": test.display_name,
"notes": test.notes,
"versus": test.versus,
"tags": hasattr(test, "tags") and test.tags or []
}, all_tests)
def list_test_metadata(self):
'''
Prints the metadata for all the available tests
'''
all_tests_json = json.dumps(self.to_jsonable())
with open(
os.path.join(self.benchmarker.results.directory,
"test_metadata.json"), "w") as f:
f.write(all_tests_json)
@staticmethod
def validate_test(test_name, test_keys, project_name, directory):
"""
Validate and normalizes benchmark config values for this test based on a schema
"""
recommended_lang = directory.split('/')[-2]
windows_url = "https://github.com/TechEmpower/FrameworkBenchmarks/issues/1038"
schema = {
'language': {
# Language is the only key right now with no 'allowed' key that can't
# have a "None" value
'required':
True,
'help': ('language',
'The language of the framework used, suggestion: %s' %
recommended_lang)
},
'webserver': {
'help':
('webserver',
'Name of the webserver also referred to as the "front-end server"'
)
},
'classification': {
'allowed': [('Fullstack', '...'), ('Micro', '...'),
('Platform', '...')]
},
'database': {
'allowed':
Metadata.supported_dbs +
[('None',
'No database was used for these tests, as is the case with Json Serialization and Plaintext'
)]
},
'approach': {
'allowed': [('Realistic', '...'), ('Stripped', '...')]
},
'orm': {
'required_with':
'database',
'allowed':
[('Full',
'Has a full suite of features like lazy loading, caching, multiple language support, sometimes pre-configured with scripts.'
),
('Micro',
'Has basic database driver capabilities such as establishing a connection and sending queries.'
),
('Raw',
'Tests that do not use an ORM will be classified as "raw" meaning they use the platform\'s raw database connectivity.'
)]
},
'platform': {
'help':
('platform',
'Name of the platform this framework runs on, e.g. Node.js, PyPy, hhvm, JRuby ...'
)
},
'framework': {
# Guaranteed to be here and correct at this point
# key is left here to produce the set of required keys
},
'os': {
'allowed':
[('Linux',
'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'
),
('Windows',
'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s'
% windows_url)]
},
'database_os': {
'required_with':
'database',
'allowed':
[('Linux',
'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'
),
('Windows',
'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s'
% windows_url)]
}
}
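# The schema keys above appear to be interpreted as follows by the validation code
# that follows (not fully shown here): 'required' marks a mandatory key,
# 'required_with' makes a key mandatory only when the named sibling key is present,
# 'allowed' restricts the value to the listed (value, description) pairs, and
# 'help' supplies the hint shown when a key is missing or invalid.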
# Check the (all optional) test urls
Metadata.validate_urls(test_name, test_keys)
def get_test_val(k):
return test_keys.get(k, "none").lower()
def throw_incorrect_key(k, acceptable_values, descriptors):
msg = (
"`%s` is a required key for test \"%s\" in framework \"%s\"\n"
% (k, test_name, project_name))
if acceptable_values:
msg = (
"Invalid `%s` value specified for | |
import argparse
import pickle
import os
import numpy as np
#from parse_contig_realign import mark_edit_region, variant_link_graph, haplotyping_link_graph, output_contig_correction
from parse_contig_realign import variant_link_graph, output_contig_correction, parse_CIGAR, parse_MD, trim_dict, find_double_pos, get_farthest_ext
from utils import get_reverse_complement
import sys
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-fs', '--fn_sam',
help = 'sam file of reads realign to contig'
)
parser.add_argument(
'-fc', '--fn_cluster_contig',
help = 'cropped contig file, corrected or not'
)
parser.add_argument(
'-for', '--fo_report',
help = 'output report file'
)
parser.add_argument(
'-foc', '--fo_corrected_alleles',
help = 'output corrected alleles fasta file'
)
args = parser.parse_args()
return args
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def cluster_separate(fn_cluster_contig, fn_sam):
# dict_contig {}
# - keys: contig_name
# - values: [edit_histogram, cover_histogram, contig_SEQ, list_read_field[]]
dict_contig = {}
# dict_contig's initialization
with open(fn_cluster_contig, 'r') as f_c:
contig_name = ""
contig_SEQ = ""
for line in f_c:
if line[0] == '>':
if contig_name != "":
dict_contig[contig_name] = [np.zeros(len(contig_SEQ) + 1), np.zeros(len(contig_SEQ) + 1), contig_SEQ, []]
contig_name = line.strip()[1:].split()[0]
contig_SEQ = ""
else:
contig_SEQ += line.strip()
dict_contig[contig_name] = [np.zeros(len(contig_SEQ) + 1), np.zeros(len(contig_SEQ) + 1), contig_SEQ, []]
with open(fn_sam, 'r') as f_r:
read_name = ""
read_SEQ = ""
for line in f_r:
if line[0] != '@':
fields = line.split()
if fields[2] == '*':
continue
else:
contig_name = fields[2]
dict_contig[contig_name][3].append(fields)
return dict_contig
def mark_edit_region(contig_name, contig_info, ignore_S=False):
# contig_info = [edit_histogram, cov_histogram, contig_SEQ, list_read]
edit_histogram = contig_info[0]
cov_histogram = contig_info[1]
# list_read_info: [ (start_pos, end_pos, read_name, even_odd_flag, mis_region, cigar, read_SEQ) ]
list_read_info = []
even_odd_flag = 1
list_read_field = contig_info[3]
for fields in list_read_field:
read_name = fields[0]
read_SEQ = fields[9]
cigar = fields[5]
sam_flag = int(fields[1])
# if the alignment is a supplementary alignment, pass, it does not matter the even odd
# read BWA manual "Supplementary Alignment" for more information
if sam_flag > 1024:
continue
S_flag = False
number, operate = parse_CIGAR(cigar)
if ignore_S and 'S' in cigar:
if operate[0] == 'S':
if number[0] >= len(read_SEQ)/15:
S_flag = True
if operate[-1] == 'S':
if number[-1] >= len(read_SEQ)/15:
S_flag = True
# if cigar == '*', means alignment is bad, pass
# if the read align to incorrect contigs, pass
if cigar == '*' or contig_name != fields[2] or S_flag:
# list_read_info.append((start_pos, end_pos, read_name, even_odd_flag, mis_region))
list_read_info.append((0, 0, read_name, even_odd_flag, [], "", read_SEQ))
if even_odd_flag == 1:
even_odd_flag = 2
else:
even_odd_flag = 1
continue
edit_dist = int(fields[11].split(':')[2]) # NM:i:2 tag
MD_tag = fields[12].split(':')[2] # MD:Z:38G2A20
start_pos = int(fields[3])
mis_region_MD = parse_MD(MD_tag)
mis_region_MD = [ele + start_pos - 1 for ele in mis_region_MD] # change to ref coordinate
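# e.g. assuming parse_MD returns the 1-based offsets of mismatched bases within the
# alignment, MD:Z:38G2A20 would give [39, 42], which the line above shifts into
# contig coordinates using start_pos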
mis_region_I = [] # insertion boundary region
diff_len = 0 # len contribution of D, I, and S
if 'I' in operate or 'D' in operate or 'S' in operate:
idx_I = start_pos - 1 # index in reference
for idx, op in enumerate(operate):
if op == 'I':
diff_len -= number[idx]
mis_region_I.append(idx_I)
mis_region_I.append(idx_I+1)
else:
if op == 'S':
diff_len -= number[idx]
else:
idx_I += number[idx]
if op == 'D':
diff_len += number[idx]
end_pos = start_pos + len(fields[9]) + diff_len
match_len = end_pos - start_pos
mis_region_S = []
recover_S_flag = False
if operate[0] == 'S':
left_S_len = min(number[0], start_pos-1)
if left_S_len < match_len/10: # if S len is not too long, we accept it as mismatch
mis_region_S = [pos for pos in range(start_pos-left_S_len,start_pos)]
start_pos -= left_S_len
operate[0] = 'M'
if left_S_len != number[0]:
operate = ['S'] + operate
number = [number[0]-left_S_len] + number
number[1] = left_S_len
recover_S_flag = True
if operate[-1] == 'S':
right_S_len = min(number[-1], len(cov_histogram)-end_pos)
if right_S_len < match_len/10: # if S len is not too long, we accept it as mismatch
mis_region_S += [pos for pos in range(end_pos,end_pos+right_S_len)]
end_pos += right_S_len
operate[-1] = 'M'
if right_S_len != number[-1]:
operate = operate + ['S']
number = number + [number[-1]-right_S_len]
number[-2] = right_S_len
recover_S_flag = True
if recover_S_flag:
cigar = ""
for cigar_id, element in enumerate(number):
cigar += str(element)
cigar += operate[cigar_id]
#print(read_name + '\t', start_pos, end_pos)
cov_histogram[start_pos:end_pos] += 1
mis_region = mis_region_MD + mis_region_I + mis_region_S
mis_region.sort()
edit_histogram[mis_region] += 1
# record the reads information
list_read_info.append((start_pos, end_pos, read_name, even_odd_flag, mis_region, cigar, read_SEQ))
if even_odd_flag == 1:
even_odd_flag = 2
else:
even_odd_flag = 1
return edit_histogram, cov_histogram, list_read_info
def haplotyping_link_graph(dict_link_graph, dict_var_weight, dict_link_outward, dict_link_inward, edit_region):
# sort the potential variants on the interested site, can only use these variants bases
list_pos_weight = []
print("Trimming the significant bases at interested site:")
print("Original site-base dict", dict_var_weight)
for key in sorted(dict_var_weight.keys()):
dict_part = dict_var_weight[key]
trim_dict(dict_part, 10)
list_pos_weight.append((key, sorted(dict_part.items(), key=lambda pair:pair[1], reverse=True)))
print("Final site-base list:", list_pos_weight)
eprint("#### max site-base variant #", max([len(ele[1]) for ele in list_pos_weight]))
if list_pos_weight == []:
print("There is no variant detected!")
return [], []
print("+++++++++++++++++++", "dict_link_graph", "+++++++++++++++++++")
for key in sorted(dict_link_graph.keys()):
print(key, dict_link_graph[key])
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
# initializing the haplotype list, the cursor, and the last_ext
haplotype_0 = [] # record the (position, base) pair of the haplotype
hap_cursor_0 = 0 # record the position got the linking information (still useless in this version)
break_flag_0 = False # the flag indicating of the haplotype is breaked
haplotype_1 = []
hap_cursor_1 = 0
break_flag_1 = False
pos_start_idx = 0
# find the first variant site with two variants
pos_start_idx, haplotype_0, haplotype_1, hap_cursor_0, hap_cursor_1 = find_double_pos(pos_start_idx, list_pos_weight, haplotype_0, haplotype_1, hap_cursor_0, hap_cursor_1)
# haplotyping from list_pos_weight:
for pos_idx in range(pos_start_idx, len(list_pos_weight)):
pos_weight = list_pos_weight[pos_idx]
position = pos_weight[0]
list_pos_base = pos_weight[1]
print("XXXXXXXXXXXXXX", position, "XXXXXXXXXXXXXXXX")
# deal with haplotype_0's outward link
dict_outward_0 = {}
if dict_link_outward.get(haplotype_0[hap_cursor_0]):
dict_outward_0 = dict_link_outward[haplotype_0[hap_cursor_0]]
trim_dict(dict_outward_0)
if position > get_farthest_ext(dict_outward_0, haplotype_0[hap_cursor_0]):
break_flag_0 = True
eprint("Haplotype 0 has a break at", haplotype_0[hap_cursor_0], "to", position)
print(dict_outward_0)
# deal with haplotype_1's outward link
print("--------------------")
dict_outward_1 = {}
if dict_link_outward.get(haplotype_1[hap_cursor_1]):
dict_outward_1 = dict_link_outward[haplotype_1[hap_cursor_1]]
trim_dict(dict_outward_1)
if position > get_farthest_ext(dict_outward_1, haplotype_1[hap_cursor_1]):
break_flag_1 = True
eprint("Haplotype 1 has a break at", haplotype_1[hap_cursor_1], "to", position)
print(dict_outward_1)
# deal with position's inward link
print("--------------------")
dict_inward_0 = {}
if dict_link_inward.get((position, list_pos_base[0][0])):
dict_inward_0 = dict_link_inward[(position, list_pos_base[0][0])]
trim_dict(dict_inward_0)
print(dict_inward_0)
#print(dict_link_graph[(position, list_pos_base[1][0])])
if len(list_pos_base) > 1:
print("--------------------")
dict_inward_1 = {}
if dict_link_inward.get((position, list_pos_base[1][0])):
dict_inward_1 = dict_link_inward[(position, list_pos_base[1][0])]
trim_dict(dict_inward_1)
print(dict_inward_1)
connect_info_0 = None
connect_info_1 = None
# There must be at least one kind of base in the position
for (outward_key, weight) in sorted(dict_outward_0.items(), key=lambda pair:pair[1], reverse=True):
if dict_inward_0.get(outward_key):
print("Potential Connect: ", outward_key, 0, 0)
connect_info_0 = (dict_outward_0[outward_key], (position, outward_key[1][1]))
break
for (outward_key, weight) in sorted(dict_outward_1.items(), key=lambda pair:pair[1], reverse=True):
if dict_inward_0.get(outward_key):
print("Potential Connect: ", outward_key, 1, 0)
connect_info_1 = (dict_outward_1[outward_key], (position, outward_key[1][1]))
break
# if there are two variants in the position
if len(list_pos_base) > 1:
for (outward_key, weight) in sorted(dict_outward_0.items(), key=lambda pair:pair[1], reverse=True):
if dict_inward_1.get(outward_key):
print("Potential Connect: ", outward_key, 0, 1)
if connect_info_0 == None or connect_info_0[0] < weight:
connect_info_0 = (dict_outward_0[outward_key], (position, outward_key[1][1]))
break
for (outward_key, weight) in sorted(dict_outward_1.items(), key=lambda pair:pair[1], reverse=True):
if dict_inward_1.get(outward_key):
print("Potential Connect: ", outward_key, 1, 1)
if connect_info_1 == None or connect_info_1[0] < weight:
connect_info_1 = (dict_outward_1[outward_key], (position, outward_key[1][1]))
break
# the case that two haplotypes may collapse into one
if connect_info_0 and connect_info_1:
if connect_info_0[1] == connect_info_1[1]: # two haplotypes are collapsed
record_info_0 = [connect_info_0[1]]
record_info_1 = [connect_info_1[1]]
for redouble_idx in range(pos_idx, len(list_pos_weight)):
rd_pos_weight = list_pos_weight[redouble_idx]
rd_position = rd_pos_weight[0]
rd_list_pos_base = rd_pos_weight[1]
if(len(rd_list_pos_base)) >= 2: # if there are two variants at the site
# call the potential connections
last_info_0 = haplotype_0[hap_cursor_0]
last_info_1 = haplotype_1[hap_cursor_1]
dict_info_0 = dict_link_graph[last_info_0]
dict_info_1 = dict_link_graph[last_info_1]
# connect them
rd_info_0 = None
rd_info_1 = None
for rd_link_info, rd_weight in sorted(dict_info_0.items(), key=lambda pair:pair[1], reverse=True):
variant_flag = False
for info_pair in rd_link_info[1]:
tmp_rd_info = []
if info_pair == connect_info_0[1]:
variant_flag = True
tmp_rd_info.append(info_pair)
if variant_flag:
tmp_rd_info.append(info_pair)
if info_pair[0] == rd_position:
rd_info_0 = tmp_rd_info
break
if rd_info_0:
break
for rd_link_info, rd_weight in sorted(dict_info_1.items(), key=lambda pair:pair[1], reverse=True):
for info_pair in rd_link_info[1]:
tmp_rd_info = []
if info_pair == connect_info_1[1]:
variant_flag = True
tmp_rd_info.append(info_pair)
if variant_flag:
tmp_rd_info.append(info_pair)
if info_pair[0] == rd_position:
rd_info_1 = tmp_rd_info
break
if rd_info_1:
break
print("connect_info_0", record_info_0)
print("connect_info_1", record_info_1)
print("rd_info_0", rd_info_0)
print("rd_info_1", rd_info_1)
if rd_info_0:
record_info_0 += rd_info_0
                        if rd_info_1:
                            record_info_1 += rd_info_1
I1Ii111 . II111iiii
i11 = threading . Timer ( 60 , oo0 ,
[ II1iII1i , iiI1iIiI ] )
i11 . start ( )
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
threading . Timer ( lisp . LISP_DEFAULT_DYN_EID_TIMEOUT ,
OO0oOoOO0oOO0 , [ oO0oIIII ] ) . start ( )
return ( True )
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
def o0ooooO0o0O ( ) :
iiIi11iI1iii = open ( "./lisp.config" , "r" )
if 67 - 67: O0 / I1Ii111
OOO0000oO = False
iI1i111I1Ii = 0
for i11i1ii1I in iiIi11iI1iii :
if ( i11i1ii1I == "lisp database-mapping {\n" ) : OOO0000oO = True
if ( i11i1ii1I == "}\n" ) : OOO0000oO = False
if ( OOO0000oO == False ) : continue
if ( i11i1ii1I [ 0 ] == " " and i11i1ii1I . find ( "prefix {" ) != - 1 ) : iI1i111I1Ii += 1
if 88 - 88: I11i % I1ii11iIi11i
iiIi11iI1iii . close ( )
return ( iI1i111I1Ii )
if 48 - 48: ooOoO0o / I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / i1IIi
if 92 - 92: Oo0Ooo % Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
def O00oOOooo ( ) :
if 50 - 50: I1ii11iIi11i % O0 * o0oOOo0O0Ooo
if 5 - 5: IiII * OoOoOO00
if 5 - 5: I1Ii111
if 90 - 90: I1Ii111 . ooOoO0o / Ii1I - I11i
if 40 - 40: OoooooooOO
iI1i111I1Ii = o0ooooO0o0O ( )
if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
if 84 - 84: ooOoO0o % Ii1I + i11iIiiIii
if 28 - 28: Oo0Ooo + OoO0O00 * OOooOOo % oO0o . I11i % O0
if 16 - 16: I11i - iIii1I11I1II1 / I1IiiI . II111iiii + iIii1I11I1II1
if 19 - 19: OoO0O00 - Oo0Ooo . O0
if 60 - 60: II111iiii + Oo0Ooo
I1IiIiiIiIII = os . getenv ( "LISP_ITR_WAIT_TIME" )
I1IiIiiIiIII = 1 if ( I1IiIiiIiIII == None ) else int ( I1IiIiiIiIII )
if 8 - 8: oO0o / I1ii11iIi11i
if 20 - 20: I1IiiI
if 95 - 95: iII111i - I1IiiI
if 34 - 34: ooOoO0o * I1IiiI . i1IIi * ooOoO0o / ooOoO0o
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
while ( iI1i111I1Ii != len ( lisp . lisp_db_list ) ) :
lisp . lprint ( ( "Waiting {} second(s) for {} database-mapping EID-" + "prefixes, {} processed so far ..." ) . format ( I1IiIiiIiIII , iI1i111I1Ii ,
# I1ii11iIi11i % OoOoOO00 * OoO0O00 % II111iiii
len ( lisp . lisp_db_list ) ) )
time . sleep ( I1IiIiiIiIII )
if 70 - 70: OoO0O00 % oO0o + OOooOOo / Ii1I % O0
if 100 - 100: o0oOOo0O0Ooo + OOooOOo * o0oOOo0O0Ooo
if 80 - 80: o0oOOo0O0Ooo * O0 - Ii1I
if 66 - 66: i11iIiiIii - OOooOOo * Oo0Ooo
if 76 - 76: i11iIiiIii + o0oOOo0O0Ooo / I1ii11iIi11i - OoO0O00 - Ii1I + I1ii11iIi11i
if 51 - 51: iIii1I11I1II1 . ooOoO0o + iIii1I11I1II1
oOoOO = [ ]
Ii1i1 = [ ]
for Oo0ooOo0o in lisp . lisp_db_list :
if ( Oo0ooOo0o . eid . is_ipv4 ( ) or Oo0ooOo0o . eid . is_ipv6 ( ) or Oo0ooOo0o . eid . is_mac ( ) ) :
oOOo0oOo0 = Oo0ooOo0o . eid . print_prefix_no_iid ( )
if ( Oo0ooOo0o . dynamic_eid_configured ( ) ) : Ii1i1 . append ( oOOo0oOo0 )
oOoOO . append ( oOOo0oOo0 )
if 65 - 65: ooOoO0o . OoooooooOO / I1ii11iIi11i . i1IIi * OoO0O00
if 19 - 19: i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
return ( oOoOO , Ii1i1 )
if 21 - 21: O0 % IiII . I1IiiI / II111iiii + IiII
if 53 - 53: oO0o - I1IiiI - oO0o * iII111i
if 71 - 71: O0 - iIii1I11I1II1
if 12 - 12: OOooOOo / o0oOOo0O0Ooo
if 42 - 42: Oo0Ooo
if 19 - 19: oO0o % I1ii11iIi11i * iIii1I11I1II1 + I1IiiI
if 46 - 46: Oo0Ooo
if 1 - 1: iII111i
def OoOOo0OOoO ( ) :
global i1iIIi1
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
lisp . lisp_set_exception ( )
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
if 46 - 46: o0oOOo0O0Ooo % iIii1I11I1II1 . iII111i % iII111i + i11iIiiIii
if 72 - 72: iIii1I11I1II1 * Ii1I % ooOoO0o / OoO0O00
if 35 - 35: ooOoO0o + i1IIi % I1ii11iIi11i % I11i + oO0o
if 17 - 17: i1IIi
oOoOO , Ii1i1 = O00oOOooo ( )
if 21 - 21: Oo0Ooo
if 29 - 29: I11i / II111iiii / ooOoO0o * OOooOOo
if 10 - 10: I1Ii111 % IiII * IiII . I11i / Ii1I % OOooOOo
if 49 - 49: OoO0O00 / oO0o + O0 * o0oOOo0O0Ooo
if 28 - 28: ooOoO0o + i11iIiiIii / I11i % OoOoOO00 % Oo0Ooo - O0
if 54 - 54: i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
if 5 - 5: Ii1I
if 46 - 46: IiII
ii1iIi1iIiI1i = None
if ( lisp . lisp_ipc_data_plane ) :
lisp . lprint ( lisp . bold ( "Data-plane packet capture disabled" , False ) )
ii1iIi1iIiI1i = "(udp src port 4342 and ip[28] == 0x28)" + " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
if 40 - 40: i1IIi % OOooOOo
if 71 - 71: OoOoOO00
lisp . lprint ( "Control-plane capture: '{}'" . format ( ii1iIi1iIiI1i ) )
else :
lisp . lprint ( "Capturing packets for source-EIDs {}" . format ( lisp . green ( str ( oOoOO ) , False ) ) )
if 14 - 14: i11iIiiIii % OOooOOo
if 82 - 82: iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % IiII / Ii1I . Ii1I
if ( lisp . lisp_pitr ) : lisp . lprint ( "Configured for PITR functionality" )
if 14 - 14: o0oOOo0O0Ooo . OOooOOo . I11i + OoooooooOO - OOooOOo + IiII
if 9 - 9: Ii1I
if 59 - 59: I1IiiI * II111iiii . O0
if 56 - 56: Ii1I - iII111i % I1IiiI - o0oOOo0O0Ooo
if 51 - 51: O0 / ooOoO0o * iIii1I11I1II1 + I1ii11iIi11i + o0oOOo0O0Ooo
if 98 - 98: iIii1I11I1II1 * I1ii11iIi11i * OOooOOo + ooOoO0o % i11iIiiIii % O0
i1 = lisp . lisp_l2_overlay
if ( i1 == False ) :
if ( lisp . lisp_is_linux ( ) ) : OO0oOOoo ( oOoOO , Ii1i1 )
if 52 - 52: o0oOOo0O0Ooo % Oo0Ooo
if 64 - 64: O0 % I11i % O0 * OoO0O00 . oO0o + I1IiiI
if 75 - 75: I11i . OoooooooOO % o0oOOo0O0Ooo * I11i % OoooooooOO
<filename>radical_translations/core/models.py
from typing import Dict, List, Optional
from django.conf import settings
from django.db import models
from django.db.models.query import QuerySet
from model_utils.models import TimeStampedModel
from controlled_vocabulary.models import (
ControlledTerm,
ControlledTermField,
ControlledTermsField,
)
from controlled_vocabulary.utils import search_term_or_none
from geonames_place.models import Place
from radical_translations.agents.models import Agent, Organisation, Person
from radical_translations.utils.models import (
Date,
EditorialClassificationModel,
date_to_dict,
get_controlled_terms_str,
get_geonames_place_from_gsx_place,
get_gsx_entry_value,
place_to_dict_value,
)
csv_field_sep = settings.EXPORT_FIELD_SEPARATOR
csv_multi_sep = settings.EXPORT_MULTIVALUE_SEPARATOR
# These models are based on the BIBFRAME 2.0 Model
# https://www.loc.gov/bibframe/docs/bibframe2-model.html
class Title(TimeStampedModel):
"""Title information relating to a resource: work title, preferred title, instance
title, transcribed title, translated title, variant form of title, etc."""
main_title = models.CharField(
max_length=768, help_text="Title being addressed. Possible title component."
)
subtitle = models.CharField(
max_length=256,
blank=True,
default="",
help_text=(
"Word, character, or group of words and/or characters that contains the "
"remainder of the title after the main title. Possible title component."
),
)
class Meta:
ordering = ["main_title", "subtitle"]
unique_together = ["main_title", "subtitle"]
def __str__(self) -> str:
if self.subtitle:
return f"{self.main_title}: {self.subtitle}"
return self.main_title
def to_dict(self) -> Dict:
return {"title.main_title": self.main_title, "title.subtitle": self.subtitle}
@staticmethod
def get_or_create(
title: str, subtitle: str = "", increment: bool = True
) -> Optional["Title"]:
"""Gets or creates a new title object. If `increment` is True and if the `title`
is Untitled or Translation, it will automatically add a counter to the
`main_title`."""
if not title:
return None
if subtitle is None:
subtitle = ""
if increment:
title_lower = title.lower()
if title_lower in ["untitled", "translation"]:
subtitle = (
Title.objects.filter(main_title__iexact=title_lower).count() + 1
)
title, _ = Title.objects.get_or_create(main_title=title, subtitle=subtitle)
return title
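# Illustrative sketch (not part of the original module; assumes a configured Django
# project with migrations applied): because the incrementing rule above stores a
# counter in the subtitle for placeholder titles, repeated calls behave roughly as:
#
#   t1 = Title.get_or_create("Untitled")    # str(t1) -> "Untitled: 1"
#   t2 = Title.get_or_create("Untitled")    # str(t2) -> "Untitled: 2"
#   t3 = Title.get_or_create("Rights of Man", subtitle="a translation")
#
# Non-placeholder titles keep whatever subtitle is passed in.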
class Resource(TimeStampedModel):
"""Resource reflecting a conceptual essence of a cataloging resource."""
_is_paratext = models.BooleanField(default=False, editable=False)
title = models.ForeignKey(
Title,
on_delete=models.CASCADE,
related_name="resources",
help_text=(
"Title information relating to a resource: work title, preferred title, "
"instance title, transcribed title, translated title, variant form of "
"title, etc."
),
)
title_variant = models.ForeignKey(
Title,
blank=True,
null=True,
on_delete=models.CASCADE,
related_name="variant_of",
help_text=(
"Title associated with the resource that is different from the Work or "
"Instance title (titles in another language and/or script etc.)."
),
)
subjects = ControlledTermsField(
["fast-forms", "fast-topic", "rt-agt", "wikidata"],
blank=True,
related_name="resources",
help_text="Subject term(s) describing a resource",
)
date = models.OneToOneField(
Date,
blank=True,
null=True,
on_delete=models.CASCADE,
help_text=(
"Date designation associated with a resource or element of description, "
"such as date of title variation; year a degree was awarded; date "
"associated with the publication, printing, distribution, issue, release "
"or production of a resource. May be date typed."
),
)
edition_enumeration = models.CharField(
max_length=128,
blank=True,
null=True,
help_text="Enumeration of the edition; usually transcribed.",
)
summary = models.TextField(
blank=True,
null=True,
help_text=(
"Description of the content of a resource, such as an abstract, "
"summary, citation, etc."
),
)
held_by = models.ManyToManyField(
Agent,
blank=True,
related_name="resources",
help_text="Entity holding the item or from which it is available.",
)
electronic_locator = models.URLField(
max_length=1280,
blank=True,
null=True,
help_text="Electronic location from which the resource is available.",
)
notes = models.TextField(
blank=True,
null=True,
help_text=(
"Information, usually in textual form, on attributes of a resource or some "
"aspect of a resource."
),
)
class Meta:
ordering = ["title"]
unique_together = ["title", "date", "_is_paratext"]
def __str__(self) -> str:
title = str(self.title)
if self.date:
title = f"{title} ({str(self.date)})"
if self.is_paratext():
title = f"[paratext] {title}"
return title
def get_authors(self) -> str:
role = "author"
if self.is_translation():
role = "translator"
return "; ".join(
[c.agent.name for c in self.contributions.filter(roles__label=role)]
)
get_authors.short_description = "Authors/translators" # type: ignore
def get_authors_source_text(self) -> Optional[List[Agent]]:
if self.is_original():
return None
authors = []
for rel in self.relationships.filter(
relationship_type__label__in=["derivative of", "translation of"]
):
resource = rel.related_to
authors.extend(
[
c.agent
for c in resource.contributions.filter(
roles__label__in=["author", "translator"]
)
]
)
return list(set(authors))
def get_classification_edition(self) -> str:
return "; ".join([c.edition.label for c in self.classifications.all()])
get_classification_edition.short_description = "Edition" # type: ignore
def get_contributions(
self, include_paratext: bool = False
) -> Optional[List["Contribution"]]:
contributions = []
for role in settings.CONTRIBUTION_MAIN_ROLES:
contributions.extend(self.get_contributions_by_role(role))
for role in settings.CONTRIBUTION_MAIN_ROLES:
contributions.extend(
self.get_contributions_by_role(
role, include_resource=False, include_paratext=include_paratext
)
)
for role in settings.CONTRIBUTION_OTHER_ROLES:
contributions.extend(
self.get_contributions_by_role(role, include_paratext=include_paratext)
)
# remove duplicate contributions keeping the order
contributions = list(dict.fromkeys(contributions).keys())
return contributions
def get_contributions_by_role(
self, role: str, include_resource: bool = True, include_paratext: bool = False
) -> Optional[List["Contribution"]]:
contributions = []
if include_resource:
contributions = list(self.contributions.filter(roles__label=role))
if include_paratext:
for relationship in self.get_paratext():
contributions.extend(
relationship.resource.get_contributions_by_role(
role, include_paratext
)
)
return contributions
def get_language_names(self) -> str:
return "; ".join([rl.language.label for rl in self.languages.all()])
get_language_names.short_description = "Languages" # type: ignore
def get_place_names(self) -> str:
return "; ".join([str(rp) for rp in self.places.all()])
get_place_names.short_description = "Places" # type: ignore
def get_radical_markers_range(self) -> List[int]:
return range(0, self.get_radical_markers())
def get_radical_markers(self) -> int:
markers = 0
if not self.is_paratext() and self.has_date_radical():
markers = markers + 1
markers = markers + self._get_radical_markers()
return markers
def _get_radical_markers(self) -> int:
markers = 0
for subject in self.subjects.all():
if self._is_radical_label(subject.label):
markers = markers + 1
for classification in self.classifications.all():
for tag in classification.classification.all():
if self._is_radical_label(tag.label):
markers = markers + 1
for contribution in self.contributions.all():
for classification in contribution.classification.all():
if self._is_radical_label(classification.label):
markers = markers + 1
for relationship in self.get_paratext():
markers = markers + relationship.resource._get_radical_markers()
return markers
def _is_radical_label(self, label):
return "radical" in label
def get_related_resources(self):
return self.related_to.order_by(
"resource__date", "relationship_type", "resource__title"
)
def get_subjects_topic(self) -> List[ControlledTerm]:
return self.subjects.filter(vocabulary__prefix="fast-topic").order_by("label")
def get_subjects_other(self) -> List[ControlledTerm]:
return (
self.subjects.exclude(vocabulary__prefix="fast-topic")
.exclude(label="radicalism")
.order_by("label")
)
def has_date_radical(self) -> bool:
if self.date:
return self.date.is_radical
return False
def is_original(self) -> bool:
return any(
term in self.get_classification_edition().lower()
for term in ["original", "source-text"]
)
is_original.boolean = True # type: ignore
is_original.short_description = "Is source text"
def is_paratext(self) -> bool:
return (
self.relationships.filter(relationship_type__label="paratext of").count()
> 0
)
is_paratext.boolean = True # type: ignore
def is_radical(self) -> bool:
return self.subjects.filter(label__iexact="radicalism").count() == 1
def is_translation(self) -> bool:
return not self.is_original() and (
self.relationships.filter(relationship_type__label="translation of").count()
> 0
or self.classifications.filter(
edition__label__contains="translation"
).count()
> 0
or self.relationships.filter(
relationship_type__label="other edition"
).count()
> 0
)
is_translation.boolean = True # type: ignore
def get_paratext(self) -> QuerySet:
return self.related_to.filter(relationship_type__label="paratext of")
def paratext_of(self) -> Optional["Resource"]:
        if not self.is_paratext():
return None
relationship = self.relationships.filter(
relationship_type__label="paratext of"
).first()
if relationship:
return relationship.related_to
return None
def get_date(self) -> Optional[Date]:
        if self.is_paratext() and not self.date:
relationship = self.relationships.filter(
relationship_type__label="paratext of"
).first()
if relationship:
return relationship.related_to.date
return self.date
def get_labels(self) -> Optional[List[str]]:
labels = []
if self.is_translation():
labels.append("translation")
if self.related_to.filter(relationship_type__label="translation of"):
labels.append("has translation")
if self.is_paratext():
labels.append("paratext")
if self.get_paratext():
labels.append("has paratext")
if self.related_to.filter(relationship_type__label="other edition"):
labels.append("has other edition")
return labels
def to_dict(self) -> Dict:
return {
"id": self.id,
**self.title.to_dict(),
**date_to_dict(self.get_date()),
"subjects.topics": get_controlled_terms_str(self.get_subjects_topic()),
"subjects.form_genre": get_controlled_terms_str(self.get_subjects_other()),
"edition_enumeration": self.edition_enumeration,
"classifications": f"{csv_multi_sep} ".join(
[c.to_dict_value() for c in self.classifications.all()]
),
"contributions": f"{csv_multi_sep} ".join(
[c.to_dict_value() for c in self.contributions.all()]
),
"languages": f"{csv_multi_sep} ".join(
[str(lang) for lang in self.languages.all()]
),
"places": f"{csv_multi_sep} ".join(
[rp.to_dict_value() for rp in self.places.all()]
),
"relationships": f"{csv_multi_sep} ".join(
[r.to_dict_value() for r in self.relationships.all()]
),
"held_by": f"{csv_multi_sep} ".join(
[lib.to_dict_value() for lib in self.held_by.all()]
),
"electronic_locator": self.electronic_locator,
"summary": self.summary,
"notes": self.notes,
}
def to_dict_value(self) -> str:
return f"{self.id}{csv_field_sep}{self.title}"
@staticmethod
def from_gsx_entry(entry: Dict[str, Dict[str, str]]) -> Optional["Resource"]:
"""Gets or creates a new `Resource` from a Google Spreadsheet dictionary
`entry`."""
if not entry:
return None
main_title = get_gsx_entry_value(entry, "title")
if not main_title:
return None
title = Title.get_or_create(main_title)
date_display = get_gsx_entry_value(entry, "year")
if date_display:
resource, _ = Resource.objects.get_or_create(
_is_paratext=False, title=title, date__date_display=date_display
)
date = Date.from_date_display(date_display)
resource.date = date
else:
resource, _ = Resource.objects.get_or_create(
_is_paratext=False, title=title
)
Contribution.from_gsx_entry(resource, entry, "authors", "author")
Resource.languages_from_gsx_entry(resource, entry)
Resource.subjects_from_gsx_entry(resource, entry)
Classification.get_or_create(resource, get_gsx_entry_value(entry, "status"))
value = get_gsx_entry_value(entry, "editionnumber")
if value:
resource.edition_enumeration = value
value = get_gsx_entry_value(entry, "location")
if value:
for name in value.split("; "):
place = get_geonames_place_from_gsx_place(name)
if place:
ResourcePlace.objects.get_or_create(resource=resource, place=place)
Contribution.from_gsx_entry(resource, entry, "organisation", "publisher")
value = get_gsx_entry_value(entry, "notes")
if value:
resource.notes = value
Resource.paratext_from_gsx_entry(entry, resource)
libraries = get_gsx_entry_value(entry, "libraries")
if libraries:
for library in libraries.split("; "):
library = library.strip()
if library:
org, _ = Organisation.objects.get_or_create(name=library)
resource.held_by.add(org)
url = get_gsx_entry_value(entry, "url")
if url:
resource.electronic_locator = url
resource.save()
return resource
@staticmethod
def languages_from_gsx_entry(
resource: "Resource", entry: Dict[str, Dict[str, str]]
) -> Optional[List[Optional[ControlledTerm]]]:
"""Adds languages, from a Google Spreadsheet dictionary `entry`, to the
`resource`."""
if not resource or not entry:
return None
names = get_gsx_entry_value(entry, "language")
if not names:
return None
languages = []
for name in | |
that the record is enabled.
dns_name: The name for this shared record in punycode format.
extattrs: Extensible attributes associated with the object.For valid
values for extensible attributes, see the following information.
name: Name for this shared record. This value can be in unicode
format.
shared_record_group: The name of the shared record group in which
the record resides.
text: Text associated with the shared record. It can contain up to
            255 bytes per substring and up to a total of 512 bytes. To enter
leading, trailing or embedded spaces in the text, add quotes ("
") around the text to preserve the spaces.
ttl: The Time To Live (TTL) value for this shared record. A 32-bit
unsigned integer that represents the duration, in seconds, for
which the shared record is valid (cached). Zero indicates that
the shared record should not be cached.
use_ttl: Use flag for: ttl
"""
_infoblox_type = 'sharedrecord:txt'
_fields = ['comment', 'disable', 'dns_name', 'extattrs', 'name',
'shared_record_group', 'text', 'ttl', 'use_ttl']
_search_for_update_fields = ['name', 'text']
_updateable_search_fields = ['comment', 'name', 'text']
_all_searchable_fields = ['comment', 'name', 'text']
_return_fields = ['extattrs', 'name', 'shared_record_group', 'text']
_remap = {}
_shadow_fields = ['_ref']
class Sharedrecordgroup(InfobloxObject):
""" Sharedrecordgroup: DNS Shared Record Group object.
Corresponds to WAPI object 'sharedrecordgroup'
A shared record group (SRG) is created to contain DNS shared records
and share them between different zones. For example, if a group of
DNS records needs to be in three different zones, you can include
the records in a shared record group and assign the group to the
three zones. For more information about shared record groups and
    shared records, please refer to the Infoblox Administrator Guide.
Fields:
comment: The descriptive comment of this shared record group.
extattrs: Extensible attributes associated with the object.For valid
values for extensible attributes, see the following information.
name: The name of this shared record group.
record_name_policy: The record name policy of this shared record
group.
use_record_name_policy: Use flag for: record_name_policy
zone_associations: The list of zones associated with this shared
record group.
"""
_infoblox_type = 'sharedrecordgroup'
_fields = ['comment', 'extattrs', 'name', 'record_name_policy',
'use_record_name_policy', 'zone_associations']
_search_for_update_fields = ['name']
_updateable_search_fields = ['comment', 'name']
_all_searchable_fields = ['comment', 'name']
_return_fields = ['comment', 'extattrs', 'name']
_remap = {}
_shadow_fields = ['_ref']
class SmartfolderChildren(InfobloxObject):
""" SmartfolderChildren: Smart Folder children object.
Corresponds to WAPI object 'smartfolder:children'
The Smart Folder children object is used to read the objects that
are associated with either a Smart Folder (global or personal) or a
set of queries that users can make without saving a Smart Folder
object on the appliance.
The Smart Folder children object can be used for both "filtering"
and "grouping" the results of Smart Folder associated objects.
Fields:
        resource: The object returned by the Smart Folder query.
value: The value returned by the Smart Folder query.
value_type: The type of the returned value.
"""
_infoblox_type = 'smartfolder:children'
_fields = ['resource', 'value', 'value_type']
_search_for_update_fields = []
_updateable_search_fields = []
_all_searchable_fields = []
_return_fields = ['resource', 'value', 'value_type']
_remap = {}
_shadow_fields = ['_ref']
class SmartfolderGlobal(InfobloxObject):
""" SmartfolderGlobal: Global Smart Folder object.
Corresponds to WAPI object 'smartfolder:global'
Smart Folders are used to organize your core network services data.
Depending on your administrative roles and business needs, you can
filter your data object types, names, extensible attributes and
discovered data and then place the filtered results in a Smart
Folder.
The global Smart Folders are created to be shared among
administrators.
Fields:
comment: The global Smart Folder descriptive comment.
group_bys: Global Smart Folder grouping rules.
name: The global Smart Folder name.
query_items: The global Smart Folder filter queries.
"""
_infoblox_type = 'smartfolder:global'
_fields = ['comment', 'group_bys', 'name', 'query_items']
_search_for_update_fields = ['name']
_updateable_search_fields = ['comment', 'name']
_all_searchable_fields = ['comment', 'name']
_return_fields = ['comment', 'name']
_remap = {}
_shadow_fields = ['_ref']
_custom_field_processing = {
'group_bys': SmartfolderGroupby.from_dict,
'query_items': SmartfolderQueryitem.from_dict,
}
def save_as(self, *args, **kwargs):
return self._call_func("save_as", *args, **kwargs)
class SmartfolderPersonal(InfobloxObject):
""" SmartfolderPersonal: Personal Smart Folder object.
Corresponds to WAPI object 'smartfolder:personal'
Smart Folders are used to organize your core network services data.
Depending on your administrative roles and business needs, you can
filter your data object types, names, extensible attributes and
discovered data and then place the filtered results in a Smart
Folder.
    A personal Smart Folder is available only to the administrator who
    created it.
Fields:
comment: The personal Smart Folder descriptive comment.
        group_bys: The personal Smart Folder grouping rules.
is_shortcut: Determines whether the personal Smart Folder is a
shortcut.
name: The personal Smart Folder name.
query_items: The personal Smart Folder filter queries.
"""
_infoblox_type = 'smartfolder:personal'
_fields = ['comment', 'group_bys', 'is_shortcut', 'name', 'query_items']
_search_for_update_fields = ['is_shortcut', 'name']
_updateable_search_fields = ['comment', 'name']
_all_searchable_fields = ['comment', 'is_shortcut', 'name']
_return_fields = ['comment', 'is_shortcut', 'name']
_remap = {}
_shadow_fields = ['_ref']
_custom_field_processing = {
'group_bys': SmartfolderGroupby.from_dict,
'query_items': SmartfolderQueryitem.from_dict,
}
def save_as(self, *args, **kwargs):
return self._call_func("save_as", *args, **kwargs)
class Snmpuser(InfobloxObject):
""" Snmpuser: SNMP user object.
Corresponds to WAPI object 'snmpuser'
This object contains information related to SNMPv3 users.
Fields:
authentication_password: Determines an authentication password for
the user. This is a write-only attribute.
authentication_protocol: The authentication protocol to be used for
this user.
comment: A descriptive comment for the SNMPv3 User.
disable: Determines if SNMPv3 user is disabled or not.
extattrs: Extensible attributes associated with the object.For valid
values for extensible attributes, see the following information.
name: The name of the user.
privacy_password: Determines a password for the privacy protocol.
privacy_protocol: The privacy protocol to be used for this user.
"""
_infoblox_type = 'snmpuser'
_fields = ['authentication_password', 'authentication_protocol', 'comment',
'disable', 'extattrs', 'name', 'privacy_password',
'privacy_protocol']
_search_for_update_fields = ['name']
_updateable_search_fields = ['comment', 'name']
_all_searchable_fields = ['comment', 'name']
_return_fields = ['comment', 'extattrs', 'name']
_remap = {}
_shadow_fields = ['_ref']
class Superhost(InfobloxObject):
""" Superhost: SuperHost object.
Corresponds to WAPI object 'superhost'
    The collection of correlated IPAM records related to a single device.
Fields:
comment: The comment for Super Host.
        delete_associated_objects: True to delete all DNS/DHCP objects
            associated with the Super Host; false by default.
dhcp_associated_objects: A list of DHCP objects refs which are
associated with Super Host.
        disabled: Disable all DNS/DHCP objects associated with the Super Host
            if True; False by default.
dns_associated_objects: A list of object refs of the DNS resource
records which are associated with Super Host.
extattrs: Extensible attributes associated with the object.For valid
values for extensible attributes, see the following information.
name: Name of the Superhost.
"""
_infoblox_type = 'superhost'
_fields = ['comment', 'delete_associated_objects',
'dhcp_associated_objects', 'disabled', 'dns_associated_objects',
'extattrs', 'name']
_search_for_update_fields = ['name']
_updateable_search_fields = ['comment', 'disabled', 'name']
_all_searchable_fields = ['comment', 'disabled', 'name']
_return_fields = ['comment', 'extattrs', 'name']
_remap = {}
_shadow_fields = ['_ref']
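# Illustrative usage sketch (assumption, not part of the original module): if the
# InfobloxObject base class exposes the usual infoblox-client classmethods
# create(connector, **fields) and search(connector, **filters), a Super Host record
# could be managed roughly like this (host address and credentials are made up):
#
#   from infoblox_client import connector
#   conn = connector.Connector({'host': '192.0.2.10',
#                               'username': 'admin', 'password': 'secret'})
#   sh = Superhost.create(conn, name='core-router-1', comment='Core router records')
#   hit = Superhost.search(conn, name='core-router-1')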
class Superhostchild(InfobloxObject):
""" Superhostchild: Super Host Child object.
Corresponds to WAPI object 'superhostchild'
The superhostchild object is a read-only synthetic object used to
retrieve records assigned to superhost.
Since this is a synthetic object, it supports reading only by
specifying search parameters, not by reference.
Fields:
associated_object: The record object, if supported by the WAPI.
Otherwise, the value is "None".
comment: The record comment.
creation_timestamp: Time at which DNS RR was created.
data: Specific data of DNS/DHCP records.
disabled: True if the child DNS/DHCP object is disabled.
name: Name of the associated DNS/DHCP object.
network_view: The name of the network view in which this network
record resides.
parent: Name of the Super Host object in which record resides.
record_parent: Name of a parent zone/network.
type: The record type. When searching for an unspecified record
type, the search is performed for all records.
view: Name of the DNS View in which the record resides.
"""
_infoblox_type = 'superhostchild'
_fields = ['associated_object', 'comment', 'creation_timestamp', 'data',
'disabled', 'name', 'network_view', 'parent', 'record_parent',
'type', 'view']
_search_for_update_fields = ['data', 'name', 'network_view',
'parent', 'record_parent', 'type', 'view']
_updateable_search_fields = []
_all_searchable_fields = ['comment', 'creation_timestamp', 'data', 'name',
'network_view', 'parent', 'record_parent',
'type', 'view']
_return_fields = ['comment', 'data', 'name', 'network_view', 'parent',
'record_parent', 'type', 'view']
_remap = {}
    _shadow_fields = ['_ref']
import re
from base64 import b64encode, b64decode
from datetime import datetime
from typing import Dict, Any, Optional, Sequence, List
from uuid import uuid4
import time
from authlib.common.security import generate_token
from fastapi import HTTPException
from pydantic import BaseModel
from pydantic.datetime_parse import parse_datetime, parse_date
from pyisemail import is_email
from unidecode import unidecode
from user_manager.common.config import config
from user_manager.common.models import DbUser, DbUserPasswordAccessToken, UserPropertyType, DbUserHistory, DbChange, \
DbManagerSchema, DbUserProperty
from user_manager.common.mongo import async_user_collection, \
async_client_user_cache_collection, async_authorization_code_collection, async_session_collection, \
async_token_collection, async_user_group_collection, user_collection, async_user_history_collection, \
async_read_schema
from user_manager.common.password_helper import verify_and_update, create_password, PasswordLeakedException
from user_manager.manager.helper import get_regex, DotDict
from user_manager.manager.mailer import mailer
class ValidateAccessToken(BaseModel):
id: Optional[str] = None
description: str
token: Optional[str] = None
replace_dot_re = re.compile(r'\b[\s]+\b')
remove_re = re.compile(r'[^a-z0-9.-]')
def normalize_username(display_name: str) -> str:
if config.oauth2.use_german_username_translation:
display_name = display_name.replace('ä', 'ae').replace('ö', 'oe').replace('ü', 'ue')
username = unidecode(display_name).lower()
username = replace_dot_re.sub('.', username)
username = remove_re.sub('', username)
return username
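# Illustrative sketch (not part of the original module): with the rules above and
# use_german_username_translation enabled, a display name is transliterated and
# lower-cased, inner whitespace is collapsed to dots and disallowed characters are
# dropped, e.g.
#
#   normalize_username("Jörg  Müller")      # -> "joerg.mueller"
#   normalize_username("Émilie O'Connor")   # -> "emilie.oconnor"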
async def async_send_mail_register(
user_data: DotDict,
schema: DbManagerSchema,
token_valid_until: int,
locale: str = None,
tz: datetime.tzinfo = None,
author_id: str = None,
history_entry: DbUserHistory = None,
):
if tz is None:
tz = schema.get_tz(user_data.get('zoneinfo'))
if locale is None:
        locale = user_data.get('locale', 'en_us')
await mailer.async_send_mail(
locale,
'register',
user_data['email'],
{
'registration_link': f"register/{user_data['registration_token']}",
'valid_until': datetime.fromtimestamp(token_valid_until, tz),
'user': user_data,
},
)
if history_entry is None:
await async_user_history_collection.insert_one(
DbUserHistory(
id=str(uuid4()),
user_id=user_data['_id'] if author_id is None else author_id,
timestamp=datetime.utcnow(),
author_id=user_data['_id'],
changes=[
DbChange(property='email', value="Sent Registration E-Mail"),
],
).dict(by_alias=True, exclude_none=True)
)
else:
history_entry.changes.append(DbChange(property='email', value="Sent Registration E-Mail"))
async def async_send_mail_verify(
locale: Optional[str],
mail: str,
user_data: DotDict,
token_valid_until: int,
tz: datetime.tzinfo,
author_id: str = None,
history_entry: DbUserHistory = None,
):
if locale is None:
        locale = user_data.get('locale', 'en_us')
await mailer.async_send_mail(
locale,
'verify_mail',
mail,
{
'verify_link': f"verify-email/{user_data['email_verification_token']}",
'valid_until': datetime.fromtimestamp(token_valid_until, tz),
'user': user_data,
},
)
if history_entry is None:
await async_user_history_collection.insert_one(
DbUserHistory(
id=str(uuid4()),
user_id=user_data['_id'] if author_id is None else author_id,
timestamp=datetime.utcnow(),
author_id=user_data['_id'],
changes=[
DbChange(property='email', value="Sent E-Mail Verification E-Mail"),
],
).dict(by_alias=True, exclude_none=True)
)
else:
history_entry.changes.append(
DbChange(property='email', value="Sent E-Mail Verification E-Mail")
)
async def async_send_mail_reset_password(
user_data: DotDict,
schema: DbManagerSchema,
token_valid_until: int,
tz: datetime.tzinfo = None,
author_id: str = None,
):
if tz is None:
tz = schema.get_tz(user_data.get('zoneinfo'))
await mailer.async_send_mail(
user_data.get('locale', 'en_us'),
'password_reset',
user_data['email'],
{
'password_reset_link': f"reset-password/{user_data['password_reset_token']}",
'valid_until': datetime.fromtimestamp(token_valid_until, tz),
'user': user_data,
},
)
await async_user_history_collection.insert_one(
DbUserHistory(
id=str(uuid4()),
user_id=user_data['_id'] if author_id is None else author_id,
timestamp=datetime.utcnow(),
author_id=user_data['_id'],
changes=[
DbChange(property='email', value="Sent Reset Password E-Mail"),
],
).dict(by_alias=True, exclude_none=True)
)
def create_token(data: str, valid_until: int):
return (
b64encode(data.encode()).decode('utf-8').replace('/', '_').replace('=', '') +
'-' + generate_token(48) + '-' + str(valid_until)
)
def check_token(token: str) -> str:
"""Checks the token for validity and returns the associated data"""
token_parts = token.split('-')
if len(token_parts) != 3:
raise HTTPException(400, "Token invalid")
data_b64, token, valid_until_raw = token_parts
try:
        data_b64 = data_b64.replace('_', '/')
if len(data_b64) % 4 == 2:
data_b64 += '=='
elif len(data_b64) % 4 == 3:
data_b64 += '='
data = b64decode(data_b64).decode()
valid_until = int(valid_until_raw)
except ValueError:
raise HTTPException(400, "Token invalid")
if valid_until < int(time.time()):
raise HTTPException(400, "Token expired")
return data
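# Illustrative sketch (not part of the original module): create_token() packs the
# payload, a random secret and the expiry timestamp into one dash-separated string,
# and check_token() reverses it, e.g.
#
#   tok = create_token("user-id-123", int(time.time()) + 3600)
#   # tok looks like "<base64 payload>-<48-char secret>-<unix expiry>"
#   check_token(tok)         # -> "user-id-123" while the token has not expired
#   check_token("garbage")   # -> raises HTTPException(400, "Token invalid")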
async def update_resend_registration(
user_data: DotDict,
schema: DbManagerSchema,
author_id: str,
):
token_valid_until = int(time.time() + config.manager.token_valid.registration)
user_data['registration_token'] = create_token(user_data['_id'], token_valid_until)
await async_user_collection.update_one({'_id': user_data['_id']}, {
'$set': {
'registration_token': user_data['registration_token'],
'updated_at': int(time.time()),
}
})
await async_client_user_cache_collection.delete_many({'user_id': user_data['_id']})
await async_send_mail_register(user_data, schema, token_valid_until, author_id=author_id)
def validate_property_write(schema: DbManagerSchema, key: str, is_self: bool, is_admin: bool):
prop = schema.properties_by_key.get(key)
if prop is None:
raise HTTPException(400, f"{repr(key)}={repr(prop)} is not a valid property")
elif not prop.can_edit.has_access(is_self, is_admin):
raise HTTPException(400, f"Cannot modify {repr(key)}")
def make_username(name: str) -> str:
username = base_username = normalize_username(name)
username_counter = 2
while user_collection.count_documents({'preferred_username': username}, limit=1) != 0:
username = base_username + str(username_counter)
username_counter += 1
return username
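# Illustrative sketch (assumes a reachable MongoDB behind user_collection): the
# collision loop above yields "jane.doe" for the first user named "Jane Doe",
# then "jane.doe2", "jane.doe3", ... for later users with the same display name.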
async def _update_groups(
user_data: DotDict,
schema: DbManagerSchema,
update_data: Dict[str, Any],
history_entry: DbUserHistory,
property_key: str,
is_self: bool,
is_admin: bool,
existence_check_property: Optional[str],
groups_add_property: str,
groups_pull_properties: Sequence[str],
members_pull_properties: Sequence[str] = (),
) -> bool:
if not isinstance(update_data[property_key], list) or \
not all(isinstance(group, str) for group in update_data[property_key]):
        raise HTTPException(400, f"{repr(property_key)} must be a list of strings")
validate_property_write(schema, property_key, is_self, is_admin)
reset_user_cache = False
new_groups = update_data[property_key]
new_groups_set = set(new_groups)
if existence_check_property is None:
if await async_user_group_collection.count_documents({'_id': {'$in': new_groups}}) != len(new_groups):
raise HTTPException(400, "At least one group does not exist")
else:
if not new_groups_set.issubset(user_data[existence_check_property]):
raise HTTPException(400, f"{property_key} contains invalid group")
added_groups = list(new_groups_set.difference(user_data[property_key]))
removed_groups = list(set(user_data[property_key]).difference(new_groups))
user_data[property_key] = new_groups
if added_groups:
await async_user_group_collection.update_many(
{'_id': {'$in': added_groups}},
{'$addToSet': {groups_add_property: user_data['_id']}},
)
history_entry.changes.append(DbChange(property=groups_add_property, value=f"Added {', '.join(added_groups)}"))
reset_user_cache = True
if removed_groups:
await async_user_group_collection.update_many(
{'_id': {'$in': removed_groups}},
{'$pull': {
prop: user_data['_id']
for prop in groups_pull_properties
}},
)
history_entry.changes.extend(
DbChange(property=prop, value=f"Removed {', '.join(removed_groups)}")
for prop in groups_pull_properties
)
for member_property_attr in members_pull_properties:
member_property: List[str] = user_data.get(member_property_attr, [])
for group in removed_groups:
try:
member_property.remove(group)
except ValueError:
pass
reset_user_cache = True
del update_data[property_key]
return reset_user_cache
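# Illustrative sketch (not part of the original module): for an update such as
# update_data = {'groups': ['staff', 'admins']} on a user currently in
# ['staff', 'guests'], _update_groups() adds the user id to the 'members' list of
# 'admins', pulls it from the configured membership lists of 'guests', records both
# changes in the history entry, removes the 'groups' key from update_data, and
# returns whether the per-client user cache needs to be invalidated.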
def apply_property_template(user_data: DotDict, prop: DbUserProperty):
assert "'''" not in prop.template, f"Invalid ''' in template: {prop.template}"
user_data[prop.key] = eval(
f"f'''{prop.template}'''",
{'make_username': make_username, 'config': config},
user_data,
)
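# Illustrative sketch (assumption, not part of the original module; the domain and
# keys below are made up): the template is evaluated as an f-string against the user
# record, so a schema property template resolves roughly like this:
def _example_apply_template() -> str:
    user_record = {'preferred_username': 'jane.doe'}
    template = "{preferred_username}@example.org"
    # mirrors apply_property_template(): evaluate the f-string with the record as locals
    return eval(f"f'''{template}'''", {}, user_record)  # -> 'jane.doe@example.org'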
async def update_user(
user_data: DotDict,
update_data: Dict[str, Any],
author_user_id: str,
is_new: bool = False,
is_registering: bool = False,
is_admin: bool = False,
is_self: bool = False,
no_registration: bool = False,
schema: DbManagerSchema = None,
):
if 'sub' in update_data or 'id' in update_data or '_id' in update_data or 'picture' in update_data:
raise HTTPException(400, f"Cannot modify 'sub', 'id', '_id' or 'picture'")
was_active = user_data.get('active', False)
reset_user_cache = False
if schema is None:
schema = await async_read_schema()
if is_new:
assert '_id' not in user_data
user_data['_id'] = generate_token(48)
history_entry: DbUserHistory = DbUserHistory(
id=str(uuid4()),
user_id=user_data['_id'],
timestamp=datetime.utcnow(),
author_id=author_user_id,
changes=[],
)
if 'password' in update_data:
if not isinstance(update_data['password'], str):
raise HTTPException(400, "'password' must be a string")
validate_property_write(schema, 'password', is_self, is_admin)
if is_self and not is_registering and user_data.get('password') is not None:
if 'old_password' not in update_data:
raise HTTPException(400, f"Need {repr('old_password')} for setting password")
if not isinstance(update_data['old_password'], str):
raise HTTPException(400, f"{repr('old_password')} is not a string")
is_valid, _ = verify_and_update(update_data['old_password'], user_data['password'])
if not is_valid:
raise HTTPException(401, "Old password does not match")
try:
            user_data['password'] = create_password(update_data['password'])
del update_data['password']
history_entry.changes.append(DbChange(property='password', value="Set"))
except PasswordLeakedException:
raise HTTPException(400, "Password is leaked and cannot be used. See https://haveibeenpwned.com/")
async def send_mail():
pass
if is_registering and update_data.get('email', user_data['email']) == user_data['email']:
user_data['email_verified'] = True
if 'email' in update_data:
del update_data['email']
elif 'email' in update_data:
new_mail = update_data['email']
if not isinstance(new_mail, str):
raise HTTPException(400, "'email' must be a string")
validate_property_write(schema, 'email', is_self, is_admin)
if not is_email(new_mail, check_dns=True):
raise HTTPException(400, "E-Mail address not accepted")
if new_mail != user_data.get('email') and \
await async_user_collection.count_documents({'email': new_mail}, limit=1) != 0:
raise HTTPException(400, "E-Mail address already in use, please use existing account")
locale = update_data.get('locale', user_data.get('locale', schema.properties_by_key['locale'].default))
if locale is None:
locale = 'en_us'
tz = schema.get_tz(update_data.get('zoneinfo', user_data.get('zoneinfo')))
del update_data['email']
history_entry.changes.append(DbChange(property='email', value=new_mail))
if is_new and not no_registration:
user_data['email'] = new_mail
user_data['email_verified'] = False
token_valid_until = int(time.time() + config.manager.token_valid.registration)
user_data['registration_token'] = create_token(user_data['_id'], token_valid_until)
async def send_mail():
await async_send_mail_register(
user_data,
schema,
token_valid_until,
locale,
tz,
author_id=author_user_id,
history_entry=history_entry,
)
elif not is_admin:
token_valid_until = int(time.time() + config.manager.token_valid.email_set)
user_data['email_verification_token'] = create_token(new_mail, token_valid_until)
if is_registering:
user_data['email'] = new_mail
user_data['email_verified'] = False
async def send_mail():
await async_send_mail_verify(
locale,
new_mail,
user_data,
token_valid_until,
tz,
author_id=author_user_id,
history_entry=history_entry
)
else:
user_data['email'] = new_mail
user_data['email_verified'] = False
if 'access_tokens' in update_data:
if not isinstance(update_data['access_tokens'], list):
raise HTTPException(400, "'access_tokens' must be a list")
try:
access_tokens = [ValidateAccessToken.validate(val) for val in update_data['access_tokens']]
except ValueError as err:
raise HTTPException(400, str(err))
validate_property_write(schema, 'access_tokens', is_self, is_admin)
existing_access_tokens = [
DbUserPasswordAccessToken.validate_document(access_token)
for access_token in user_data.get('access_tokens', [])
]
existing_access_tokens_by_id = {
existing_access_token.id: existing_access_token
for existing_access_token in existing_access_tokens
}
has_change = False
new_access_tokens = []
for access_token in access_tokens:
if access_token.id is not None:
store_token = existing_access_tokens_by_id.pop(access_token.id, None)
if store_token is None:
raise HTTPException(400, f"Invalid token ID {access_token.id}")
if store_token.description != access_token.description:
has_change = True
history_entry.changes.append(DbChange(
property='access_tokens',
value=f"Rename {store_token.description} -> {access_token.description}",
))
store_token.description = access_token.description
if access_token.token is not None:
has_change = True
history_entry.changes.append(DbChange(
property='access_tokens', value=f"Regenerate {store_token.description}"
))
store_token.token = access_token.token
else:
has_change = True
store_token = DbUserPasswordAccessToken(
id=generate_token(24),
description=access_token.description,
token=access_token.token,
)
history_entry.changes.append(DbChange(
property='access_tokens', value=f"Added {store_token.description}"
))
new_access_tokens.append(store_token)
history_entry.changes.extend(DbChange(
property='access_tokens', value=f"Deleted {deleted_token.description}"
) for deleted_token in existing_access_tokens_by_id.values())
del update_data['access_tokens']
user_data['access_tokens'] = [access_token.dict() for access_token in new_access_tokens]
if has_change:
history_entry.changes.append(DbChange(property='access_tokens', value="Updated"))
if 'groups' in update_data:
if await _update_groups(
user_data,
schema,
update_data,
history_entry,
property_key='groups',
is_self=is_self,
is_admin=is_admin,
existence_check_property=None,
groups_add_property='members',
groups_pull_properties=(
'members', 'email_allowed_forward_members', 'email_forward_members', 'email_postbox_access_members',
),
members_pull_properties=(
'email_allowed_forward_members', 'email_forward_members', 'email_postbox_access_members',
),
):
reset_user_cache = True
if 'email_allowed_forward_groups' in update_data:
await _update_groups(
user_data,
schema,
update_data,
history_entry,
property_key='email_allowed_forward_groups',
is_self=is_self,
is_admin=is_admin,
existence_check_property='groups',
groups_add_property='email_allowed_forward_members',
groups_pull_properties=('email_allowed_forward_members', 'email_forward_members'),
members_pull_properties=('email_forward_members',)
)
#! -*- coding: utf-8 -*-
import pprint
from decimal import Decimal
from botocore.exceptions import ClientError
from botocore.vendored.requests.exceptions import ConnectionError
from .helpers import get_attribute_type
from .errors import ClientException, ConnectionException, ParameterException
pp = pprint.PrettyPrinter(indent=4)
pprint = pp.pprint
__all__ = ['Table']
class Table(object):
def __init__(self, instance, db):
self.instance = instance
self.db=db
self.table_name = instance.__table_name__
self.table = db.Table(self.table_name)
def info(self):
try:
response = self.db.meta.client.describe_table(TableName=self.table_name)
except ClientError as e:
raise ClientException(e.response['Error']['Message'])
else:
table_info = response['Table']
return table_info
def _prepare_hash_key(self):
hash_key = self.instance._hash_key
param = {
'AttributeName': hash_key,
'KeyType': 'HASH'
}
return param
def _prepare_range_key(self, range_key=None):
if not range_key:
range_key = self.instance._range_key
if range_key:
param = {
'AttributeName': range_key,
'KeyType': 'RANGE'
}
return param
return {}
def _prepare_key_schema(self):
KeySchema = []
hash_key_param = self._prepare_hash_key()
KeySchema.append(hash_key_param)
range_key_param = self._prepare_range_key()
if range_key_param:
KeySchema.append(range_key_param)
return KeySchema
def _prepare_attribute_definitions(self):
AttributeDefinitions = []
attributes = self.instance.attributes
hash_key = self.instance._hash_key
_fields_indexed = {}
AttributeDefinitions.append({
'AttributeName': hash_key,
'AttributeType': get_attribute_type(attributes[hash_key]),
})
_fields_indexed[hash_key] = True
range_key = self.instance._range_key
if range_key:
AttributeDefinitions.append({
'AttributeName': range_key,
'AttributeType': get_attribute_type(attributes[range_key]),
})
_fields_indexed[range_key] = True
for field in self.instance._local_indexed_fields:
AttributeDefinitions.append({
'AttributeName': field,
'AttributeType': get_attribute_type(attributes[field]),
})
_fields_indexed[field] = True
#for global indexes
for index_name, attribute_dict in self.instance._global_indexes.items():
field = attribute_dict["hash_key"]
if(not _fields_indexed.get(field, None)):
AttributeDefinitions.append({
'AttributeName': field,
'AttributeType': get_attribute_type(attributes[field])
})
range_key = attribute_dict.get("range_key", None)
if(range_key and not _fields_indexed.get(range_key, None)):
AttributeDefinitions.append({
'AttributeName': range_key,
'AttributeType': get_attribute_type(attributes[range_key])
})
return AttributeDefinitions
def _prepare_primary_key(self, params):
params['KeySchema'] = self._prepare_key_schema()
params['AttributeDefinitions'] = self._prepare_attribute_definitions()
return params
def _prepare_local_indexes(self):
indexes = []
for field in self.instance._local_indexed_fields:
index_name = '{table_name}_ix_{field}'.format(
table_name=self.table_name, field=field)
KeySchema = [self._prepare_hash_key()]
range_key_param = self._prepare_range_key(field)
if range_key_param:
KeySchema.append(range_key_param)
index_properties = {
'IndexName': index_name,
'KeySchema': KeySchema,
}
field_object = self.instance.attributes[field]
if(field_object.projections):
index_properties['Projection'] = {'ProjectionType': 'INCLUDE',
'NonKeyAttributes': field_object.projections
}
else:
index_properties['Projection'] = {'ProjectionType': 'KEYS_ONLY'}
indexes.append(index_properties)
return indexes
def _prepare_global_indexes(self):
indexes = []
for index_name, attribute_dict in self.instance._global_indexes.items():
KeySchema = [{
'AttributeName': attribute_dict["hash_key"],
'KeyType': 'HASH'
}]
if attribute_dict.get("range_key", None):
KeySchema.append({
'AttributeName': attribute_dict.get("range_key"),
'KeyType': 'RANGE'
})
index_properties = {
'IndexName': index_name,
'KeySchema': KeySchema
}
read_capacity_units = attribute_dict.get("read_capacity")
write_capacity_units = attribute_dict.get("write_capacity")
if(read_capacity_units and write_capacity_units):
index_properties['ProvisionedThroughput'] = {
'ReadCapacityUnits': read_capacity_units,
'WriteCapacityUnits': write_capacity_units
}
index_properties['BillingMode'] = "PROVISIONED"
else:
                # this should not appear, but due to a bug we are forced to use this
# index_properties['ProvisionedThroughput'] = {
# 'ReadCapacityUnits': 1,
# 'WriteCapacityUnits': 1
# }
#assuming billing is pay per request
index_properties['BillingMode'] = "PAY_PER_REQUEST"
if("projections" in attribute_dict):
index_properties['Projection'] = {'ProjectionType': 'INCLUDE',
'NonKeyAttributes': attribute_dict["projections"]
}
else:
index_properties['Projection'] = {'ProjectionType': 'KEYS_ONLY'}
indexes.append(index_properties)
return indexes
def _prepare_create_table_params(self):
# TableName
table_params = {
'TableName': self.table_name
}
# KeySchema && AttributeDefinitions
table_params = self._prepare_primary_key(table_params)
# LocalSecondaryIndexes
local_indexes = self._prepare_local_indexes()
if local_indexes:
table_params['LocalSecondaryIndexes'] = local_indexes
# GlobalSecondaryIndexes
global_indexes = self._prepare_global_indexes()
if global_indexes:
table_params['GlobalSecondaryIndexes'] = global_indexes
# ProvisionedThroughput
read_capacity_units = getattr(self.instance, 'ReadCapacityUnits', None)
write_capacity_units = getattr(self.instance, 'WriteCapacityUnits', None)
if(read_capacity_units and write_capacity_units):
table_params['ProvisionedThroughput'] = {
'ReadCapacityUnits': read_capacity_units,
'WriteCapacityUnits': write_capacity_units
}
table_params['BillingMode'] = "PROVISIONED"
else:
#assuming billing is pay per request
table_params['BillingMode'] = "PAY_PER_REQUEST"
return table_params
def create(self):
'''
# create table
create_table Request Syntax
        # http://boto3.readthedocs.io/en/stable/reference/services/dynamodb.html#DynamoDB.Client.create_table
response = client.create_table(
AttributeDefinitions=[
{
'AttributeName': 'string',
'AttributeType': 'S'|'N'|'B'
},
],
TableName='string',
KeySchema=[
{
'AttributeName': 'string',
'KeyType': 'HASH'|'RANGE'
},
],
LocalSecondaryIndexes=[
{
'IndexName': 'string',
'KeySchema': [
{
'AttributeName': 'string',
'KeyType': 'HASH'|'RANGE'
},
],
'Projection': {
'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',
'NonKeyAttributes': [
'string',
]
}
},
],
GlobalSecondaryIndexes=[
{
'IndexName': 'string',
'KeySchema': [
{
'AttributeName': 'string',
'KeyType': 'HASH'|'RANGE'
},
],
'Projection': {
'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',
'NonKeyAttributes': [
'string',
]
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 123,
'WriteCapacityUnits': 123
}
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 123,
'WriteCapacityUnits': 123
},
StreamSpecification={
'StreamEnabled': True|False,
'StreamViewType': 'NEW_IMAGE'|'OLD_IMAGE'|'NEW_AND_OLD_IMAGES'|'KEYS_ONLY'
}
)
AttributeType (string) -- [REQUIRED]
The data type for the attribute, where:
* S - the attribute is of type String
* N - the attribute is of type Number
* B - the attribute is of type Binary
KeySchema (list) -- [REQUIRED]
KeyType - The role that the key attribute will assume:
* HASH - partition key
* RANGE - sort key
'''
try:
params = self._prepare_create_table_params()
return self.db.create_table(**params)
except ClientError as e:
raise ClientException(e.response['Error']['Message'])
except ConnectionError:
raise ConnectionException('Connection refused')
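    # Illustrative usage sketch (assumption, not part of the original module): given a
    # model class that defines __table_name__, attributes, _hash_key/_range_key and
    # optional capacity settings, creating its table would look roughly like this
    # (region and model names are made up):
    #
    #   import boto3
    #   db = boto3.resource('dynamodb', region_name='us-east-1')
    #   table = Table(UserModel(), db)
    #   table.create()                 # builds params via _prepare_create_table_params()
    #   table.info()['TableStatus']    # -> 'CREATING' until DynamoDB finishes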
def _update_throughput(self, ProvisionedThroughput):
ReadCapacityUnits = ProvisionedThroughput.get('ReadCapacityUnits', None)
WriteCapacityUnits = ProvisionedThroughput.get('WriteCapacityUnits', None)
read_capacity_units = getattr(self.instance, 'ReadCapacityUnits', None)
write_capacity_units = getattr(self.instance, 'WriteCapacityUnits', None)
if (read_capacity_units != ReadCapacityUnits or
write_capacity_units != WriteCapacityUnits):
self.table.update(ProvisionedThroughput={
'ReadCapacityUnits': read_capacity_units,
'WriteCapacityUnits': write_capacity_units
})
def _update_streams(self):
# TODO
pass
def _update_global_indexes(self):
# TODO
pass
    def _update_billing_mode(self, billing_mode):
        # TODO
        pass
def update(self):
'''
# update table
http://boto3.readthedocs.io/en/stable/reference/services/dynamodb.html#DynamoDB.Table.update
You can only perform one of the following operations at once:
* Modify the provisioned throughput settings of the table.
* Enable or disable Streams on the table.
* Remove a global secondary index from the table.
* Create a new global secondary index on the table.
Once the index begins backfilling, you can use UpdateTable to perform
other operations.
UpdateTable is an asynchronous operation; while it is executing,
the table status changes from ACTIVE to UPDATING. While it is UPDATING,
you cannot issue another UpdateTable request.
When the table returns to the ACTIVE state, the UpdateTable operation is
complete.
# Request Syntax
AttributeDefinitions=[
{
'AttributeName': 'string',
'AttributeType': 'S'|'N'|'B'
},
],
BillingMode='PROVISIONED'|'PAY_PER_REQUEST',
ProvisionedThroughput={
'ReadCapacityUnits': 123,
'WriteCapacityUnits': 123
},
GlobalSecondaryIndexUpdates=[
{
'Update': {
'IndexName': 'string',
'ProvisionedThroughput': {
'ReadCapacityUnits': 123,
'WriteCapacityUnits': 123
}
},
'Create': {
'IndexName': 'string',
'KeySchema': [
{
'AttributeName': 'string',
'KeyType': 'HASH'|'RANGE'
},
],
'Projection': {
'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',
'NonKeyAttributes': [
'string',
]
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 123,
'WriteCapacityUnits': 123
}
},
'Delete': {
'IndexName': 'string'
}
},
],
StreamSpecification={
'StreamEnabled': True|False,
'StreamViewType': 'NEW_IMAGE'|'OLD_IMAGE'|'NEW_AND_OLD_IMAGES'|'KEYS_ONLY'
},
SSESpecification={
'Enabled': True|False,
'SSEType': 'AES256'|'KMS',
'KMSMasterKeyId': 'string'
},
ReplicaUpdates=[
{
'Create': {
'RegionName': 'string',
'KMSMasterKeyId': 'string',
'ProvisionedThroughputOverride': {
'ReadCapacityUnits': 123
},
'GlobalSecondaryIndexes': [
{
'IndexName': 'string',
'ProvisionedThroughputOverride': {
'ReadCapacityUnits': 123
}
},
]
},
'Update': {
'RegionName': 'string',
'KMSMasterKeyId': 'string',
'ProvisionedThroughputOverride': {
'ReadCapacityUnits': 123
},
'GlobalSecondaryIndexes': [
{
'IndexName': 'string',
'ProvisionedThroughputOverride': {
'ReadCapacityUnits': 123
}
},
]
},
'Delete': {
'RegionName': 'string'
}
},
]
'''
table_info = self.info()
ProvisionedThroughput = table_info.get('ProvisionedThroughput')
self._update_throughput(ProvisionedThroughput)
self._update_billing_mode(table_info.get('BillingMode'))
def delete(self):
# delete table
try:
return self.table.delete()
except ClientError:
raise ClientException('Cannot do operations on a non-existent table')
except ConnectionError:
raise ConnectionException('Connection refused')
def _get_primary_key(self, **kwargs):
hash_key, range_key = self.instance._hash_key, self.instance._range_key
hash_value = kwargs.get(hash_key) or getattr(self.instance, hash_key)
if isinstance(hash_value, (int, float)):
hash_value = Decimal(hash_value)
key = {
hash_key: hash_value
}
if(range_key):
range_value = kwargs.get(range_key) or getattr(self.instance, range_key, None)
if not range_value:
raise ParameterException('Missing value for range key')
if isinstance(range_value, (int, float)):
range_value = Decimal(range_value)
key[range_key] = range_value
return key
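# Illustrative sketch (hypothetical keys): for a model with _hash_key='user_id'
# and _range_key='score', _get_primary_key(user_id='abc', score=10) returns
# {'user_id': 'abc', 'score': Decimal(10)}; numeric values are converted to
# Decimal because boto3 rejects raw Python floats, and this helper coerces
# ints as well for consistency.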
def get_item(self, **kwargs):
"""
:param Key: primary key dict (optional); built from the instance when omitted
"""
kwargs['Key'] = kwargs.get('Key') or self._get_primary_key()
try:
response = self.table.get_item(**kwargs)
except ClientError as e:
if e.response['Error']['Code'] == 'ValidationException':
return None
raise ClientException(e.response['Error']['Message'])
else:
item = response.get('Item')
return item
def batch_get_item(self, *primary_keys, **kwargs):
"""
:param primary_keys: primary key dicts (kwargs per key) to fetch in one batch
"""
_primary_keys = []
for primary_key in primary_keys:
key = self._get_primary_key(**primary_key)
_primary_keys.append(key)
params = {
'RequestItems': {
self.table_name: {
'Keys': _primary_keys
}
},
'ReturnConsumedCapacity': 'TOTAL'
}
projections = kwargs.get("projections", None)
if(projections):
params["RequestItems"]["ProjectionExpression"] = ",".join(projections)
try:
response = self.db.batch_get_item(**params)
except ClientError as e:
raise ClientException(e.response['Error']['Message'])
else:
items = response['Responses'][self.table_name]
return items
def put_item(self, item, overwrite=True):
args = dict(Item=item)
if(not overwrite):
expr = 'attribute_not_exists(%s)'%(self.instance.__class__._hash_key,)
if(self.instance.__class__._range_key):
expr += (" AND attribute_not_exists(%s)"%(self.instance.__class__._range_key,))
args["ConditionExpression"] = expr
self.table.put_item(**args)
return True
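# Note on overwrite=False (attribute names here are illustrative): for a model
# with hash key 'user_id' and range key 'created_at', the ConditionExpression
# built above is "attribute_not_exists(user_id) AND attribute_not_exists(created_at)",
# so boto3 raises a ConditionalCheckFailedException (a ClientError) when an item
# with the same primary key already exists.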
def batch_write(self, items, overwrite=False):
pkeys = []
if overwrite:
instance = self.instance
pkeys = [instance._hash_key, instance._range_key]
try:
with self.table.batch_writer(overwrite_by_pkeys=pkeys) as batch:
for item in items:
batch.put_item(Item=item)
except ClientError as e:
raise ClientException(e.response['Error']['Message'])
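# Illustrative usage (item shapes depend on the model; names are made up):
#   table.batch_write([{'user_id': 'a', 'score': 1},
#                      {'user_id': 'b', 'score': 2}], overwrite=True)
# boto3's batch_writer buffers the puts and flushes them in chunks of up to 25
# items; overwrite_by_pkeys de-duplicates buffered requests that share a primary key.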
def batch_delete(self, items=None, keys=None):
try:
with self.table.batch_writer() as batch:
if(items):
for item in items:
batch.delete_item(Key=Table(item)._get_primary_key())
if(keys):
for key in keys:
batch.delete_item(Key=key)
except ClientError as e:
raise ClientException(e.response['Error']['Message'])
def query(self, **kwargs):
"""
response = table.query(
IndexName='string',
Select='ALL_ATTRIBUTES'|'ALL_PROJECTED_ATTRIBUTES'|'SPECIFIC_ATTRIBUTES'|'COUNT',
Limit=123,
ConsistentRead=True|False,
ScanIndexForward=True|False,
ExclusiveStartKey={
'string': 'string'|123|Binary(b'bytes')|True|None|set(['string'])|set([123])|set([Binary(b'bytes')])|[]|{}
},
ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',
ProjectionExpression='string',
FilterExpression=Attr('myattribute').eq('myvalue'),
KeyConditionExpression=Key('mykey').eq('myvalue'),
ExpressionAttributeNames={
'string': 'string'
},
ExpressionAttributeValues={
'string': 'string'|123|Binary(b'bytes')|True|None|set(['string'])|set([123])|set([Binary(b'bytes')])|[]|{}
}
)
"""
try:
#print(kwargs)
response = self.table.query(**kwargs)
except ClientError as e:
raise ClientException(e.response['Error']['Message'])
return response
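# Minimal usage sketch (index/attribute names are assumptions), mapping the
# request syntax in the docstring onto this wrapper:
#   from boto3.dynamodb.conditions import Key, Attr
#   resp = table.query(
#       KeyConditionExpression=Key('user_id').eq('42'),
#       FilterExpression=Attr('status').eq('active'),
#       Limit=10)
#   items = resp.get('Items', [])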
def scan(self, **kwargs):
try:
#print(kwargs)
response = self.table.scan(**kwargs)
except ClientError as e:
raise ClientException(e.response['Error']['Message'])
return response
def _prepare_update_item_params(self, update_fields=None, *args, **kwargs):
params = {
'Key': self._get_primary_key()
}
_condition_expression = kwargs.pop('ConditionExpression', None)
if _condition_expression:
params['ConditionExpression'] = _condition_expression
| |
planet_tags]
elif args.eph_planetmassprior == 'loguniform':
parameters += ["planet{0}_delta_amp".format(ii) for ii in planet_tags]
parameters += ["planet{0}_delta_sign".format(ii) for ii in planet_tags]
if num_ephs > 1:
for ii in planet_tags:
for jj in range(num_ephs-1):
parameters.append("planet{0}_orbitwgts{1}".format(ii,jj))
if args.eph_planetoffset:
for ii in planet_tags:
for axis in ['x','y','z']:
parameters.append("planet{0}_orbitoffsetaxis{1}".format(ii,axis))
elif args.eph_roemermix:
for key in ephnames[:-1]:
parameters.append("roemerweight_{0}".format(key))
elif args.eph_physmodel:
parameters += ["frame_rate", "jupiter_dM", "saturn_dM",
"uranus_dM", "neptune_dM"]
# jupiter orbit
if args.incJuporb:
if args.jup_orbmodel == 'angles':
parameters += ["jupiter_ang1", "jupiter_ang2", "jupiter_ang3"]
elif args.jup_orbmodel == 'orbelements':
parameters += ["jupiter_orbel1", "jupiter_orbel2", "jupiter_orbel3",
"jupiter_orbel4", "jupiter_orbel5", "jupiter_orbel6"]
# saturn orbit
if args.incSatorb:
if args.sat_orbmodel == 'angles':
parameters += ["saturn_ang1", "saturn_ang2", "saturn_ang3"]
elif args.sat_orbmodel == 'orbelements':
parameters += ["saturn_orbel1", "saturn_orbel2", "saturn_orbel3",
"saturn_orbel4", "saturn_orbel5", "saturn_orbel6"]
elif args.eph_roemermix_dx:
for key in ephnames:
parameters.append("roemerweight_dx_{0}".format(key))
n_params = len(parameters)
if rank==0:
print "\n You are searching for the following parameters: {0}\n".format(parameters)
print "\n The total number of parameters is {0}\n".format(n_params)
if rank == 0:
print "\n Now, we sample... \n"
print """\
_______ .__ __. _______ ___ _______ _______ __
| ____|| \ | | / _____| / \ / _____|| ____|| |
| |__ | \| | | | __ / ^ \ | | __ | |__ | |
| __| | . ` | | | |_ | / /_\ \ | | |_ | | __| | |
| |____ | |\ | | |__| | / _____ \ | |__| | | |____ |__|
|_______||__| \__| \______| /__/ \__\ \______| |_______|(__)
"""
##########################
# Define function wrappers
##########################
if args.sampler == 'mnest':
if args.shortFileTag is not None:
dir_name = args.dirExt+args.shortFileTag+'_mnest'
else:
dir_name = args.dirExt+file_tag+'_mnest'
if rank == 0:
if not os.path.exists(dir_name):
os.makedirs(dir_name)
if args.incCorr:
# Copy the anisotropy modefile into the results directory
if args.anis_modefile is not None:
os.system('cp {0} {1}'.format(args.anis_modefile,dir_name))
# Printing out the list of searched parameters
fil = open(dir_name+'/parameter_list.txt','w')
for ii,parm in enumerate(parameters):
print >>fil, ii, parm
fil.close()
# Printing out the array of frequencies in the rank-reduced spectrum
np.save(dir_name+'/freq_array_red.npy', fqs_red)
if args.incDM:
np.save(dir_name+'/freq_array_dm.npy', fqs_dm)
if args.incBand:
np.save(dir_name+'/freq_array_band.npy', fqs_band)
if args.incEph and not args.jplBasis:
np.save(dir_name+'/freq_array_eph.npy', fqs_eph)
# Printing out the array of random phase shifts
psr_phaseshifts = OrderedDict.fromkeys([p.name for p in psr])
for ii,name in enumerate(psr_phaseshifts):
psr_phaseshifts[name] = list(psr[ii].ranphase)
with open(dir_name+'/psr_phaseshifts.json', 'w') as fp:
json.dump(psr_phaseshifts, fp)
fp.close()
# Saving command-line arguments to file
with open(dir_name+'/run_args.json', 'w') as frun:
json.dump(vars(args), frun)
frun.close()
def prior_func(xx,ndim,nparams):
for ii in range(nparams):
xx[ii] = pmin[ii] + xx[ii]*(pmax[ii]-pmin[ii])
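# e.g. with pmin[ii] = -18.0 and pmax[ii] = -11.0, a unit-cube draw of
# xx[ii] = 0.5 maps to -14.5; MultiNest always samples on [0, 1]^n and relies
# on this in-place transform to impose the uniform prior ranges.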
def like_func(xx,ndim,nparams):
xx = np.array([xx[ii] for ii in range(nparams)])
return lnprob(xx)
pymultinest.run(like_func, prior_func, n_params,
importance_nested_sampling = args.ins,
resume = args.resume, verbose = True,
n_live_points = args.nlive,
outputfiles_basename=u'{0}/mnest_'.format(dir_name),
sampling_efficiency = args.sampleEff,
const_efficiency_mode = args.constEff)
elif args.sampler == 'pchord':
dir_name = args.dirExt+file_tag+'_pchord'
if rank == 0:
if not os.path.exists(dir_name):
os.makedirs(dir_name)
if not os.path.exists(dir_name+'/clusters'):
os.mkdir(dir_name+'/clusters')
if args.incCorr:
# Copy the anisotropy modefile into the results directory
if args.anis_modefile is not None:
os.system('cp {0} {1}'.format(args.anis_modefile,dir_name))
# Printing out the list of searched parameters
fil = open(dir_name+'/parameter_list.txt','w')
for ii,parm in enumerate(parameters):
print >>fil, ii, parm
fil.close()
# Printing out the array of frequencies in the rank-reduced spectrum
np.save(dir_name+'/freq_array_red.npy', fqs_red)
if args.incDM:
np.save(dir_name+'/freq_array_dm.npy', fqs_dm)
if args.incBand:
np.save(dir_name+'/freq_array_band.npy', fqs_band)
if args.incEph and not args.jplBasis:
np.save(dir_name+'/freq_array_eph.npy', fqs_eph)
# Printing out the array of random phase shifts
psr_phaseshifts = OrderedDict.fromkeys([p.name for p in psr])
for ii,name in enumerate(psr_phaseshifts):
psr_phaseshifts[name] = list(psr[ii].ranphase)
with open(dir_name+'/psr_phaseshifts.json', 'w') as fp:
json.dump(psr_phaseshifts, fp)
fp.close()
# Saving command-line arguments to file
with open(dir_name+'/run_args.json', 'w') as frun:
json.dump(vars(args), frun)
frun.close()
def prior_func(xx):
for ii in range(len(xx)):
xx[ii] = pmin[ii] + xx[ii]*(pmax[ii]-pmin[ii])
return xx
def like_func(xx):
xx = np.array([xx[ii] for ii in range(len(xx))])
return lnprob(xx)
pypolychord.run(like_func, prior_func, n_params,
n_live = args.nlive, n_chords = args.nchords,
output_basename='{0}/pchord_'.format(dir_name))
elif args.sampler == 'ptmcmc':
# Start the sampling off with some reasonable parameter choices
x0 = np.array([])
if not args.fixRed:
if args.redSpecModel == 'powerlaw':
# starting red parameters at single pulsar values
startRedamp = np.log10(np.array([np.max([p.parRedamp, p.Redamp]) for p in psr]))
startRedind = np.array([np.max([p.parRedind, p.Redind]) for p in psr])
x0 = np.append(x0,startRedamp)
x0 = np.append(x0,startRedind)
elif args.redSpecModel == 'spectrum':
x0 = np.append(x0,np.random.uniform(-7.0,-3.0,len(psr)*nmodes_red))
if args.incDM and not args.fixDM:
if args.dmSpecModel == 'powerlaw':
# starting dm parameters at single pulsar values
startDMamp = np.log10(np.array([np.max([p.parDMamp, p.DMamp]) for p in psr]))
startDMind = np.array([np.max([p.parDMind, p.DMind]) for p in psr])
x0 = np.append(x0,startDMamp)
x0 = np.append(x0,startDMind)
elif args.dmSpecModel == 'spectrum':
x0 = np.append(x0,np.random.uniform(-7.0,-3.0,len(psr)*nmodes_dm))
if args.varyWhite:
for ii,p in enumerate(psr):
systems = p.sysflagdict[args.sysflag_target]
x0 = np.append(x0,np.random.uniform(0.75,1.25,len(systems)))
x0 = np.append(x0,np.random.uniform(-10.0,-5.0,len(systems)))
if 'nano-f' in p.sysflagdict.keys() and len(p.sysflagdict['nano-f'].keys())>0:
x0 = np.append(x0, np.random.uniform(-8.5,-5.0,len(p.sysflagdict['nano-f'].keys())))
if args.incBand:
if args.bandSpecModel == 'powerlaw':
x0 = np.append(x0,np.random.uniform(-20.0,-11.0,len(bands)-1))
x0 = np.append(x0,np.random.uniform(0.0,7.0,len(bands)-1))
elif args.bandSpecModel == 'spectrum':
x0 = np.append(x0,np.random.uniform(-30.0,-3.0,(len(bands)-1)*nmodes_band))
if args.incClk:
if args.clkSpecModel == 'powerlaw':
# starting clock parameters at random positions
x0 = np.append(x0,np.random.uniform(-20.0,-11.0))
x0 = np.append(x0,np.random.uniform(0.0,7.0))
elif args.clkSpecModel == 'spectrum':
x0 = np.append(x0,np.random.uniform(-7.0,-3.0,nmodes_red))
if args.incCm:
if args.cmSpecModel == 'powerlaw':
# starting cm parameters at random positions
x0 = np.append(x0,np.random.uniform(-20.0,-11.0))
x0 = np.append(x0,np.random.uniform(0.0,7.0))
elif args.cmSpecModel == 'spectrum':
x0 = np.append(x0,np.random.uniform(-7.0,-3.0,nmodes_red))
if args.incEph and not args.jplBasis:
if args.ephSpecModel == 'powerlaw':
# starting eph parameters at random positions
x0 = np.append(x0,np.random.uniform(-20.0,-11.0,3))
x0 = np.append(x0,np.random.uniform(0.0,7.0,3))
elif args.ephSpecModel == 'spectrum':
x0 = np.append(x0,np.random.uniform(-7.0,-3.0,3*nmodes_eph))
if args.incDip:
if args.dipSpecModel == 'powerlaw':
# starting cosinusoidal parameters at random positions
x0 = np.append(x0,np.random.uniform(-20.0,-11.0))
x0 = np.append(x0,np.random.uniform(0.0,7.0))
elif args.dipSpecModel == 'spectrum':
x0 = np.append(x0,np.random.uniform(-7.0,-3.0,nmodes_red))
if args.incGWB:
if args.gwbSpecModel == 'powerlaw':
x0 = np.append(x0,-15.0)
if args.fix_slope is None:
x0 = np.append(x0,13./3.)
elif args.gwbSpecModel == 'spectrum':
if args.gwbPrior != 'gaussProc':
x0 = np.append(x0,np.random.uniform(-7.0,-3.0,nmodes_red))
elif args.gwbPrior == 'gaussProc':
x0 = np.append(x0,np.random.uniform(-5.0,5.0,nmodes_red))
x0 = np.append(x0,-15.0)
if gwb_popparam == 'ecc':
x0 = np.append(x0,0.8)
elif gwb_popparam == 'stars':
x0 = np.append(x0,5.0)
elif gwb_popparam == 'gas':
x0 = np.append(x0,0.0)
elif gwb_popparam == 'starsecc':
x0 = np.append(x0,np.array([np.random.uniform(stars_range[0],stars_range[1]),
np.random.uniform(ecc_range[0],ecc_range[1])]))
elif gwb_popparam == 'alphastarsecc':
x0 = np.append(x0,np.array([np.random.uniform(alpha_range[0],alpha_range[1]),
np.random.uniform(stars_range[0],stars_range[1]),
np.random.uniform(ecc_range[0],ecc_range[1])]))
elif gwb_popparam == 'cosmicstring':
x0 = np.append(x0,np.array([np.random.uniform(gmu_range[0],gmu_range[1]),
np.random.uniform(stringprob_range[0],stringprob_range[1])]))
elif args.gwbSpecModel == 'turnover':
x0 = np.append(x0,-15.0)
if args.gwb_fb2env is not None:
if args.gwb_fb2env == 'stars':
x0 = np.append(x0,5.0)
elif args.gwb_fb2env == 'gas':
x0 = np.append(x0,0.0)
elif args.gwb_fb2env is None:
x0 = np.append(x0,np.array([13./3.,-8.0]))
elif args.gwbSpecModel == 'gpEnvInterp':
x0 = np.append(x0,np.array([-15.0,0.2]))
if args.incCorr:
if args.gwbTypeCorr == 'modelIndep':
x0 = np.append(x0,np.random.uniform(0.0,np.pi,num_corr_params))
elif args.gwbTypeCorr == 'pointSrc':
if args.fixPointSrcPhi is None and args.fixPointSrcTheta is None:
x0 = np.append(x0,np.tile([0.5,0.5],tmp_nwins))
elif args.gwbTypeCorr == 'spharmAnis':
x0 = np.append(x0,np.zeros(num_corr_params))
elif args.gwbTypeCorr == 'dipoleOrf':
x0 = np.append(x0,np.tile([0.5,0.5,0.5],tmp_nwins))
elif args.gwbTypeCorr == 'gwDisk':
x0 = np.append(x0,np.tile([0.5,0.5,0.1,0.0],tmp_nwins))
elif args.gwbTypeCorr == 'psrlocsVary':
x0 = np.append(x0,np.tile(positions[:,0],tmp_nwins))
x0 = np.append(x0,np.tile(np.cos(positions[:,1]),tmp_nwins))
if args.gwbModelSelect:
x0 = np.append(x0,0.2)
if args.incGWline:
x0 = np.append(x0,np.array([-6.0,-8.0,0.5,0.5]))
if args.det_signal:
if args.cgw_search:
x0 = np.append(x0,np.array([9.0, 0.5, 1.5, -15.0, -8.0,
0.5, 0.5, 0.5, 0.5, 0.5, 0.5]))
if args.ecc_search:
x0 = np.append(x0,0.1)
if args.psrTerm:
x0 = np.append(x0,np.array([p.h5Obj['pdist'].value
for p in psr]))
x0 = np.append(x0,np.random.uniform(0.0,2.0*np.pi,len(psr)))
x0 = np.append(x0,np.random.uniform(0.0,2.0*np.pi,len(psr)))
if args.cgwModelSelect:
x0 = np.append(x0,0.4)
if args.bwm_search:
x0 = np.append(x0,np.array([55100.0,-14.0,0.3,0.5,0.7]))
if args.bwm_model_select:
x0 = np.append(x0,0.4)
if args.eph_quadratic:
x0 = np.append(x0,np.zeros(9))
#x0 = np.append(x0,np.array(np.tile([-7.0],6)))
#x0 = np.append(x0,np.random.uniform(-1.0,1.0,6))
if args.eph_planetdelta:
if args.eph_planetmass:
if args.eph_planetmassprior == 'official':
x0 = np.append(x0,np.zeros(num_planets))
elif args.eph_planetmassprior == 'loguniform':
x0 = np.append(x0,np.random.uniform(-20.0,-5.0,num_planets))
x0 = np.append(x0,np.random.uniform(-1.0,1.0,num_planets))
if num_ephs > 1:
x0 = np.append(x0,np.random.uniform(0.0,1.0,(num_ephs-1)*num_planets))
if args.eph_planetoffset:
x0 = np.append(x0,np.random.uniform(-1e8,1e8,3*num_planets))
elif args.eph_roemermix:
if num_ephs > 1:
x0 = np.append(x0,np.random.uniform(0.0,1.0/num_ephs,num_ephs-1))
elif args.eph_physmodel:
x0 = np.append(x0,np.zeros(5))
if args.incJuporb:
if args.jup_orbmodel == 'angles':
x0 = np.append(x0,np.zeros(3))
elif args.jup_orbmodel == 'orbelements':
x0 = np.append(x0,np.zeros(6))
if args.incSatorb:
if args.sat_orbmodel == 'angles':
x0 = np.append(x0,np.zeros(3))
elif args.sat_orbmodel == 'orbelements':
x0 = np.append(x0,np.zeros(6))
elif args.eph_roemermix_dx:
if num_ephs > 1:
x0 = np.append(x0,np.random.uniform(0.0,1.0/num_ephs,num_ephs))
if rank==0:
print "\n Your initial parameters are {0}\n".format(x0)
# Make a reasonable covariance matrix to commence sampling
cov_diag = np.array([])
param_ephquad = 0
if not args.fixRed:
if args.redSpecModel == 'powerlaw':
cov_diag = np.append(cov_diag,0.5*np.ones(len(psr)))
cov_diag = np.append(cov_diag,0.5*np.ones(len(psr)))
param_ephquad += 2*len(psr)
elif args.redSpecModel == 'spectrum':
cov_diag = np.append(cov_diag,0.1*np.ones(len(psr)*nmodes_red))
param_ephquad += len(psr)*nmodes_red
if args.incDM and not args.fixDM:
if args.dmSpecModel == 'powerlaw':
cov_diag = np.append(cov_diag,0.5*np.ones(len(psr)))
cov_diag = np.append(cov_diag,0.5*np.ones(len(psr)))
param_ephquad += 2*len(psr)
elif args.dmSpecModel == 'spectrum':
cov_diag = np.append(cov_diag,0.1*np.ones(len(psr)*nmodes_dm))
param_ephquad += len(psr)*nmodes_dm
if args.varyWhite:
for ii,p in enumerate(psr):
systems = p.sysflagdict[args.sysflag_target]
cov_diag = np.append(cov_diag,0.5*np.ones(len(systems)))
cov_diag = np.append(cov_diag,0.5*np.ones(len(systems)))
param_ephquad += 2*len(systems)
if 'nano-f' in p.sysflagdict.keys() and len(p.sysflagdict['nano-f'].keys())>0:
| |
ValueError("Decay must be either 'EDT' or an integer \
corresponding to the amount of energy decayed to \
evaluate, e.g. (decay='20' | 20).")
RT = []
for ED in listEDC:
edc, edv = ED
RT.append(reverb_time_regression(edc, edv, y1, y2))
return RT
def G_Lpe(IR, nthOct, minFreq, maxFreq, IREndManualCut=None):
"""
Calculate the energy level from the room impulsive response.
Reference:
<NAME>.; <NAME>. H. APPLYING IN-SITU RECALIBRATION FOR
SOUND STRENGTH MEASUREMENTS IN AUDITORIA.
:param IR: one channel impulsive response
:type IR: ImpulsiveResponse
:param nthOct: number of fractions per octave
:type nthOct: int
:param minFreq: analysis inferior frequency limit
:type minFreq: float
:param maxFreq: analysis superior frequency limit
:type maxFreq: float
:return: Analysis object with the calculated parameter
:rtype: Analysis
"""
# Code snippet to guarantee that generated object name is
# the declared at global scope
# for frame, line in traceback.walk_stack(None):
for framenline in traceback.walk_stack(None):
# varnames = frame.f_code.co_varnames
varnames = framenline[0].f_code.co_varnames
if varnames == ():
break
# creation_file, creation_line, creation_function, \
# creation_text = \
extracted_text = \
traceback.extract_stack(framenline[0], 1)[0]
# traceback.extract_stack(frame, 1)[0]
# creation_name = creation_text.split("=")[0].strip()
creation_name = extracted_text[3].split("=")[0].strip()
# firstChNum = IR.systemSignal.channels.mapping[0]
# if not IR.systemSignal.channels[firstChNum].calibCheck:
# raise ValueError("'IR' must be a calibrated ImpulsiveResponse")
if isinstance(IR, SignalObj):
SigObj = cp.copy(IR)
elif isinstance(IR, ImpulsiveResponse):
SigObj = cp.copy(IR.systemSignal)
else:
raise TypeError("'IR' must be an ImpulsiveResponse or SignalObj.")
# Cutting the IR
if IREndManualCut is not None:
SigObj.crop(0, IREndManualCut)
timeSignal, _ = _circular_time_shift(SigObj.timeSignal[:,0])
# Bands filtering
# hSignal = SignalObj(SigObj.timeSignal[:,0],
hSignal = SignalObj(timeSignal,
SigObj.lengthDomain,
SigObj.samplingRate)
hSignal = _filter(signal=hSignal, nthOct=nthOct, minFreq=minFreq,
maxFreq=maxFreq)
bands = FOF(nthOct=nthOct,
freqRange=[minFreq,maxFreq])[:,1]
Lpe = []
for chIndex in range(hSignal.numChannels):
Lpe.append(
10*np.log10(np.trapz(y=hSignal.timeSignal[:,chIndex]**2/(2e-5**2),
x=hSignal.timeVector)))
LpeAnal = Analysis(anType='mixed', nthOct=nthOct, minBand=float(bands[0]),
maxBand=float(bands[-1]), data=Lpe,
comment='h**2 energy level')
LpeAnal.creation_name = creation_name
return LpeAnal
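# Minimal usage sketch (assumes `ir` is a measured one-channel ImpulsiveResponse):
#   LpE = G_Lpe(ir, nthOct=3, minFreq=100, maxFreq=10000)
# The returned Analysis holds one level per fractional-octave band, computed as
# 10*log10( integral of h(t)**2 / (2e-5)**2 dt ) over the (optionally cropped) IR.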
def G_Lps(IR, nthOct, minFreq, maxFreq):
"""G_Lps
Calculates the recalibration level, for both in-situ and
reverberation chamber. Lps is applied for G calculation.
During the recalibration: source height and mic heigth must be >= 1 [m],
while the distance between source and mic must be <= 1 [m]. The distances
must be the same for in-situ and reverberation chamber measurements.
Reference:
<NAME>.; <NAME>. APPLYING IN-SITU RECALIBRATION FOR
SOUND STRENGTH MEASUREMENTS IN AUDITORIA.
:param IR: one channel impulsive response
:type IR: ImpulsiveResponse
:param nthOct: number of fractions per octave
:type nthOct: int
:param minFreq: analysis inferior frequency limit
:type minFreq: float
:param maxFreq: analysis superior frequency limit
:type maxFreq: float
:return: Analysis object with the calculated parameter
:rtype: Analysis
"""
# Code snippet to guarantee that generated object name is
# the declared at global scope
# for frame, line in traceback.walk_stack(None):
for framenline in traceback.walk_stack(None):
# varnames = frame.f_code.co_varnames
varnames = framenline[0].f_code.co_varnames
if varnames == ():
break
# creation_file, creation_line, creation_function, \
# creation_text = \
extracted_text = \
traceback.extract_stack(framenline[0], 1)[0]
# traceback.extract_stack(frame, 1)[0]
# creation_name = creation_text.split("=")[0].strip()
creation_name = extracted_text[3].split("=")[0].strip()
# firstChNum = IR.systemSignal.channels.mapping[0]
# if not IR.systemSignal.channels[firstChNum].calibCheck:
# raise ValueError("'IR' must be a calibrated ImpulsiveResponse")
if isinstance(IR, SignalObj):
SigObj = IR
elif isinstance(IR, ImpulsiveResponse):
SigObj = IR.systemSignal
else:
raise TypeError("'IR' must be an ImpulsiveResponse or SignalObj.")
# Windowing the IR
# dBtoOnSet = 20
# dBIR = 10*np.log10((SigObj.timeSignal[:,0]**2)/((2e-5)**2))
# windowStart = np.where(dBIR > (max(dBIR) - dBtoOnSet))[0][0]
broadBandTimeSignal = cp.copy(SigObj.timeSignal[:,0])
broadBandTimeSignalNoStart, sampleShift = \
_circular_time_shift(broadBandTimeSignal)
windowLength = 0.0032 # [s]
windowEnd = int(windowLength*SigObj.samplingRate)
hSignal = SignalObj(broadBandTimeSignalNoStart[:windowEnd],
# hSignal = SignalObj(timeSignal,
SigObj.lengthDomain,
SigObj.samplingRate)
hSignal = _filter(signal=hSignal, nthOct=nthOct, minFreq=minFreq,
maxFreq=maxFreq)
bands = FOF(nthOct=nthOct,
freqRange=[minFreq,maxFreq])[:,1]
Lps = []
for chIndex in range(hSignal.numChannels):
timeSignal = cp.copy(hSignal.timeSignal[:,chIndex])
# timeSignalNoStart, sampleShift = _circular_time_shift(timeSignal)
# windowLength = 0.0032 # [s]
# windowEnd = int(windowLength*SigObj.samplingRate)
Lps.append(
# 10*np.log10(np.trapz(y=timeSignalNoStart[:windowEnd]**2/(2e-5**2),
10*np.log10(np.trapz(y=timeSignal**2/(2e-5**2),
# x=hSignal.timeVector[sampleShift:sampleShift+windowEnd])))
x=hSignal.timeVector)))
LpsAnal = Analysis(anType='mixed', nthOct=nthOct, minBand=float(bands[0]),
maxBand=float(bands[-1]), data=Lps,
comment='Source recalibration method IR')
LpsAnal.creation_name = creation_name
LpsAnal.windowLimits = ((sampleShift)/SigObj.samplingRate,
(sampleShift+windowEnd)/SigObj.samplingRate)
# Plot IR cutting
# fig = plt.figure(figsize=(10, 5))
# ax = fig.add_axes([0.08, 0.15, 0.75, 0.8], polar=False,
# projection='rectilinear', xscale='linear')
# ax.plot(SigObj.timeVector, 10*np.log10(SigObj.timeSignal**2/2e-5**2))
# ax.axvline(x=(sampleShift)/SigObj.samplingRate, linewidth=4, color='k')
# ax.axvline(x=(sampleShift+windowEnd)/SigObj.samplingRate, linewidth=4, color='k')
# ax.set_xlim([(sampleShift-100)/SigObj.samplingRate, (sampleShift+windowEnd+100)/SigObj.samplingRate])
return LpsAnal
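# Minimal usage sketch (assumes recalibration IRs measured at <= 1 m, per the
# docstring's geometry requirements):
#   Lps_revCh  = G_Lps(ir_revCh,  nthOct=3, minFreq=100, maxFreq=10000)
#   Lps_inSitu = G_Lps(ir_inSitu, nthOct=3, minFreq=100, maxFreq=10000)
# Only the first 3.2 ms after the direct sound (windowLength above) enters the
# level, so the result characterizes the source rather than the room.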
def strength_factor(Lpe, Lpe_revCh, V_revCh, T_revCh, Lps_revCh, Lps_inSitu):
S0 = 1 # [m2]
bands = T_revCh.bands
nthOct = T_revCh.nthOct
terms = []
for bandData in T_revCh.data:
if bandData == 0:
terms.append(0)
else:
term = (V_revCh * 0.16) / (bandData * S0)
terms.append(term)
terms = [10*np.log10(term) if term != 0 else 0 for term in terms]
revChTerm = Analysis(anType='mixed', nthOct=nthOct, minBand=float(bands[0]),
maxBand=float(bands[-1]), data=terms)
Lpe.anType = 'mixed'
Lpe_revCh.anType = 'mixed'
Lps_revCh.anType = 'mixed'
Lps_inSitu.anType = 'mixed'
G = Lpe - Lpe_revCh - revChTerm + 37 \
+ Lps_revCh - Lps_inSitu
G.anType = 'G'
return G
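# Band-wise arithmetic implemented above, written out with the code's own symbols
# (a sketch of the in-situ recalibration form of the strength factor):
#   G = Lpe - Lpe_revCh - 10*log10(0.16*V_revCh / (T_revCh*S0)) + 37
#       + Lps_revCh - Lps_inSitu
# where the 10*log10(...) term is the `revChTerm` Analysis built from the
# reverberation-chamber volume and reverberation time.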
def _clarity(temp, signalObj, nthOct, **kwargs): # TODO
"""
"""
# try:
# temp = int(temp)*signalObj.samplingRate//1000
# except ValueError:
# raise ValueError("The temp parameter must be an integer or a string \
# of integers, e.g. (temp='80' | 80).")
# output = []
# for ch in range(signalObj.num_channels()):
# filtResp = filtered_response(signalObj[ch], nthOct, **kwargs)
# C = []
# for bd in range(len(filtResp)):
# C.append(round(np.sum(filtResp[bd][:temp], axis=0)
# / np.sum(filtResp[bd][temp:], axis=0)[0], 2))
# output.append(C)
# return output
pass
def _definition(temp, signalObj, nthOct, **kwargs): # TODO
"""
"""
# try:
# temp = int(temp)*signalObj.samplingRate//1000
# except ValueError:
# raise ValueError("The temp parameter must be an integer or a string \
# of integers, e.g. (temp='50' | 50).")
# output = []
# for ch in range(signalObj.num_channels()):
# filtResp = filtered_response(signalObj[ch], nthOct, **kwargs)
# D = []
# for bd in range(len(filtResp)):
# D.append(round(10*np.log10(
# np.sum(filtResp[bd][:temp], axis=0)
# / np.sum(filtResp[bd][:], axis=0))[0], 2))
# output.append(D)
# return output
pass
def crop_IR(SigObj, IREndManualCut):
"""Cut the impulse response at background noise level."""
timeSignal = cp.copy(SigObj.timeSignal)
timeVector = SigObj.timeVector
samplingRate = SigObj.samplingRate
numSamples = SigObj.numSamples
numChannels = SigObj.numChannels
if SigObj.numChannels > 1:
print('crop_IR: The provided impulsive response has more than one ' +
'channel. Cropping based on channel 1.')
numChannels = 1
# Cut the end automatically or manual
if IREndManualCut is None:
winTimeLength = 0.1 # [s]
meanSize = 5 # [blocks]
dBtoReplica = 6 # [dB]
blockSamples = int(winTimeLength * samplingRate)
timeWinData, timeVecWin = _level_profile(timeSignal, samplingRate,
numSamples, numChannels,
blockSamples)
endTimeCut = timeVector[-1]
for blockIdx, blockAmplitude in enumerate(timeWinData):
if blockIdx >= meanSize:
anteriorMean = 10*np.log10( \
np.sum(timeWinData[blockIdx-meanSize:blockIdx])/meanSize)
if 10*np.log10(blockAmplitude) > anteriorMean+dBtoReplica:
endTimeCut = timeVecWin[blockIdx-meanSize//2]
break
else:
endTimeCut = IREndManualCut
endTimeCutIdx = np.where(timeVector >= endTimeCut)[0][0]
timeSignal = timeSignal[:endTimeCutIdx]
# Cut the start automatically
timeSignal, _ = _circular_time_shift(timeSignal)
result = SignalObj(timeSignal,
'time',
samplingRate,
signalType='energy')
return result
def analyse(obj, *params,
bypassLundeby=False,
plotLundebyResults=False,
IREndManualCut=None, **kwargs):
"""
Receives a one-channel SignalObj or ImpulsiveResponse and calculates the
room acoustic parameters specified in the positional input arguments.
:param obj: one channel impulsive response
:type obj: SignalObj or ImpulsiveResponse
Input parameters for reverberation time, 'RT':
:param RTdecay: decay interval for RT calculation. e.g. 20
:type RTdecay: int
Input parameters for clarity, 'C':
TODO
Input parameters for definition, 'D':
TODO
Input parameters for strength factor, 'G':
TODO
:param nthOct: number of fractions per octave
:type nthOct: int
:param minFreq: analysis inferior frequency limit
:type minFreq: float
:param maxFreq: analysis superior frequency limit
:type maxFreq: float
:param bypassLundeby: bypass the Lundeby correction, defaults to False
:type bypassLundeby: bool, optional
:param plotLundebyResults: plot the Lundeby correction parameters, defaults to False
:type plotLundebyResults: bool, optional
:return: Analysis object with the calculated parameter
:rtype: Analysis
"""
# Code snippet to guarantee that generated object name is
# the declared at global scope
# for frame, line in traceback.walk_stack(None):
for framenline in traceback.walk_stack(None):
# varnames = frame.f_code.co_varnames
varnames = framenline[0].f_code.co_varnames
if varnames == ():
break
# creation_file, creation_line, creation_function, \
# creation_text = \
extracted_text = \
traceback.extract_stack(framenline[0], 1)[0]
# traceback.extract_stack(frame, 1)[0]
# creation_name = creation_text.split("=")[0].strip()
creation_name = extracted_text[3].split("=")[0].strip()
if not isinstance(obj, SignalObj) and not isinstance(obj, ImpulsiveResponse):
raise TypeError("'obj' must be an one channel SignalObj or" +
" ImpulsiveResponse.")
if isinstance(obj, ImpulsiveResponse):
SigObj = obj.systemSignal
else:
SigObj = obj
if SigObj.numChannels > 1:
raise TypeError("'obj' can't contain more than one channel.")
samplingRate = SigObj.samplingRate
SigObj = crop_IR(SigObj, IREndManualCut)
listEDC = cumulative_integration(SigObj,
bypassLundeby,
plotLundebyResults,
**kwargs)
for _ in params:
if 'RT' in params:
RTdecay = params[params.index('RT')+1]
nthOct = kwargs['nthOct']
RT = reverberation_time(RTdecay, nthOct, samplingRate, listEDC)
result = Analysis(anType='RT', nthOct=nthOct,
minBand=kwargs['minFreq'],
maxBand=kwargs['maxFreq'],
data=RT)
# if 'C' in prm:
# | |
"""
Compute mixed Nash equilibria of a 2-player normal form game by the
Lemke-Howson algorithm.
"""
import numbers
import numpy as np
from numba import jit
from .utilities import NashResult
TOL_PIV = 1e-10
TOL_RATIO_DIFF = 1e-15
def lemke_howson(g, init_pivot=0, max_iter=10**6, capping=None,
full_output=False):
"""
Find one mixed-action Nash equilibrium of a 2-player normal form
game by the Lemke-Howson algorithm [2]_, implemented with
"complementary pivoting" (see, e.g., von Stengel [3]_ for details).
Parameters
----------
g : NormalFormGame
NormalFormGame instance with 2 players.
init_pivot : scalar(int), optional(default=0)
Initial pivot, an integer k such that 0 <= k < m+n, where
integers 0, ..., m-1 and m, ..., m+n-1 correspond to the actions
of players 0 and 1, respectively.
max_iter : scalar(int), optional(default=10**6)
Maximum number of pivoting steps.
capping : scalar(int), optional(default=None)
If supplied, the routine is executed with the heuristics
proposed by Codenotti et al. [1]_; see Notes below for details.
full_output : bool, optional(default=False)
If False, only the computed Nash equilibrium is returned. If
True, the return value is `(NE, res)`, where `NE` is the Nash
equilibrium and `res` is a `NashResult` object.
Returns
-------
NE : tuple(ndarray(float, ndim=1))
Tuple of computed Nash equilibrium mixed actions.
res : NashResult
Object containing information about the computation. Returned
only when `full_output` is True. See `NashResult` for details.
Examples
--------
Consider the following game from von Stengel [3]_:
>>> np.set_printoptions(precision=4) # Reduce the digits printed
>>> bimatrix = [[(3, 3), (3, 2)],
... [(2, 2), (5, 6)],
... [(0, 3), (6, 1)]]
>>> g = NormalFormGame(bimatrix)
Obtain a Nash equilibrium of this game by `lemke_howson` with player
0's action 1 (out of the three actions 0, 1, and 2) as the initial
pivot:
>>> lemke_howson(g, init_pivot=1)
(array([ 0. , 0.3333, 0.6667]), array([ 0.3333, 0.6667]))
>>> g.is_nash(_)
True
Additional information is returned if `full_output` is set True:
>>> NE, res = lemke_howson(g, init_pivot=1, full_output=True)
>>> res.converged # Whether the routine has converged
True
>>> res.num_iter # Number of pivoting steps performed
4
Notes
-----
* This routine is implemented with floating point arithmetic and
thus is subject to numerical instability.
* If `capping` is set to a positive integer, the routine is executed
with the heuristics proposed by [1]_:
* For k = `init_pivot`, `init_pivot` + 1, ..., `init_pivot` +
(m+n-2), (modulo m+n), the Lemke-Howson algorithm is executed
with k as the initial pivot and `capping` as the maximum number
of pivoting steps. If the algorithm converges during this loop,
then the Nash equilibrium found is returned.
* Otherwise, the Lemke-Howson algorithm is executed with
`init_pivot` + (m+n-1) (modulo m+n) as the initial pivot, with a
limit `max_iter` on the total number of pivoting steps.
According to the simulation results for *uniformly random games*,
for medium- to large-size games this heuristics outperforms the
basic Lemke-Howson algorithm with a fixed initial pivot, where
[1]_ suggests that `capping` be set to 10.
References
----------
.. [1] <NAME>, <NAME>, and <NAME>, "An Experimental
Analysis of Lemke-Howson Algorithm," arXiv:0811.3247, 2008.
.. [2] <NAME> and <NAME>, "Equilibrium Points of Bimatrix
Games," Journal of the Society for Industrial and Applied
Mathematics (1964), 413-423.
.. [3] <NAME>, "Equilibrium Computation for Two-Player Games
in Strategic and Extensive Form," Chapter 3, <NAME>, T.
Roughgarden, <NAME>, and <NAME> eds., Algorithmic Game
Theory, 2007.
"""
try:
N = g.N
except AttributeError:
raise TypeError('g must be a 2-player NormalFormGame')
if N != 2:
raise NotImplementedError('Implemented only for 2-player games')
payoff_matrices = g.payoff_arrays
nums_actions = g.nums_actions
total_num = sum(nums_actions)
msg = '`init_pivot` must be an integer k ' + \
'such that 0 <= k < {0}'.format(total_num)
if not isinstance(init_pivot, numbers.Integral):
raise TypeError(msg)
if not (0 <= init_pivot < total_num):
raise ValueError(msg)
if capping is None:
capping = max_iter
tableaux = tuple(
np.empty((nums_actions[1-i], total_num+1)) for i in range(N)
)
bases = tuple(np.empty(nums_actions[1-i], dtype=int) for i in range(N))
converged, num_iter, init_pivot_used = \
_lemke_howson_capping(payoff_matrices, tableaux, bases, init_pivot,
max_iter, capping)
NE = _get_mixed_actions(tableaux, bases)
if not full_output:
return NE
res = NashResult(NE=NE,
converged=converged,
num_iter=num_iter,
max_iter=max_iter,
init=init_pivot_used)
return NE, res
@jit(nopython=True, cache=True)
def _lemke_howson_capping(payoff_matrices, tableaux, bases, init_pivot,
max_iter, capping):
"""
Execute the Lemke-Howson algorithm with the heuristics proposed by
Codenotti et al.
Parameters
----------
payoff_matrices : tuple(ndarray(ndim=2))
Tuple of two arrays representing payoff matrices, of shape
(m, n) and (n, m), respectively.
tableaux : tuple(ndarray(float, ndim=2))
Tuple of two arrays to be used to store the tableaux, of shape
(n, m+n+1) and (m, m+n+1), respectively. Modified in place.
bases : tuple(ndarray(int, ndim=1))
Tuple of two arrays to be used to store the bases, of shape (n,)
and (m,), respectively. Modified in place.
init_pivot : scalar(int)
Integer k such that 0 <= k < m+n.
max_iter : scalar(int)
Maximum number of pivoting steps.
capping : scalar(int)
Value for capping. If set equal to `max_iter`, then the routine
is equivalent to the standard Lemke-Howson algorithm.
"""
m, n = tableaux[1].shape[0], tableaux[0].shape[0]
init_pivot_curr = init_pivot
max_iter_curr = max_iter
total_num_iter = 0
for k in range(m+n-1):
capping_curr = min(max_iter_curr, capping)
_initialize_tableaux(payoff_matrices, tableaux, bases)
converged, num_iter = \
_lemke_howson_tbl(tableaux, bases, init_pivot_curr, capping_curr)
total_num_iter += num_iter
if converged or total_num_iter >= max_iter:
return converged, total_num_iter, init_pivot_curr
init_pivot_curr += 1
if init_pivot_curr >= m + n:
init_pivot_curr -= m + n
max_iter_curr -= num_iter
_initialize_tableaux(payoff_matrices, tableaux, bases)
converged, num_iter = \
_lemke_howson_tbl(tableaux, bases, init_pivot_curr, max_iter_curr)
total_num_iter += num_iter
return converged, total_num_iter, init_pivot_curr
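# Rough sketch of the capping loop above: the m+n-1 candidate pivots
# init_pivot, init_pivot+1, ... (mod m+n) each get at most `capping` pivoting
# steps; only the final candidate is allowed to spend whatever remains of the
# overall `max_iter` budget, which is the heuristic described in the
# lemke_howson docstring.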
@jit(nopython=True, cache=True)
def _initialize_tableaux(payoff_matrices, tableaux, bases):
"""
Given a tuple of payoff matrices, initialize the tableau and basis
arrays in place.
For each player `i`, if `payoff_matrices[i].min()` is non-positive,
then stored in the tableau are payoff values incremented by
`abs(payoff_matrices[i].min()) + 1` (to ensure for the tableau not
to have a negative entry or a column identically zero).
Suppose that the players 0 and 1 have m and n actions, respectively.
* `tableaux[0]` has n rows and m+n+1 columns, where columns 0, ...,
m-1 and m, ..., m+n-1 correspond to the non-slack and slack
variables, respectively.
* `tableaux[1]` has m rows and m+n+1 columns, where columns 0, ...,
m-1 and m, ..., m+n-1 correspond to the slack and non-slack
variables, respectively.
* In each `tableaux[i]`, column m+n contains the values of the basic
variables (which are initially 1).
* `bases[0]` and `bases[1]` contain basic variable indices, which
are initially m, ..., m+n-1 and 0, ..., m-1, respectively.
Parameters
----------
payoff_matrices : tuple(ndarray(ndim=2))
Tuple of two arrays representing payoff matrices, of shape
(m, n) and (n, m), respectively.
tableaux : tuple(ndarray(float, ndim=2))
Tuple of two arrays to be used to store the tableaux, of shape
(n, m+n+1) and (m, m+n+1), respectively. Modified in place.
bases : tuple(ndarray(int, ndim=1))
Tuple of two arrays to be used to store the bases, of shape (n,)
and (m,), respectively. Modified in place.
Returns
-------
tableaux : tuple(ndarray(float, ndim=2))
View to `tableaux`.
bases : tuple(ndarray(int, ndim=1))
View to `bases`.
Examples
--------
>>> A = np.array([[3, 3], [2, 5], [0, 6]])
>>> B = np.array([[3, 2, 3], [2, 6, 1]])
>>> m, n = A.shape
>>> tableaux = (np.empty((n, m+n+1)), np.empty((m, m+n+1)))
>>> bases = (np.empty(n, dtype=int), np.empty(m, dtype=int))
>>> tableaux, bases = _initialize_tableaux((A, B), tableaux, bases)
>>> tableaux[0]
array([[ 3., 2., 3., 1., 0., 1.],
[ 2., 6., 1., 0., 1., 1.]])
>>> tableaux[1]
array([[ 1., 0., 0., 4., 4., 1.],
[ 0., 1., 0., 3., 6., 1.],
[ 0., 0., 1., 1., 7., 1.]])
>>> bases
(array([3, 4]), array([0, 1, 2]))
"""
nums_actions = payoff_matrices[0].shape
consts = np.zeros(2) # To be added to payoffs if min <= 0
for pl in range(2):
min_ = payoff_matrices[pl].min()
if min_ <= 0:
consts[pl] = min_ * (-1) + 1
for pl, (py_start, sl_start) in enumerate(zip((0, nums_actions[0]),
(nums_actions[0], 0))):
for i in range(nums_actions[1-pl]):
for j in range(nums_actions[pl]):
tableaux[pl][i, py_start+j] = \
payoff_matrices[1-pl][i, j] + consts[1-pl]
for j in range(nums_actions[1-pl]):
if j == i:
tableaux[pl][i, sl_start+j] = 1
else:
tableaux[pl][i, sl_start+j] = 0
tableaux[pl][i, -1] = 1
for i in range(nums_actions[1-pl]):
bases[pl][i] = | |
return super(_ColumnClause, self).label(name)
def accept_visitor(self, visitor):
visitor.visit_column(self)
def to_selectable(self, selectable):
"""Given a ``Selectable``, return this column's equivalent in
that ``Selectable``, if any.
For example, this could translate the column *name* from a
``Table`` object to an ``Alias`` of a ``Select`` off of that
``Table`` object."""
return selectable.corresponding_column(self.original, False)
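# Illustrative sketch (table/column names are made up): if `users` is a Table
# and `a = users.alias()`, then users.c.name.to_selectable(a) returns a.c.name,
# i.e. the same logical column expressed against the Alias instead of the Table.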
def _get_from_objects(self):
if self.table is not None:
return [self.table]
else:
return []
def _bind_param(self, obj):
return _BindParamClause(self._label, obj, shortname = self.name, type=self.type, unique=True)
def _make_proxy(self, selectable, name = None):
# propigate the "is_literal" flag only if we are keeping our name,
# otherwise its considered to be a label
is_literal = self.is_literal and (name is None or name == self.name)
c = _ColumnClause(name or self.name, selectable=selectable, _is_oid=self._is_oid, type=self.type, is_literal=is_literal)
c.orig_set = self.orig_set
if not self._is_oid:
selectable.columns[c.name] = c
return c
def _compare_type(self, obj):
return self.type
def _group_parenthesized(self):
return False
class TableClause(FromClause):
def __init__(self, name, *columns):
super(TableClause, self).__init__(name)
self.name = self.fullname = name
self._columns = ColumnCollection()
self._foreign_keys = util.OrderedSet()
self._primary_key = ColumnCollection()
for c in columns:
self.append_column(c)
self._oid_column = _ColumnClause('oid', self, _is_oid=True)
def named_with_column(self):
return True
def append_column(self, c):
self._columns[c.name] = c
c.table = self
def _locate_oid_column(self):
return self._oid_column
def _orig_columns(self):
try:
return self._orig_cols
except AttributeError:
self._orig_cols= {}
for c in self.columns:
for ci in c.orig_set:
self._orig_cols[ci] = c
return self._orig_cols
original_columns = property(_orig_columns)
def get_children(self, column_collections=True, **kwargs):
if column_collections:
return [c for c in self.c]
else:
return []
def accept_visitor(self, visitor):
visitor.visit_table(self)
def _exportable_columns(self):
raise NotImplementedError()
def _group_parenthesized(self):
return False
def count(self, whereclause=None, **params):
if len(self.primary_key):
col = list(self.primary_key)[0]
else:
col = list(self.columns)[0]
return select([func.count(col).label('tbl_row_count')], whereclause, from_obj=[self], **params)
def join(self, right, *args, **kwargs):
return Join(self, right, *args, **kwargs)
def outerjoin(self, right, *args, **kwargs):
return Join(self, right, isouter = True, *args, **kwargs)
def alias(self, name=None):
return Alias(self, name)
def select(self, whereclause = None, **params):
return select([self], whereclause, **params)
def insert(self, values = None):
return insert(self, values=values)
def update(self, whereclause = None, values = None):
return update(self, whereclause, values)
def delete(self, whereclause = None):
return delete(self, whereclause)
def _get_from_objects(self):
return [self]
class _SelectBaseMixin(object):
"""Base class for ``Select`` and ``CompoundSelects``."""
def supports_execution(self):
return True
def order_by(self, *clauses):
if len(clauses) == 1 and clauses[0] is None:
self.order_by_clause = ClauseList()
elif getattr(self, 'order_by_clause', None):
self.order_by_clause = ClauseList(*(list(self.order_by_clause.clauses) + list(clauses)))
else:
self.order_by_clause = ClauseList(*clauses)
def group_by(self, *clauses):
if len(clauses) == 1 and clauses[0] is None:
self.group_by_clause = ClauseList()
elif getattr(self, 'group_by_clause', None):
self.group_by_clause = ClauseList(*(list(clauses)+list(self.group_by_clause.clauses)))
else:
self.group_by_clause = ClauseList(*clauses)
def select(self, whereclauses = None, **params):
return select([self], whereclauses, **params)
def _get_from_objects(self):
if self.is_where or self.is_scalar:
return []
else:
return [self]
class CompoundSelect(_SelectBaseMixin, FromClause):
def __init__(self, keyword, *selects, **kwargs):
_SelectBaseMixin.__init__(self)
self.keyword = keyword
self.use_labels = kwargs.pop('use_labels', False)
self.parens = kwargs.pop('parens', False)
self.should_correlate = kwargs.pop('correlate', False)
self.for_update = kwargs.pop('for_update', False)
self.nowait = kwargs.pop('nowait', False)
self.limit = kwargs.pop('limit', None)
self.offset = kwargs.pop('offset', None)
self.is_compound = True
self.is_where = False
self.is_scalar = False
self.selects = selects
# some DBs do not like ORDER BY in the inner queries of a UNION, etc.
for s in selects:
s.group_by(None)
s.order_by(None)
self.group_by(*kwargs.pop('group_by', [None]))
self.order_by(*kwargs.pop('order_by', [None]))
if len(kwargs):
raise TypeError("invalid keyword argument(s) for CompoundSelect: %s" % repr(kwargs.keys()))
self._col_map = {}
name = property(lambda s:s.keyword + " statement")
def _locate_oid_column(self):
return self.selects[0].oid_column
def _exportable_columns(self):
for s in self.selects:
for c in s.c:
yield c
def _proxy_column(self, column):
if self.use_labels:
col = column._make_proxy(self, name=column._label)
else:
col = column._make_proxy(self)
try:
colset = self._col_map[col.name]
except KeyError:
colset = util.Set()
self._col_map[col.name] = colset
[colset.add(c) for c in col.orig_set]
col.orig_set = colset
return col
def get_children(self, column_collections=True, **kwargs):
return (column_collections and list(self.c) or []) + \
[self.order_by_clause, self.group_by_clause] + list(self.selects)
def accept_visitor(self, visitor):
visitor.visit_compound_select(self)
def _find_engine(self):
for s in self.selects:
e = s._find_engine()
if e:
return e
else:
return None
class Select(_SelectBaseMixin, FromClause):
"""Represent a ``SELECT`` statement, with appendable clauses, as
well as the ability to execute itself and return a result set.
"""
def __init__(self, columns=None, whereclause = None, from_obj = [], order_by = None, group_by=None, having=None, use_labels = False, distinct=False, for_update=False, engine=None, limit=None, offset=None, scalar=False, correlate=True):
_SelectBaseMixin.__init__(self)
self.__froms = util.OrderedSet()
self.__hide_froms = util.Set([self])
self.use_labels = use_labels
self.whereclause = None
self.having = None
self._engine = engine
self.limit = limit
self.offset = offset
self.for_update = for_update
self.is_compound = False
# indicates that this select statement should not expand its columns
# into the column clause of an enclosing select, and should instead
# act like a single scalar column
self.is_scalar = scalar
# indicates if this select statement, as a subquery, should automatically correlate
# its FROM clause to that of an enclosing select statement.
# note that the "correlate" method can be used to explicitly add a value to be correlated.
self.should_correlate = correlate
# indicates if this select statement is a subquery inside another query
self.is_subquery = False
# indicates if this select statement is a subquery as a criterion
# inside of a WHERE clause
self.is_where = False
self.distinct = distinct
self._raw_columns = []
self.__correlated = {}
self.__correlator = Select._CorrelatedVisitor(self, False)
self.__wherecorrelator = Select._CorrelatedVisitor(self, True)
if columns is not None:
for c in columns:
self.append_column(c)
self.order_by(*(order_by or [None]))
self.group_by(*(group_by or [None]))
for c in self.order_by_clause:
self.__correlator.traverse(c)
for c in self.group_by_clause:
self.__correlator.traverse(c)
for f in from_obj:
self.append_from(f)
# whereclauses must be appended after the columns/FROM, since it affects
# the correlation of subqueries. see test/sql/select.py SelectTest.testwheresubquery
if whereclause is not None:
self.append_whereclause(whereclause)
if having is not None:
self.append_having(having)
class _CorrelatedVisitor(NoColumnVisitor):
"""Visit a clause, locate any ``Select`` clauses, and tell
them that they should correlate their ``FROM`` list to that of
their parent.
"""
def __init__(self, select, is_where):
NoColumnVisitor.__init__(self)
self.select = select
self.is_where = is_where
def visit_compound_select(self, cs):
self.visit_select(cs)
for s in cs.selects:
s.parens = False
def visit_column(self, c):pass
def visit_table(self, c):pass
def visit_select(self, select):
if select is self.select:
return
select.is_where = self.is_where
select.is_subquery = True
select.parens = True
if not select.should_correlate:
return
[select.correlate(x) for x in self.select._Select__froms]
def append_column(self, column):
if _is_literal(column):
column = literal_column(str(column), table=self)
self._raw_columns.append(column)
if self.is_scalar and not hasattr(self, 'type'):
self.type = column.type
# if the column is a Select statement itself,
# accept visitor
self.__correlator.traverse(column)
# visit the FROM objects of the column looking for more Selects
for f in column._get_from_objects():
if f is not self:
self.__correlator.traverse(f)
self._process_froms(column, False)
def _make_proxy(self, selectable, name):
if self.is_scalar:
return self._raw_columns[0]._make_proxy(selectable, name)
else:
raise exceptions.InvalidRequestError("Not a scalar select statement")
def label(self, name):
if not self.is_scalar:
raise exceptions.InvalidRequestError("Not a scalar select statement")
else:
return label(name, self)
def _exportable_columns(self):
return [c for c in self._raw_columns if isinstance(c, Selectable)]
def _proxy_column(self, column):
if self.use_labels:
return column._make_proxy(self, name=column._label)
else:
return column._make_proxy(self)
def _process_froms(self, elem, asfrom):
for f in elem._get_from_objects():
self.__froms.add(f)
if asfrom:
self.__froms.add(elem)
for f in elem._hide_froms():
self.__hide_froms.add(f)
def append_whereclause(self, whereclause):
self._append_condition('whereclause', whereclause)
def append_having(self, having):
self._append_condition('having', having)
def _append_condition(self, attribute, condition):
if type(condition) == str:
condition = _TextClause(condition)
self.__wherecorrelator.traverse(condition)
self._process_froms(condition, False)
if getattr(self, attribute) is not None:
setattr(self, attribute, and_(getattr(self, attribute), condition))
else:
setattr(self, attribute, condition)
def correlate(self, from_obj):
"""Given a ``FROM`` object, correlate this ``SELECT`` statement to it.
This basically means the given from object will not come out
in this select statement's ``FROM`` clause when printed.
"""
self.__correlated[from_obj] = from_obj
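# Illustrative sketch (made-up names): for a correlated subquery such as
#   s = select([func.count(addresses.c.id)], addresses.c.user_id == users.c.id)
#   s.correlate(users)
# the `users` table is dropped from the subquery's own FROM clause and is
# instead supplied by the enclosing SELECT when the statement is compiled.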
def append_from(self, fromclause):
if type(fromclause) == str:
fromclause = FromClause(fromclause)
self.__correlator.traverse(fromclause)
self._process_froms(fromclause, True)
def _locate_oid_column(self):
for f in self.__froms:
if f is self:
# we might be in our own _froms list if a column with us as the parent is attached,
# which includes textual columns.
continue
oid = f.oid_column
if oid is not None:
return oid
else:
return None
def _calc_froms(self):
f = self.__froms.difference(self.__hide_froms)
if (len(f) > 1):
return f.difference(self.__correlated)
else:
return f
froms = property(_calc_froms, doc="""A collection containing all elements of the FROM clause""")
def get_children(self, column_collections=True, **kwargs):
return (column_collections and list(self.columns) or []) + \
list(self.froms) + \
[x for x in (self.whereclause, self.having) if x is not None] + \
[self.order_by_clause, self.group_by_clause]
def accept_visitor(self, visitor):
visitor.visit_select(self)
def union(self, other, **kwargs):
return union(self, | |
list):
self.storage['bucket_index'] = histogram[:]
self.storage['bucket_index'].append(maxint)
self.storage['buckets'] = [0] * (len(histogram) + 1)
self.storage['upstream_buckets'] = [0] * (len(histogram) + 1)
hist_lines = self.definitions['response_time_hist']['lines']
upstream_hist_lines = self.definitions['response_time_upstream_hist']['lines']
for i, le in enumerate(histogram):
hist_key = 'response_time_hist_%d' % i
upstream_hist_key = 'response_time_upstream_hist_%d' % i
hist_lines.append([hist_key, str(le), 'incremental', 1, 1])
upstream_hist_lines.append([upstream_hist_key, str(le), 'incremental', 1, 1])
hist_lines.append(['response_time_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
upstream_hist_lines.append(['response_time_upstream_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
elif histogram is not None:
self.error('expected histogram list, but got {0}'.format(type(histogram)))
if not self.configuration.get('all_time', True):
self.order.remove('clients_all')
# Add 'detailed_response_codes' chart if specified in the configuration
if self.configuration.get('detailed_response_codes', True):
if self.configuration.get('detailed_response_aggregate', True):
codes = DET_RESP_AGGR[:1]
else:
codes = DET_RESP_AGGR[1:]
for code in codes:
self.order.append('detailed_response_codes%s' % code)
self.definitions['detailed_response_codes%s' % code] = {
'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
'web_log.detailed_response_codes%s' % code, 'stacked'],
'lines': []
}
# Add 'requests_per_url' chart if specified in the configuration
if self.storage['url_pattern']:
for elem in self.storage['url_pattern']:
dim = [elem.description, elem.description[12:], 'incremental']
self.definitions['requests_per_url']['lines'].append(dim)
self.data[elem.description] = 0
self.data['url_pattern_other'] = 0
else:
self.order.remove('requests_per_url')
# Add 'requests_per_user_defined' chart if specified in the configuration
if self.storage['user_pattern'] and 'user_defined' in match_dict:
for elem in self.storage['user_pattern']:
dim = [elem.description, elem.description[13:], 'incremental']
self.definitions['requests_per_user_defined']['lines'].append(dim)
self.data[elem.description] = 0
self.data['user_pattern_other'] = 0
else:
self.order.remove('requests_per_user_defined')
def get_data(self, raw_data=None):
"""
Parses new log lines
:return: dict OR None
None if _get_raw_data method fails.
In all other cases - dict.
"""
if not raw_data:
return None if raw_data is None else self.data
filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
unique_current = set()
timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
for line in filtered_data:
match = self.storage['regex'].search(line)
if match:
match_dict = match.groupdict()
try:
code = match_dict['code'][0] + 'xx'
self.data[code] += 1
except KeyError:
self.data['0xx'] += 1
# detailed response code
if self.configuration.get('detailed_response_codes', True):
self.get_data_per_response_codes_detailed(code=match_dict['code'])
# response statuses
self.get_data_per_statuses(code=match_dict['code'])
# requests per user defined pattern
if self.storage['user_pattern'] and 'user_defined' in match_dict:
self.get_data_per_pattern(row=match_dict['user_defined'],
other='user_pattern_other',
pattern=self.storage['user_pattern'])
# method, url, http version
self.get_data_from_request_field(match_dict=match_dict)
# bandwidth sent
bytes_sent = match_dict['bytes_sent'] if '-' not in match_dict['bytes_sent'] else 0
self.data['bytes_sent'] += int(bytes_sent)
# request processing time and bandwidth received
if 'resp_length' in match_dict:
resp_length = match_dict['resp_length'] if '-' not in match_dict['resp_length'] else 0
self.data['resp_length'] += int(resp_length)
if 'resp_time' in match_dict:
resp_time = self.storage['func_resp_time'](float(match_dict['resp_time']))
get_timings(timings=timings['resp_time'], time=resp_time)
if 'bucket_index' in self.storage:
get_hist(self.storage['bucket_index'], self.storage['buckets'], resp_time / 1000)
if 'resp_time_upstream' in match_dict and match_dict['resp_time_upstream'] != '-':
resp_time_upstream = self.storage['func_resp_time'](float(match_dict['resp_time_upstream']))
get_timings(timings=timings['resp_time_upstream'], time=resp_time_upstream)
if 'bucket_index' in self.storage:
get_hist(self.storage['bucket_index'], self.storage['upstream_buckets'], resp_time_upstream / 1000)
# requests per ip proto
proto = 'ipv6' if ':' in match_dict['address'] else 'ipv4'
self.data['req_' + proto] += 1
# unique clients ips
if self.configuration.get('all_time', True):
if address_not_in_pool(pool=self.storage['unique_all_time'],
address=match_dict['address'],
pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
self.data['unique_tot_' + proto] += 1
if match_dict['address'] not in unique_current:
self.data['unique_cur_' + proto] += 1
unique_current.add(match_dict['address'])
else:
self.data['unmatched'] += 1
# timings
for elem in timings:
self.data[elem + '_min'] += timings[elem]['minimum']
self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
self.data[elem + '_max'] += timings[elem]['maximum']
# histogram
if 'bucket_index' in self.storage:
buckets = self.storage['buckets']
upstream_buckets = self.storage['upstream_buckets']
for i in range(0, len(self.storage['bucket_index'])):
hist_key = 'response_time_hist_%d' % i
upstream_hist_key = 'response_time_upstream_hist_%d' % i
self.data[hist_key] = buckets[i]
self.data[upstream_hist_key] = upstream_buckets[i]
return self.data
def find_regex(self, last_line):
"""
:param last_line: str: literally last line from log file
:return: tuple where:
[0]: dict or None: match_dict or None
[1]: str: error description
We need to find appropriate pattern for current log file
The logic is to do a regex search through the string for all predefined patterns
until we find something or fail.
"""
# REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code
# 5. Bytes sent 6. Response length 7. Response process time
default = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
r' -.*?"(?P<request>[^"]*)"'
r' (?P<code>[1-9]\d{2})'
r' (?P<bytes_sent>\d+|-)')
apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
r' -.*?"(?P<request>[^"]*)"'
r' (?P<code>[1-9]\d{2})'
r' (?P<bytes_sent>\d+|-)'
r' (?P<resp_length>\d+|-)'
r' (?P<resp_time>\d+) ')
apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
r' -.*?"(?P<request>[^"]*)"'
r' (?P<code>[1-9]\d{2})'
r' (?P<bytes_sent>\d+|-)'
r' .*?'
r' (?P<resp_length>\d+|-)'
r' (?P<resp_time>\d+)'
r'(?: |$)')
nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
r' -.*?"(?P<request>[^"]*)"'
r' (?P<code>[1-9]\d{2})'
r' (?P<bytes_sent>\d+)'
r' (?P<resp_length>\d+)'
r' (?P<resp_time>\d+\.\d+) ')
nginx_ext2_insert = re.compile(r'(?P<address>[\da-f.:]+)'
r' -.*?"(?P<request>[^"]*)"'
r' (?P<code>[1-9]\d{2})'
r' (?P<bytes_sent>\d+)'
r' (?P<resp_length>\d+)'
r' (?P<resp_time>\d+\.\d+)'
r' (?P<resp_time_upstream>[\d.-]+)')
nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
r' -.*?"(?P<request>[^"]*)"'
r' (?P<code>[1-9]\d{2})'
r' (?P<bytes_sent>\d+)'
r' .*?'
r' (?P<resp_length>\d+)'
r' (?P<resp_time>\d+\.\d+)')
def func_usec(time):
return time
def func_sec(time):
return time * 1000000
r_regex = [apache_ext_insert, apache_ext_append,
nginx_ext2_insert, nginx_ext_insert, nginx_ext_append,
default]
r_function = [func_usec, func_usec, func_sec, func_sec, func_sec, func_usec]
regex_function = zip(r_regex, r_function)
match_dict = dict()
for regex, func in regex_function:
match = regex.search(last_line)
if match:
self.storage['regex'] = regex
self.storage['func_resp_time'] = func
match_dict = match.groupdict()
break
return find_regex_return(match_dict=match_dict or None,
msg='Unknown log format. You need to use "custom_log_format" feature.')
def find_regex_custom(self, last_line):
"""
:param last_line: str: literally last line from log file
:return: tuple where:
[0]: dict or None: match_dict or None
[1]: str: error description
We are here only if "custom_log_format" is present in the job configuration. We need to make sure:
1. "custom_log_format" is a dict
2. "pattern" in "custom_log_format" and pattern is <str> instance
3. if "time_multiplier" is in "custom_log_format" it must be <int> or <float> instance
If all parameters are OK, we need to make sure:
1. Pattern search is success
2. Pattern search contains named subgroups (?P<subgroup_name>) (= "match_dict")
If pattern search is success we need to make sure:
1. All mandatory keys ['address', 'code', 'bytes_sent'] are in "match_dict"
If this is True we need to make sure:
1. All mandatory key values from "match_dict" have the correct format
("code" is integer, "method" is uppercase word, etc)
If non-mandatory keys are in "match_dict", we need to make sure:
1. All non-mandatory key values from match_dict ['resp_length', 'resp_time'] have the correct format
("resp_length" is integer or "-", "resp_time" is integer or float)
"""
if not hasattr(self.configuration.get('custom_log_format'), 'keys'):
return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>')
pattern = self.configuration.get('custom_log_format', dict()).get('pattern')
if not (pattern and isinstance(pattern, str)):
return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>')
resp_time_func = self.configuration.get('custom_log_format', dict()).get('time_multiplier') or 0
if not isinstance(resp_time_func, (int, float)):
return find_regex_return(msg='Custom log: "time_multiplier" is not an integer or a float')
try:
regex = re.compile(pattern)
except re.error as error:
return find_regex_return(msg='Pattern compile error: %s' % str(error))
match = regex.search(last_line)
if not match:
return find_regex_return(msg='Custom log: pattern search FAILED')
match_dict = match.groupdict() or None
if match_dict is None:
return find_regex_return(msg='Custom log: search OK but contains no named subgroups'
' (you need to use ?P<subgroup_name>)')
mandatory_dict = {'address': r'[\w.:-]+',
'code': r'[1-9]\d{2}',
'bytes_sent': r'\d+|-'}
optional_dict = {'resp_length': r'\d+|-',
'resp_time': r'[\d.]+',
'resp_time_upstream': r'[\d.-]+',
'method': r'[A-Z]+',
'http_version': r'\d(?:\.\d)?'}
mandatory_values = set(mandatory_dict) - set(match_dict)
if mandatory_values:
return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing'
% list(mandatory_values))
for key in mandatory_dict:
if not re.search(mandatory_dict[key], match_dict[key]):
return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
% (key, match_dict[key]))
optional_values = set(optional_dict) & set(match_dict)
for key in optional_values:
if not re.search(optional_dict[key], match_dict[key]):
return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
% (key, match_dict[key]))
dot_in_time = '.' in match_dict.get('resp_time', '')
if dot_in_time:
self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1000000)
else:
self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1)
self.storage['regex'] = regex
return find_regex_return(match_dict=match_dict)
def get_data_from_request_field(self, match_dict):
if match_dict.get('request'):
match_dict = REQUEST_REGEX.search(match_dict['request'])
if match_dict:
match_dict = match_dict.groupdict()
else:
return
# requests per url
if match_dict.get('url') and self.storage['url_pattern']:
self.get_data_per_pattern(row=match_dict['url'],
other='url_pattern_other',
pattern=self.storage['url_pattern'])
# requests per http method
if match_dict.get('method'):
if match_dict['method'] not in self.data:
self.charts['http_method'].add_dimension([match_dict['method'],
match_dict['method'],
'incremental'])
self.data[match_dict['method']] = 0
self.data[match_dict['method']] += 1
# requests per http version
if match_dict.get('http_version'):
dim_id = match_dict['http_version'].replace('.', '_')
if dim_id not in self.data:
self.charts['http_version'].add_dimension([dim_id,
match_dict['http_version'],
'incremental'])
self.data[dim_id] = 0
self.data[dim_id] += 1
# requests per port number
if match_dict.get('port'):
if match_dict['port'] not in self.data:
self.charts['port'].add_dimension([match_dict['port'],
match_dict['port'],
'incremental'])
self.data[match_dict['port']] = 0
self.data[match_dict['port']] += 1
# requests per vhost
if match_dict.get('vhost'):
dim_id = match_dict['vhost'].replace('.', '_')
if dim_id not in self.data:
self.charts['vhost'].add_dimension([dim_id,
match_dict['vhost'],
'incremental'])
self.data[dim_id] = 0
self.data[dim_id] += 1
def get_data_per_response_codes_detailed(self, code):
"""
:param code: str: response code from the parsed line. Ex.: '202', '499'
:return:
Adds a new chart dimension if the response code is seen for the first time
"""
if code not in self.data:
if self.configuration.get('detailed_response_aggregate', True):
self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
self.data[code] = 0
'cov_x_x' / construct.Float32l,
'cov_x_y' / construct.Float32l,
'cov_x_z' / construct.Float32l,
'cov_y_y' / construct.Float32l,
'cov_y_z' / construct.Float32l,
'cov_z_z' / construct.Float32l,
'n_sats' / construct.Int8ul,
'flags' / construct.Int8ul,)
__slots__ = [
'tow',
'x',
'y',
'z',
'cov_x_x',
'cov_x_y',
'cov_x_z',
'cov_y_y',
'cov_y_z',
'cov_z_z',
'n_sats',
'flags',
]
def __init__(self, sbp=None, **kwargs):
if sbp:
super( MsgVelECEFCovGnss,
self).__init__(sbp.msg_type, sbp.sender, sbp.length,
sbp.payload, sbp.crc)
self.from_binary(sbp.payload)
else:
super( MsgVelECEFCovGnss, self).__init__()
self.msg_type = SBP_MSG_VEL_ECEF_COV_GNSS
self.sender = kwargs.pop('sender', SENDER_ID)
self.tow = kwargs.pop('tow')
self.x = kwargs.pop('x')
self.y = kwargs.pop('y')
self.z = kwargs.pop('z')
self.cov_x_x = kwargs.pop('cov_x_x')
self.cov_x_y = kwargs.pop('cov_x_y')
self.cov_x_z = kwargs.pop('cov_x_z')
self.cov_y_y = kwargs.pop('cov_y_y')
self.cov_y_z = kwargs.pop('cov_y_z')
self.cov_z_z = kwargs.pop('cov_z_z')
self.n_sats = kwargs.pop('n_sats')
self.flags = kwargs.pop('flags')
def __repr__(self):
return fmt_repr(self)
@staticmethod
def from_json(s):
"""Given a JSON-encoded string s, build a message object.
"""
d = json.loads(s)
return MsgVelECEFCovGnss.from_json_dict(d)
@staticmethod
def from_json_dict(d):
sbp = SBP.from_json_dict(d)
return MsgVelECEFCovGnss(sbp, **d)
def from_binary(self, d):
"""Given a binary payload d, update the appropriate payload fields of
the message.
"""
p = MsgVelECEFCovGnss._parser.parse(d)
for n in self.__class__.__slots__:
setattr(self, n, getattr(p, n))
def to_binary(self):
"""Produce a framed/packed SBP message.
"""
c = containerize(exclude_fields(self))
self.payload = MsgVelECEFCovGnss._parser.build(c)
return self.pack()
def into_buffer(self, buf, offset):
"""Produce a framed/packed SBP message into the provided buffer and offset.
"""
self.payload = containerize(exclude_fields(self))
self.parser = MsgVelECEFCovGnss._parser
self.stream_payload.reset(buf, offset)
return self.pack_into(buf, offset, self._build_payload)
def to_json_dict(self):
self.to_binary()
d = super( MsgVelECEFCovGnss, self).to_json_dict()
j = walk_json_dict(exclude_fields(self))
d.update(j)
return d
SBP_MSG_VEL_NED_GNSS = 0x022E
class MsgVelNEDGnss(SBP):
"""SBP class for message MSG_VEL_NED_GNSS (0x022E).
You can have MSG_VEL_NED_GNSS inherit its fields directly
from an inherited SBP object, or construct it inline using a dict
of its fields.
This message reports the velocity in local North East Down (NED)
coordinates. The NED coordinate system is defined as the local WGS84 tangent
plane centered at the current position. The full GPS time is given by the
preceding MSG_GPS_TIME with the matching time-of-week (tow).
Parameters
----------
sbp : SBP
SBP parent object to inherit from.
tow : int
GPS Time of Week
n : int
Velocity North coordinate
e : int
Velocity East coordinate
d : int
Velocity Down coordinate
h_accuracy : int
Horizontal velocity estimated standard deviation
v_accuracy : int
Vertical velocity estimated standard deviation
n_sats : int
Number of satellites used in solution
flags : int
Status flags
sender : int
Optional sender ID, defaults to SENDER_ID (see sbp/msg.py).
"""
_parser = construct.Struct(
'tow' / construct.Int32ul,
'n' / construct.Int32sl,
'e' / construct.Int32sl,
'd' / construct.Int32sl,
'h_accuracy' / construct.Int16ul,
'v_accuracy' / construct.Int16ul,
'n_sats' / construct.Int8ul,
'flags' / construct.Int8ul,)
__slots__ = [
'tow',
'n',
'e',
'd',
'h_accuracy',
'v_accuracy',
'n_sats',
'flags',
]
def __init__(self, sbp=None, **kwargs):
if sbp:
super( MsgVelNEDGnss,
self).__init__(sbp.msg_type, sbp.sender, sbp.length,
sbp.payload, sbp.crc)
self.from_binary(sbp.payload)
else:
super( MsgVelNEDGnss, self).__init__()
self.msg_type = SBP_MSG_VEL_NED_GNSS
self.sender = kwargs.pop('sender', SENDER_ID)
self.tow = kwargs.pop('tow')
self.n = kwargs.pop('n')
self.e = kwargs.pop('e')
self.d = kwargs.pop('d')
self.h_accuracy = kwargs.pop('h_accuracy')
self.v_accuracy = kwargs.pop('v_accuracy')
self.n_sats = kwargs.pop('n_sats')
self.flags = kwargs.pop('flags')
def __repr__(self):
return fmt_repr(self)
@staticmethod
def from_json(s):
"""Given a JSON-encoded string s, build a message object.
"""
d = json.loads(s)
return MsgVelNEDGnss.from_json_dict(d)
@staticmethod
def from_json_dict(d):
sbp = SBP.from_json_dict(d)
return MsgVelNEDGnss(sbp, **d)
def from_binary(self, d):
"""Given a binary payload d, update the appropriate payload fields of
the message.
"""
p = MsgVelNEDGnss._parser.parse(d)
for n in self.__class__.__slots__:
setattr(self, n, getattr(p, n))
def to_binary(self):
"""Produce a framed/packed SBP message.
"""
c = containerize(exclude_fields(self))
self.payload = MsgVelNEDGnss._parser.build(c)
return self.pack()
def into_buffer(self, buf, offset):
"""Produce a framed/packed SBP message into the provided buffer and offset.
"""
self.payload = containerize(exclude_fields(self))
self.parser = MsgVelNEDGnss._parser
self.stream_payload.reset(buf, offset)
return self.pack_into(buf, offset, self._build_payload)
def to_json_dict(self):
self.to_binary()
d = super( MsgVelNEDGnss, self).to_json_dict()
j = walk_json_dict(exclude_fields(self))
d.update(j)
return d
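# Illustrative usage sketch (field values are made up): construct the message
# inline from keyword arguments and frame it for transmission.
# msg = MsgVelNEDGnss(tow=501867000, n=120, e=-35, d=4,
#                     h_accuracy=12, v_accuracy=20, n_sats=14, flags=2)
# framed = msg.to_binary()   # framed/packed SBP bytes (also populates msg.payload)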
SBP_MSG_VEL_NED_COV_GNSS = 0x0232
class MsgVelNEDCovGnss(SBP):
"""SBP class for message MSG_VEL_NED_COV_GNSS (0x0232).
You can have MSG_VEL_NED_COV_GNSS inherit its fields directly
from an inherited SBP object, or construct it inline using a dict
of its fields.
This message reports the velocity in local North East Down (NED)
coordinates. The NED coordinate system is defined as the local WGS84 tangent
plane centered at the current position. The full GPS time is given by the
preceding MSG_GPS_TIME with the matching time-of-week (tow). This message is
similar to the MSG_VEL_NED, but it includes the upper triangular portion of
the 3x3 covariance matrix.
Parameters
----------
sbp : SBP
SBP parent object to inherit from.
tow : int
GPS Time of Week
n : int
Velocity North coordinate
e : int
Velocity East coordinate
d : int
Velocity Down coordinate
cov_n_n : float
Estimated variance of northward measurement
cov_n_e : float
Covariance of northward and eastward measurement
cov_n_d : float
Covariance of northward and downward measurement
cov_e_e : float
Estimated variance of eastward measurement
cov_e_d : float
Covariance of eastward and downward measurement
cov_d_d : float
Estimated variance of downward measurement
n_sats : int
Number of satellites used in solution
flags : int
Status flags
sender : int
Optional sender ID, defaults to SENDER_ID (see sbp/msg.py).
"""
_parser = construct.Struct(
'tow' / construct.Int32ul,
'n' / construct.Int32sl,
'e' / construct.Int32sl,
'd' / construct.Int32sl,
'cov_n_n' / construct.Float32l,
'cov_n_e' / construct.Float32l,
'cov_n_d' / construct.Float32l,
'cov_e_e' / construct.Float32l,
'cov_e_d' / construct.Float32l,
'cov_d_d' / construct.Float32l,
'n_sats' / construct.Int8ul,
'flags' / construct.Int8ul,)
__slots__ = [
'tow',
'n',
'e',
'd',
'cov_n_n',
'cov_n_e',
'cov_n_d',
'cov_e_e',
'cov_e_d',
'cov_d_d',
'n_sats',
'flags',
]
def __init__(self, sbp=None, **kwargs):
if sbp:
super( MsgVelNEDCovGnss,
self).__init__(sbp.msg_type, sbp.sender, sbp.length,
sbp.payload, sbp.crc)
self.from_binary(sbp.payload)
else:
super( MsgVelNEDCovGnss, self).__init__()
self.msg_type = SBP_MSG_VEL_NED_COV_GNSS
self.sender = kwargs.pop('sender', SENDER_ID)
self.tow = kwargs.pop('tow')
self.n = kwargs.pop('n')
self.e = kwargs.pop('e')
self.d = kwargs.pop('d')
self.cov_n_n = kwargs.pop('cov_n_n')
self.cov_n_e = kwargs.pop('cov_n_e')
self.cov_n_d = kwargs.pop('cov_n_d')
self.cov_e_e = kwargs.pop('cov_e_e')
self.cov_e_d = kwargs.pop('cov_e_d')
self.cov_d_d = kwargs.pop('cov_d_d')
self.n_sats = kwargs.pop('n_sats')
self.flags = kwargs.pop('flags')
def __repr__(self):
return fmt_repr(self)
@staticmethod
def from_json(s):
"""Given a JSON-encoded string s, build a message object.
"""
d = json.loads(s)
return MsgVelNEDCovGnss.from_json_dict(d)
@staticmethod
def from_json_dict(d):
sbp = SBP.from_json_dict(d)
return MsgVelNEDCovGnss(sbp, **d)
def from_binary(self, d):
"""Given a binary payload d, update the appropriate payload fields of
the message.
"""
p = MsgVelNEDCovGnss._parser.parse(d)
for n in self.__class__.__slots__:
setattr(self, n, getattr(p, n))
def to_binary(self):
"""Produce a framed/packed SBP message.
"""
c = containerize(exclude_fields(self))
self.payload = MsgVelNEDCovGnss._parser.build(c)
return self.pack()
def into_buffer(self, buf, offset):
"""Produce a framed/packed SBP message into the provided buffer and offset.
"""
self.payload = containerize(exclude_fields(self))
self.parser = MsgVelNEDCovGnss._parser
self.stream_payload.reset(buf, offset)
return self.pack_into(buf, offset, self._build_payload)
def to_json_dict(self):
self.to_binary()
d = super( MsgVelNEDCovGnss, self).to_json_dict()
j = walk_json_dict(exclude_fields(self))
d.update(j)
return d
SBP_MSG_VEL_BODY = 0x0213
class MsgVelBody(SBP):
"""SBP class for message MSG_VEL_BODY (0x0213).
You can have MSG_VEL_BODY inherit its fields directly
from an inherited SBP object, or construct it inline using a dict
of its fields.
This message reports the velocity in the Vehicle Body Frame. By convention,
the x-axis should point out the nose of the vehicle and represent the
forward direction, while the y-axis should point out the right hand side
of the vehicle. Since this is a right handed system, z should point out the
bottom of the vehicle. The orientation and origin of the Vehicle Body Frame
are specified via the device settings. The full GPS time is given by the
preceding MSG_GPS_TIME with the matching time-of-week (tow). This message is
only produced by inertial versions of Swift products and is not available
from Piksi Multi or Duro.
Parameters
----------
sbp : SBP
SBP parent object to inherit from.
tow : int
GPS Time of Week
x : int
Velocity in x direction
y : int
Velocity in y direction
z : int
Velocity in z direction
cov_x_x : float
Estimated variance of x
cov_x_y : float
Covariance of x and y
cov_x_z : float
Covariance of x and z
cov_y_y : float
Estimated variance of y
cov_y_z : float
Covariance of y and z
cov_z_z : float
Estimated variance of z
n_sats : int
Number of satellites used in solution
flags : int
Status flags
sender : int
Optional sender ID, defaults to SENDER_ID (see sbp/msg.py).
"""
_parser = construct.Struct(
'tow' / construct.Int32ul,
response = self.client.post(reverse('api-v3-validate-declaration'), {'entity_id': self.test_producer.id, 'period_year': today.year, 'period_month': today.strftime('%m')})
self.assertEqual(response.status_code, 400)
jsoned = response.json()
self.assertEqual(jsoned['message'], "PENDING_TRANSACTIONS_CANNOT_DECLARE")
# as client, accept lots
tx.delivery_status = LotTransaction.ACCEPTED
tx.save()
# try validate declaration (ok)
response = self.client.post(reverse('api-v3-validate-declaration'), {'entity_id': self.test_producer.id, 'period_year': today.year, 'period_month': today.strftime('%m')})
self.assertEqual(response.status_code, 200)
tx = LotTransaction.objects.get(id=tx_id)
self.assertEqual(tx.delivery_status, LotTransaction.FROZEN)
class CorrectionTests(TransactionTestCase):
home = os.environ['CARBURE_HOME']
fixtures = ['{home}/web/fixtures/json/countries.json'.format(home=home),
'{home}/web/fixtures/json/feedstock.json'.format(home=home),
'{home}/web/fixtures/json/biofuels.json'.format(home=home),
'{home}/web/fixtures/json/depots.json'.format(home=home)]
def setUp(self):
user_model = get_user_model()
self.user_email = '<EMAIL>'
self.user_password = '<PASSWORD>'
self.user1 = user_model.objects.create_user(email=self.user_email, name='Le Super Testeur 1', password=self.user_password)
# a few entities
self.test_producer, _ = Entity.objects.update_or_create(name='Le Super Producteur 1', entity_type='Producteur')
self.test_operator, _ = Entity.objects.update_or_create(name='OPERATEUR1', entity_type='Opérateur')
self.test_trader, _ = Entity.objects.update_or_create(name='Trader1', entity_type=Entity.TRADER)
# some rights
UserRights.objects.update_or_create(user=self.user1, entity=self.test_producer, defaults={'role': 'RW'})
UserRights.objects.update_or_create(user=self.user1, entity=self.test_operator, defaults={'role': 'RW'})
UserRights.objects.update_or_create(user=self.user1, entity=self.test_trader, defaults={'role': 'RW'})
# a production site and delivery_site
france = Pays.objects.get(code_pays='FR')
Depot.objects.update_or_create(name='Depot Test', depot_id='001', country=france)
Depot.objects.update_or_create(name='Depot Test 2', depot_id='002', country=france)
today = datetime.date.today()
d = {'country': france, 'date_mise_en_service': today, 'site_id':'SIRET XXX',
'city': 'paris', 'postal_code': '75001', 'manager_name':'<NAME>',
'manager_phone':'0145247000', 'manager_email': '<EMAIL>'}
self.production_site, _ = ProductionSite.objects.update_or_create(producer=self.test_producer, name='PSITE1', defaults=d)
loggedin = self.client.login(username=self.user_email, password=self.user_password)
self.assertTrue(loggedin)
# pass otp verification
response = self.client.get(reverse('otp-verify'))
self.assertEqual(response.status_code, 200)
device = EmailDevice.objects.get(user=self.user1)
response = self.client.post(reverse('otp-verify'), {'otp_token': device.token})
self.assertEqual(response.status_code, 302)
def create_lot(self, **kwargs):
lot = {
'supplier_certificate': 'ISCC-TOTO-02',
'biocarburant_code': 'ETH',
'matiere_premiere_code': 'BLE',
'producer': self.test_producer.name,
'production_site': "PSITE1",
'volume': 15000,
'pays_origine_code': 'FR',
'eec': 1,
'ep': 5,
'etd': 12,
'dae': get_random_dae(),
'delivery_date': '2020-12-31',
'client': self.test_trader.name,
'delivery_site': '001',
'entity_id': self.test_producer.id,
}
lot.update(kwargs)
response = self.client.post(reverse('api-v3-add-lot'), lot)
self.assertEqual(response.status_code, 200)
data = response.json()['data']
tx_id = data['id']
lot_id = data['lot']['id']
return tx_id, lot_id, lot
def test_delivery_site_by_name(self):
# 1 create lot as producer and send it
tx_id, lot_id, j = self.create_lot(client=self.test_operator.name, delivery_site='depot test')
tx = LotTransaction.objects.get(id=tx_id)
self.assertEqual(tx.carbure_delivery_site.depot_id, '001')
tx_id, lot_id, j = self.create_lot(client=self.test_operator.name, delivery_site='001')
tx = LotTransaction.objects.get(id=tx_id)
self.assertEqual(tx.carbure_delivery_site.depot_id, '001')
def test_only_creator_can_validate(self):
# 1 create lot as producer and send it
tx_id, lot_id, j = self.create_lot(client=self.test_operator.name)
# try validate as client
response = self.client.post(reverse('api-v3-validate-lot'), {'entity_id': self.test_operator.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 403)
# try validate as unknown
response = self.client.post(reverse('api-v3-validate-lot'), {'entity_id': self.test_trader.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 403)
# try validate as creator
response = self.client.post(reverse('api-v3-validate-lot'), {'entity_id': self.test_producer.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 200)
def test_only_creator_can_validate__createdbyoperator(self):
# 1 create lot as producer and send it
tx_id, lot_id, j = self.create_lot(producer='unknownproducer', production_site_reference='psitereference', date_mise_en_service='12/12/2012', client=self.test_operator.name, entity_id=self.test_operator.id)
# try validate as unknown
response = self.client.post(reverse('api-v3-validate-lot'), {'entity_id': self.test_producer.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 403)
# try validate as client
response = self.client.post(reverse('api-v3-validate-lot'), {'entity_id': self.test_operator.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 200)
def test_only_client_can_accept(self):
# 1 create lot as producer and send it
tx_id, lot_id, j = self.create_lot(client=self.test_operator.name)
response = self.client.post(reverse('api-v3-validate-lot'), {'entity_id': self.test_producer.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 200)
# 2 producer cannot accept
response = self.client.post(reverse('api-v3-accept-lot'), {'entity_id': self.test_producer.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 403)
# random cannot accept
response = self.client.post(reverse('api-v3-accept-lot'), {'entity_id': self.test_trader.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 403)
# client can accept
response = self.client.post(reverse('api-v3-accept-lot'), {'entity_id': self.test_operator.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 200)
def test_only_creator_can_update_lot__producer(self):
# 1 producer creates lot
tx_id, lot_id, j = self.create_lot()
# update some data
j['tx_id'] = tx_id
j['volume'] = 45000
j['delivery_date'] = '2021-01-15'
j['entity_id'] = self.test_operator.id
response = self.client.post(reverse('api-v3-update-lot'), j)
self.assertEqual(response.status_code, 403)
j['entity_id'] = self.test_trader.id
response = self.client.post(reverse('api-v3-update-lot'), j)
self.assertEqual(response.status_code, 403)
j['entity_id'] = self.test_producer.id
response = self.client.post(reverse('api-v3-update-lot'), j)
self.assertEqual(response.status_code, 200)
def test_only_creator_can_update_lot__trader(self):
# 1 operator creates lot
tx_id, lot_id, j = self.create_lot(producer='UNKNOWNPRODUCER', entity_id=self.test_trader.id)
# update some data
j['tx_id'] = tx_id
j['volume'] = 45000
j['delivery_date'] = '2021-01-15'
j['entity_id'] = self.test_producer.id
response = self.client.post(reverse('api-v3-update-lot'), j)
self.assertEqual(response.status_code, 403)
j['entity_id'] = self.test_trader.id
response = self.client.post(reverse('api-v3-update-lot'), j)
self.assertEqual(response.status_code, 200)
j['entity_id'] = self.test_operator.id
response = self.client.post(reverse('api-v3-update-lot'), j)
self.assertEqual(response.status_code, 403)
def test_only_creator_can_update_lot__operator(self):
# 1 operator creates lot
tx_id, lot_id, j = self.create_lot(producer='UNKNOWNPRODUCER', entity_id=self.test_operator.id)
# update some data
j['tx_id'] = tx_id
j['volume'] = 45000
j['delivery_date'] = '2021-01-15'
j['entity_id'] = self.test_producer.id
response = self.client.post(reverse('api-v3-update-lot'), j)
self.assertEqual(response.status_code, 403)
j['entity_id'] = self.test_trader.id
response = self.client.post(reverse('api-v3-update-lot'), j)
self.assertEqual(response.status_code, 403)
j['entity_id'] = self.test_operator.id
response = self.client.post(reverse('api-v3-update-lot'), j)
self.assertEqual(response.status_code, 200)
def test_only_client_can_request_correction(self):
# 1 producer creates lot
tx_id, lot_id, j = self.create_lot()
# 2 validate
response = self.client.post(reverse('api-v3-validate-lot'), {'entity_id': self.test_producer.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 200)
# 3 request a correction
# 3.1 the sender requests a correction - 403
response = self.client.post(reverse('api-v3-accept-lot-with-reserves'), {'entity_id': self.test_producer.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 403)
# 3.2 someone else requests a correction - 403
response = self.client.post(reverse('api-v3-accept-lot-with-reserves'), {'entity_id': self.test_operator.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 403)
# 3.3 the real client requests a correction - 200
response = self.client.post(reverse('api-v3-accept-lot-with-reserves'), {'entity_id': self.test_trader.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 200)
def test_only_creator_can_amend(self):
# 1 producer creates lot
tx_id, lot_id, j = self.create_lot()
# 2 validate
response = self.client.post(reverse('api-v3-validate-lot'), {'entity_id': self.test_producer.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 200)
# 3 request to edit lot
# 3.1 the sender requests to amend - 200
response = self.client.post(reverse('api-v3-amend-lot'), {'entity_id': self.test_producer.id, 'tx_id': tx_id})
self.assertEqual(response.status_code, 200)
# 3.2 someone else requests to amend - 403
response = self.client.post(reverse('api-v3-amend-lot'), {'entity_id': self.test_operator.id, 'tx_id': tx_id})
self.assertEqual(response.status_code, 403)
# 3.3 the client requests to amend - 403
response = self.client.post(reverse('api-v3-amend-lot'), {'entity_id': self.test_trader.id, 'tx_id': tx_id})
self.assertEqual(response.status_code, 403)
def test_split_rights(self):
# producer creates lot and validates
tx_id, lot_id, j = self.create_lot()
response = self.client.post(reverse('api-v3-validate-lot'), {'entity_id': self.test_producer.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('api-v3-amend-lot'), {'entity_id': self.test_producer.id, 'tx_id': tx_id})
self.assertEqual(response.status_code, 200)
# producer can edit all parts of the lot
j['tx_id'] = tx_id
j['volume'] = 45000
j['delivery_date'] = '2021-01-15'
response = self.client.post(reverse('api-v3-update-lot'), j)
self.assertEqual(response.status_code, 200)
tx = LotTransaction.objects.get(id=tx_id)
self.assertEqual(tx.lot.volume, 45000)
self.assertEqual(tx.delivery_date, datetime.date(2021,1,15))
self.assertEqual(tx.carbure_delivery_site.depot_id, '001')
# send it back
response = self.client.post(reverse('api-v3-validate-lot'), {'entity_id': self.test_producer.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 200)
# accept lot
response = self.client.post(reverse('api-v3-accept-lot'), {'entity_id': self.test_trader.id, 'tx_ids': [tx_id]})
self.assertEqual(response.status_code, 200)
# now start playing with fire
# split lot, send it to a different client
drafts = json.dumps([{'volume':5000, 'client': self.test_operator.name, 'dae':'DAESPLIT01', 'delivery_site':'001', 'delivery_date': '12/05/2021'}])
response = self.client.post(reverse('api-v3-stocks-create-drafts'), {'entity_id': self.test_trader.id, 'drafts': drafts})
self.assertEqual(response.status_code, 200)
split_tx_id = response.json()['data']['tx_ids'][0]
# it's a draft so parent volumes are unaffected
split_tx = LotTransaction.objects.get(id=split_tx_id)
self.assertEqual(split_tx.lot.status, LotV2.DRAFT)
self.assertEqual(split_tx.delivery_status, LotTransaction.PENDING)
self.assertEqual(split_tx.lot.parent_lot.remaining_volume, 45000)
self.assertEqual(split_tx.lot.parent_lot.volume, 45000)
# validate draft
response = self.client.post(reverse('api-v3-stocks-send-drafts'), {'entity_id': self.test_trader.id, 'tx_ids': [split_tx_id]})
self.assertEqual(response.status_code, 200)
# split is validated, stock is affected
split_tx = LotTransaction.objects.get(id=split_tx_id)
self.assertEqual(split_tx.lot.status, LotV2.VALIDATED)
self.assertEqual(split_tx.delivery_status, LotTransaction.PENDING)
self.assertEqual(split_tx.lot.parent_lot.remaining_volume, 40000)
self.assertEqual(split_tx.lot.parent_lot.volume, 45000)
# as final client, accept and request correction
response = self.client.post(reverse('api-v3-accept-lot-with-reserves'), {'entity_id': self.test_operator.id, 'tx_ids': [split_tx_id]})
self.assertEqual(response.status_code, 200)
split_tx = LotTransaction.objects.get(id=split_tx_id)
self.assertEqual(split_tx.lot.status, LotV2.VALIDATED)
self.assertEqual(split_tx.delivery_status, LotTransaction.TOFIX)
self.assertEqual(split_tx.lot.volume, 5000)
self.assertEqual(split_tx.lot.remaining_volume, 5000)
self.assertEqual(split_tx.lot.parent_lot.remaining_volume, 40000) # volume has not been re-credited because tx status is TOFIX
self.assertEqual(split_tx.lot.parent_lot.volume, 45000)
# as initial producer, try update lot and tx. nothing should work because it's a split. only the "master" or "parent" lot can be updated
j['entity_id'] = self.test_producer.id
j['tx_id'] = split_tx_id
j['volume'] = 15000 # lot is split - volume can only be updated by trader
j['pays_origine_code'] = 'DE'
j['delivery_site'] = '002' # should not change
response = self.client.post(reverse('api-v3-update-lot'), j)
self.assertEqual(response.status_code, 200)
split_tx = LotTransaction.objects.get(id=split_tx_id)
self.assertEqual(split_tx.lot.volume, 5000) # volume is unchanged
self.assertEqual(split_tx.lot.pays_origine.code_pays, 'FR') # pays_origine is unchanged
self.assertEqual(split_tx.carbure_delivery_site.depot_id, '001') # delivery_site is unchanged
# 3 as initial producer, send lot back to final client
response = self.client.post(reverse('api-v3-validate-lot'), {'entity_id': self.test_producer.id, 'tx_ids': [split_tx_id]})
self.assertEqual(response.status_code, 403) # only the intermediary can send it back
# as the trader, send split lot back to client
response = self.client.post(reverse('api-v3-validate-lot'), {'entity_id': self.test_trader.id, 'tx_ids': [split_tx_id]})
self.assertEqual(response.status_code, 200)
split_tx = LotTransaction.objects.get(id=split_tx_id)
self.assertEqual(split_tx.lot.volume, 5000)
self.assertEqual(split_tx.lot.remaining_volume, 5000)
self.assertEqual(split_tx.lot.parent_lot.volume, 45000)
self.assertEqual(split_tx.lot.parent_lot.remaining_volume, 40000)
self.assertEqual(split_tx.delivery_status, LotTransaction.FIXED)
# 4 as trader, re-open lot (amend-lot)
response = self.client.post(reverse('api-v3-amend-lot'), {'entity_id': self.test_trader.id, 'tx_id': split_tx_id})
self.assertEqual(response.status_code, 200)
split_tx = LotTransaction.objects.get(id=split_tx_id)
self.assertEqual(split_tx.lot.volume, 5000)
self.assertEqual(split_tx.lot.remaining_volume, 5000)
self.assertEqual(split_tx.lot.parent_lot.volume, 45000)
self.assertEqual(split_tx.lot.parent_lot.remaining_volume, 40000)
self.assertEqual(split_tx.delivery_status, LotTransaction.TOFIX)
# 4 as trader, try update lot and tx, only tx should work
j['entity_id'] = self.test_trader.id
j['tx_id'] = split_tx_id
j['volume'] = 5001 # volume change by the trader
j['pays_origine_code'] = 'DE' # should not change
j['delivery_site'] = '002' # should change
response = self.client.post(reverse('api-v3-update-lot'), j)
self.assertEqual(response.status_code, 200)
split_tx = LotTransaction.objects.get(id=split_tx_id)
self.assertEqual(split_tx.lot.pays_origine.code_pays, 'FR') # pays_origine has NOT been updated
self.assertEqual(split_tx.carbure_delivery_site.depot_id, '002') # delivery_site has been updated
self.assertEqual(split_tx.lot.volume, 5001)
self.assertEqual(split_tx.lot.remaining_volume, 5001)
self.assertEqual(split_tx.lot.parent_lot.volume, 45000)
self.assertEqual(split_tx.lot.parent_lot.remaining_volume, 39999) # volume has been recredited when moved to "correction"
self.assertEqual(split_tx.delivery_status, LotTransaction.TOFIX)
# 5 send the lot back
response = self.client.post(reverse('api-v3-validate-lot'), {'entity_id': self.test_trader.id, 'tx_ids': [split_tx_id]})
self.assertEqual(response.status_code, 200)
split_tx = LotTransaction.objects.get(id=split_tx_id)
self.assertEqual(split_tx.delivery_status, LotTransaction.FIXED)
self.assertEqual(split_tx.lot.parent_lot.volume, 45000)
self.assertEqual(split_tx.lot.parent_lot.remaining_volume,
# Repository: uktrade/jupyterhub-data-auth-admin
import datetime
import json
import os
from functools import wraps
import hashlib
import itertools
import logging
import random
import re
import secrets
import string
import csv
from io import StringIO
from typing import Tuple
from timeit import default_timer as timer
import gevent
import gevent.queue
import psycopg2
import requests
from mohawk import Sender
from psycopg2 import connect, sql
from psycopg2.sql import SQL
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.http import StreamingHttpResponse
from django.db import connections, connection
from django.db.models import Q
from django.conf import settings
from tableschema import Schema
from dataworkspace.apps.core.boto3_client import get_s3_client, get_iam_client
from dataworkspace.apps.core.constants import (
PostgresDataTypes,
SCHEMA_POSTGRES_DATA_TYPE_MAP,
TABLESCHEMA_FIELD_TYPE_MAP,
)
from dataworkspace.apps.core.models import Database, DatabaseUser, Team
from dataworkspace.apps.datasets.constants import UserAccessType
from dataworkspace.apps.datasets.models import DataSet, SourceTable, ReferenceDataset
logger = logging.getLogger("app")
USER_SCHEMA_STEM = "_user_"
def database_dsn(database_data):
return (
f'host={database_data["HOST"]} port={database_data["PORT"]} '
f'dbname={database_data["NAME"]} user={database_data["USER"]} '
f'password={database_data["PASSWORD"]} sslmode=require'
)
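# Illustrative example (values made up):
# database_dsn({'HOST': 'db.example', 'PORT': 5432, 'NAME': 'datasets', 'USER': 'dw', 'PASSWORD': 'secret'})
# == 'host=db.example port=5432 dbname=datasets user=dw password=secret sslmode=require'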
def postgres_user(stem, suffix=""):
if len(suffix) > 10:
raise ValueError(
"The user suffix should be no more than 10 characters to ensure that the stem "
"doesn't get truncated too severely."
)
user_alphabet = string.ascii_lowercase + string.digits
unique_enough = "".join(secrets.choice(user_alphabet) for i in range(5))
suffix = f"_{suffix}" if suffix else ""
# Postgres identifiers can be up to 63 characters.
# Between `user_`, `_`, and `unique_enough` we use 11 of these characters.
# This leaves 52 characters for the email and suffix parts.
# So let's truncate the email address based on the remaining characters we have available.
max_email_length = 52 - len(suffix)
return (
"user_"
+ re.sub("[^a-z0-9]", "_", stem.lower())[:max_email_length]
+ "_"
+ unique_enough
+ suffix
)
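# Illustrative example (output is made up; the 5-character block is random):
# postgres_user('jane.doe@example.com', suffix='qs')
# -> 'user_jane_doe_example_com_a1b2c_qs'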
def db_role_schema_suffix_for_user(user):
return stable_identification_suffix(str(user.profile.sso_id), short=True)
def db_role_schema_suffix_for_app(application_template):
return "app_" + application_template.host_basename
def new_private_database_credentials(
db_role_and_schema_suffix,
source_tables,
db_user,
dw_user: get_user_model(),
valid_for: datetime.timedelta,
force_create_for_databases: Tuple[Database] = tuple(),
):
db_team_roles = [team.schema_name for team in Team.objects.filter(member=dw_user)]
db_team_roles_set = set(db_team_roles)
db_team_schemas = db_team_roles
# This function can take a while. That isn't great, but also not great to
# hold a connection to the admin database
close_admin_db_connection_if_not_in_atomic_block()
password_alphabet = string.ascii_letters + string.digits
def postgres_password():
return "".join(secrets.choice(password_alphabet) for i in range(64))
def get_new_credentials(database_obj, tables):
# Each real-world user is given
# - a private and permanent schema where they can manage tables and rows as needed
# - a permanent database role that is the owner of the schema
# - temporary database users, each of which are GRANTed the role
db_password = postgres_password()
db_role = f"{USER_SCHEMA_STEM}{db_role_and_schema_suffix}"
db_schema = f"{USER_SCHEMA_STEM}{db_role_and_schema_suffix}"
database_data = settings.DATABASES_DATA[database_obj.memorable_name]
valid_until = (datetime.datetime.now() + valid_for).isoformat()
with connections[database_obj.memorable_name].cursor() as cur:
existing_tables_and_views_set = set(tables_and_views_that_exist(cur, tables))
allowed_tables_that_exist = [
(schema, table)
for schema, table in tables
if (schema, table) in existing_tables_and_views_set
]
allowed_tables_that_exist_set = set(allowed_tables_that_exist)
allowed_schemas_that_exist = without_duplicates_preserve_order(
schema for schema, _ in allowed_tables_that_exist
)
allowed_schemas_that_exist_set = set(allowed_schemas_that_exist)
def ensure_db_role(db_role_name):
cur.execute(
sql.SQL(
"""
DO $$
BEGIN
CREATE ROLE {role};
EXCEPTION WHEN OTHERS THEN
RAISE DEBUG 'Role {role} already exists';
END
$$;
"""
).format(role=sql.Identifier(db_role_name))
)
for db_role_name in db_team_roles + [db_role]:
ensure_db_role(db_role_name)
# On RDS, to do SET ROLE, you have to GRANT the role to the current master user. You also
# have to have (at least) USAGE on each user schema to call has_table_privilege. So,
# we make sure the master user has this before the user schema is even created. But, since
# this would involve a GRANT, and since GRANTs have to be wrapped in the lock, we check if
# we need to do it first
cur.execute(
sql.SQL(
"""
SELECT
rolname
FROM
pg_roles
WHERE
(
rolname SIMILAR TO '\\_user\\_[0-9a-f]{8}' OR
rolname LIKE '\\_user\\_app\\_%' OR
rolname LIKE '\\_team\\_%'
)
AND NOT pg_has_role(rolname, 'member');
"""
)
)
missing_db_roles = [role for (role,) in cur.fetchall()]
if missing_db_roles:
with cache.lock(
"database-grant-v1",
blocking_timeout=15,
timeout=60,
), connections[database_obj.memorable_name].cursor() as cur:
cur.execute(
sql.SQL("GRANT {} TO {};").format(
sql.SQL(",").join(
sql.Identifier(missing_db_role) for missing_db_role in missing_db_roles
),
sql.Identifier(database_data["USER"]),
)
)
with connections[database_obj.memorable_name].cursor() as cur:
# Find existing permissions
cur.execute(
sql.SQL(
"""
SELECT
schemaname AS schema,
tablename as name
FROM
pg_catalog.pg_tables
WHERE
schemaname NOT IN ('information_schema', 'pg_catalog', 'pg_toast', {schema})
AND schemaname NOT LIKE 'pg_temp_%'
AND schemaname NOT LIKE 'pg_toast_temp_%'
AND schemaname NOT LIKE '_team_%'
AND tablename !~ '_\\d{{8}}t\\d{{6}}'
AND has_table_privilege({role}, quote_ident(schemaname) || '.' ||
quote_ident(tablename), 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') = true
UNION ALL
SELECT
schemaname AS schema,
viewname as name
FROM
pg_catalog.pg_views
WHERE
schemaname NOT IN ('information_schema', 'pg_catalog', 'pg_toast', {schema})
AND schemaname NOT LIKE 'pg_temp_%'
AND schemaname NOT LIKE 'pg_toast_temp_%'
AND schemaname NOT LIKE '_team_%'
AND has_table_privilege({role}, quote_ident(schemaname) || '.' ||
quote_ident(viewname), 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER')
UNION ALL
SELECT
schemaname AS schema,
matviewname as name
FROM
pg_catalog.pg_matviews
WHERE
schemaname NOT IN ('information_schema', 'pg_catalog', 'pg_toast', {schema})
AND schemaname NOT LIKE 'pg_temp_%'
AND schemaname NOT LIKE 'pg_toast_temp_%'
AND schemaname NOT LIKE '_team_%'
AND has_table_privilege({role}, quote_ident(schemaname) || '.' ||
quote_ident(matviewname), 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') = true
ORDER BY schema, name;
"""
).format(role=sql.Literal(db_role), schema=sql.Literal(db_schema))
)
tables_with_existing_privs = cur.fetchall()
tables_with_existing_privs_set = set(tables_with_existing_privs)
cur.execute(
sql.SQL(
"""
SELECT
nspname AS name
FROM
pg_namespace
WHERE
nspname NOT IN ('information_schema', 'pg_catalog', 'pg_toast', {schema})
AND nspname NOT LIKE 'pg_temp_%'
AND nspname NOT LIKE 'pg_toast_temp_%'
AND has_schema_privilege({role}, nspname, 'CREATE, USAGE')
ORDER BY nspname;
"""
).format(role=sql.Literal(db_role), schema=sql.Literal(db_schema))
)
schemas_with_existing_privs = [row[0] for row in cur.fetchall()]
schemas_with_existing_privs_set = set(schemas_with_existing_privs)
# Existing team roles granted to the permanent user role
cur.execute(
sql.SQL(
"""
SELECT
rolname
FROM
pg_roles
WHERE
(
rolname LIKE '\\_team\\_%'
)
AND pg_has_role({db_role}, rolname, 'member');
"""
).format(db_role=sql.Literal(db_role))
)
db_team_roles_previously_granted = [role for (role,) in cur.fetchall()]
db_team_roles_previously_granted_set = set(db_team_roles_previously_granted)
tables_to_revoke = [
(schema, table)
for (schema, table) in tables_with_existing_privs
if (schema, table) not in allowed_tables_that_exist_set
]
tables_to_grant = [
(schema, table)
for (schema, table) in allowed_tables_that_exist
if (schema, table) not in tables_with_existing_privs_set
]
schemas_to_revoke = [
schema
for schema in schemas_with_existing_privs
if schema not in allowed_schemas_that_exist_set
]
schemas_to_grant = [
schema
for schema in allowed_schemas_that_exist
if schema not in schemas_with_existing_privs_set
]
db_team_roles_to_revoke = [
db_team_role
for db_team_role in db_team_roles_previously_granted
if db_team_role not in db_team_roles_set
]
db_team_roles_to_grant = [
db_team_role
for db_team_role in db_team_roles
if db_team_role not in db_team_roles_previously_granted_set
]
# Create user. Note that in PostgreSQL a USER and ROLE are almost the same thing, the
# difference is that by default a ROLE is "NOLOGIN", so can't be used to connect to
# the database, i.e. it really is more of a "group".
cur.execute(
sql.SQL(
"CREATE USER {user} WITH PASSWORD {password} VALID UNTIL {valid_until}"
).format(
user=sql.Identifier(db_user),
password=sql.Literal(db_password),
valid_until=sql.Literal(valid_until),
),
)
# ... create schemas
for _db_role, _db_schema in list(zip(db_team_roles, db_team_schemas)) + [
(db_role, db_schema)
]:
cur.execute(
sql.SQL("CREATE SCHEMA IF NOT EXISTS {} AUTHORIZATION {};").format(
sql.Identifier(_db_schema),
sql.Identifier(_db_role),
)
)
# Give the roles reasonable timeouts...
# [Out of paranoia on all roles in case the user changes role mid-session]
for _db_user in [db_role, db_user] + db_team_roles:
cur.execute(
sql.SQL(
"ALTER USER {} SET idle_in_transaction_session_timeout = '60min';"
).format(sql.Identifier(_db_user))
)
cur.execute(
sql.SQL("ALTER USER {} SET statement_timeout = '60min';").format(
sql.Identifier(_db_user)
)
)
cur.execute(
sql.SQL("ALTER USER {} SET pgaudit.log = {};").format(
sql.Identifier(_db_user),
sql.Literal(settings.PGAUDIT_LOG_SCOPES),
)
)
cur.execute(
sql.SQL("ALTER USER {} SET pgaudit.log_catalog = off;").format(
sql.Identifier(_db_user),
sql.Literal(settings.PGAUDIT_LOG_SCOPES),
)
)
cur.execute(
sql.SQL("ALTER USER {} WITH CONNECTION LIMIT 10;").format(
sql.Identifier(_db_user),
)
)
# PostgreSQL doesn't handle concurrent GRANT/REVOKEs on the same objects well, so we lock
with cache.lock(
"database-grant-v1",
blocking_timeout=15,
timeout=180,
), connections[database_obj.memorable_name].cursor() as cur:
logger.info(
"Revoking permissions ON %s %s from %s",
database_obj.memorable_name,
schemas_to_revoke,
db_role,
)
if schemas_to_revoke:
cur.execute(
sql.SQL("REVOKE ALL PRIVILEGES ON SCHEMA {} FROM {};").format(
sql.SQL(",").join(sql.Identifier(schema) for schema in schemas_to_revoke),
sql.Identifier(db_role),
)
)
logger.info(
"Revoking permissions ON %s %s from %s",
database_obj.memorable_name,
tables_to_revoke,
db_role,
)
if tables_to_revoke:
cur.execute(
sql.SQL("REVOKE ALL PRIVILEGES ON {} FROM {};").format(
sql.SQL(",").join(
[sql.Identifier(schema, table) for schema, table in tables_to_revoke]
),
sql.Identifier(db_role),
)
)
logger.info(
"Granting permissions ON %s %s from %s",
database_obj.memorable_name,
schemas_to_grant,
db_role,
)
if schemas_to_grant:
cur.execute(
sql.SQL("GRANT USAGE ON SCHEMA {} TO {};").format(
sql.SQL(",").join([sql.Identifier(schema) for schema in schemas_to_grant]),
sql.Identifier(db_role),
)
)
logger.info(
"Granting SELECT ON %s %s from %s",
database_obj.memorable_name,
tables_to_grant,
db_role,
)
if tables_to_grant:
cur.execute(
sql.SQL("GRANT SELECT ON {} TO {};").format(
sql.SQL(",").join(
[sql.Identifier(schema, table) for schema, table in tables_to_grant]
),
sql.Identifier(db_role),
)
)
logger.info(
"Revoking %s from %s",
db_team_roles_to_revoke,
db_role,
)
if db_team_roles_to_revoke:
cur.execute(
sql.SQL("REVOKE {} FROM {};").format(
sql.SQL(",").join(
[
sql.Identifier(db_team_role)
for db_team_role in db_team_roles_to_revoke
]
),
sql.Identifier(db_role),
)
)
for team_role in db_team_roles_to_revoke:
cur.execute(
sql.SQL(
If the given trigger rule configuration is
invalid
:raises :class:`job.configuration.data.exceptions.InvalidConnection`: If the trigger rule connection to the job
type interface is invalid
"""
# Create manifest and validate
manifest = SeedManifest(manifest_dict)
for field_name in kwargs:
if field_name in JobType.UNEDITABLE_FIELDS_V6:
raise Exception('%s is not an editable field' % field_name)
self._validate_job_type_fields(**kwargs)
trigger_rule = self._create_seed_job_trigger_rule(manifest, trigger_rule_dict)
secrets = None
if configuration_dict:
configuration = JobConfigurationV6(configuration_dict, do_validate=True).get_configuration()
configuration.validate(manifest)
secrets = configuration.remove_secret_settings(manifest)
# Create/update any errors defined in manifest
error_mapping = manifest.get_error_mapping()
error_mapping.save_models()
# Create the new job type
job_type = JobType(**kwargs)
job_type.name = manifest.get_name()
job_type.version = manifest.get_job_version()
job_type.title = manifest.get_title()
job_type.description = manifest.get_description()
job_type.manifest = manifest_dict
job_type.trigger_rule = trigger_rule
if configuration_dict:
job_type.configuration = configuration_dict
if 'is_active' in kwargs:
job_type.deprecated = None if kwargs['is_active'] else timezone.now()
if 'is_paused' in kwargs:
job_type.paused = timezone.now() if kwargs['is_paused'] else None
job_type.save()
# Save any secrets to Vault
if secrets:
self.set_job_type_secrets(job_type.get_secrets_key(), secrets)
# Create first revision of the job type
JobTypeRevision.objects.create_job_type_revision(job_type)
return job_type
@staticmethod
def _create_seed_job_trigger_rule(manifest, trigger_rule_dict):
"""Creates a trigger rule to be attached to a Seed job type.
Must be called from within an existing transaction. This is intended for use with create and edit job type
methods.
:param manifest: Instantiated Manifest to pass to trigger validation
:type manifest: :class:`job.seed.manifest.SeedManifest`
:param trigger_rule_dict: Trigger rule definition to be used for data models
:type trigger_rule_dict: dict
:returns: The new trigger rule
:rtype: :class:`trigger.models.TriggerRule`
:raises trigger.configuration.exceptions.InvalidTriggerMissingConfiguration:
If both rule and configuration do not exist
:raises trigger.configuration.exceptions.InvalidTriggerRule: If the configuration is invalid
:raises trigger.configuration.exceptions.InvalidTriggerType: If the trigger is invalid
"""
if (('type' in trigger_rule_dict and 'configuration' not in trigger_rule_dict) or
('type' not in trigger_rule_dict and 'configuration' in trigger_rule_dict)):
raise InvalidTriggerMissingConfiguration('Trigger type and configuration are required together.')
# Attempt to look up the trigger handler for the type
rule_handler = None
if trigger_rule_dict and 'type' in trigger_rule_dict:
rule_handler = trigger_handler.get_trigger_rule_handler(trigger_rule_dict['type'])
# Attempt to create the trigger rule
is_active = trigger_rule_dict.get('is_active', True)
trigger_rule = None
if rule_handler and 'configuration' in trigger_rule_dict:
trigger_rule = rule_handler.create_trigger_rule(trigger_rule_dict['configuration'], manifest.get_name(),
is_active)
# Validate the trigger rule
if trigger_rule:
trigger_config = trigger_rule.get_configuration()
if not isinstance(trigger_config, JobTriggerRuleConfiguration):
raise InvalidTriggerType('%s is an invalid trigger rule type for creating jobs' % trigger_rule.type)
trigger_config.validate_trigger_for_job(manifest)
return trigger_rule
@transaction.atomic
def edit_job_type_v5(self, job_type_id, interface=None, trigger_rule=None, remove_trigger_rule=False,
error_mapping=None, custom_resources=None, configuration=None, secrets=None, **kwargs):
"""Edits the given job type and saves the changes in the database. The caller must provide the related
trigger_rule model. All database changes occur in an atomic transaction. An argument of None for a field
indicates that the field should not change. The remove_trigger_rule parameter indicates the difference between
no change to the trigger rule (False) and removing the trigger rule (True) when trigger_rule is None.
:param job_type_id: The unique identifier of the job type to edit
:type job_type_id: int
:param interface: The interface for running a job of this type, possibly None
:type interface: :class:`job.configuration.interface.job_interface.JobInterface`
:param trigger_rule: The trigger rule that creates jobs of this type, possibly None
:type trigger_rule: :class:`trigger.models.TriggerRule`
:param remove_trigger_rule: Indicates whether the trigger rule should be unchanged (False) or removed (True)
when trigger_rule is None
:type remove_trigger_rule: bool
:param error_mapping: Mapping for translating an exit code to an error type
:type error_mapping: :class:`job.error.mapping.JobErrorMapping`
:param custom_resources: Custom resources required by this job type
:type custom_resources: :class:`node.resources.json.resources.Resources`
:param configuration: The configuration for running a job of this type, possibly None
:type configuration: :class:`job.configuration.json.job_config_2_0.JobConfigurationV2`
:param secrets: Secret settings required by this job type
:type secrets: dict
:raises :class:`job.exceptions.InvalidJobField`: If a given job type field has an invalid value
:raises :class:`trigger.configuration.exceptions.InvalidTriggerType`: If the given trigger rule is an invalid
type for creating jobs
:raises :class:`trigger.configuration.exceptions.InvalidTriggerRule`: If the given trigger rule configuration is
invalid
:raises :class:`job.configuration.data.exceptions.InvalidConnection`: If the trigger rule connection to the job
type interface is invalid
:raises :class:`recipe.configuration.definition.exceptions.InvalidDefinition`: If the interface change
invalidates any existing recipe type definitions
"""
for field_name in kwargs:
if field_name in JobType.UNEDITABLE_FIELDS:
raise Exception('%s is not an editable field' % field_name)
self._validate_job_type_fields(**kwargs)
recipe_types = []
if interface:
# Lock all recipe types so they can be validated after changing job type interface
from recipe.models import RecipeType
recipe_types = list(RecipeType.objects.select_for_update().order_by('id').iterator())
# Acquire model lock for job type
job_type = JobType.objects.select_for_update().get(pk=job_type_id)
if job_type.is_system:
if len(kwargs) > 1 or 'is_paused' not in kwargs:
raise InvalidJobField('You can only modify the is_paused field for a System Job')
if interface:
# New job interface, validate all existing recipes
job_type.manifest = interface.get_dict()
job_type.revision_num += 1
job_type.save()
for recipe_type in recipe_types:
recipe_type.get_recipe_definition().validate_job_interfaces()
# New job configuration
if configuration:
configuration.validate(job_type.manifest)
job_type.configuration = configuration.get_dict()
if trigger_rule or remove_trigger_rule:
if job_type.trigger_rule:
# Archive old trigger rule since we are changing to a new one
TriggerRule.objects.archive_trigger_rule(job_type.trigger_rule_id)
job_type.trigger_rule = trigger_rule
# Validate updated trigger rule against updated interface
if job_type.trigger_rule:
trigger_config = job_type.trigger_rule.get_configuration()
if not isinstance(trigger_config, JobTriggerRuleConfiguration):
msg = '%s is an invalid trigger rule type for creating jobs'
raise InvalidTriggerType(msg % job_type.trigger_rule.type)
trigger_config.validate_trigger_for_job(job_type.get_job_interface())
if error_mapping:
error_mapping.validate_legacy()
job_type.error_mapping = error_mapping.error_dict
if custom_resources:
job_type.custom_resources = custom_resources.get_dict()
if 'is_active' in kwargs and job_type.is_active != kwargs['is_active']:
job_type.deprecated = None if kwargs['is_active'] else timezone.now()
if 'is_paused' in kwargs and job_type.is_paused != kwargs['is_paused']:
job_type.paused = timezone.now() if kwargs['is_paused'] else None
for field_name in kwargs:
setattr(job_type, field_name, kwargs[field_name])
job_type.save()
# Save any secrets to Vault
if secrets:
self.set_job_type_secrets(job_type.get_secrets_key(), secrets)
if interface:
# Create new revision of the job type for new interface
JobTypeRevision.objects.create_job_type_revision(job_type)
@transaction.atomic
def edit_job_type_v6(self, job_type_id, manifest_dict=None, trigger_rule_dict=None, configuration_dict=None,
remove_trigger_rule=False, **kwargs):
"""Edits the given job type and saves the changes in the database. The caller must provide the related
trigger_rule model. All database changes occur in an atomic transaction. An argument of None for a field
indicates that the field should not change. If trigger_rule_dict is set to None it is assumed trigger
is being removed.
:param job_type_id: The unique identifier of the job type to edit
:type job_type_id: int
:param manifest_dict: The Seed Manifest defining the interface for running a job of this type
:type manifest_dict: dict
:param trigger_rule_dict: The trigger rule that creates jobs of this type, None results in removal
:type trigger_rule_dict: dict
:param configuration_dict: The configuration for running a job of this type, possibly None
:type configuration_dict: dict
:param remove_trigger_rule: Indicates whether the trigger rule should be unchanged (False) or removed (True)
when trigger_rule is None
:type remove_trigger_rule: bool
:raises :class:`job.exceptions.InvalidJobField`: If a given job type field has an invalid value
:raises :class:`trigger.configuration.exceptions.InvalidTriggerType`: If the given trigger rule is an invalid
type for creating jobs
:raises :class:`trigger.configuration.exceptions.InvalidTriggerRule`: If the given trigger rule configuration is
invalid
:raises :class:`job.configuration.data.exceptions.InvalidConnection`: If the trigger rule connection to the job
type interface is invalid
"""
for field_name in kwargs:
if field_name in JobType.UNEDITABLE_FIELDS_V6:
raise Exception('%s is not an editable field' % field_name)
self._validate_job_type_fields(**kwargs)
recipe_types = []
if manifest_dict:
# Lock all recipe types so they can be validated after changing job type manifest
from recipe.models import RecipeType
recipe_types = list(RecipeType.objects.select_for_update().order_by('id').iterator())
# Acquire model lock for job type
job_type = JobType.objects.select_for_update().get(pk=job_type_id)
if job_type.is_system:
if len(kwargs) > 1 or 'is_paused' not in kwargs:
raise InvalidJobField('You can only modify the is_paused field for a System Job')
if manifest_dict:
manifest = SeedManifest(manifest_dict, do_validate=True)
job_type.manifest = manifest_dict
job_type.revision_num += 1
# Create/update any errors defined in manifest
error_mapping = manifest.get_error_mapping()
error_mapping.save_models()
job_type.title = manifest.get_title()
job_type.description = manifest.get_description()
job_type.save()
# New job interface, validate all existing recipes
for recipe_type in recipe_types:
recipe_type.get_recipe_definition().validate_job_interfaces()
else:
manifest = SeedManifest(job_type.manifest)
secrets = None
if configuration_dict:
configuration = JobConfigurationV6(configuration_dict, do_validate=True).get_configuration()
configuration.validate(manifest)
secrets = configuration.remove_secret_settings(manifest)
job_type.configuration = convert_config_to_v6_json(configuration).get_dict()
if trigger_rule_dict or remove_trigger_rule:
if job_type.trigger_rule:
# Archive old trigger rule since we are changing to a new one
TriggerRule.objects.archive_trigger_rule(job_type.trigger_rule_id)
if not remove_trigger_rule:
job_type.trigger_rule = self._create_seed_job_trigger_rule(manifest, trigger_rule_dict)
if 'is_active' in kwargs and job_type.is_active != kwargs['is_active']:
job_type.deprecated = None if kwargs['is_active'] else timezone.now()
if 'is_paused' in kwargs and job_type.is_paused != kwargs['is_paused']:
job_type.paused = timezone.now() if kwargs['is_paused'] else None
for field_name in kwargs:
setattr(job_type, field_name, kwargs[field_name])
job_type.save()
# Save any secrets to Vault
if secrets:
self.set_job_type_secrets(job_type.get_secrets_key(), secrets)
if manifest_dict:
            # Create new revision of the job type for the new manifest
            JobTypeRevision.objects.create_job_type_revision(job_type)
# AUTOGENERATED! DO NOT EDIT! File to edit: 10_scrape13G.ipynb (unless otherwise specified).
__all__ = ['default13GDir', 'getSec13NshAndPctFromText2', 'addNshAndPct', 'cusipChecksum', 'monthNameToIso',
'getMonthPatStr', 'parseEventDate', 'parse13GD', 'scraper13G', 'reOPTS', 'aggregatePatStr',
'percentOfClassPatStr', 'typeOfRepPatStr', 'form13PiecesPat1', 'form13PiecesPat2', 'form13PiecesPat3',
'nSharesPatStr', 'nPctBarePatStr', 'nPctWithPctPatStr', 'nshAndPctPat1Pref', 'form13NshAndPctPats1',
'form13NshAndPctPat2', 'form13NshAndPctPat3', 'purposePat', 'strictCusipPatStr', 'cusipPatStr',
'cusipNumberPatStr', 'cusipSearchPats', 'spaceDashPat', 'monthNames', 'monthAbbrevStrs', 'monthPatStr',
'monthDayPatStr', 'possCommaPatStr', 'yearPatStr', 'dateOfEventPatStr', 'dateOfEventAtStartPatStr',
'dateOfEventAtEndPatStr', 'dateOfEventMonthPat1', 'dateOfEventMonthRevPat1', 'dateOfEventMonthPat2',
'dateOfEventMonthRevPat2', 'isoSepPatStr', 'dateOfEventIsoPat1', 'dateOfEventIsoRevPat1',
'dateOfEventIsoPat2', 'dateOfEventIsoRevPat2', 'whitespacePat', 'updateCik13GDPos', 'cikSymStr',
'calcBonusMap']
# Cell
import collections
import datetime
import itertools
import os
import re
from secscan import utils, dailyList, basicInfo, infoScraper
default13GDir = os.path.join(utils.stockDataRoot,'scraped13G')
# Cell
reOPTS = re.IGNORECASE|re.DOTALL
# aggregatePatStr = r'aggregate.?\s+amount\s+(?:ben|own)'
# percentOfClassPatStr = r'percent\s+(?:of|or)\s+class\s+(?:re|pr)'
# typeOfRepPatStr = r'type.?\s+(?:of|or)\s+(?:rep|per)'
aggregatePatStr = r'aggregated?\s+amount\s+ben'
percentOfClassPatStr = r'percent(?:age)?\s+of\s+class\s+(?:re|pr)'
typeOfRepPatStr = r'type\s+of\s+(?:rep|per)'
item9PatStr,item11PatStr,item12PatStr,item13PatStr,item14PatStr = (
r'item\s+' + itemNo + r'\s*:' for itemNo in ('9','11','12','13','14'))
form13PiecesPat1 = re.compile(r'.*?'.join([aggregatePatStr,percentOfClassPatStr,typeOfRepPatStr]),reOPTS)
form13PiecesPat2 = re.compile(r'.*?'.join([item9PatStr,item11PatStr,item12PatStr]),reOPTS)
form13PiecesPat3 = re.compile(r'.*?'.join([item11PatStr,item13PatStr,item14PatStr]),reOPTS)
# nSharesPatStr = r'\D(?!9\s|9\D\D)(\d+(?:[,.]\d\d\d)*)' # try to avoid taking item number 9 as share count
nSharesPatStr = r'(?<!\d)(?!9\s|9\D\D)(\d+[,.\d]*)' # try to avoid taking item number 9 as share count
nPctBarePatStr = r'(\d+(?:\.\d*)?|\.\d+)'
nPctWithPctPatStr = r'((?:\d+(?:[\.,]\d*)?|[\.,]\d+)\s*%)'
nshAndPctPat1Pref = r'.*?'.join([aggregatePatStr,nSharesPatStr,percentOfClassPatStr])
form13NshAndPctPats1 = [
re.compile(r'.*?'.join([nshAndPctPat1Pref,nPctWithPctPatStr,typeOfRepPatStr]),reOPTS),
# if percentage isn't followed by a % character, look for a plain number but
# try to avoid using the 9 in "mentioned in item 9" verbiage for the percentage
re.compile(r'.*?'.join([nshAndPctPat1Pref,r'\D9(?!\.\d)\D.*?'+nPctBarePatStr,typeOfRepPatStr]),reOPTS),
# ditto for "mentioned in item 11" verbiage
re.compile(r'.*?'.join([nshAndPctPat1Pref,r'\D11(?!\.\d)\D.*?'+nPctBarePatStr,typeOfRepPatStr]),reOPTS),
re.compile(r'.*?'.join([nshAndPctPat1Pref,nPctBarePatStr,typeOfRepPatStr]),reOPTS)
]
form13NshAndPctPat2 = re.compile(r'.*?'.join([item9PatStr,nSharesPatStr,item11PatStr,
nPctWithPctPatStr,item12PatStr]), reOPTS)
form13NshAndPctPat3 = re.compile(r'.*?'.join([item11PatStr,nSharesPatStr,item13PatStr,
nPctWithPctPatStr,item14PatStr]), reOPTS)
def getSec13NshAndPctFromText2(txt,accNo, debug=False) :
"Returns a list [(nShares, percent) ... ] parsed from form 13G or 13D."
if debug : print(txt)
res = []
pat1Pieces = form13PiecesPat1.findall(txt)
for piece in pat1Pieces :
if debug : print('********1',piece)
if not any(addNshAndPct(pat.match(piece),res) for pat in form13NshAndPctPats1) :
print("??????1", accNo, piece)
if res :
return res
pat2Pieces = form13PiecesPat2.findall(txt)
for piece in pat2Pieces :
if debug : print('********2',piece)
if not addNshAndPct(form13NshAndPctPat2.match(piece),res) :
print("??????2", accNo, piece)
if res :
return res
pat3Pieces = form13PiecesPat3.findall(txt)
for piece in pat3Pieces :
if debug : print('********3',piece)
if not addNshAndPct(form13NshAndPctPat3.match(piece),res) :
print("??????3", accNo, piece)
return res
def addNshAndPct(m,res) :
if not m :
return False
# print(m.groups())
nSh,pct = m.groups()
if nSh in ['10','10.','11','11.','12','12.'] :
nSh = '999'
if pct in ['9','9.','11','11.','12','12.','14','14.'] :
return False
# nSh = nSh.replace('.',',')
pct = pct.replace(',','.').replace('%','').rstrip()
res.append((nSh,pct))
return True
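# Illustrative example (hypothetical match): a match whose groups are ('1,234,567', '5.2 %')
# is appended to res as ('1,234,567', '5.2'); a share count that is literally '10'/'11'/'12'
# is treated as a leaked item number and replaced with '999', and an item-number-like
# percentage causes the match to be rejected.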
purposePat = re.compile(r'4\s*\.?\s*purpose\s*of\s*(?:the\s*)?transaction(?:\s*\.?\s*)(.{1,20000}?)'
+ r'(?:\s*(?:item\s*)?5\s*\.?\s*interest'
+ r'|\s*(?:item\s*)?6\s*\.?\s*contracts'
+ r'|\s*(?:item\s*)?7\s*\.?\s*material'
+ r'|\s*after\s*reasonable\s*inquiry'
+ r'|\s*$'
+ r')', reOPTS)
def cusipChecksum(cusip) :
s = 0
for i,c in enumerate(cusip[:8]) :
if c.isdigit() :
v = ord(c) - ord('0')
elif c.isalpha() :
v = 10 + ord(c.upper()) - ord('A')
if (i&1) == 1 :
v *= 2
s += (v//10) + (v%10)
return str((10 - (s%10)) % 10)
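# Worked example (using a well-known public CUSIP rather than one from a scraped filing):
# cusipChecksum('03783310') returns '0', matching the check digit of Apple's CUSIP 037833100.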
strictCusipPatStr = r'[\dA-Z]\d[\dA-Z]\d[\dA-Z]{4}\d'
cusipPatStr = (r'[\dA-Z]\d[\dA-Z][-_\s]*\d[-_\s]*[\dA-Z][-_\s]*[\dA-Z]'
+ r'(?:[-_\s]*[\dA-Z]{2}(?:[-_\s]*\d)?)?')
cusipNumberPatStr = r'cusip\s*(?:number|#|no)'
cusipSearchPats = [re.compile(patStr, reOPTS) for patStr in [
r'.{1,3000}?[^\dA-Z](' + cusipPatStr + r')[^2-9A-Z]{0,200}?\s*' + cusipNumberPatStr,
r'.{1,3000}?\s*' + cusipNumberPatStr + r'[^\dA-Z]{0,200}?(' + cusipPatStr + r')[^\dA-Z]',
r'.{1,2000}?\s(' + strictCusipPatStr + r')\s',
]]
spaceDashPat = re.compile(r'[-\s]*')
monthNames = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
monthAbbrevStrs = ''.join(monthName[:3].lower() for monthName in monthNames)
def monthNameToIso(monthName) :
return str(1+(monthAbbrevStrs.find(monthName[:3].lower())//3)).zfill(2)
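# e.g. monthNameToIso('September') -> '09', monthNameToIso('Jan') -> '01'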
def getMonthPatStr() :
monthNamePatStrs = []
for monthName in monthNames :
monthNamePatStr = monthName[:3]
if monthName != 'May' :
monthNamePatStr += r'(?:'
if monthName == 'September' :
monthNamePatStr += r't|t\.|'
monthNamePatStr += monthName[3:]
monthNamePatStr += r'|\.)?'
monthNamePatStrs.append(monthNamePatStr)
return '(' + '|'.join(monthNamePatStrs) + ')'
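# For example, the alternative generated for September is r'Sep(?:t|t\.|tember|\.)?', which
# matches 'Sep', 'Sep.', 'Sept', 'Sept.' and 'September'.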
monthPatStr = getMonthPatStr()
monthDayPatStr = r'(\d\d?)(?:\s*th|\s*st)?'
possCommaPatStr = r'[.,\s]'
yearPatStr = r'(\d\d\s*\d\d)'
dateOfEventPatStr = r'dates?\s*of(?:\s*the)?\s*events?\s*which'
dateOfEventAtStartPatStr = r'.{1,3000}?'+dateOfEventPatStr+r'.{0,120}?'
dateOfEventAtEndPatStr = r'[^\d].{0,120}?'+dateOfEventPatStr
dateOfEventMonthPat1 = re.compile(r'.{1,3000}?[^\dA-Z]'
+ r'\s*'.join([monthPatStr,monthDayPatStr,possCommaPatStr,yearPatStr])
+ dateOfEventAtEndPatStr, reOPTS)
dateOfEventMonthRevPat1 = re.compile(dateOfEventAtStartPatStr + r'[^\dA-Z]'
+ r'\s*'.join([monthPatStr,monthDayPatStr,possCommaPatStr,yearPatStr])
+ r'[^\d]', reOPTS)
dateOfEventMonthPat2 = re.compile(r'.{1,3000}?[^\d]'
+ r'\s*'.join([monthDayPatStr,monthPatStr,possCommaPatStr,yearPatStr])
+ dateOfEventAtEndPatStr, reOPTS)
dateOfEventMonthRevPat2 = re.compile(dateOfEventAtStartPatStr + r'[^\d]'
+ r'\s*'.join([monthDayPatStr,monthPatStr,possCommaPatStr,yearPatStr])
+ r'[^\d]', reOPTS)
isoSepPatStr = r'\s*[-/]\s*'
dateOfEventIsoPat1 = re.compile(r'.{1,3000}?[^\d]'
+ isoSepPatStr.join([r'(\d\d?)',r'(\d\d?)',r'(\d\d(?:\d\d)?)'])
+ dateOfEventAtEndPatStr, reOPTS)
dateOfEventIsoRevPat1 = re.compile(dateOfEventAtStartPatStr + r'[^\d]'
+ isoSepPatStr.join([r'(\d\d?)',r'(\d\d?)',r'(\d\d(?:\d\d)?)'])
+ r'[^\d]', reOPTS)
dateOfEventIsoPat2 = re.compile(r'.{1,3000}?[^\d]'
+ isoSepPatStr.join([r'(\d\d\d\d)',r'(\d\d?)',r'(\d\d?)'])
+ dateOfEventAtEndPatStr, reOPTS)
dateOfEventIsoRevPat2 = re.compile(dateOfEventAtStartPatStr + r'[^\d]'
+ isoSepPatStr.join([r'(\d\d\d\d)',r'(\d\d?)',r'(\d\d?)'])
+ r'[^\d]', reOPTS)
whitespacePat = re.compile(r'\s*', reOPTS)
def parseEventDate(info,mainText) :
m = dateOfEventMonthPat1.match(mainText) or dateOfEventMonthRevPat1.match(mainText)
if m :
info['eventDate'] = '-'.join([whitespacePat.sub('',m.group(3)),
monthNameToIso(m.group(1)),m.group(2).zfill(2)])
return
m = dateOfEventMonthPat2.match(mainText) or dateOfEventMonthRevPat2.match(mainText)
if m :
info['eventDate'] = '-'.join([whitespacePat.sub('',m.group(3)),
monthNameToIso(m.group(2)),m.group(1).zfill(2)])
return
m = dateOfEventIsoPat1.match(mainText) or dateOfEventIsoRevPat1.match(mainText)
if m :
info['eventDate'] = '-'.join([('20' if len(m.group(3))==2 else '')+m.group(3),
m.group(1).zfill(2),m.group(2).zfill(2)])
return
m = dateOfEventIsoPat2.match(mainText) or dateOfEventIsoRevPat2.match(mainText)
if m :
info['eventDate'] = '-'.join([m.group(1),m.group(2).zfill(2),m.group(3).zfill(2)])
return
print('NO EVENT DATE!', end=' ')
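# Illustrative behaviour (hypothetical filing text): a cover page containing
# "December 31, 2020 ... Date of Event Which Requires Filing of this Statement" is matched
# by dateOfEventMonthPat1, setting info['eventDate'] to '2020-12-31', provided the
# boilerplate falls within the pattern's 120-character window.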
def parse13GD(accNo, formType=None, info=None, textLimit=basicInfo.defaultTextLimit, debug=False) :
if info is None :
info = basicInfo.getSecFormInfo(accNo, formType=formType)
if 'filedByCik' not in info :
print('No filed by CIK!', end=' ')
links = info['links']
if len(links) == 0 :
print('NO LINKS LIST!', end=' ')
info['positions'] = []
else :
toFormat = 'text' if links[0][3].endswith('.txt') else 'souptext'
mainText = utils.downloadSecUrl(links[0][3], toFormat=toFormat)
parseEventDate(info,mainText)
info['positions'] = getSec13NshAndPctFromText2(mainText,accNo, debug=debug)
for cusipSearchPat in cusipSearchPats :
m = cusipSearchPat.match(mainText)
if m is not None :
break
if m is None :
if not ('0001504304' in info['ciks'] or '0001067621' in info['ciks']) :
# suppress the message for 0001504304 - Bulldog Investors
# and 0001067621 - <NAME>
# - they don't report CUSIPs in their filings
print('no CUSIP found!', end=' ')
else :
cusip = spaceDashPat.sub('',m.group(1))
if len(cusip) == 6 :
print('adding 10 to CUSIP', cusip, end=' ')
cusip = cusip + '10'
if len(cusip) == 8 :
print('adding checksum to CUSIP', cusip, end=' ')
if cusipChecksum('0'+cusip[:7]) == cusip[7] :
cusip = '0'+cusip
else :
cusip = cusip + cusipChecksum(cusip)
if len(cusip)!=9 or cusip[8]!=cusipChecksum(cusip) :
print('invalid CUSIP!', cusip, end=' ')
info['cusip'] = cusip.upper()
# print('CUSIP-'+cusip,end=' ')
if formType is None :
formType = links[0][2]
if formType.upper().startswith('SC 13D') :
m = purposePat.search(mainText)
if m is None :
print('no purpose!', end=' ')
else :
info['purpose'] = m.group(1)[:textLimit]
if len(info['positions']) == 0 :
print('no positions found!', end=' ')
return info #,mainText
class scraper13G(infoScraper.scraperBase) :
@utils.delegates(infoScraper.scraperBase.__init__)
def __init__(self, infoDir=default13GDir, **kwargs) :
super().__init__(infoDir, 'SC 13G', **kwargs)
def init_for_13D(self, infoDir, **kwargs) :
super().__init__(infoDir, 'SC 13D', **kwargs)
def scrapeInfo(self, accNo, formType=None) :
return parse13GD(accNo, formType=formType), None
def rescrapeInfo(self, accNo, info) :
return parse13GD(accNo, info=info)
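# Minimal usage sketch (the accession number is made up; scraperBase keyword arguments are
# defined in infoScraper, which is not shown here):
#     sc = scraper13G()                          # scrape SC 13G filings into default13GDir
#     info = parse13GD('0001234567-21-000123')   # parse a single filing by accession number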
# Cell
def updateCik13GDPos(scrapers, cik13GDPosMap=None,
cusipNames=None, cikNames=None, includeTickers=False) :
"""
Generate or update a combined dict of percentage holdings:
cik13GDPosMap: cik -> {cusip -> (eventDate, accNo, pct)}
based on a list of 13G and 13D scrapers - pct is in the range [0.0 .. 100.0],
as given in the 13G and 13D filings.
If cusipNames and cikNames are supplied, both should be dicts, and CUSIPs
encountered in 13G/D filings will have the corresponding CIK names added
to the CUSIP names based on the filings (i.e. each 13D/G filing includes
a CUSIP, and a corresponding subject CIK). If includeTickers is True,
the ticker names will also be added based on the CIK to ticker file
provided by the SEC.
"""
if cik13GDPosMap is None :
cik13GDPosMap = collections.defaultdict(dict)
cikTo13GDs = collections.defaultdict(list)
count = 0
extraCusipNames = None if (cusipNames is None or cikNames is None) else {
# CUSIP -> CIK mapping for some stocks that don't currently appear in any 13D or 13G filings:
'931142103' : ('0000','walmart','104169'),
'084670108' : ('0000','berkshire','1067983'),
}
for scraper in scrapers :
for dStr, accNoToInfo in scraper.infoMap.items() :
for accNo, info in accNoToInfo.items() :
if info == 'ERROR' :
print('*** ERROR in ',accNo)
elif 'filedByCik' not in info :
print('*** No filed-by CIK in',accNo)
elif 'cusip' not in info :
print('No CUSIP in',accNo)
else :
if len(info['positions']) == 0 :
print('*** No positions found in',accNo)
maxPctPos = 0.0
else :
maxPctPos = max(float(pct) for _,pct in info['positions'])
if 'eventDate' not in info :
eventDate = (utils.toDate(dStr)-datetime.timedelta(7)).isoformat()
print(f'No event date in {accNo}; using {eventDate}')
else :
eventDate = info['eventDate']
cusip = info['cusip']
filedByCik = info['filedByCik']
cikTo13GDs[filedByCik.lstrip('0')].append((cusip, eventDate, accNo,maxPctPos))
count += 1
if extraCusipNames is not None :
subjectCik = [cik for cik in info['ciks'] if cik!=filedByCik]
if len(subjectCik) != 1 :
print(f"missing or ambiguous subject CIK '{accNo}'")
elif cusip not in extraCusipNames or extraCusipNames[cusip][0] < dStr :
subjectCik = subjectCik[0].lstrip('0')
if subjectCik in cikNames :
extraCusipNames[cusip] = (dStr, cikNames[subjectCik], subjectCik)
else :
print(f"subject CIK {subjectCik} name not found '{accNo}'")
if extraCusipNames is not None :
count1 = count2 = 0
if includeTickers :
cikToTickers = dailyList.getCikToTickersMap()
else :
cikToTickers = collections.defaultdict(list)
for cusip,name in cusipNames.items() :
if cusip in extraCusipNames and 'CIK-' not in name :
_,subjectCikName,subjectCik = extraCusipNames[cusip]
                if subjectCikName[:8].strip().lower() !=
#!/usr/bin/env python3
from olctools.accessoryFunctions.accessoryFunctions import GenObject, make_path, MetadataObject, relative_symlink, \
run_subprocess, SetupLogging
from genewrappers.biotools import bbtools
from argparse import ArgumentParser
from Bio import SeqIO
from glob import glob
from time import time
import logging
import psutil
import json
import os
__author__ = 'adamkoziol'
class ReadPrep(object):
def main(self):
self.strains()
self.sequence_prep()
self.assembly_length()
self.simulate_reads()
self.read_length_adjust('simulated')
self.link_reads('simulated')
self.read_quality_trim()
self.sample_reads()
self.read_length_adjust('sampled')
self.link_reads('sampled')
self.run_genesippr()
# self.parse_genesippr()
# self.run_cowbat()
def strains(self):
"""
        Create a dictionary of OLN ID: SEQID from the supplied strains.csv file
"""
with open(os.path.join(self.path, 'strains.csv')) as strains:
next(strains)
for line in strains:
oln, seqid = line.split(',')
self.straindict[oln] = seqid.rstrip()
self.strainset.add(oln)
logging.debug(oln)
if self.debug:
break
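    # Expected strains.csv layout (values are hypothetical; the header line is skipped):
    #     OLN,SEQID
    #     OLF12345,2017-SEQ-0123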
def sequence_prep(self):
"""
Create metadata objects for all PacBio assembly FASTA files in the sequencepath.
Create individual subdirectories for each sample.
Relative symlink the original FASTA file to the appropriate subdirectory
"""
# Create a sorted list of all the FASTA files in the sequence path
        strains = sorted(glob(os.path.join(self.fastapath, '*.fa*')))
for sample in strains:
# Create the object
metadata = MetadataObject()
# Set the sample name to be the file name of the sequence by removing the path and file extension
sample_name = os.path.splitext(os.path.basename(sample))[0]
if sample_name in self.strainset:
# Extract the OLNID from the dictionary using the SEQID
samplename = self.straindict[sample_name]
# samplename = sample_name
# Set and create the output directory
outputdir = os.path.join(self.path, samplename)
make_path(outputdir)
# Set the name of the JSON file
json_metadata = os.path.join(outputdir, '{name}.json'.format(name=samplename))
if not os.path.isfile(json_metadata):
# Create the name and output directory attributes
metadata.name = samplename
metadata.seqid = sample_name
metadata.outputdir = outputdir
metadata.jsonfile = json_metadata
# Set the name of the FASTA file to use in the analyses
metadata.bestassemblyfile = os.path.join(metadata.outputdir,
'{name}.fasta'.format(name=metadata.name))
# Symlink the original file to the output directory
relative_symlink(sample, outputdir, '{sn}.fasta'.format(sn=metadata.name))
# Associate the corresponding FASTQ files with the assembly
metadata.fastqfiles = sorted(glob(os.path.join(self.fastqpath,
'{name}*.gz'.format(name=metadata.name))))
metadata.forward_fastq, metadata.reverse_fastq = metadata.fastqfiles
# Write the object to file
self.write_json(metadata)
else:
metadata = self.read_json(json_metadata)
# Add the metadata object to the list of objects
self.metadata.append(metadata)
@staticmethod
def write_json(metadata):
"""
Write the metadata object to file
:param metadata: Metadata object
"""
# Open the metadata file to write
with open(metadata.jsonfile, 'w') as metadatafile:
# Write the json dump of the object dump to the metadata file
json.dump(metadata.dump(), metadatafile, sort_keys=True, indent=4, separators=(',', ': '))
@staticmethod
def read_json(json_metadata):
"""
Read the metadata object from file
:param json_metadata: Path and file name of JSON-formatted metadata object file
:return: metadata object
"""
# Load the metadata object from the file
with open(json_metadata) as metadatareport:
jsondata = json.load(metadatareport)
# Create the metadata objects
metadata = MetadataObject()
# Initialise the metadata categories as GenObjects created using the appropriate key
for attr in jsondata:
if not isinstance(jsondata[attr], dict):
setattr(metadata, attr, jsondata[attr])
else:
setattr(metadata, attr, GenObject(jsondata[attr]))
return metadata
def assembly_length(self):
"""
Use SeqIO.parse to extract the total number of bases in each assembly file
"""
for sample in self.metadata:
            # Only determine the assembly length if it has not been previously calculated
if not GenObject.isattr(sample, 'assembly_length'):
# Create the assembly_length attribute, and set it to 0
sample.assembly_length = 0
for record in SeqIO.parse(sample.bestassemblyfile, 'fasta'):
# Update the assembly_length attribute with the length of the current contig
sample.assembly_length += len(record.seq)
# Write the updated object to file
self.write_json(sample)
def simulate_reads(self):
"""
Use the PacBio assembly FASTA files to generate simulated reads of appropriate forward and reverse lengths
at different depths of sequencing using randomreads.sh from the bbtools suite
"""
logging.info('Read simulation')
for sample in self.metadata:
# Create the simulated_reads GenObject
sample.simulated_reads = GenObject()
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
# Create the depth GenObject
setattr(sample.simulated_reads, depth, GenObject())
# Set the depth and output directory attributes for the depth GenObject
sample.simulated_reads[depth].depth = depth
sample.simulated_reads[depth].depth_dir = os.path.join(sample.outputdir, 'simulated', depth)
# Create the output directory
make_path(sample.simulated_reads[depth].depth_dir)
# Iterate through all the desired forward and reverse read pair lengths
for read_pair in self.read_lengths:
# Create the read_pair GenObject within the depth GenObject
setattr(sample.simulated_reads[depth], read_pair, GenObject())
# Set and create the output directory
sample.simulated_reads[depth][read_pair].outputdir = \
os.path.join(sample.simulated_reads[depth].depth_dir, read_pair)
make_path(sample.simulated_reads[depth][read_pair].outputdir)
# Create both forward_reads and reverse_reads sub-GenObjects
sample.simulated_reads[depth][read_pair].forward_reads = GenObject()
sample.simulated_reads[depth][read_pair].reverse_reads = GenObject()
# Extract the forward and reverse reads lengths from the read_pair variable
sample.simulated_reads[depth][read_pair].forward_reads.length, \
sample.simulated_reads[depth][read_pair].reverse_reads.length = read_pair.split('_')
# Set the name of the forward reads - include the depth and read length information
sample.simulated_reads[depth][read_pair].forward_reads.fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].outputdir,
'{name}_{depth}_{read_pair}_R1.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Reverse reads
sample.simulated_reads[depth][read_pair].reverse_reads.fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].outputdir,
'{name}_{depth}_{read_pair}_R2.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Create the trimmed output directory attribute
sample.simulated_reads[depth][read_pair].simulated_trimmed_outputdir \
= os.path.join(sample.simulated_reads[depth][read_pair].outputdir,
'simulated_trimmed')
# Set the name of the forward trimmed reads - include the depth and read length information
# This is set now, as the untrimmed files will be removed, and a check is necessary
sample.simulated_reads[depth][read_pair].forward_reads.trimmed_simulated_fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].simulated_trimmed_outputdir,
'{name}_simulated_{depth}_{read_pair}_R1.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Reverse reads
sample.simulated_reads[depth][read_pair].reverse_reads.trimmed_simulated_fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].simulated_trimmed_outputdir,
'{name}_simulated_{depth}_{read_pair}_R2.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Calculate the number of reads required for the forward and reverse reads to yield the
# desired coverage depth e.g. 5Mbp genome at 20X coverage: 100Mbp in reads. 50bp forward reads
# 150bp reverse reads: forward proportion is 50 / (150 + 50) = 0.25 (and reverse is 0.75).
# Forward total reads is 25Mbp (75Mbp reverse). Number of reads required = 25Mbp / 50 bp
# 500000 reads total (same for reverse, as the reads are longer)
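                    # Sanity check of that example: int(5e6 * 20 * (50 / (50 + 150)) / 50) == 500000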
sample.simulated_reads[depth][read_pair].num_reads = \
int(sample.assembly_length *
int(depth) *
(int(sample.simulated_reads[depth][read_pair].forward_reads.length) /
(int(sample.simulated_reads[depth][read_pair].forward_reads.length) +
int(sample.simulated_reads[depth][read_pair].reverse_reads.length)
)
) /
int(sample.simulated_reads[depth][read_pair].forward_reads.length)
)
logging.info(
'Simulating {num_reads} paired reads for sample {name} with the following parameters:\n'
'depth {dp}, forward reads {fl}bp, and reverse reads {rl}bp'
.format(num_reads=sample.simulated_reads[depth][read_pair].num_reads,
dp=depth,
name=sample.name,
fl=sample.simulated_reads[depth][read_pair].forward_reads.length,
rl=sample.simulated_reads[depth][read_pair].reverse_reads.length))
# If the reverse reads are set to 0, supply different parameters to randomreads
if sample.simulated_reads[depth][read_pair].reverse_reads.length != '0':
# Ensure that both the simulated reads, and the trimmed simulated reads files don't
# exist before simulating the reads
if not os.path.isfile(sample.simulated_reads[depth][read_pair].forward_reads.fastq) and \
not os.path.isfile(
sample.simulated_reads[depth][read_pair].forward_reads.trimmed_simulated_fastq):
# Use the randomreads method in the OLCTools bbtools wrapper to simulate the reads
out, \
err, \
sample.simulated_reads[depth][read_pair].forward_reads.simulate_call = bbtools\
.randomreads(reference=sample.bestassemblyfile,
length=sample.simulated_reads[depth][read_pair].reverse_reads.length,
reads=sample.simulated_reads[depth][read_pair].num_reads,
out_fastq=sample.simulated_reads[depth][read_pair].forward_reads.fastq,
paired=True,
returncmd=True,
**{'ziplevel': '9',
'illuminanames': 't',
'Xmx': self.mem}
)
else:
try:
forward_size = os.path.getsize(sample.simulated_reads[depth][read_pair]
.forward_reads.fastq)
except FileNotFoundError:
forward_size = 0
try:
reverse_size = os.path.getsize(sample.simulated_reads[depth][read_pair]
.reverse_reads.fastq)
except FileNotFoundError:
reverse_size = 0
if forward_size <= 100 or reverse_size <= 100:
try:
os.remove(sample.simulated_reads[depth][read_pair].forward_reads.fastq)
except FileNotFoundError:
pass
try:
os.remove(sample.simulated_reads[depth][read_pair].reverse_reads.fastq)
except FileNotFoundError:
pass
# Use the randomreads method in the OLCTools bbtools wrapper to simulate the reads
out, \
err, \
sample.simulated_reads[depth][read_pair].forward_reads.simulate_call = bbtools \
.randomreads(reference=sample.bestassemblyfile,
length=sample.simulated_reads[depth][read_pair].reverse_reads.length,
reads=sample.simulated_reads[depth][read_pair].num_reads,
out_fastq=sample.simulated_reads[depth][read_pair].forward_reads.fastq,
paired=True,
returncmd=True,
**{'ziplevel': '9',
'illuminanames': 't'}
)
else:
if not os.path.isfile(sample.simulated_reads[depth][read_pair].forward_reads.fastq):
# Use the randomreads method in the OLCTools bbtools wrapper to simulate the reads
out, \
err, \
sample.simulated_reads[depth][read_pair].forward_reads.simulate_call = bbtools\
.randomreads(reference=sample.bestassemblyfile,
length=sample.simulated_reads[depth][read_pair].forward_reads.length,
reads=sample.simulated_reads[depth][read_pair].num_reads,
out_fastq=sample.simulated_reads[depth][read_pair].forward_reads.fastq,
paired=False,
returncmd=True,
**{'ziplevel': '9',
'illuminanames': 't'}
)
# Update the JSON file
self.write_json(sample)
def read_length_adjust(self, analysistype):
"""
Trim the reads to the correct length using reformat.sh
:param analysistype: current analysis type. Will be either 'simulated' or 'sampled'
"""
logging.info('Trimming {at} reads'.format(at=analysistype))
for sample in self.metadata:
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
for read_pair in self.read_lengths:
# Create variables using the analysis type. These will be used in setting GenObject attributes
read_type = '{at}_reads'.format(at=analysistype)
fastq_type = 'trimmed_{at}_fastq'.format(at=analysistype)
logging.info(
'Trimming forward {at} reads for sample {name} at depth {depth} to length {length}'
.format(at=analysistype,
name=sample.name,
depth=depth,
length=sample[read_type][depth][read_pair].forward_reads.length))
# Create the output path if necessary
make_path(os.path.dirname(sample[read_type][depth][read_pair].forward_reads[fastq_type]))
if sample[read_type][depth][read_pair].reverse_reads.length != '0':
# Use the reformat method in the OLCTools bbtools wrapper to trim the reads
out, \
err, \
sample[read_type][depth][read_pair].forward_reads.sample_call = bbtools \
.reformat_reads(forward_in=sample[read_type][depth][read_pair].forward_reads.fastq,
reverse_in=None,
forward_out=sample[read_type][depth][read_pair].forward_reads[fastq_type],
returncmd=True,
**{'ziplevel': '9',
'forcetrimright':
sample[read_type][depth][read_pair].forward_reads.length,
'tossbrokenreads': 't',
'tossjunk': 't',
'Xmx': self.mem
}
)
# # Remove the untrimmed reads
# try:
# os.remove(sample[read_type][depth][read_pair].forward_reads.fastq)
# except FileNotFoundError:
# pass
else:
# If the files do not need to be trimmed, create a symlink to the original file
relative_symlink(sample[read_type][depth][read_pair].forward_reads.fastq,
os.path.dirname(sample[read_type][depth][read_pair].
forward_reads[fastq_type]),
os.path.basename(sample[read_type][depth][read_pair].
forward_reads[fastq_type])
)
# Same as above, but for the reverse reads
logging.info(
# This file is auto-generated, don't edit it. Thanks.
from alibabacloud_tea_rpc.client import Client as RPCClient
from alibabacloud_polardb20170801 import models as polardb_20170801_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_endpoint_util.client import Client as EndpointUtilClient
class Client(RPCClient):
def __init__(self, config):
super().__init__(config)
self._endpoint_rule = "regional"
self._endpoint_map = {
"cn-qingdao": "polardb.aliyuncs.com",
"cn-beijing": "polardb.aliyuncs.com",
"cn-hangzhou": "polardb.aliyuncs.com",
"cn-shanghai": "polardb.aliyuncs.com",
"cn-shenzhen": "polardb.aliyuncs.com",
"cn-hongkong": "polardb.aliyuncs.com",
"ap-southeast-1": "polardb.aliyuncs.com",
"us-west-1": "polardb.aliyuncs.com",
"cn-hangzhou-finance": "polardb.aliyuncs.com",
"cn-shanghai-finance-1": "polardb.aliyuncs.com",
"us-east-1": "polardb.ap-northeast-1.aliyuncs.com",
"cn-shenzhen-finance-1": "polardb.aliyuncs.com",
"cn-north-2-gov-1": "polardb.aliyuncs.com"
}
self.check_config(config)
self._endpoint = self.get_endpoint("polardb", self._region_id, self._endpoint_rule, self._network, self._suffix, self._endpoint_map, self._endpoint)
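    # Hypothetical usage sketch (the Config fields and request-model names are assumptions
    # based on this generated SDK's naming pattern, not taken from this file):
    #     client = Client(config)  # config: alibabacloud_tea_rpc Config with credentials/region
    #     resp = client.describe_databases(polardb_20170801_models.DescribeDatabasesRequest())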
def describe_dbcluster_audit_log_collector_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeDBClusterAuditLogCollectorResponse().from_map(self.do_request("DescribeDBClusterAuditLogCollector", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_dbcluster_audit_log_collector(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_dbcluster_audit_log_collector_with_options(request, runtime)
def modify_dbcluster_audit_log_collector_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyDBClusterAuditLogCollectorResponse().from_map(self.do_request("ModifyDBClusterAuditLogCollector", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_dbcluster_audit_log_collector(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_dbcluster_audit_log_collector_with_options(request, runtime)
def describe_detached_backups_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeDetachedBackupsResponse().from_map(self.do_request("DescribeDetachedBackups", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_detached_backups(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_detached_backups_with_options(request, runtime)
def describe_dbclusters_with_backups_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeDBClustersWithBackupsResponse().from_map(self.do_request("DescribeDBClustersWithBackups", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_dbclusters_with_backups(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_dbclusters_with_backups_with_options(request, runtime)
def describe_log_backup_policy_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeLogBackupPolicyResponse().from_map(self.do_request("DescribeLogBackupPolicy", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_log_backup_policy(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_log_backup_policy_with_options(request, runtime)
def modify_log_backup_policy_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyLogBackupPolicyResponse().from_map(self.do_request("ModifyLogBackupPolicy", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_log_backup_policy(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_log_backup_policy_with_options(request, runtime)
def modify_dbcluster_monitor_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyDBClusterMonitorResponse().from_map(self.do_request("ModifyDBClusterMonitor", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_dbcluster_monitor(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_dbcluster_monitor_with_options(request, runtime)
def describe_dbcluster_monitor_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeDBClusterMonitorResponse().from_map(self.do_request("DescribeDBClusterMonitor", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_dbcluster_monitor(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_dbcluster_monitor_with_options(request, runtime)
def describe_dbcluster_available_resources_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeDBClusterAvailableResourcesResponse().from_map(self.do_request("DescribeDBClusterAvailableResources", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_dbcluster_available_resources(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_dbcluster_available_resources_with_options(request, runtime)
def describe_backup_logs_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeBackupLogsResponse().from_map(self.do_request("DescribeBackupLogs", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_backup_logs(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_backup_logs_with_options(request, runtime)
def modify_dbcluster_sslwith_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyDBClusterSSLResponse().from_map(self.do_request("ModifyDBClusterSSL", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_dbcluster_ssl(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_dbcluster_sslwith_options(request, runtime)
def describe_dbcluster_sslwith_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeDBClusterSSLResponse().from_map(self.do_request("DescribeDBClusterSSL", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_dbcluster_ssl(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_dbcluster_sslwith_options(request, runtime)
def describe_dbcluster_migration_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeDBClusterMigrationResponse().from_map(self.do_request("DescribeDBClusterMigration", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_dbcluster_migration(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_dbcluster_migration_with_options(request, runtime)
def close_dbcluster_migration_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.CloseDBClusterMigrationResponse().from_map(self.do_request("CloseDBClusterMigration", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def close_dbcluster_migration(self, request):
runtime = util_models.RuntimeOptions(
)
return self.close_dbcluster_migration_with_options(request, runtime)
def modify_dbcluster_migration_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyDBClusterMigrationResponse().from_map(self.do_request("ModifyDBClusterMigration", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_dbcluster_migration(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_dbcluster_migration_with_options(request, runtime)
def modify_auto_renew_attribute_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyAutoRenewAttributeResponse().from_map(self.do_request("ModifyAutoRenewAttribute", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_auto_renew_attribute(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_auto_renew_attribute_with_options(request, runtime)
def modify_dbnode_class_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyDBNodeClassResponse().from_map(self.do_request("ModifyDBNodeClass", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_dbnode_class(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_dbnode_class_with_options(request, runtime)
def describe_auto_renew_attribute_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeAutoRenewAttributeResponse().from_map(self.do_request("DescribeAutoRenewAttribute", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_auto_renew_attribute(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_auto_renew_attribute_with_options(request, runtime)
def create_dbnodes_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.CreateDBNodesResponse().from_map(self.do_request("CreateDBNodes", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def create_dbnodes(self, request):
runtime = util_models.RuntimeOptions(
)
return self.create_dbnodes_with_options(request, runtime)
def delete_dbnodes_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DeleteDBNodesResponse().from_map(self.do_request("DeleteDBNodes", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def delete_dbnodes(self, request):
runtime = util_models.RuntimeOptions(
)
return self.delete_dbnodes_with_options(request, runtime)
def untag_resources_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.UntagResourcesResponse().from_map(self.do_request("UntagResources", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def untag_resources(self, request):
runtime = util_models.RuntimeOptions(
)
return self.untag_resources_with_options(request, runtime)
def tag_resources_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.TagResourcesResponse().from_map(self.do_request("TagResources", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def tag_resources(self, request):
runtime = util_models.RuntimeOptions(
)
return self.tag_resources_with_options(request, runtime)
def list_tag_resources_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ListTagResourcesResponse().from_map(self.do_request("ListTagResources", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def list_tag_resources(self, request):
runtime = util_models.RuntimeOptions(
)
return self.list_tag_resources_with_options(request, runtime)
def modify_dbendpoint_address_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyDBEndpointAddressResponse().from_map(self.do_request("ModifyDBEndpointAddress", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_dbendpoint_address(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_dbendpoint_address_with_options(request, runtime)
def modify_dbdescription_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyDBDescriptionResponse().from_map(self.do_request("ModifyDBDescription", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_dbdescription(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_dbdescription_with_options(request, runtime)
def modify_dbcluster_parameters_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyDBClusterParametersResponse().from_map(self.do_request("ModifyDBClusterParameters", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_dbcluster_parameters(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_dbcluster_parameters_with_options(request, runtime)
def modify_dbcluster_endpoint_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyDBClusterEndpointResponse().from_map(self.do_request("ModifyDBClusterEndpoint", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_dbcluster_endpoint(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_dbcluster_endpoint_with_options(request, runtime)
def modify_account_password_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyAccountPasswordResponse().from_map(self.do_request("ModifyAccountPassword", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_account_password(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_account_password_with_options(request, runtime)
def describe_dbcluster_performance_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeDBClusterPerformanceResponse().from_map(self.do_request("DescribeDBClusterPerformance", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_dbcluster_performance(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_dbcluster_performance_with_options(request, runtime)
def describe_dbcluster_parameters_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeDBClusterParametersResponse().from_map(self.do_request("DescribeDBClusterParameters", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_dbcluster_parameters(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_dbcluster_parameters_with_options(request, runtime)
def describe_dbcluster_endpoints_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeDBClusterEndpointsResponse().from_map(self.do_request("DescribeDBClusterEndpoints", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_dbcluster_endpoints(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_dbcluster_endpoints_with_options(request, runtime)
def delete_dbendpoint_address_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DeleteDBEndpointAddressResponse().from_map(self.do_request("DeleteDBEndpointAddress", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def delete_dbendpoint_address(self, request):
runtime = util_models.RuntimeOptions(
)
return self.delete_dbendpoint_address_with_options(request, runtime)
def delete_dbcluster_endpoint_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DeleteDBClusterEndpointResponse().from_map(self.do_request("DeleteDBClusterEndpoint", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def delete_dbcluster_endpoint(self, request):
runtime = util_models.RuntimeOptions(
)
return self.delete_dbcluster_endpoint_with_options(request, runtime)
def create_dbendpoint_address_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.CreateDBEndpointAddressResponse().from_map(self.do_request("CreateDBEndpointAddress", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def create_dbendpoint_address(self, request):
runtime = util_models.RuntimeOptions(
)
return self.create_dbendpoint_address_with_options(request, runtime)
def create_dbcluster_endpoint_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.CreateDBClusterEndpointResponse().from_map(self.do_request("CreateDBClusterEndpoint", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def create_dbcluster_endpoint(self, request):
runtime = util_models.RuntimeOptions(
)
return self.create_dbcluster_endpoint_with_options(request, runtime)
def restart_dbnode_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.RestartDBNodeResponse().from_map(self.do_request("RestartDBNode", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def restart_dbnode(self, request):
runtime = util_models.RuntimeOptions(
)
return self.restart_dbnode_with_options(request, runtime)
def describe_dbnode_performance_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeDBNodePerformanceResponse().from_map(self.do_request("DescribeDBNodePerformance", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_dbnode_performance(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_dbnode_performance_with_options(request, runtime)
def modify_dbcluster_access_whitelist_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyDBClusterAccessWhitelistResponse().from_map(self.do_request("ModifyDBClusterAccessWhitelist", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_dbcluster_access_whitelist(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_dbcluster_access_whitelist_with_options(request, runtime)
def describe_dbcluster_access_whitelist_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeDBClusterAccessWhitelistResponse().from_map(self.do_request("DescribeDBClusterAccessWhitelist", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_dbcluster_access_whitelist(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_dbcluster_access_whitelist_with_options(request, runtime)
def modify_dbcluster_maintain_time_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ModifyDBClusterMaintainTimeResponse().from_map(self.do_request("ModifyDBClusterMaintainTime", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def modify_dbcluster_maintain_time(self, request):
runtime = util_models.RuntimeOptions(
)
return self.modify_dbcluster_maintain_time_with_options(request, runtime)
def revoke_account_privilege_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.RevokeAccountPrivilegeResponse().from_map(self.do_request("RevokeAccountPrivilege", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def revoke_account_privilege(self, request):
runtime = util_models.RuntimeOptions(
)
return self.revoke_account_privilege_with_options(request, runtime)
def reset_account_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.ResetAccountResponse().from_map(self.do_request("ResetAccount", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def reset_account(self, request):
runtime = util_models.RuntimeOptions(
)
return self.reset_account_with_options(request, runtime)
def grant_account_privilege_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.GrantAccountPrivilegeResponse().from_map(self.do_request("GrantAccountPrivilege", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def grant_account_privilege(self, request):
runtime = util_models.RuntimeOptions(
)
return self.grant_account_privilege_with_options(request, runtime)
def describe_databases_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeDatabasesResponse().from_map(self.do_request("DescribeDatabases", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_databases(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_databases_with_options(request, runtime)
def delete_database_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DeleteDatabaseResponse().from_map(self.do_request("DeleteDatabase", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def delete_database(self, request):
runtime = util_models.RuntimeOptions(
)
return self.delete_database_with_options(request, runtime)
def delete_account_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DeleteAccountResponse().from_map(self.do_request("DeleteAccount", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def delete_account(self, request):
runtime = util_models.RuntimeOptions(
)
return self.delete_account_with_options(request, runtime)
def create_database_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.CreateDatabaseResponse().from_map(self.do_request("CreateDatabase", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def create_database(self, request):
runtime = util_models.RuntimeOptions(
)
return self.create_database_with_options(request, runtime)
def delete_backup_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DeleteBackupResponse().from_map(self.do_request("DeleteBackup", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def delete_backup(self, request):
runtime = util_models.RuntimeOptions(
)
return self.delete_backup_with_options(request, runtime)
def describe_slow_logs_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeSlowLogsResponse().from_map(self.do_request("DescribeSlowLogs", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_slow_logs(self, request):
runtime = util_models.RuntimeOptions(
)
return self.describe_slow_logs_with_options(request, runtime)
def describe_slow_log_records_with_options(self, request, runtime):
UtilClient.validate_model(request)
return polardb_20170801_models.DescribeSlowLogRecordsResponse().from_map(self.do_request("DescribeSlowLogRecords", "HTTPS", "POST", "2017-08-01", "AK", None, request.to_map(), runtime))
def describe_slow_log_records(self, request):
runtime = util_models.RuntimeOptions(
)
        return self.describe_slow_log_records_with_options(request, runtime)
#!/usr/bin/python
"""
opx_get_packages -- fetch a list of debian packages, and all their
run-time dependencies
"""
from __future__ import print_function
import apt
import apt_pkg
import collections
import sys
import os
import shutil
import subprocess
import argparse
import logging
import itertools
from distutils.version import LooseVersion
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# defaults for the OpxPackages constructor (__init__) function
# also used in command invocation method below
DEFAULT_SYSROOT = None
DEFAULT_SYSROOTDEV = None
DEFAULT_PKG_URL = "http://deb.openswitch.net/"
DEFAULT_PKG_DISTRIBUTION = "unstable"
DEFAULT_PKG_COMPONENT = "main opx opx-non-free"
class VersionWrapper(object):
"""
:class:`apt_pkg.Version` wrapper
We need to do set operations on :class:`apt_pkg.Version` objects,
but they are not hashable. This thin wrapper does just enough to
make the objects hashable.
"""
def __init__(self, version):
self._ver = version
def __hash__(self):
return hash((self._ver.parent_pkg.name, self._ver.ver_str))
def __eq__(self, other):
return (self._ver.parent_pkg.name == other._ver.parent_pkg.name and
self._ver.ver_str == other._ver.ver_str)
def __str__(self):
return self._ver.__str__()
@property
def parent_pkg(self):
"""
apt Parent Package accessor
"""
return self._ver.parent_pkg
@property
def ver_str(self):
"""
apt Package Version string accessor
"""
return self._ver.ver_str
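# Illustrative point of the wrapper: {VersionWrapper(v), VersionWrapper(v)} collapses to a
# single element, enabling the set operations that raw apt_pkg.Version objects (which are
# unhashable) cannot participate in.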
class OpxPackagesError(Exception):
"""
OpxPackgesError - OPX get package general exception
"""
pass
class OpxPackageSource(object):
"""
Represents package source (sources.list entry)
"""
def __init__(self, url, distribution, component):
"""
Construct a :class:`OpxPackageSource` object
:param url:
The url to the base of the package repository.
:param distribution:
The distribution (also called 'suite') reflects the
level of testing/acceptance of a particular package.
In the Debian package repository, packages start as
unstable, and are promoted to testing, stable, and a
release codename like 'wheezy' or 'jessie'.
:param component:
The component (also called 'section'). In the Debian
package repository, component is 'main', 'contrib', or
'non-free'. Other repositories have their own naming
conventions.
"""
self.url = url
self.distribution = distribution
self.component = component
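# For example, the module defaults above correspond to
#     OpxPackageSource(DEFAULT_PKG_URL, DEFAULT_PKG_DISTRIBUTION, DEFAULT_PKG_COMPONENT)
# i.e. the 'unstable' distribution of http://deb.openswitch.net/ with components
# 'main opx opx-non-free'.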
class OpxPackages(object):
"""
Provides interface to the python apt and apt_pkg libraries
Used to fulfill build and dev dependencies for clone and
clone-all actions.
Will be used to assemble from packages
"""
def __init__(self,
sysroot,
pkg_sources,
default_solver=False,
sysrootdev=None,
install_recommends=False,
install_suggests=False):
"""
Construct a :class:`OpxPackages` object
:param sysroot:
Path to sysroot
:param pkg_sources:
List of :class:`OpxPackageSource` objects, used to create
/etc/apt/sources.list file used to fetch packages.
:param sysrootdev:
Path to sysroot-dev
        :param install_recommends:
If ``True``, install recommended packages.
:param install_suggests:
If ``True``, install suggested packages.
"""
self._apt_cache = None
self._cache = None
self._default_solver = default_solver
self._pkg_sources = pkg_sources
self._folder = sysroot
self._build_folder = sysrootdev
if self._folder[-1:] == '/':
self._folder = self._folder[:-1]
if not os.path.exists(self._folder):
raise OpxPackagesError(self._folder + " does not exist")
_msg = "Sysroot is in " + self._folder
if not self._build_folder:
self._build_folder = self._folder + "-dev"
if self._build_folder and os.path.exists(self._build_folder):
_msg += " Development rootfs is in " + self._build_folder
else:
self._build_folder = None
print(_msg)
# Set up pointers to and create the dpkg package cache
# within the specified sysroot
self._apt_cache = os.path.join(self._folder, "var", "lib", "dpkg")
        # Standard debian packages are maintained in a separate root
        # file system image to keep isolation between the AR
        # generated packages and the standard distribution packages.
        # Development packages from the distro are imported in
        # a sysroot-dev root file system image with a package
        # cache; that package cache is used to seed the sysroot
        # for individual package build or development, so seed
        # this sysroot's package cache from the sysroot-dev if
        # it exists ...
if self._build_folder:
_build_cache = os.path.join(self._build_folder,
"var", "lib", "dpkg")
print("Checking..." + self._apt_cache + " and " + _build_cache)
if not os.path.exists(self._apt_cache) \
and os.path.exists(_build_cache):
print("Copying.. " + _build_cache)
shutil.copytree(_build_cache, self._apt_cache, symlinks=True)
self._apt_cache = os.path.join(self._folder, "var", "cache",
"apt", "archives")
self.sources = os.path.join(self._folder, "etc", "apt", "sources.list")
if not os.path.exists(self.sources):
if not os.path.exists(os.path.dirname(self.sources)):
os.makedirs(os.path.dirname(self.sources))
else:
shutil.copy(self.sources, self.sources + ".save")
# create sources.list file with url, distribution, and component.
with open(self.sources, "w") as f:
for pkg_source in self._pkg_sources:
source = "{} {} {}".format(
pkg_source.url,
pkg_source.distribution,
pkg_source.component,
)
# local packages must be explicitly trusted
if "copy:/mnt" in pkg_source.url:
options = "[arch=amd64 trusted=yes]"
else:
options = "[arch=amd64]"
print("Using {}".format(source))
f.write("deb %s %s\n" % (options, source))
# create apt preferences file to always use local packages
with open(os.path.join(self._folder, "etc", "apt", "preferences"), "w") as f:
f.write('Package: *\nPin: origin ""\nPin-Priority: 1100\n\n')
f.write('Package: *\nPin: origin "deb.openswitch.net"\nPin-Priority: 750\n\n')
# create cache and update it
self._cache = apt.Cache(rootdir=self._folder, memonly=True)
# set Install-Recommends and Install-Suggests configuration options
apt_pkg.config['APT::Install-Recommends'] = \
"1" if install_recommends else "0"
apt_pkg.config['APT::Install-Suggests'] = \
"1" if install_suggests else "0"
try:
self._cache.update()
except Exception as ex:
print("\nCache update error ignored : %s\n" % (ex))
self._cache.open()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
close and clean-up of an object instance
"""
self._cache.close()
if os.path.exists(self.sources + '.save'):
shutil.copy(self.sources + ".save", self.sources)
def list_packages(self):
"""
List packages available in cache
"""
print("Packages available are as follows:")
for i in self._cache.keys():
print(str(i))
@property
def _depcache(self):
"""
Dependency cache state accessor
"""
return self._cache._depcache
def _dump_package(self, pkg):
"""
dump_package
dump metadata from :class:`apt_pkg.Package` object
"""
logger.debug("%s:", pkg.name)
logger.debug(" marked_delete: %s",
self._depcache.marked_delete(pkg))
logger.debug(" marked_downgrade: %s",
self._depcache.marked_downgrade(pkg))
logger.debug(" marked_install: %s",
self._depcache.marked_install(pkg))
logger.debug(" marked_keep: %s",
self._depcache.marked_keep(pkg))
logger.debug(" marked_reinstall: %s",
self._depcache.marked_reinstall(pkg))
logger.debug(" marked_upgrade: %s",
self._depcache.marked_upgrade(pkg))
logger.debug(" is_auto_installed: %s",
self._depcache.is_auto_installed(pkg))
logger.debug(" is_garbage: %s",
self._depcache.is_garbage(pkg))
logger.debug(" is_inst_broken: %s",
self._depcache.is_inst_broken(pkg))
logger.debug(" is_now_broken: %s",
self._depcache.is_now_broken(pkg))
logger.debug(" is_upgradable %s",
self._depcache.is_upgradable(pkg))
def _fetch_package(self, pkg, from_user=False, backtrace=[]):
"""
Get the dependencies of the package's desired (candidate)
version and compute the set of dependent packages. If the
dependent package is not already installed, recursively
invoke this function.
:meth:`apt_pkg.Dependency.all_targets` returns the set of
        dependent package versions that satisfy a dependency.
However, since a package may have more than one dependency
for a given dependent package (e.g., one dependency with a
version floor, another with a version ceiling), we compute
the set of dependent packages which satisfy all of the
dependencies.
This is done with two dictionaries. pkg_versions is the
dictionary of all dependent packages and versions, while
dep_versions is the dictionary of packages and versions
for a single :class:`apt.pkg.Dependency`.
TODO: This function only handles simple dependencies,
not Breaks, Conflicts, or Replaces.
"""
version = self._depcache.get_candidate_ver(pkg)
logger.debug("version: %s", version)
logger.debug(" %s", backtrace)
if 'Depends' in version.depends_list:
pkg_versions = dict()
for or_deps in version.depends_list["Depends"]:
logger.debug("or_deps: %s", or_deps)
                # In general, this script does not handle "or"
                # dependencies. However, we have special-cased
                # makedev/udev and debconf/debconf-2.0 to make
                # it good enough for NGOS image creation until
                # full or-dependency handling is implemented.
if len(or_deps) != 1:
logger.debug("pre: %s", or_deps)
or_deps = [dep for dep in or_deps
if dep.target_pkg.name
not in ('makedev', 'debconf-2.0')]
logger.debug("post: %s", or_deps)
if len(or_deps) != 1:
raise OpxPackagesError("Can't handle or-dependencies")
for dep in or_deps:
logger.debug("dep: %s", dep)
logger.debug("%s is satisfied by:", dep.target_pkg.name)
for v in dep.all_targets():
logger.debug(" %s", v)
dep_versions = collections.defaultdict(set)
for v in dep.all_targets():
dep_versions[dep.target_pkg.name].add(VersionWrapper(v))
for name, versions in dep_versions.items():
                        if name not in pkg_versions:
pkg_versions[name] = set(versions)
else:
pkg_versions[name] &= versions
# We now have list of :class:`apt_pkg.Version` objects that satisfy
# the dependencies for the package. Next we identify what packages
# may need to be installed.
for name, versions in pkg_versions.items():
logger.debug("pkg_versions: %s -> %s", pkg.name, name)
if len(versions) == 0:
raise OpxPackagesError(
"Unable to satisfy dependency: %s %s" %
(pkg.name, name))
# Identify a list of candidate packages
logger.debug("start iterating group")
candidate_versions = []
sv = sorted(versions, key=lambda x: x._ver.parent_pkg.name)
for k, vx in itertools.groupby(sv,
key=lambda x: x._ver.parent_pkg.name):
# change vx from an iterator to a list, as we need to
# traverse it multiple times
vx = list(vx)
                    # While the library returns the versions in order, the
                    # set operations destroy that order. So use the
                    # LooseVersion class from distutils to sort.
best_v = sorted(vx,
key=lambda x: LooseVersion(x.ver_str),
reverse=True)
logger.debug("%s", k)
for v in best_v:
logger.debug(" %s", v.ver_str)
best_v = best_v[0]
logger.debug("best candidate is %s", best_v)
candidate_versions.append(best_v)
logger.debug("done iterating group")
# Determine whether any of the candidates are already installed
installed = False
for v in candidate_versions:
dep_pkg = v.parent_pkg
if dep_pkg.id in [xpkg.id for xpkg in backtrace]:
installed = True
break
if dep_pkg.current_state != apt_pkg.CURSTATE_NOT_INSTALLED:
installed = True
break
if self._depcache.marked_install(dep_pkg):
installed = True
break
# If dependent package is not installed, then select | |
<reponame>CloudVelox/simple-cloud-shell
#
# Copyright 2014-2016 CloudVelox Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module contains the implementation of the 'elb' command
"""
import getopt
import boto
import common
from common import CommandError
from common import DisplayOptions
from common import CommandOutput
from common import optional
from common import amazon2localtime
def _boto_min_version(vers):
"""Returns True if boto_version >= vers
"""
vers_list = [int(v) for v in vers.split('.')]
boto_vers_list = [int(v) for v in boto.__version__.split('.')]
for i in range(len(boto_vers_list)):
        if i >= len(vers_list):
return True
if boto_vers_list[i] > vers_list[i]:
return True
if boto_vers_list[i] < vers_list[i]:
return False
return True
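# Example (assuming boto 2.38.0 is installed): _boto_min_version('2.9.9') is True,
# while _boto_min_version('3.0') is False.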
class ELBCommand(common.BaseCommand):
def __elb_display(self, elb, disp, pg, region):
"""Display information about the specified ELB.
"""
if disp.display_policies:
pg.prt("%s", elb.name)
if elb.policies.app_cookie_stickiness_policies:
for policy in elb.policies.app_cookie_stickiness_policies:
pg.prt("%15s : %-15s cookie=%s",
"App-cookie",
policy.policy_name,
policy.cookie_name)
if elb.policies.lb_cookie_stickiness_policies:
for policy in elb.policies.lb_cookie_stickiness_policies:
pg.prt("%15s : %-15s expiration=%s",
"LB-cookie",
policy.policy_name,
policy.cookie_expiration_period)
elif disp.display == DisplayOptions.LONG:
pg.prt("%-20s %-30s %s",
elb.name,
elb.dns_name,
optional(elb.vpc_id))
elif disp.display == DisplayOptions.EXTENDED:
pg.prt("%s", elb.name)
pg.prt("%15s : %s", "DNS-name", elb.dns_name)
pg.prt("%15s : %s", "CNAME", elb.canonical_hosted_zone_name)
pg.prt("%15s : %s", "Create-time",
amazon2localtime(elb.created_time))
for listener in elb.listeners:
pg.prt("%15s : in=%-4s out=%-4s proto=%-5s",
"Listener",
listener.load_balancer_port,
listener.instance_port,
listener.protocol)
if listener.policy_names:
pg.prt("%15s policy=%s", "", listener.policy_names[0])
if listener.ssl_certificate_id:
cert_name = listener.ssl_certificate_id.split('/', 1)[1]
pg.prt("%15s cert=%s", "", cert_name)
pg.prt("%15s : %s" % ("Group", elb.source_security_group.name))
if elb.vpc_id:
pg.prt("%15s : %s", "VPC-id", elb.vpc_id)
self.cache_insert(region, [elb.vpc_id])
if elb.subnets:
pg.prt("%15s : %s", "Subnets", ", ".join(elb.subnets))
self.cache_insert(region, elb.subnets)
if elb.availability_zones:
pg.prt("%15s : %s", "Zones", ", ".join(elb.availability_zones))
if elb.health_check:
pg.prt("%15s : i=%s t=%s ht=%s ut=%s %s",
"Healthcheck",
elb.health_check.interval,
elb.health_check.timeout,
elb.health_check.healthy_threshold,
elb.health_check.unhealthy_threshold,
optional(elb.health_check.target))
if elb.policies.app_cookie_stickiness_policies:
for policy in elb.policies.app_cookie_stickiness_policies:
pg.prt("%15s : %-15s cookie=%s",
"App-cookie",
policy.policy_name,
policy.cookie_name)
if elb.policies.lb_cookie_stickiness_policies:
for policy in elb.policies.lb_cookie_stickiness_policies:
pg.prt("%15s : %-15s expiration=%s",
"LB-cookie",
policy.policy_name,
policy.cookie_expiration_period)
if elb.instances:
for instance_info in elb.instances:
pg.prt("%15s : %-12s", "Instance", instance_info.id)
self.cache_insert(region, [instance_info.id])
else:
pg.prt("%s", elb.name)
def __elb_list_cmd(self, region, elb_names, disp):
"""Implement the list functionality of the elb command
"""
elb_conn = self.get_elb_conn(region)
elb_list = elb_conn.get_all_load_balancers(
load_balancer_names=elb_names)
with CommandOutput() as pg:
for elb in elb_list:
self.__elb_display(elb, disp, pg, region)
@staticmethod
def __elb_parse_listeners(listener_spec_list):
"""Convert a list of listener specs to a list of listeners
suitable to use in the boto API. A listener spec looks like this:
lb_port,instance_port,lb_proto,instance_proto[,cert-arn]
"""
if not _boto_min_version('2.9.9'):
raise CommandError(
"This command requires at least boto version 2.9.9")
listener_list = []
for spec in listener_spec_list:
fields = spec.split(',')
n_fields = len(fields)
if n_fields not in [4, 5]:
raise CommandError("Bad ELB listener spec: %s" % (spec,))
try:
lb_port = int(fields[0])
instance_port = int(fields[1])
lb_proto = fields[2].upper()
instance_proto = fields[3].upper()
except ValueError:
raise CommandError("Bad port number in %s" % (spec,))
if lb_proto not in ['HTTP', 'HTTPS', 'TCP']:
raise CommandError("Bad LB protocol in spec: %s" % (spec,))
if instance_proto not in ['HTTP', 'HTTPS', 'TCP']:
raise CommandError(
"Bad instance protocol in spec: %s" % (spec,))
if lb_proto == 'HTTPS':
if n_fields != 5:
raise CommandError(
"SSL Certificate ARN is required for %s" % (spec,))
arn = fields[4]
listener = (lb_port, instance_port,
lb_proto, instance_proto, arn)
else:
listener = (lb_port, instance_port, lb_proto, instance_proto)
listener_list.append(listener)
return listener_list
def __elb_create_cmd(self, region, subnet_list,
listener_spec_list, sg_id_list, args):
"""Create an ELB
"""
if not subnet_list:
raise CommandError("No subnets specified")
if not args:
raise CommandError("No ELB name specified")
listener_list = self.__elb_parse_listeners(listener_spec_list)
if not listener_list:
raise CommandError("You need to specify at least one listener")
elb_name = args[0]
elb_conn = self.get_elb_conn(region)
elb = elb_conn.create_load_balancer(
name=elb_name,
zones=None,
complex_listeners=listener_list,
subnets=subnet_list,
security_groups=sg_id_list)
if elb:
print elb.dns_name
else:
print "ELB creation failed"
def __elb_delete_cmd(self, region, arg_list):
"""Delete an ELB
"""
elb_conn = self.get_elb_conn(region)
for elb_name in arg_list:
elb_conn.delete_load_balancer(elb_name)
def __elb_modify_add_cmd(self, region,
listener_spec_list, sg_id_list,
subnet_list, instance_id_list, args):
"""Modify an existing ELB
"""
if not args:
raise CommandError("No ELB specified")
elb_name = args[0]
elb_conn = self.get_elb_conn(region)
if listener_spec_list:
listener_list = self.__elb_parse_listeners(listener_spec_list)
if not listener_list:
raise CommandError("No listeners specified")
status = elb_conn.create_load_balancer_listeners(
name=elb_name,
complex_listeners=listener_list)
if sg_id_list:
elb_conn.apply_security_groups_to_lb(elb_name, sg_id_list)
if subnet_list:
elb_conn.attach_lb_to_subnets(elb_name, subnet_list)
if instance_id_list:
elb_conn.register_instances(elb_name, instance_id_list)
def __elb_modify_remove_cmd(self, region,
port_spec_list, sg_id_list,
subnet_list, instance_id_list, args):
"""Modify an existing ELB
"""
if not args:
raise CommandError("No ELB specified")
elb_name = args[0]
elb_conn = self.get_elb_conn(region)
if port_spec_list:
try:
port_list = [int(p) for p in port_spec_list]
except ValueError, ve:
raise CommandError(
"Bad port specification: %s" % (",".join(port_spec_list)))
status = elb_conn.delete_load_balancer_listeners(
name=elb_name,
ports=port_list)
if sg_id_list:
print "The ability to unapply security groups to an ELB" \
" is not available via boto"
if subnet_list:
elb_conn.detach_lb_from_subnets(elb_name, subnet_list)
if instance_id_list:
elb_conn.deregister_instances(elb_name, instance_id_list)
def __elb_instance_health(self, region, args):
"""Report instance health
"""
if not args:
raise CommandError("No ELB specified")
elb_name = args[0]
elb_conn = self.get_elb_conn(region)
instance_state_list = elb_conn.describe_instance_health(elb_name)
for instance_state in instance_state_list:
print "%-12s %-10s %-6s '%s'" % (
instance_state.instance_id,
instance_state.state,
instance_state.reason_code,
instance_state.description,)
self.cache_insert(region, [instance_state.instance_id])
@staticmethod
def __parse_healthcheck(healthcheck_spec):
"""Parse a healthcheck specification and return a HealthCheck object.
The spec looks like this:
name=value[,name=value]...
where name is:
i : interval
t : timeout
ht : healthy threshold
ut : unhealthy threshold
l : link
"""
spec_list = healthcheck_spec.split(',')
interval = None
timeout = None
healthy_threshold = None
unhealthy_threshold = None
link = None
for spec in spec_list:
if '=' not in spec:
raise CommandError(
"Bad healthspec: missing '=' in %s" % (spec,))
name, value = spec.split('=', 1)
try:
if name == 'i':
if value:
interval = int(value)
elif name == 't':
if value:
timeout = int(value)
elif name == 'ht':
if value:
healthy_threshold = int(value)
elif name == 'ut':
if value:
unhealthy_threshold = int(value)
elif name == 'l':
if value:
link = value
else:
raise CommandError("Bad healthspec: %s" % (spec,))
except ValueError:
raise CommandError(
"Expecting an integer value for %s" % (name,))
healthcheck = boto.ec2.elb.healthcheck.HealthCheck(
interval=interval,
target=link,
timeout=timeout,
healthy_threshold=healthy_threshold,
unhealthy_threshold=unhealthy_threshold)
return healthcheck
def __elb_config_healthcheck(self, region, healthcheck_spec, args):
"""Configure (add) an ELB healthcheck
"""
healthcheck = self.__parse_healthcheck(healthcheck_spec)
if not args:
raise CommandError("No ELB specified")
elb_name = args[0]
elb_conn = self.get_elb_conn(region)
cur_healthcheck = elb_conn.configure_health_check(elb_name, healthcheck)
print "Healthcheck: i=%s t=%s ht=%s ut=%s %s" % (
cur_healthcheck.interval,
cur_healthcheck.timeout,
cur_healthcheck.healthy_threshold,
cur_healthcheck.unhealthy_threshold,
optional(cur_healthcheck.target))
def __elb_policy_add(self, region, policy_name, listener_list, args):
"""Add a policy or associate a policy with a listener
"""
if not args:
raise CommandError("No ELB specified")
elb_name = args.pop(0)
elb_conn = self.get_elb_conn(region)
if listener_list:
lb_port = int(listener_list[0])
elb_conn.set_lb_policies_of_listener(elb_name, lb_port, policy_name)
else:
# Create a new policy
if not args:
raise CommandError("No policy type for %s" % (policy_name,))
policy_type = args.pop(0)
if policy_type == 'lb-cookie':
try:
cookie_expiration_period = int(args.pop(0))
except IndexError:
raise CommandError("Missing expiration period")
except ValueError:
raise CommandError("Expiration period must be a number")
elb_conn.create_lb_cookie_stickiness_policy(
cookie_expiration_period,
elb_name,
policy_name)
elif policy_type == 'app-cookie':
try:
cookie_name = args.pop(0)
except IndexError:
raise CommandError("Missing cookie name")
elb_conn.create_app_cookie_stickiness_policy(
cookie_name,
elb_name,
policy_name)
else:
raise CommandError("Unknown policy type: " + policy_type)
def __elb_policy_remove(self, region, policy_name, listener_list, args):
"""Remove a policy or disassociate a policy from a listener
"""
if not args:
raise CommandError("No ELB specified")
elb_name = args.pop(0)
elb_conn = self.get_elb_conn(region)
if listener_list:
lb_port = int(listener_list[0])
elb_conn.set_lb_policies_of_listener(elb_name, lb_port, [])
else:
elb_conn.delete_lb_policy(elb_name, policy_name)
def __elb_cmd(self, argv):
"""Implements the elb command
"""
all_elbs = False
cmd_create_elb = False
cmd_delete_elb = False
cmd_modify_add = False
cmd_modify_remove = False
cmd_query_instance_health = False
cmd_config_healthcheck = False
disp = DisplayOptions()
disp.display_policies = False
region = None
policy_name = None
healthcheck_spec = None
subnet_list = []
listener_list = []
sg_id_list = []
instance_id_list = []
opt_list, args = getopt.getopt(argv, "AaCDg:H:hi:L:lpP:Rr:s:x")
if opt_list:
for opt in opt_list:
if opt[0] == '-a':
all_elbs = True
elif opt[0] == '-A':
cmd_modify_add = True
elif opt[0] == '-C':
cmd_create_elb = True
elif opt[0] == '-D':
cmd_delete_elb = True
elif opt[0] == '-g':
sg_id_list.extend(opt[1].split(','))
elif opt[0] == '-H':
healthcheck_spec = opt[1]
cmd_config_healthcheck = True
elif opt[0] == '-h':
cmd_query_instance_health = True
elif opt[0] == '-L':
listener_list.append(opt[1])
| |
str(t[2])+ ' ' + str(t[3]) + ' ' + str(t[4])
elif len(t) == 4:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + t[2].code + ' ' + t[3].code
else:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + t[2].code
def p_arg_precision(t):
'''arg_precision : PARABRE NUMERO PARCIERRE
| ''' #EPSILON
if len(t) == 4:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3])
else:
t[0] = GenerarC3D()
t[0].code += ''
def p_arg_tipo(t):
'''arg_tipo : MONTH
| YEAR
| HOUR
| MINUTE
| SECOND
| ''' #EPSILON
if len(t) == 2:
t[0] = GenerarC3D()
t[0].code += str(t[1])
else:
t[0] = GenerarC3D()
t[0].code += ''
def p_definicion_valor_defecto(t):
'''definicion_valor_defecto : DEFAULT tipo_default
| ''' #EPSILON
if len(t) == 3:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + t[2].code
else:
t[0] = GenerarC3D()
t[0].code += ''
def p_ins_constraint(t):
'''ins_constraint : ins_constraint constraint restriccion_columna
| restriccion_columna
|''' #EPSILON
if len(t) == 4:
t[0] = GenerarC3D()
t[0].code += t[1].code + ' ' + t[2].code+ ' ' + t[3].code
elif len(t) == 2:
t[0] = GenerarC3D()
t[0].code += t[1].code
else:
t[0] = GenerarC3D()
t[0].code += ''
def p_constraint(t):
'''constraint : CONSTRAINT ID
| '''
if len(t) == 3:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2])
else:
t[0] = GenerarC3D()
t[0].code += ''
def p_restriccion_columna(t):
'''restriccion_columna : NOT NULL
| SET NOT NULL
| PRIMARY KEY
| UNIQUE
| NULL
| CHECK PARABRE exp PARCIERRE
'''
if len(t) == 5:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + t[3].code + ' ' + str(t[4])
elif len(t) == 4:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3])
elif len(t) == 3:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2])
elif len(t) == 2:
t[0] = GenerarC3D()
t[0].code += str(t[1])
else:
t[0] = GenerarC3D()
t[0].code += ''
def p_references(t):
'''ins_references : ON DELETE accion ins_references
| ON UPDATE accion ins_references
| '''
if len(t) == 5:
t[0] = GenerarC3D()
t[0].code += str(t[1])+ ' ' + str(t[2]) + ' ' + t[3].code + ' ' + t[4].code
else:
t[0] = GenerarC3D()
t[0].code += ''
def p_accion(t):
'''accion : CASCADE
| SET NULL
| SET DEFAULT
| NO ACTION'''
if len(t) == 2:
t[0] = GenerarC3D()
t[0].code += str(t[1])
else:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2])
def p_tipo_default(t):
'''tipo_default : NUMERO
| NUM_DECIMAL
| CADENASIMPLE
| CADENA
| TRUE
| FALSE
| FECHA
| FECHA_HORA
| NULL
| '''
    t[0] = GenerarC3D()
    if len(t) == 2:
        t[0].code += str(t[1])
    else:
        t[0].code += ''
def p_ins_replace(t):
'''ins_replace : OR REPLACE
| '''#EPSILON
if len(t) == 3:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2])
else:
t[0] = GenerarC3D()
t[0].code += ''
def p_if_exists(t):
'''if_exists : IF NOT EXISTS
| IF EXISTS
| ''' # EPSILON
if len(t) == 4:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3])
elif len(t) == 3:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2])
else:
t[0] = GenerarC3D()
t[0].code += ''
def p_create_opciones(t):
'''create_opciones : OWNER SIGNO_IGUAL user_name create_opciones
| MODE SIGNO_IGUAL NUMERO create_opciones
| '''
if len(t) == 5:
if t[1] == 'MODE':
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3]) + ' ' + t[4].code
else:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + t[3].code + ' ' + t[4].code
else:
t[0] = GenerarC3D()
t[0].code += ''
def p_user_name(t):
'''user_name : ID
| CADENA
| CADENASIMPLE'''
t[0] = GenerarC3D()
t[0].code += str(t[1])
def p_alter(t):
'''ins_alter : ALTER tipo_alter '''
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + t[2].code
def p_tipo_alter(t):
'''tipo_alter : DATABASE ID alter_database PUNTO_COMA
| TABLE ID alteracion_tabla PUNTO_COMA'''
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + t[3].code + ' ' + str(t[4])
def p_alteracion_tabla(t):
'''alteracion_tabla : alteracion_tabla COMA alterar_tabla
| alterar_tabla'''
if len(t) == 4:
t[0] = GenerarC3D()
t[0].code += t[1].code + ' ' + str(t[2]) + ' ' + t[3].code
else:
t[0] = GenerarC3D()
t[0].code += t[1].code
def p_alterar_tabla(t):
'''alterar_tabla : ADD COLUMN ID tipo_dato
| ADD CONSTRAINT ID ins_constraint_dos
| ADD ins_constraint_dos
| ALTER COLUMN ID TYPE tipo_dato
| ALTER COLUMN ID SET NOT NULL
| DROP COLUMN ID
| DROP CONSTRAINT ID'''
if len(t) == 3:
t[0] = GenerarC3D()
t[0].code += str(t[1])+ ' ' + t[2].code
elif len(t) == 5:
t[0] = GenerarC3D()
t[0].code += str(t[1])+ ' ' + str(t[2]) + ' ' + str(t[3]) + ' ' + t[4].code
elif len(t) == 4:
if t[1] == 'DROP':
t[0] = GenerarC3D()
t[0].code += str(t[1])+ ' ' + str(t[2]) + ' ' + str(t[3])
else:
t[0] = GenerarC3D()
t[0].code += str(t[1])+ ' ' + str(t[2]) + ' ' + t[3].code
elif len(t) == 7:
t[0] = GenerarC3D()
t[0].code += str(t[1])+ ' ' + str(t[2]) + ' ' + str(t[3]) + ' ' + str(t[4]) + ' ' + str(t[5]) + ' ' + str(t[6])
elif len(t) == 6:
t[0] = GenerarC3D()
t[0].code += str(t[1])+ ' ' + str(t[2]) + ' ' + str(t[3]) + ' ' + str(t[4]) + ' ' + t[5].code
else:
t[0] = GenerarC3D()
t[0].code += str(t[1])+ ' ' + str(t[2]) + ' ' + str(t[3]) + ' ' + t[4].code
def p_ins_constraint_dos(t):
'''ins_constraint_dos : UNIQUE PARABRE ID PARCIERRE
| FOREIGN KEY PARABRE ID PARCIERRE REFERENCES fkid PARABRE ID PARCIERRE
| CHECK PARABRE exp PARCIERRE
| PRIMARY KEY PARABRE ID PARCIERRE'''
if len(t) == 5:
if t[1] == 'UNIQUE':
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3]) + ' ' + str(t[4])
else:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + t[3].code + ' ' + str(t[4])
    elif len(t) == 6:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3]) + ' ' + str(t[4]) + ' ' + str(t[5])
else:
t[0] = GenerarC3D()
        t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3]) + ' ' + str(t[4]) + ' ' + str(t[5]) + ' ' + str(t[6]) + ' ' + t[7].code + ' ' + str(t[8]) + ' ' + str(t[9]) + ' ' + str(t[10])
def p_fkid(t):
'''fkid : ID
| '''
if len(t) == 2:
t[0] = GenerarC3D()
t[0].code += str(t[1])
else:
t[0] = GenerarC3D()
t[0].code += ''
def p_alter_database(t):
'''alter_database : RENAME TO ID
| OWNER TO ID'''
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3])
def p_drop(t):
'''ins_drop : DROP tipo_drop'''
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + t[2].code
def p_tipo_drop(t):
'''tipo_drop : DATABASE if_exists ID PUNTO_COMA
| TABLE ID PUNTO_COMA'''
if len(t) == 5:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + t[2].code + ' ' + str(t[3]) + ' ' + str(t[4])
else:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3])
def p_ins_insert(t):
'''ins_insert : INSERT INTO ID VALUES PARABRE list_vls PARCIERRE PUNTO_COMA
| INSERT INTO ID PARABRE list_id PARCIERRE VALUES PARABRE list_vls PARCIERRE PUNTO_COMA'''
if len(t) == 9:
t[0] = GenerarC3D()
t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3]) + ' ' + str(t[4]) + ' ' + str(t[5]) + ' ' + t[6].code + ' ' + str(t[7]) + ' ' + str(t[8])
else:
t[0] = GenerarC3D()
        t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3]) + ' ' + str(t[4]) + ' ' + t[5].code + ' ' + str(t[6]) + ' ' + str(t[7]) + ' ' + str(t[8]) + ' ' + t[9].code + ' ' + str(t[10]) + ' ' + str(t[11])
<reponame>anderson2981/pyrometheus
"""
.. autofunction:: gen_thermochem_code
.. autofunction:: get_thermochem_class
Internal Functionality
^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: equilibrium_constants_expr
.. autofunction:: rate_coefficient_expr
.. autofunction:: third_body_efficiencies_expr
.. autofunction:: troe_falloff_expr
.. autofunction:: falloff_function_expr
.. autofunction:: rate_of_progress_expr
.. autofunction:: production_rate_expr
"""
__copyright__ = """
Copyright (C) 2020 <NAME>
Copyright (C) 2020 <NAME>
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from numbers import Number
from functools import singledispatch
import pymbolic.primitives as p
from pymbolic.mapper.stringifier import StringifyMapper, PREC_NONE, PREC_CALL
import cantera as ct
import numpy as np # noqa: F401
from itertools import compress
from mako.template import Template
# {{{ code generation helpers
class CodeGenerationMapper(StringifyMapper):
def map_constant(self, expr, enclosing_prec):
return repr(expr)
def map_if(self, expr, enclosing_prec, *args, **kwargs):
return "self.usr_np.where(%s, %s, %s)" % (
self.rec(expr.condition, PREC_NONE, *args, **kwargs),
self.rec(expr.then, PREC_NONE, *args, **kwargs),
self.rec(expr.else_, PREC_NONE, *args, **kwargs),
)
def map_call(self, expr, enclosing_prec, *args, **kwargs):
return self.format(
"self.usr_np.%s(%s)",
self.rec(expr.function, PREC_CALL, *args, **kwargs),
self.join_rec(", ", expr.parameters, PREC_NONE, *args, **kwargs),
)
def str_np_inner(ary):
if isinstance(ary, Number):
return repr(ary)
elif ary.shape:
return "[%s]" % (", ".join(str_np_inner(ary_i) for ary_i in ary))
raise TypeError("invalid argument to str_np_inner")
def str_np(ary):
return "np.array(%s)" % str_np_inner(ary)
# }}}
# {{{ polynomial processing
def nasa7_conditional(t, poly, part_gen):
# FIXME: Should check minTemp, maxTemp
return p.If(
p.Comparison(t, ">", poly.coeffs[0]),
part_gen(poly.coeffs[1:8], t),
part_gen(poly.coeffs[8:15], t),
)
@singledispatch
def poly_to_expr(poly, arg_name):
raise TypeError(f"unexpected argument type in poly_to_expr: {type(poly)}")
@poly_to_expr.register
def _(poly: ct.NasaPoly2, arg_name):
def gen(c, t):
assert len(c) == 7
return c[0] + c[1] * t + c[2] * t ** 2 + c[3] * t ** 3 + c[4] * t ** 4
return nasa7_conditional(p.Variable(arg_name), poly, gen)
@singledispatch
def poly_to_enthalpy_expr(poly, arg_name):
raise TypeError("unexpected argument type in poly_to_enthalpy_expr: "
f"{type(poly)}")
@poly_to_enthalpy_expr.register
def _(poly: ct.NasaPoly2, arg_name):
def gen(c, t):
assert len(c) == 7
return (
c[0]
+ c[1] / 2 * t
+ c[2] / 3 * t ** 2
+ c[3] / 4 * t ** 3
+ c[4] / 5 * t ** 4
+ c[5] / t
)
return nasa7_conditional(p.Variable(arg_name), poly, gen)
@singledispatch
def poly_to_entropy_expr(poly, arg_name):
raise TypeError("unexpected argument type in poly_to_entropy_expr: "
f"{type(poly)}")
@poly_to_entropy_expr.register
def _(poly: ct.NasaPoly2, arg_name):
log = p.Variable("log")
def gen(c, t):
assert len(c) == 7
return (
c[0] * log(t)
+ c[1] * t
+ c[2] / 2 * t ** 2
+ c[3] / 3 * t ** 3
+ c[4] / 4 * t ** 4
+ c[6]
)
return nasa7_conditional(p.Variable(arg_name), poly, gen)
# }}}
# {{{ Data-handling helper
def _zeros_like(argument):
# FIXME: This mishandles NaNs.
return 0 * argument
# }}}
# {{{ Equilibrium constants
def equilibrium_constants_expr(sol: ct.Solution, react: ct.Reaction, gibbs_rt):
"""Generate code for equilibrium constants.
:returns: Equilibrium constant expression for reaction *react* in terms of
the species Gibbs functions *gibbs_rt* as a
:class:`pymbolic.primitives.Expression`
"""
indices_reac = [sol.species_index(sp) for sp in react.reactants]
indices_prod = [sol.species_index(sp) for sp in react.products]
# Stoichiometric coefficients
#nu_reac = [react.reactants[sp] for sp in react.reactants]
#nu_prod = [react.products[sp] for sp in react.products]
nu_reac = [sol.reactant_stoich_coeff(sol.species_index(sp), int(react.ID)-1)
for sp in react.reactants]
nu_prod = [sol.product_stoich_coeff(sol.species_index(sp), int(react.ID)-1)
for sp in react.products]
sum_r = sum(nu_reac_i * gibbs_rt[indices_reac_i]
for indices_reac_i, nu_reac_i in zip(indices_reac, nu_reac))
sum_p = sum(nu_prod_i * gibbs_rt[indices_prod_i]
for indices_prod_i, nu_prod_i in zip(indices_prod, nu_prod))
# Check if reaction is termolecular
sum_nu_net = sum(nu_prod) - sum(nu_reac)
if sum_nu_net < 0:
# Three species on reactants side
return sum_p - sum_nu_net*p.Variable("c0") - sum_r
elif sum_nu_net > 0:
# Three species on products side
return sum_p - (sum_r - sum_nu_net*p.Variable("c0"))
else:
return sum_p - sum_r
# }}}
# {{{ Rate coefficients
def rate_coefficient_expr(rate_coeff: ct.Arrhenius, t):
"""
:returns: The rate coefficient expression for *rate_coeff* in terms
of the temperature *t* as a :class:`pymbolic.primitives.Expression`
"""
# Rate parameters
a = rate_coeff.pre_exponential_factor
b = rate_coeff.temperature_exponent
t_a = rate_coeff.activation_energy/ct.gas_constant
if t_a == 0:
# Weakly temperature-dependent rate
return a * t**b
else:
# Modified Arrhenius
return p.Variable("exp")(np.log(a)+b*p.Variable("log")(t)-t_a/t)
def third_body_efficiencies_expr(sol: ct.Solution, react: ct.Reaction, c):
"""
:returns: The third-body concentration expression for reaction *react* in terms
of the species concentrations *c* as a
:class:`pymbolic.primitives.Expression`
"""
efficiencies = [react.efficiencies[sp] for sp in react.efficiencies]
indices_nondef = [sol.species_index(sp) for sp in react.efficiencies]
indices_default = [i for i in range(sol.n_species) if i not in indices_nondef]
sum_nondef = sum(eff_i * c[index_i] for eff_i, index_i
in zip(np.array(efficiencies), indices_nondef))
sum_default = react.default_efficiency * sum(c[i] for i in indices_default)
return sum_nondef + sum_default
def troe_falloff_expr(react: ct.Reaction, t):
"""
:returns: The Troe falloff center expression for reaction *react* in terms of the
temperature *t* as a :class:`pymbolic.primitives.Expression`
"""
troe_params = react.falloff.parameters
troe_1 = (1.0-troe_params[0])*p.Variable("exp")(-t/troe_params[1])
troe_2 = troe_params[0]*p.Variable("exp")(-t/troe_params[2])
if troe_params[3]:
troe_3 = p.Variable("exp")(-troe_params[3]/t)
return troe_1 + troe_2 + troe_3
else:
return troe_1 + troe_2
def falloff_function_expr(react: ct.Reaction, i, t, red_pressure, falloff_center):
"""
:returns: Falloff function expression for reaction *react* in terms
of the temperature *t*, reduced pressure *red_pressure*, and falloff center
*falloff_center* as a :class:`pymbolic.primitives.Expression`
"""
if react.falloff.falloff_type == "Troe":
log_rp = p.Variable("log10")(red_pressure[i])
c = -0.4-0.67*falloff_center[i]
n = 0.75-1.27*falloff_center[i]
f = (log_rp+c)/(n-0.14*(log_rp+c))
return 10**((falloff_center[i])/(1+f**2))
else:
return 1
# }}}
# {{{ Rates of progress
def rate_of_progress_expr(sol: ct.Solution, react: ct.Reaction, c, k_fwd, k_eq):
"""
:returns: Rate of progress expression for reaction *react* in terms of
        species concentrations *c* with rate coefficients *k_fwd* and equilibrium
constants *k_eq* as a :class:`pymbolic.primitives.Expression`
"""
indices_reac = [sol.species_index(sp) for sp in react.reactants]
indices_prod = [sol.species_index(sp) for sp in react.products]
if react.orders:
nu_reac = [react.orders[sp] for sp in react.orders]
else:
nu_reac = [react.reactants[sp] for sp in react.reactants]
r_fwd = np.prod([c[index]**nu for index, nu in zip(indices_reac, nu_reac)])
if react.reversible:
nu_prod = [react.products[sp] for sp in react.products]
r_rev = np.prod([c[index]**nu for index, nu in zip(indices_prod, nu_prod)])
# FIXME: It's not clear that this is available other than by this clunky,
# string-parsing route
reaction_index = int(react.ID)-1
return k_fwd[reaction_index] * (r_fwd - k_eq[reaction_index] * r_rev)
else:
return k_fwd[int(react.ID)-1] * r_fwd
# }}}
# {{{ Species production rates
def production_rate_expr(sol: ct.Solution, species, r_net):
"""
:returns: Species production rate for species *species* in terms of
the net reaction rates of progress *r_net* as a
:class:`pymbolic.primitives.Expression`
"""
ones = _zeros_like(r_net[0]) + 1.0
indices_fwd = [int(react.ID)-1 for react in sol.reactions()
if species in react.reactants]
indices_rev = [int(react.ID)-1 for react in sol.reactions()
if species in react.products]
nu_fwd = [sol.reactant_stoich_coeff(sol.species_index(species), react_index)
for react_index in indices_fwd]
nu_rev = [sol.product_stoich_coeff(sol.species_index(species), prod_index)
for prod_index in indices_rev]
sum_fwd = sum(nu*r_net[index] for nu, index in zip(nu_fwd, indices_fwd))
sum_rev = sum(nu*r_net[index] for nu, index in zip(nu_rev, indices_rev))
return (sum_rev - sum_fwd) * ones
# }}}
# {{{ main code template
code_tpl = Template(
"""\"""
.. autoclass:: Thermochemistry
\"""
import numpy as np
class Thermochemistry:
\"""
.. attribute:: model_name
.. attribute:: num_elements
.. attribute:: num_species
.. attribute:: num_reactions
.. attribute:: num_falloff
.. attribute:: one_atm
Returns 1 atm in SI units of pressure (Pa).
.. attribute:: gas_constant
.. attribute:: species_names
.. attribute:: species_indices
.. automethod:: get_specific_gas_constant
.. automethod:: get_density
.. automethod:: get_pressure
.. automethod:: get_mix_molecular_weight
.. automethod:: get_concentrations
.. automethod:: get_mixture_specific_heat_cp_mass
.. automethod:: get_mixture_specific_heat_cv_mass
.. automethod:: get_mixture_enthalpy_mass
.. automethod:: get_mixture_internal_energy_mass
.. automethod:: get_species_specific_heats_r
.. automethod:: get_species_enthalpies_rt
.. automethod:: get_species_entropies_r
.. automethod:: get_species_gibbs_rt
.. automethod:: get_equilibrium_constants
.. automethod:: get_temperature
.. automethod:: __init__
\"""
def __init__(self, usr_np=np):
\"""Initialize thermochemistry object for a mechanism.
Parameters
----------
usr_np
:mod:`numpy`-like namespace providing at least the following functions,
for any array ``X`` of the bulk array type:
- ``usr_np.log(X)`` (like :data:`numpy.log`)
- ``usr_np.log10(X)`` (like :data:`numpy.log10`)
- ``usr_np.exp(X)`` (like :data:`numpy.exp`)
- ``usr_np.where(X > 0, X_yes, X_no)`` (like :func:`numpy.where`)
- ``usr_np.linalg.norm(X, np.inf)`` (like :func:`numpy.linalg.norm`)
where the "bulk array type" is a type that offers arithmetic analogous
to :class:`numpy.ndarray` and is used to hold all types | |
from collections import deque
from dataclasses import dataclass, field
from typing import Any, List, Union, Dict, Sequence, NamedTuple, Optional
from mdiff.block_extractor import OpCodeDeleteThenInsertBlockExtractor, ConsecutiveVectorBlockExtractor, \
NonIntegersBlockExtractor
from mdiff.utils import OpCode, longest_increasing_subsequence, get_idx_or_default, OpCodeExtractable
@dataclass
class HeckelSymbolTableEntry:
"""
Heckel's diff algorithm symbol table entry.
"""
value: Any
oc: int = 0
nc: int = 0
olno: int = 0
HeckelSymbolTableEntryType = Union[int, HeckelSymbolTableEntry]
class HeckelSequenceMatcherException(Exception):
pass
class OpBlock(NamedTuple):
"""
Stores information about detected subsequence operation block in diff algorithm.
i: start position of subsequence in OA table.
n: first value of subsequence in OA table (used to detect blocks offset).
w: length of subsequence (weight).
"""
i: int
n: HeckelSymbolTableEntryType
w: int
def _map_replace_opcodes(opcodes: Sequence[OpCode]):
"""
    This method takes a sequence of OpCodes as input and merges each consecutive
    "delete"-then-"insert" pair of blocks into a single "replace" operation.
"""
replace_blocks = list(OpCodeDeleteThenInsertBlockExtractor(opcodes).extract_blocks())
replace_block_idx = 0
replace_result = []
i = 0
while i < len(opcodes):
# check if replace block
if replace_block_idx < len(replace_blocks) and replace_blocks[replace_block_idx][0] == i:
rep_block = replace_blocks[replace_block_idx]
delete = opcodes[rep_block[0]]
insert = opcodes[rep_block[0] + 1]
replace = OpCode('replace', delete.i1, delete.i2, insert.j1, insert.j2)
replace_result.append(replace)
replace_block_idx += 1
i += 2
else:
replace_result.append(opcodes[i])
i += 1
return replace_result
class HeckelAlgorithm:
def __init__(self, a: Sequence[Any] = '', b: Sequence[Any] = ''):
self.a = a
self.b = b
self.st: Dict[Any, HeckelSymbolTableEntryType] = {}
self.na: List[HeckelSymbolTableEntryType] = []
self.oa: List[HeckelSymbolTableEntryType] = []
def run(self):
"""
        Implementation of Heckel's algorithm described in "A Technique for Isolating Differences Between Files".
"""
# symbol table, NA array, OA array
st: Dict[Any, HeckelSymbolTableEntryType] = dict()
na: List[HeckelSymbolTableEntryType] = list()
oa: List[HeckelSymbolTableEntryType] = list()
# pass 1
for idx, i in enumerate(self.a):
ste = st.setdefault(i, HeckelSymbolTableEntry(i))
ste.nc += 1
na.append(ste)
# pass 2
for idx, i in enumerate(self.b):
ste = st.setdefault(i, HeckelSymbolTableEntry(i))
ste.oc += 1
oa.append(ste)
ste.olno = idx
# pass3
for i in range(len(na)):
if na[i].nc == na[i].oc == 1:
olno = na[i].olno
na[i] = olno
oa[olno] = i
# pass4
for i in range(len(na)):
try:
if isinstance(na[i], int):
j = na[i]
if isinstance(na[i + 1], HeckelSymbolTableEntry) and na[i + 1] == oa[j + 1]:
oa[j + 1] = i + 1
na[i + 1] = j + 1
except IndexError:
pass
# pass5
for i in reversed(range(1, len(na))):
try:
if isinstance(na[i], int):
j = na[i]
if isinstance(na[i - 1], HeckelSymbolTableEntry) and na[i - 1] == oa[j - 1] and i >= 1 and j >= 1:
oa[j - 1] = i - 1
na[i - 1] = j - 1
except IndexError:
pass
self.st = st
self.na = na
self.oa = oa
class HeckelOpCodeExtractor(OpCodeExtractable):
"""
This class extracts OpCodes based on data calculated by Heckel's algorithm class.
"""
def __init__(self, alg: HeckelAlgorithm, replace_mode: bool = True):
self.alg = alg
self.replace_mode = replace_mode
def _generate_move_and_equal_opcodes(self) -> Sequence[OpCode]:
"""
Generates sequence of OpCode tuples where tags are in: "equal", "move", "moved".
"""
# New order of sequence elements that exists in both sequences is stored in NA table after algorithm run.
# Integer type entries in NA indicates that element of sequence might be either equal or moved.
# Extract from NA tables only move/equal entries.
# Index is added because NA integer entries will be converted to consecutive entries blocks.
# Block will have form of tuple = (block_start_index, block_start_value, block_length) corresponding to NA table
# NA table can consist of SymbolTableEntry type rows which breaks the block.
# Adding enumerate index allow to detect block break caused by SymbolTableEntry type record.
na_indexed_moves = [(idx, i) for idx, i in enumerate(self.alg.na) if isinstance(i, int)]
# Longest increasing sequence finds "equal" entries.
# Indexed NA in form of tuples are used in order to use index to build proper MoveBlocks later.
lis = longest_increasing_subsequence(na_indexed_moves, key=lambda x: x[1])
lis_idx, lis_v = zip(*lis) if lis else ([], [])
# Finding consecutive vector blocks and mapping them to NA indexes and starting values.
cons_all_blocks = list(ConsecutiveVectorBlockExtractor(na_indexed_moves).extract_blocks())
all_blocks = [OpBlock(i=na_indexed_moves[i][0], n=na_indexed_moves[i][1], w=w) for i, w in cons_all_blocks]
# Finding consecutive vector blocks in LIS and mapping them to NA indexes and starting values.
cons_eq_blocks = list(ConsecutiveVectorBlockExtractor(lis_v).extract_blocks())
eq_blocks = [OpBlock(i=lis_v[i][0], n=lis_v[i][1], w=w) for i, w in cons_eq_blocks]
# The difference of all NA blocks and "equal" blocks found by LIS, gives list of optimal move operation blocks.
move_blocks = set(all_blocks) - set(eq_blocks)
# Yield OpCodes
for b in eq_blocks:
yield OpCode('equal', b.i, b.i + b.w, b.n, b.n + b.w)
for b in move_blocks:
yield OpCode('move', b.i, b.i + b.w, b.n, b.n)
yield OpCode('moved', b.i, b.i, b.n, b.n + b.w)
def _generate_insert_opcodes(self):
"""
Generates sequence of OpCode tuples with tag "insert".
"""
block_extractor = NonIntegersBlockExtractor(self.alg.oa)
insert_blocks = [OpBlock(i, self.alg.oa[i], w) for i, w in block_extractor.extract_blocks()]
for b in insert_blocks:
yield OpCode('insert', b.n.olno, b.n.olno, b.i, b.i + b.w)
def _generate_delete_opcodes(self):
"""
Generates sequence of OpCode tuples with tag "delete".
"""
block_extractor = NonIntegersBlockExtractor(self.alg.na)
delete_blocks = [OpBlock(i, self.alg.na[i], w) for i, w in block_extractor.extract_blocks()]
for b in delete_blocks:
yield OpCode('delete', b.i, b.i + b.w, b.n.olno, b.n.olno)
def get_opcodes(self) -> List[OpCode]:
# Prepare opcodes
insert_opcodes = list(self._generate_insert_opcodes())
delete_opcodes = list(self._generate_delete_opcodes())
move_opcodes = []
moved_opcodes = []
equal_opcodes = []
map_dict = {
'move': move_opcodes,
'moved': moved_opcodes,
'equal': equal_opcodes
}
for opcode in self._generate_move_and_equal_opcodes():
map_dict[opcode.tag].append(opcode)
# sort opcodes (insert, delete and equal opcodes are already sorted)
moved_opcodes.sort(key=lambda x: x.j1)
move_opcodes.sort(key=lambda x: x.i1)
# Fetch opcodes in correct order
result = []
ipos = 0
jpos = 0
while any([insert_opcodes, delete_opcodes, move_opcodes, moved_opcodes, equal_opcodes]):
if len(delete_opcodes) > 0 and delete_opcodes[0].i1 == ipos:
opcode = delete_opcodes.pop(0)
# j1 and j2 attributes are meaningless for delete operation. However replacing them with jpos
# keep j-indexes in sync with j-indexes in other returned tags, like in builtin difflib library.
result.append(OpCode(opcode.tag, opcode.i1, opcode.i2, jpos, jpos))
ipos = opcode.i2
continue
if len(move_opcodes) > 0 and move_opcodes[0].i1 == ipos:
opcode = move_opcodes.pop(0)
result.append(opcode)
ipos = opcode.i2
continue
if len(equal_opcodes) > 0 and equal_opcodes[0].i1 == ipos and equal_opcodes[0].j1 == jpos:
opcode = equal_opcodes.pop(0)
result.append(opcode)
ipos = opcode.i2
jpos = opcode.j2
continue
if len(insert_opcodes) > 0 and insert_opcodes[0].j1 == jpos:
opcode = insert_opcodes.pop(0)
# i1 and i2 attributes are meaningless for insert operation. However replacing them with ipos
# keep i-indexes in sync with i-indexes in other returned tags, like in builtin difflib library.
result.append(OpCode(opcode.tag, ipos, ipos, opcode.j1, opcode.j2))
jpos = opcode.j2
continue
if len(moved_opcodes) > 0 and moved_opcodes[0].j1 == jpos:
opcode = moved_opcodes.pop(0)
result.append(opcode)
jpos = opcode.j2
continue
raise HeckelSequenceMatcherException('Invalid indexes in generated OpCodes. Something went wrong.')
if self.replace_mode:
result = _map_replace_opcodes(result)
return result
#TODO: Correct implementation (it's buggy now)
class FastHeckelOpCodeExtractor(OpCodeExtractable):
"""
This class extracts OpCodes based on data calculated by Heckel's algorithm class.
"""
def __init__(self, alg: HeckelAlgorithm, replace_mode: bool = True):
self.alg = alg
self.replace_mode = replace_mode
self.eq_lis: Optional[deque] = None
def _na(self, i):
return get_idx_or_default(self.alg.na, i, None)
def _oa(self, i):
return get_idx_or_default(self.alg.oa, i, None)
def get_opcodes(self) -> List[OpCode]:
"""Extracts opcodes from Heckel's algorithm data."""
opcodes = []
i = j = 0 # na and oa indexes
na_indexed_moves = [(idx, i) for idx, i in enumerate(self.alg.na) if isinstance(i, int)]
# Longest increasing sequence finds "equal" entries.
# Indexes not present in LIS result determine least move blocks needed to convert a to b
lis = longest_increasing_subsequence(na_indexed_moves, key=lambda x: x[1])
eq_na_idxs = {i[1][0] for i in lis}
eq_oa_idxs = {i[1][1] for i in lis}
while i < len(self.alg.na) or j < len(self.alg.oa):
prev_i = i
prev_j = j
# Detect move block
prev_move_val = None
move_j_index = self._na(i)
while (i < len(self.alg.na) and i not in eq_na_idxs) and (
                prev_move_val is None or (isinstance(prev_move_val, int) and self._na(i) == prev_move_val + 1)):
prev_move_val = self._na(i)
i += 1
if i > prev_i:
opcodes.append(OpCode('move', prev_i, i, move_j_index, move_j_index))
continue
# Detect moved block
prev_moved_val = None
moved_i_index = self._oa(j)
while (j < len(self.alg.oa) and j not in eq_oa_idxs) and (
                prev_moved_val is None or (isinstance(prev_moved_val, int) and self._oa(j) == prev_moved_val +
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import os
import time
currentUrl = os.path.dirname(__file__)
parentUrl = os.path.abspath(os.path.join(currentUrl, os.pardir))
sys.path.append(parentUrl)
import argparse
import numpy as np
import torch
import torch.utils.data
from torch.utils.data import DataLoader
from torchvision.transforms import functional as F
import matplotlib.pyplot as plt
import cv2
from glob import glob
import os.path as osp
from utils.preprocessing import load_img, load_skeleton, get_bbox, process_bbox, augmentation, transform_input_to_output_space, trans_point2d
from utils.transforms import world2cam, cam2pixel, pixel2cam
from utils.vis import vis_keypoints, vis_3d_keypoints, plot_hand
from utils.standard_legends import idx_InterHand
from PIL import Image, ImageDraw
import random
import json
import math
from pycocotools.coco import COCO
import scipy.io as sio
class InterHandDataset(torch.utils.data.Dataset):
def __init__(self, cfg, transforms, mode):
self.cfg = cfg
self.name = 'InterHand'
self.mode = mode # train, test, val
self.img_path = osp.join(cfg.DATASET.DATA_DIR, 'images') # '../data/InterHand2.6M/images'
self.annot_path = osp.join(cfg.DATASET.DATA_DIR, 'annotations') # '../data/InterHand2.6M/annotations'
# if self.mode == 'val':
# self.rootnet_output_path = '../data/InterHand2.6M/rootnet_output/rootnet_interhand2.6m_output_val.json'
# else:
# self.rootnet_output_path = '../data/InterHand2.6M/rootnet_output/rootnet_interhand2.6m_output_test.json'
self.transform = transforms
self.joint_num = cfg.DATASET.NUM_JOINTS # 21 # single hand
self.root_joint_idx = {'right': 0, 'left': 21} # Please modify this idx after changing the order of joints
self.joint_type = {'right': np.arange(0,self.joint_num), 'left': np.arange(self.joint_num,self.joint_num*2)}
self.skeleton = load_skeleton(osp.join(self.annot_path, 'skeleton.txt'), self.joint_num*2)
self.datalist = []
self.datalist_sh = []
self.datalist_ih = []
self.sequence_names = []
# load annotation
print("Load annotation from " + osp.join(self.annot_path, self.mode))
t1 = time.time()
prefix = 'simple_'
db = COCO(osp.join(self.annot_path, self.mode, prefix+'InterHand2.6M_' + self.mode + '_data.json'))
with open(osp.join(self.annot_path, self.mode, 'InterHand2.6M_' + self.mode + '_camera.json')) as f:
cameras = json.load(f)
with open(osp.join(self.annot_path, self.mode, 'InterHand2.6M_' + self.mode + '_joint_3d.json')) as f:
joints = json.load(f)
print("Annotation loading spent {}s".format(time.time()-t1))
# if (self.mode == 'val' or self.mode == 'test') and cfg.trans_test == 'rootnet':
# print("Get bbox and root depth from " + self.rootnet_output_path)
# rootnet_result = {}
# with open(self.rootnet_output_path) as f:
# annot = json.load(f)
# for i in range(len(annot)):
# rootnet_result[str(annot[i]['annot_id'])] = annot[i]
# else:
# print("Get bbox and root depth from groundtruth annotation")
for aid in db.anns.keys():
ann = db.anns[aid]
image_id = ann['image_id']
img = db.loadImgs(image_id)[0]
capture_id = img['capture']
seq_name = img['seq_name']
cam = img['camera']
frame_idx = img['frame_idx']
img_path = osp.join(self.img_path, self.mode, img['file_name'])
campos, camrot = np.array(cameras[str(capture_id)]['campos'][str(cam)], dtype=np.float32), np.array(cameras[str(capture_id)]['camrot'][str(cam)], dtype=np.float32)
focal, princpt = np.array(cameras[str(capture_id)]['focal'][str(cam)], dtype=np.float32), np.array(cameras[str(capture_id)]['princpt'][str(cam)], dtype=np.float32)
# get the groundtruth pose and reorder it
joint_world = np.array(joints[str(capture_id)][str(frame_idx)]['world_coord'], dtype=np.float32)[idx_InterHand] # 42 x 3
joint_cam = world2cam(joint_world.transpose(1,0), camrot, campos.reshape(3,1)).transpose(1,0)
joint_img = cam2pixel(joint_cam, focal, princpt) # 42 x 2 [u,v]
# 1 if a joint is annotated and inside of image. 0 otherwise
joint_valid = np.array(ann['joint_valid'],dtype=np.float32).reshape(self.joint_num*2)
# if root is not valid -> root-relative 3D pose is also not valid. Therefore, mark all joints as invalid
joint_valid[self.joint_type['right']] *= joint_valid[self.root_joint_idx['right']]
joint_valid[self.joint_type['left']] *= joint_valid[self.root_joint_idx['left']]
hand_type = ann['hand_type']
# 1 if hand_type in ('right', 'left') or hand_type == 'interacting' and np.sum(joint_valid) > 30, 0 otherwise
hand_type_valid = np.array((ann['hand_type_valid']), dtype=np.float32)
# if (self.mode == 'val' or self.mode == 'test') and cfg.trans_test == 'rootnet':
# bbox = np.array(rootnet_result[str(aid)]['bbox'],dtype=np.float32)
# abs_depth = {'right': rootnet_result[str(aid)]['abs_depth'][0], 'left': rootnet_result[str(aid)]['abs_depth'][1]}
# else:
img_width, img_height = img['width'], img['height'] # original image size 344(w) x 512(h)
bbox = np.array(ann['bbox'],dtype=np.float32) # x,y,w,h
bbox = process_bbox(bbox, (img_height, img_width))
abs_depth = {'right': joint_cam[self.root_joint_idx['right'],2], 'left': joint_cam[self.root_joint_idx['left'],2]}
cam_param = {'focal': focal, 'princpt': princpt}
joint = {'cam_coord': joint_cam, 'img_coord': joint_img, 'valid': joint_valid}
data = {'img_path': img_path, 'seq_name': seq_name, 'cam_param': cam_param, 'bbox': bbox, 'joint': joint, 'hand_type': hand_type, 'hand_type_valid': hand_type_valid, 'abs_depth': abs_depth, 'file_name': img['file_name'], 'capture': capture_id, 'cam': cam, 'frame': frame_idx}
if hand_type == 'right' or hand_type == 'left':
self.datalist_sh.append(data)
else:
self.datalist_ih.append(data)
if seq_name not in self.sequence_names:
self.sequence_names.append(seq_name)
self.datalist = self.datalist_sh + self.datalist_ih
print('Number of annotations in single hand sequences: ' + str(len(self.datalist_sh)))
print('Number of annotations in interacting hand sequences: ' + str(len(self.datalist_ih)))
def handtype_str2array(self, hand_type):
if hand_type == 'right':
return np.array([1,0], dtype=np.float32)
elif hand_type == 'left':
return np.array([0,1], dtype=np.float32)
elif hand_type == 'interacting':
return np.array([1,1], dtype=np.float32)
else:
            assert 0, 'Not supported hand type: ' + hand_type
def __len__(self):
return len(self.datalist)
def __getitem__(self, idx):
data = self.datalist[idx]
img_path, bbox, joint, hand_type, hand_type_valid = data['img_path'], data['bbox'], data['joint'], data['hand_type'], data['hand_type_valid']
joint_cam = joint['cam_coord'].copy()
joint_img = joint['img_coord'].copy()
        joint_valid = joint['valid'].copy() # 1 if inside the image, 0 otherwise. # 42
hand_type_vec = self.handtype_str2array(hand_type)
joint_coord = np.concatenate((joint_img, joint_cam[:,2,None]),1) # 42 x 3 [u,v,z]
# input(joint_valid)
# input(joint_coord)
# image load
try:
img = cv2.cvtColor(cv2.imread(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION), cv2.COLOR_BGR2RGB) # 512 x 334 x 3
        except Exception:
print('[Warning] Invalid image path:', img_path)
# DEBUG
# f = plt.figure()
# ax1 = f.add_subplot(1,1,1)
# ax1.imshow(img)
# for k in range(joint_coord.shape[0]):
# print('[{:.4f}, {:.4f}, {:.4f}],'.format(*joint_coord[k]))
# print(hand_type_vec)
# if hand_type_vec[0] == 1:
# plot_hand(ax1, joint_coord[0:21,0:2], vis=joint_valid[0:21], order = 'uv')
# elif hand_type_vec[1] == 1:
# plot_hand(ax1, joint_coord[21:42,0:2], vis=joint_valid[21:42], order = 'uv')
# ax1.set_title(hand_type)
# plt.show()
# augmentation
img, joint_coord, joint_valid, hand_type_vec, inv_trans = augmentation(img, bbox, joint_coord, joint_valid, hand_type_vec, self.mode, self.joint_type, self.cfg.MODEL.INPUT_SIZE)
# f1 = plt.figure()
# ax1 = f1.add_subplot(1,1,1)
# ax1.imshow(img.astype(int))
# for k in range(joint_coord.shape[0]):
# print('[{:.4f}, {:.4f}, {:.4f}],'.format(*joint_coord[k]))
# print(joint_coord)
# if hand_type_vec[0] == 1:
# plot_hand(ax1, joint_coord[0:21,0:2], vis=joint_valid[0:21], order = 'uv')
# elif hand_type_vec[1] == 1:
# plot_hand(ax1, joint_coord[21:42,0:2], vis=joint_valid[21:42], order = 'uv')
# ax1.set_title(hand_type)
# plt.show()
#rel_root_depth = np.array([joint_coord[self.root_joint_idx['left'],2] - joint_coord[self.root_joint_idx['right'],2]],dtype=np.float32).reshape(1)
#root_valid = np.array([joint_valid[self.root_joint_idx['right']] * joint_valid[self.root_joint_idx['left']]],dtype=np.float32).reshape(1) if hand_type_vec[0]*hand_type_vec[1] == 1 else np.zeros((1),dtype=np.float32)
# transform to output heatmap space (this line of code is useless for anchor-based estimation)
#joint_coord, joint_valid, rel_root_depth, root_valid = transform_input_to_output_space(self.cfg, joint_coord, joint_valid, rel_root_depth, root_valid, self.root_joint_idx, self.joint_type)
img = self.transform(img.astype(np.float32) / 255.)
# inputs = {'img': img}
# targets = {'pose2d_gt': joint_coord, 'rel_root_depth': rel_root_depth, 'hand_type': hand_type_vec}
# meta_info = {'joint_valid': joint_valid, 'root_valid': root_valid, 'hand_type_valid': hand_type_valid, 'inv_trans': inv_trans, 'capture': int(data['capture']), 'cam': int(data['cam']), 'frame': int(data['frame'])}
return {'imgs': img, 'pose2d_gt': joint_coord, 'joint_valid': joint_valid, 'hand_type': hand_type_vec}
#return inputs, targets, meta_info
def evaluate(self, preds):
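        # Computes, per the accumulators below: root-aligned MPJPE for
        # single-hand and interacting-hand samples, MRRPE between the two hand
        # roots for interacting samples, and handedness classification accuracy.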
print()
print('Evaluation start...')
gts = self.datalist
preds_joint_coord, preds_rel_root_depth, preds_hand_type, inv_trans = preds['joint_coord'], preds['rel_root_depth'], preds['hand_type'], preds['inv_trans']
assert len(gts) == len(preds_joint_coord)
sample_num = len(gts)
mpjpe_sh = [[] for _ in range(self.joint_num*2)]
mpjpe_ih = [[] for _ in range(self.joint_num*2)]
mrrpe = []
acc_hand_cls = 0
hand_cls_cnt = 0
for n in range(sample_num):
data = gts[n]
bbox, cam_param, joint, gt_hand_type, hand_type_valid = data['bbox'], data['cam_param'], data['joint'], data['hand_type'], data['hand_type_valid']
focal = cam_param['focal']
princpt = cam_param['princpt']
gt_joint_coord = joint['cam_coord']
joint_valid = joint['valid']
# restore xy coordinates to original image space
pred_joint_coord_img = preds_joint_coord[n].copy()
pred_joint_coord_img[:,0] = pred_joint_coord_img[:,0]/cfg.output_hm_shape[2]*cfg.input_img_shape[1]
pred_joint_coord_img[:,1] = pred_joint_coord_img[:,1]/cfg.output_hm_shape[1]*cfg.input_img_shape[0]
for j in range(self.joint_num*2):
pred_joint_coord_img[j,:2] = trans_point2d(pred_joint_coord_img[j,:2],inv_trans[n])
# restore depth to original camera space
pred_joint_coord_img[:,2] = (pred_joint_coord_img[:,2]/cfg.output_hm_shape[0] * 2 - 1) * (cfg.bbox_3d_size/2)
# mrrpe
if gt_hand_type == 'interacting' and joint_valid[self.root_joint_idx['left']] and joint_valid[self.root_joint_idx['right']]:
pred_rel_root_depth = (preds_rel_root_depth[n]/cfg.output_root_hm_shape * 2 - 1) * (cfg.bbox_3d_size_root/2)
pred_left_root_img = pred_joint_coord_img[self.root_joint_idx['left']].copy()
pred_left_root_img[2] += data['abs_depth']['right'] + pred_rel_root_depth
pred_left_root_cam = pixel2cam(pred_left_root_img[None,:], focal, princpt)[0]
pred_right_root_img = pred_joint_coord_img[self.root_joint_idx['right']].copy()
pred_right_root_img[2] += data['abs_depth']['right']
pred_right_root_cam = pixel2cam(pred_right_root_img[None,:], focal, princpt)[0]
pred_rel_root = pred_left_root_cam - pred_right_root_cam
gt_rel_root = gt_joint_coord[self.root_joint_idx['left']] - gt_joint_coord[self.root_joint_idx['right']]
mrrpe.append(float(np.sqrt(np.sum((pred_rel_root - gt_rel_root)**2))))
# add root joint depth
pred_joint_coord_img[self.joint_type['right'],2] += data['abs_depth']['right']
pred_joint_coord_img[self.joint_type['left'],2] += data['abs_depth']['left']
# back project to camera coordinate system
pred_joint_coord_cam = pixel2cam(pred_joint_coord_img, focal, princpt)
# root joint alignment
for h in ('right', 'left'):
pred_joint_coord_cam[self.joint_type[h]] = pred_joint_coord_cam[self.joint_type[h]] - pred_joint_coord_cam[self.root_joint_idx[h],None,:]
gt_joint_coord[self.joint_type[h]] = gt_joint_coord[self.joint_type[h]] - gt_joint_coord[self.root_joint_idx[h],None,:]
# mpjpe
for j in range(self.joint_num*2):
if joint_valid[j]:
if gt_hand_type == 'right' or gt_hand_type == 'left':
mpjpe_sh[j].append(np.sqrt(np.sum((pred_joint_coord_cam[j] - gt_joint_coord[j])**2)))
else:
mpjpe_ih[j].append(np.sqrt(np.sum((pred_joint_coord_cam[j] - gt_joint_coord[j])**2)))
            # handedness accuracy
if hand_type_valid:
if gt_hand_type == 'right' and preds_hand_type[n][0] > 0.5 and preds_hand_type[n][1] < 0.5:
acc_hand_cls += 1
elif gt_hand_type == 'left' and preds_hand_type[n][0] < 0.5 and preds_hand_type[n][1] > 0.5:
acc_hand_cls += 1
elif gt_hand_type == 'interacting' and preds_hand_type[n][0] > 0.5 and preds_hand_type[n][1] > 0.5:
acc_hand_cls += 1
hand_cls_cnt += 1
vis = False
if vis:
img_path = data['img_path']
cvimg = cv2.imread(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
_img = cvimg[:,:,::-1].transpose(2,0,1)
vis_kps = pred_joint_coord_img.copy()
vis_valid = joint_valid.copy()
capture = str(data['capture'])
cam = str(data['cam'])
frame = str(data['frame'])
filename = 'out_' + str(n) + '_' + gt_hand_type + '.jpg'
vis_keypoints(_img, vis_kps, vis_valid, self.skeleton, filename)
vis = False
if vis:
filename = 'out_' + str(n) + '_3d.jpg'
                vis_3d_keypoints(pred_joint_coord_cam, joint_valid, self.skeleton, filename)
# '0x' and 'L' appendages brought on by the hex() function. pads to 8 characters
hexEntry.colorSwatch = ColorSwatch( guiFrame, fieldValueHexString, hexEntry )
hexEntry.colorSwatch.grid( column=2, row=i, pady=verticalPadding )
else:
# Add an editable field for this field's actual decoded value (and attach the hex edit widget for later auto-updating)
valueEntry = HexEditEntry( guiFrame, absoluteFieldOffset, fieldByteLength, fieldFormatting, propertyName )
valueEntry.insert( 0, fieldValue )
valueEntry.hexEntryWidget = hexEntry
hexEntry.valueEntryWidget = valueEntry
# Bind an event handler (pressing 'Enter' to save)
valueEntry.bind( '<Return>', updateEntryValue )
valueEntry.grid( column=2, row=i, pady=verticalPadding )
relativeFieldOffset += fieldByteLength
absoluteFieldOffset += fieldByteLength
# toc = time.clock()
# print 'time to draw known struct properties:', toc - tic
def showUnknownStructProperties( structure, guiFrame ):
fieldOffset = 0
tableRow = 0
for i in range( len(structure.data) / 4 ):
# Check if this is a pointer, and construct the field name for this property
absoluteFieldOffset = structure.offset + fieldOffset
if absoluteFieldOffset in globalDatFile.pointerOffsets:
hexString = uHex( fieldOffset )
numberOfSpaces = 5 - len( hexString )
fieldName = hexString + numberOfSpaces * ' ' + ' (Pointer):'
else:
fieldName = uHex( fieldOffset ) + ':'
# Add the property/field name, and a tooltip for its file offset
fieldLabel = ttk.Label( guiFrame, text=fieldName )
fieldLabel.grid( column=0, row=tableRow, padx=(0, 7), pady=0, sticky='w' )
ToolTip( fieldLabel, text='Offset in file: 0x{:X}'.format(0x20+structure.offset+fieldOffset), delay=300 )
# Add an editable field for the raw hex data
hexEntry = HexEditEntry( guiFrame, absoluteFieldOffset, 4, None, structure.name )
hexEntry.insert( 0, hexlify(structure.data[fieldOffset:fieldOffset+4]).upper() )
hexEntry.bind( '<Return>', updateEntryHex )
hexEntry.grid( column=1, row=tableRow, pady=0 )
fieldOffset += 4
tableRow += 1
if absoluteFieldOffset in globalDatFile.pointerOffsets: # It's a pointer
fieldValue = structure.getValues()[i]
PointerLink( guiFrame, fieldValue ).grid( column=2, row=i, pady=0, padx=7 )
def showFrameDataStringParsing( frameObjString, structTable, infoPaneInterior ):
# Get some info from the parent struct (a FObjDesc)
parentOffset = frameObjString.getAnyDataSectionParent()
parentStruct = globalDatFile.getStruct( parentOffset )
_, _, startFrame, _, dataTypeAndScale, slopeDataTypeAndScale, _, _ = parentStruct.getValues()
# Create a new frame to attach basic info to (since we want to use grid without interfering with pack)
frameDetailsGrid = ttk.Frame( infoPaneInterior )
ttk.Label( frameDetailsGrid, text='General Track Type:' ).grid( column=0, row=0, sticky='e', padx=(0, 10) )
ttk.Label( frameDetailsGrid, text='Specific Track Type:' ).grid( column=0, row=1, sticky='e', padx=(0, 10) )
# Add the general (and specific) track type
trackNames = frameObjString.identifyTrack()
ttk.Label( frameDetailsGrid, text=trackNames[0] ).grid( column=1, row=0, sticky='w' )
ttk.Label( frameDetailsGrid, text=trackNames[1] ).grid( column=1, row=1, sticky='w' )
# Parse the FObjString
interpolationID, arrayCount, keyFrames = frameObjString.parse()
# Display the opcode's interpolation type and array/keyframe count
ttk.Label( frameDetailsGrid, text='Interpolation:' ).grid( column=0, row=2, sticky='e', padx=(0, 10) )
ttk.Label( frameDetailsGrid, text='Keyframe Count:' ).grid( column=0, row=3, sticky='e', padx=(0, 10) )
ttk.Label( frameDetailsGrid, text=frameObjString.interpolationTypes[interpolationID] ).grid( column=1, row=2, sticky='w' )
ttk.Label( frameDetailsGrid, text=arrayCount ).grid( column=1, row=3, sticky='w' )
# Display the data types used in the string
ttk.Label( frameDetailsGrid, text='Data Type:' ).grid( column=0, row=4, sticky='e', padx=(0, 10) )
ttk.Label( frameDetailsGrid, text='Data Scale:' ).grid( column=0, row=5, sticky='e', padx=(0, 10) )
if interpolationID == 0 or interpolationID == 5:
ttk.Label( frameDetailsGrid, text='Not Used' ).grid( column=1, row=4, sticky='w' )
ttk.Label( frameDetailsGrid, text='Not Used' ).grid( column=1, row=5, sticky='w' )
else:
dataType = dataTypeAndScale >> 5 # Use the first (left-most) 3 bits
dataScale = 1 << ( dataTypeAndScale & 0b11111 ) # Use the last 5 bits
ttk.Label( frameDetailsGrid, text=parentStruct.dataTypes[dataType][0] + 's' ).grid( column=1, row=4, sticky='w' )
ttk.Label( frameDetailsGrid, text='1 / {} ({})'.format(dataScale, 1.0/dataScale) ).grid( column=1, row=5, sticky='w' )
# Display the slope/tangent data types used in the string
ttk.Label( frameDetailsGrid, text='Slope Data Type:' ).grid( column=0, row=6, sticky='e', padx=(0, 10) )
ttk.Label( frameDetailsGrid, text='Slope Data Scale:' ).grid( column=0, row=7, sticky='e', padx=(0, 10) )
if interpolationID == 4 or interpolationID == 5:
slopeDataType = slopeDataTypeAndScale >> 5 # Use the first (left-most) 3 bits
slopeDataScale = 1 << ( slopeDataTypeAndScale & 0b11111 ) # Use the last 5 bits
ttk.Label( frameDetailsGrid, text=parentStruct.dataTypes[slopeDataType][0] + 's' ).grid( column=1, row=6, sticky='w' )
ttk.Label( frameDetailsGrid, text='1 / {} ({})'.format(slopeDataScale, 1.0/slopeDataScale) ).grid( column=1, row=7, sticky='w' )
else:
ttk.Label( frameDetailsGrid, text='Not Used' ).grid( column=1, row=6, sticky='w' )
ttk.Label( frameDetailsGrid, text='Not Used' ).grid( column=1, row=7, sticky='w' )
frameDetailsGrid.pack( pady=(14, 0) )
# Start a new frame for the keyframe data, and create a table header
if len( keyFrames ) < 40: # Avoid loading up the GUI too much; could bog it down. Needs testing
keyFramesFrame = ttk.Frame( infoPaneInterior )
ttk.Label( keyFramesFrame, text='Keyframes / States', font="-weight bold" ).grid( column=0, row=0, columnspan=2 )
ttk.Label( keyFramesFrame, text='Start Frame:' ).grid( column=2, row=1, padx=3 )
ttk.Label( keyFramesFrame, text=startFrame ).grid( column=3, row=1, padx=3 )
ttk.Label( keyFramesFrame, text='Keyframe:' ).grid( column=0, row=2, padx=3 )
ttk.Label( keyFramesFrame, text='Data Value:' ).grid( column=1, row=2, padx=3 )
ttk.Label( keyFramesFrame, text='Slope Value:' ).grid( column=2, row=2, padx=3 )
ttk.Label( keyFramesFrame, text='Target Frame:' ).grid( column=3, row=2, padx=3 )
# Display the keyframe data
frameCount = startFrame
csvFormatText = []
row = 3
for dataValue, tangent, frameWait in keyFrames:
ttk.Label( keyFramesFrame, text=row - 2 ).grid( column=0, row=row )
ttk.Label( keyFramesFrame, text=dataValue ).grid( column=1, row=row )
ttk.Label( keyFramesFrame, text=tangent ).grid( column=2, row=row )
ttk.Label( keyFramesFrame, text=frameCount ).grid( column=3, row=row )
csvFormatText.append( '{}, {}, {}'.format(dataValue, tangent, frameCount) )
frameCount += frameWait
row += 1
# Set the end frame, taken from the grandparent Animation Object
animParentOffset = parentStruct.getAnyDataSectionParent()
animParentStruct = globalDatFile.getStruct( animParentOffset )
endFrame = animParentStruct.getValues( 'End_Frame' )
ttk.Label( keyFramesFrame, text='End Frame:' ).grid( column=2, row=row )
ttk.Label( keyFramesFrame, text=endFrame ).grid( column=3, row=row )
keyFramesFrame.pack( pady=(14, 0) )
else:
ttk.Label( infoPaneInterior, text='Avoiding Full Analysis;\nlarge array length detected.' ).pack( pady=(14, 0) )
		csvFormatText = []
		frameCount = startFrame
		for dataValue, tangent, frameWait in keyFrames:
			csvFormatText.append( '{}, {}, {}'.format(dataValue, tangent, frameCount) )
			frameCount += frameWait
# Repackage the data so that it can be collected and used by the user in other ways
csvFormatText = '\n'.join( csvFormatText )
label = ttk.Label( infoPaneInterior, text='Show Keyframes in CSV Format', foreground='#00F', cursor='hand2' )
label.pack( pady=(9, 0) )
label.bind( '<1>', lambda event, message=csvFormatText, title=frameObjString.name + ' Keyframes': cmsg(message, title) )
label = ttk.Label( infoPaneInterior, text='Show Keyframes in TSV Format', foreground='#00F', cursor='hand2' )
label.pack( pady=(3, 0) )
label.bind( '<1>', lambda event, message=csvFormatText.replace(', ', '\t'), title=frameObjString.name + ' Keyframes': cmsg(message, title) )
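
# Illustrative sketch (not from the original code): how the packed dataTypeAndScale /
# slopeDataTypeAndScale bytes used above split into a 3-bit type index and a 5-bit
# power-of-two scale divisor. The function name and example byte are hypothetical.
def demoDataTypeAndScaleUnpacking():
    dataTypeAndScale = 0x45                          # 0b010_00101 (example value only)
    dataType = dataTypeAndScale >> 5                 # first (left-most) 3 bits -> type index 2
    dataScale = 1 << ( dataTypeAndScale & 0b11111 )  # last 5 bits -> divisor 2**5 = 32
    assert ( dataType, dataScale ) == ( 2, 32 )      # stored values are interpreted as value/32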
def onStructureTreeSelect( event ):
""" This is called upon a structure in the Structure Tree being selected.
This will populate the right-hand panel with the structure's name and basic
information (including handlers for clicking on some of them), and will then kick
off a separate function for displaying the structure's values and their offsets. """
# Destroy the existing widgets in the properties frame
Gui.structurePropertiesFrame.clear()
iid = str( Gui.fileStructureTree.selection()[0] )
itemName = Gui.fileStructureTree.item( iid, 'text' )
Gui.structurePropertiesFrame.structTable = None
if itemName == 'File Header':
showFileProperties()
return
elif itemName == 'Relocation Table':
showRelocationTableInfo()
return
elif itemName == 'Root Nodes Table':
showNodeTableInfo( itemName )
return
elif itemName == 'Reference Nodes Table':
showNodeTableInfo( itemName )
return
elif itemName == 'String Table':
showStringTableInfo()
return
elif itemName == 'Sword Swing Colors':
showSwordSwingInfo()
return
elif itemName == '20XX HP Supplemental Data':
show20XXsupplementalData()
return
elif itemName == 'Orphan Structures':
orphanNotes = ( 'Orphan structures are not attached to the file structure tree in the usual way (i.e. having '
'parents that lead all the way up to the root/reference node tables).' )
ttk.Label( Gui.structurePropertiesFrame.interior, text=orphanNotes, wraplength=Gui.structPropFrameWrapLength ).pack( pady=(36,0) )
return
# Get the struct offset and the initialized struct object itself
structOffset = int( iid.split( '/' )[-1] )
structure = globalDatFile.structs[structOffset]
# Display the structure's name and label
ttk.Label( Gui.structurePropertiesFrame.interior, text=structure.name, font="-weight bold" ).pack( pady=(12,0) )
if structure.label:
ttk.Label( Gui.structurePropertiesFrame.interior, text=structure.label, font="-weight bold" ).pack( pady=(3, 0) )
# Add a "button" for help text
helpText = ( 'Offsets shown on the left (for unknown structs) are absolute file offsets. However, keep in mind that pointers '
"shown on the right, the actual values in the file, are relative to the file's data section (meaning they do not "
'account for the 0x20 file header, and will be that much smaller than the actual file offset).\n\n'
                 'If a structure has multiple parents, it may appear under multiple branches, thus the addition of all branch '
'sizes will be larger than the total file size.' )
addHelpBtn( helpText )
# Gather struct info
structParents = structure.getParents( includeNodeTables=True )
structSiblings = structure.getSiblings()
structChildren = structure.getChildren()
# Add general struct info; need to encapsulate these in a Frame so that pack and grid don't conflict
emptyWidget = Tk.Frame( relief='flat' ) # This is used as a simple workaround for the labelframe, so we can have no text label with no label gap.
basicDetailsFrame = ttk.Labelframe( Gui.structurePropertiesFrame.interior, labelwidget=emptyWidget, padding=(20, 4) )
# Get the structure depth
if iid.startswith( 'orphans' ):
structDepthText = 'N/A'
else:
depth = structure.getStructDepth()
if depth:
fileDepth, siblingIndex = depth
structDepthText = '{}, {}'.format( fileDepth, siblingIndex )
else: # Failsafe; not expected
structDepthText = str(getTreeviewDepth( Gui.fileStructureTree, iid )) + ', n/a'
# General Struct Info, column 1 (parents/sibs/children text)
ttk.Label( basicDetailsFrame, text='Parents:' ).grid( column=0, row=0, sticky='e', padx=(0, 5) )
ttk.Label( basicDetailsFrame, text='Siblings:' ).grid( column=0, row=1, sticky='e', padx=(0, 5) )
ttk.Label( basicDetailsFrame, text='Children:' ).grid( column=0, row=2, sticky='e', padx=(0, 5) )
# General Struct Info, column 2 (parents/sibs/children info/links)
if structParents:
        structParentsString = ', '.join([ uHex(0x20+offset) for offset in structParents ])
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides classes and helper functions for parsing/building frames
of the WebSocket protocol (RFC 6455).
Specification:
http://tools.ietf.org/html/rfc6455
"""
from collections import deque
import os
import struct
from mod_pywebsocket import common
from mod_pywebsocket import util
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import InvalidUTF8Exception
from mod_pywebsocket._stream_base import StreamBase
from mod_pywebsocket._stream_base import UnsupportedFrameException
_NOOP_MASKER = util.NoopMasker()
class Frame(object):
def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0,
opcode=None, payload=''):
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.payload = payload
# Helper functions made public to be used for writing unittests for WebSocket
# clients.
def create_length_header(length, mask):
"""Creates a length header.
Args:
length: Frame length. Must be less than 2^63.
mask: Mask bit. Must be boolean.
Raises:
ValueError: when bad data is given.
"""
if mask:
mask_bit = 1 << 7
else:
mask_bit = 0
if length < 0:
raise ValueError('length must be non negative integer')
elif length <= 125:
return chr(mask_bit | length)
elif length < (1 << 16):
return chr(mask_bit | 126) + struct.pack('!H', length)
elif length < (1 << 63):
return chr(mask_bit | 127) + struct.pack('!Q', length)
else:
raise ValueError('Payload is too big for one frame')
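# Illustrative sketch (not part of the original module): the three RFC 6455 length
# encodings produced by create_length_header. The helper below is hypothetical and
# is never called by the library itself.
def _demo_length_header_encoding():
    # 7-bit form: payloads up to 125 bytes fit directly into the second frame byte.
    assert create_length_header(125, False) == chr(125)
    # 16-bit form: marker byte 126 followed by the length as a network-order uint16.
    assert create_length_header(300, False) == chr(126) + struct.pack('!H', 300)
    # 64-bit form: marker byte 127 followed by the length as a network-order uint64.
    assert create_length_header(70000, False) == chr(127) + struct.pack('!Q', 70000)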
def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask):
"""Creates a frame header.
Raises:
Exception: when bad data is given.
"""
if opcode < 0 or 0xf < opcode:
raise ValueError('Opcode out of range')
if payload_length < 0 or (1 << 63) <= payload_length:
raise ValueError('payload_length out of range')
if (fin | rsv1 | rsv2 | rsv3) & ~1:
raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1')
header = ''
first_byte = ((fin << 7)
| (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4)
| opcode)
header += chr(first_byte)
header += create_length_header(payload_length, mask)
return header
def _build_frame(header, body, mask):
if not mask:
return header + body
masking_nonce = os.urandom(4)
masker = util.RepeatedXorMasker(masking_nonce)
return header + masking_nonce + masker.mask(body)
def _filter_and_format_frame_object(frame, mask, frame_filters):
for frame_filter in frame_filters:
frame_filter.filter(frame)
header = create_header(
frame.opcode, len(frame.payload), frame.fin,
frame.rsv1, frame.rsv2, frame.rsv3, mask)
return _build_frame(header, frame.payload, mask)
def create_binary_frame(
message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]):
"""Creates a simple binary frame with no extension, reserved bit."""
frame = Frame(fin=fin, opcode=opcode, payload=message)
return _filter_and_format_frame_object(frame, mask, frame_filters)
def create_text_frame(
message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]):
"""Creates a simple text frame with no extension, reserved bit."""
encoded_message = message.encode('utf-8')
return create_binary_frame(encoded_message, opcode, fin, mask,
frame_filters)
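# Illustrative sketch (not part of the original module): a single unmasked text frame.
# With fin=1 and the TEXT opcode (0x1), the first byte is 0x81 and the second byte is
# the payload length, so u'Hello' serializes to '\x81\x05Hello'. Hypothetical helper.
def _demo_single_text_frame():
    frame = create_text_frame(u'Hello')
    assert frame == '\x81\x05Hello'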
class FragmentedFrameBuilder(object):
"""A stateful class to send a message as fragments."""
def __init__(self, mask, frame_filters=[]):
"""Constructs an instance."""
self._mask = mask
self._frame_filters = frame_filters
self._started = False
# Hold opcode of the first frame in messages to verify types of other
# frames in the message are all the same.
self._opcode = common.OPCODE_TEXT
def build(self, message, end, binary):
if binary:
frame_type = common.OPCODE_BINARY
else:
frame_type = common.OPCODE_TEXT
if self._started:
if self._opcode != frame_type:
raise ValueError('Message types are different in frames for '
'the same message')
opcode = common.OPCODE_CONTINUATION
else:
opcode = frame_type
self._opcode = frame_type
if end:
self._started = False
fin = 1
else:
self._started = True
fin = 0
if binary:
return create_binary_frame(
message, opcode, fin, self._mask, self._frame_filters)
else:
return create_text_frame(
message, opcode, fin, self._mask, self._frame_filters)
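# Illustrative sketch (not part of the original module): sending one text message as
# two fragments. The first frame keeps the TEXT opcode with FIN=0; the continuation
# frame uses opcode 0x0 with FIN=1. Hypothetical helper, never called by the library.
def _demo_fragmented_text_message():
    builder = FragmentedFrameBuilder(mask=False)
    first = builder.build(u'Hel', end=False, binary=False)
    last = builder.build(u'lo', end=True, binary=False)
    assert first == '\x01\x03Hel'   # FIN=0, opcode TEXT (0x1)
    assert last == '\x80\x02lo'     # FIN=1, opcode CONTINUATION (0x0)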
def _create_control_frame(opcode, body, mask, frame_filters):
frame = Frame(opcode=opcode, payload=body)
return _filter_and_format_frame_object(frame, mask, frame_filters)
def create_ping_frame(body, mask=False, frame_filters=[]):
return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters)
def create_pong_frame(body, mask=False, frame_filters=[]):
return _create_control_frame(common.OPCODE_PONG, body, mask, frame_filters)
def create_close_frame(body, mask=False, frame_filters=[]):
return _create_control_frame(
common.OPCODE_CLOSE, body, mask, frame_filters)
class StreamOptions(object):
"""Holds option values to configure Stream objects."""
def __init__(self):
"""Constructs StreamOptions."""
# Enables deflate-stream extension.
self.deflate_stream = False
# Filters applied to frames.
self.outgoing_frame_filters = []
self.incoming_frame_filters = []
self.mask_send = False
self.unmask_receive = True
class Stream(StreamBase):
"""A class for parsing/building frames of the WebSocket protocol
(RFC 6455).
"""
def __init__(self, request, options):
"""Constructs an instance.
Args:
request: mod_python request.
"""
StreamBase.__init__(self, request)
self._logger = util.get_class_logger(self)
self._options = options
if self._options.deflate_stream:
self._logger.debug('Setup filter for deflate-stream')
self._request = util.DeflateRequest(self._request)
self._request.client_terminated = False
self._request.server_terminated = False
# Holds body of received fragments.
self._received_fragments = []
# Holds the opcode of the first fragment.
self._original_opcode = None
self._writer = FragmentedFrameBuilder(
self._options.mask_send, self._options.outgoing_frame_filters)
self._ping_queue = deque()
def _receive_frame(self):
"""Receives a frame and return data in the frame as a tuple containing
each header field and payload separately.
Raises:
ConnectionTerminatedException: when read returns empty
string.
InvalidFrameException: when the frame contains invalid data.
"""
received = self.receive_bytes(2)
first_byte = ord(received[0])
fin = (first_byte >> 7) & 1
rsv1 = (first_byte >> 6) & 1
rsv2 = (first_byte >> 5) & 1
rsv3 = (first_byte >> 4) & 1
opcode = first_byte & 0xf
second_byte = ord(received[1])
mask = (second_byte >> 7) & 1
payload_length = second_byte & 0x7f
if (mask == 1) != self._options.unmask_receive:
raise InvalidFrameException(
                'Mask bit on the received frame didn\'t match masking '
'configuration for received frames')
# The Hybi-13 and later specs disallow putting a value in 0x0-0xFFFF
        # into the 8-octet extended payload length field (or 0x0-0x7D in
# 2-octet field).
valid_length_encoding = True
length_encoding_bytes = 1
if payload_length == 127:
extended_payload_length = self.receive_bytes(8)
payload_length = struct.unpack(
'!Q', extended_payload_length)[0]
if payload_length > 0x7FFFFFFFFFFFFFFF:
raise InvalidFrameException(
'Extended payload length >= 2^63')
if self._request.ws_version >= 13 and payload_length < 0x10000:
valid_length_encoding = False
length_encoding_bytes = 8
elif payload_length == 126:
extended_payload_length = self.receive_bytes(2)
payload_length = struct.unpack(
'!H', extended_payload_length)[0]
if self._request.ws_version >= 13 and payload_length < 126:
valid_length_encoding = False
length_encoding_bytes = 2
if not valid_length_encoding:
self._logger.warning(
'Payload length is not encoded using the minimal number of '
'bytes (%d is encoded using %d bytes)',
payload_length,
length_encoding_bytes)
if mask == 1:
masking_nonce = self.receive_bytes(4)
masker = util.RepeatedXorMasker(masking_nonce)
else:
masker = _NOOP_MASKER
bytes = masker.mask(self.receive_bytes(payload_length))
return opcode, bytes, fin, rsv1, rsv2, rsv3
def _receive_frame_as_frame_object(self):
opcode, bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame()
return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3,
opcode=opcode, payload=bytes)
def send_message(self, message, end=True, binary=False):
"""Send message.
Args:
message: text in unicode or binary in str to send.
binary: send message as binary frame.
Raises:
BadOperationException: when called on a server-terminated
connection or called with inconsistent message type or
binary parameter.
"""
if self._request.server_terminated:
raise BadOperationException(
'Requested send_message after sending out a closing handshake')
if binary and isinstance(message, unicode):
raise BadOperationException(
'Message for binary frame must be instance of str')
try:
self._write(self._writer.build(message, end, binary))
except ValueError, e:
raise BadOperationException(e)
def receive_message(self):
"""Receive a WebSocket frame and return its payload as a text in
unicode or a binary in str.
Returns:
payload data of the frame
- as unicode instance if received text frame
- as str instance if received binary frame
or None iff received closing handshake.
Raises:
BadOperationException: when called on a client-terminated
connection.
            ConnectionTerminatedException: when read returns empty string.
<filename>acq4/util/clibrary/CLibrary.py
# -*- coding: utf-8 -*-
from __future__ import print_function
from six.moves import map
from six.moves import range
"""
CLibrary.py - Provides CLibrary class
Copyright 2010 <NAME>
Distributed under MIT/X11 license. See license.txt for more information.
Proxy to both CHeader and ctypes, allowing automatic type conversion and
function calling based on C header definitions.
"""
import ctypes
from ctypes import c_wchar_p, pointer, c_ubyte, c_long, c_longlong, c_short, c_char, c_int, c_void_p, Union, \
c_ulonglong, c_wchar, cast, c_ushort, c_float, c_double, c_uint, POINTER, c_char_p, c_longdouble, Structure, c_ulong
import sys, os, platform
import six
def find_lib(name, paths=[], dirHints=[]):
"""Search through likely directories to find non-system dlls. Return the first filepath that is found. Currently only supported on Windows.
**Arguments**
============= ==============================================
name (str) The name of the file to look for.
paths (list)(optional) A list of directory paths to search.
dirHints (list)(optional) A list of directory names within ProgramFiles or ProgramFiles(x86) to search first. Used to reduce search time.
Directories are searched in the order specified in paths, then in 'ProgramFiles', then 'ProgramFiles(x86)'
"""
if platform.system() != 'Windows':
raise Exception("CLibrary.find_lib is currently only supported on Windows machines. Sorry.")
searchPaths = paths
for directory in ['PROGRAMFILES', 'PROGRAMFILES(X86)', 'SYSTEMDRIVE']:
p = os.environ.get(directory, None)
        if directory == 'SYSTEMDRIVE': ## fixes bug(feature?) where os.path.join doesn't insert slashes between c: and whatever you're trying to join
p = os.path.join(p, os.sep)
if p is not None:
for d in dirHints:
path = os.path.join(p,d)
if os.path.exists(path):
searchPaths.append(path)
searchPaths.append(p)
for path in searchPaths:
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
raise Exception('Could not find file named "%s". Searched recursively in %s.' % (name, str(searchPaths)))
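# Illustrative sketch (not part of the original module): a typical find_lib call on
# Windows. The DLL name, extra search path, and vendor directory are hypothetical.
def _example_find_lib():
    # Looks in C:\CustomLibs first, then "Vendor" under ProgramFiles/ProgramFiles(x86),
    # walking each directory recursively until a file named vendor_api.dll is found.
    return find_lib('vendor_api.dll', paths=[r'C:\CustomLibs'], dirHints=['Vendor'])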
class CLibrary:
"""The CLibrary class is intended to automate much of the work in using ctypes by integrating
    header file definitions from CParser. This class serves as a proxy to a ctypes shared object, adding
a few features:
- allows easy access to values defined via CParser
- automatic type conversions for function calls using CParser function signatures
- creates ctype classes based on type definitions from CParser
Initialize using a ctypes shared object and a CParser:
headers = CParser.winDefs()
lib = CLibrary(windll.User32, headers)
There are 3 ways to access library elements:
lib(type, name) - type can be one of 'values', 'functions', 'types', 'structs', 'unions', or 'enums'.
Returns an object matching name. For values, the value from the headers is
returned. For functions, a callable object is returned that handles automatic
type conversion for arguments and return values. for structs, types, and enums,
a ctypes class is returned matching the type specified.
lib.name - searches in order through values, functions, types, structs, unions, and enums from
header definitions and returns an object for the first match found. The object
returned is the same as returned by lib(type, name). This is the preferred way to access
elements from CLibrary, but may not work in some situations (for example, if
a struct and variable share the same name).
lib[type] - Accesses the header definitions directly, returns definition dictionaries
based on the type requested. This is equivalent to headers.defs[type].
"""
Null = object()
cTypes = {
'char': c_char,
'wchar': c_wchar,
'unsigned char': c_ubyte,
'short': c_short,
'short int': c_short,
'unsigned short': c_ushort,
'unsigned short int': c_ushort,
'int': c_int,
'unsigned': c_uint,
'unsigned int': c_uint,
'long': c_long,
'long int': c_long,
'unsigned long': c_ulong,
'unsigned long int': c_ulong,
'__int64': c_longlong,
'long long': c_longlong,
'long long int': c_longlong,
'unsigned __int64': c_ulonglong,
'unsigned long long': c_ulonglong,
'unsigned long long int': c_ulonglong,
'float': c_float,
'double': c_double,
'long double': c_longdouble
}
cPtrTypes = {
'char': c_char_p,
'wchar': c_wchar_p,
'void': c_void_p
}
def __init__(self, lib, headers, prefix=None):
## name everything using underscores to avoid name collisions with library
self._lib_ = lib
self._headers_ = headers
self._defs_ = headers.defs
if prefix is None:
self._prefix_ = []
elif type(prefix) is list:
self._prefix_ = prefix
else:
self._prefix_ = [prefix]
self._objs_ = {}
for k in ['values', 'functions', 'types', 'structs', 'unions', 'enums']:
self._objs_[k] = {}
self._allObjs_ = {}
self._structs_ = {}
self._unions_ = {}
def __call__(self, typ, name):
if typ not in self._objs_:
typs = self._objs_.keys()
raise Exception("Type must be one of %s" % str(typs))
if name not in self._objs_[typ]:
self._objs_[typ][name] = self._mkObj_(typ, name)
return self._objs_[typ][name]
def _allNames_(self, name):
return [name] + [p + name for p in self._prefix_]
def _mkObj_(self, typ, name):
names = self._allNames_(name)
for n in names:
if n in self._objs_:
return self._objs_[n]
for n in names: ## try with and without prefix
if n not in self._defs_[typ] and not (typ in ['structs', 'unions', 'enums'] and n in self._defs_['types']):
continue
if typ == 'values':
return self._defs_[typ][n]
elif typ == 'functions':
return self._getFunction(n)
elif typ == 'types':
obj = self._defs_[typ][n]
return self._ctype(obj)
elif typ == 'structs':
return self._cstruct('structs', n)
elif typ == 'unions':
return self._cstruct('unions', n)
elif typ == 'enums':
## Allow automatic resolving of typedefs that alias enums
if n not in self._defs_['enums']:
if n not in self._defs_['types']:
raise Exception('No enums named "%s"' % n)
typ = self._headers_.evalType([n])[0]
if typ[:5] != 'enum ':
raise Exception('No enums named "%s"' % n)
n = self._defs_['types'][typ][1] ## look up internal name of enum
obj = self._defs_['enums'][n]
return obj
else:
raise Exception("Unknown type %s" % typ)
raise NameError(name)
def __getattr__(self, name):
"""Used to retrieve any type of definition from the headers. Searches for the name in this order:
values, functions, types, structs, unions, enums."""
if name not in self._allObjs_:
names = self._allNames_(name)
for k in ['values', 'functions', 'types', 'structs', 'unions', 'enums', None]:
if k is None:
#obj = getattr(self._lib_, name) # pull directly from the lib as a last resort?
raise AttributeError(name)
obj = None
for n in names:
if n in self._defs_[k]:
obj = self(k, n)
break
if obj is not None:
break
self._allObjs_[name] = obj
return self._allObjs_[name]
def __getitem__(self, name):
"""Used to retrieve a specific dictionary from the headers."""
return self._defs_[name]
def __repr__(self):
return "<CLibrary instance: %s>" % str(self._lib_)
def _getFunction(self, funcName):
try:
func = getattr(self._lib_, funcName)
except:
raise Exception("Function name '%s' appears in headers but not in library!" % func)
#print "create function %s," % (funcName), self._defs_['functions'][funcName]
return CFunction(self, func, self._defs_['functions'][funcName], funcName)
def _ctype(self, typ, pointers=True):
"""return a ctype object representing the named type.
If pointers is True, the class returned includes all pointer/array specs provided.
Otherwise, the class returned is just the base type with no pointers."""
try:
typ = self._headers_.evalType(typ)
mods = typ[1:][:]
## Create the initial type
## Some types like ['char', '*'] have a specific ctype (c_char_p)
## (but only do this if pointers == True)
if pointers and len(typ) > 1 and typ[1] == '*' and typ[0] in CLibrary.cPtrTypes:
cls = CLibrary.cPtrTypes[typ[0]]
mods = typ[2:]
## If the base type is in the list of existing ctypes:
elif typ[0] in CLibrary.cTypes:
cls = CLibrary.cTypes[typ[0]]
## structs, unions, enums:
elif typ[0][:7] == 'struct ':
cls = self._cstruct('structs', self._defs_['types'][typ[0]][1])
elif typ[0][:6] == 'union ':
cls = self._cstruct('unions', self._defs_['types'][typ[0]][1])
elif typ[0][:5] == 'enum ':
cls = c_int
## void
elif typ[0] == 'void':
cls = None
else:
#print typ
raise Exception("Can't find base type for %s" % str(typ))
if not pointers:
return cls
## apply pointers and arrays
while len(mods) > 0:
m = mods.pop(0)
if isinstance(m, six.string_types): ## pointer or reference
if m[0] == '*' or m[0] == '&':
for i in m:
cls = POINTER(cls)
elif isinstance(m, list) and isinstance(m[0], int): ## array
for i in m:
if i == -1: ## -1 indicates an 'incomplete type' like "int variable[]"
cls = POINTER(cls) ## which we should interpret like "int *variable"
else:
cls = cls * i
elif isinstance(m, (list, tuple)) and not isinstance(m[0], int): ## Probably a function pointer
## Find pointer and calling convention
isPtr = False
conv = '__cdecl'
if len(mods) == 0:
raise Exception("Function signature with no | |
import glob
import os
import shutil
import time
import zipfile
from datetime import datetime
from django import forms
from museum_site.models import *
from museum_site.fields import *
from museum_site.widgets import *
from museum_site.common import GENRE_LIST, YEAR, any_plus, TEMP_PATH, SITE_ROOT, get_sort_option_form_choices, delete_this
from museum_site.constants import (
LICENSE_CHOICES, LICENSE_SOURCE_CHOICES, LANGUAGE_CHOICES
)
from museum_site.core.detail_identifiers import *
from museum_site.private import IA_ACCESS, IA_SECRET
from internetarchive import upload
class ZGameForm(forms.ModelForm):
zfile = forms.FileField(
help_text=("Select the file you wish to upload. "
"All uploads <i>must</i> be zipped."),
label="File", widget=UploadFileWidget()
)
genres = forms.CharField(
help_text=(
"Check any applicable genres that describe the content of the "
"uploaded file. Use 'Other' if a genre isn't represented and "
"mention it in the upload notes field in the Upload Settings "
"section. For a description of genres, see the "
"<a href='/help/genre/' target='_blank'>Genre Overview</a> "
"page."
),
widget=SlashSeparatedValueCheckboxWidget(
choices=list(
zip(
Genre.objects.filter(visible=True), Genre.objects.filter(
visible=True
)
)
)
)
)
author = forms.CharField(
required=False,
help_text=(
"Separate multiple authors with a comma. Do not abbreviate "
"names.<br>"
"For files with many authors, consider using the compiler as "
"the author with \"Various\" to represent the rest. Try to "
"sort multiple authors from most to least important on this "
"particular upload. If the author's name is not known, leave this "
"field blank."
),
widget=SlashSeparatedValueWidget(
attrs={
"list": "author-suggestions",
"autocomplete": "off",
}
)
)
field_order = ["zfile", "title", "author", "company", "genres"]
use_required_attribute = False
# Properties handled in view
max_upload_size = 0
editing = False
expected_file_id = 0 # For replacing a zip
class Meta:
model = File
fields = [
"zfile", "title", "author", "company", "explicit",
"release_date", "language",
"description",
]
help_texts = {
"title": "Leave A/An/The as the first word if applicable.",
"company": (
"Any companies this file is published under. If there are "
"none, leave this field blank. If there are multiple, "
"separate them with a comma."
),
"release_date": (
"Enter the date this file was first made public. If this is a "
"newly created file, it should be today's date. If this is an "
"older release being uploaded now, it should be the "
"modification date of the most "
"recent ZZT world (or executable, or other primary file). If "
"the release date is not known, select \"Unknown\" to leave "
"this field blank."
),
"release_source": (
"Where the data for the release date is coming from"
),
"language": (
'Check any languages the player is expected to understand to '
'comprehend the files in the upload. For worlds exclusively '
'using created languages, use "Other". If a language is not '
'listed, use "Other" and specify the correct language in the '
'upload notes section.'
),
"file_license": "The license under which this world is published.",
"license_source": (
"Where the license can be found. Use a source contained "
"within the uploaded file when possible."
),
"description": (
"A description for the uploaded file. For utilities, please "
"be sure to fill this out. If the description is written by "
"the file's author, and not a third party please wrap it in "
"quotation marks."
),
"explicit": (
"Check this box if the upload contains material not suitable "
"for minors or non-consenting adults. Uploads marked as "
"explicit will require confirmation before accessing and "
"never appear in Worlds of ZZT bot posts."
),
}
widgets = {
"company": SlashSeparatedValueWidget(
attrs={
"list": "company-suggestions",
"autocomplete": "off",
}
),
"explicit": forms.RadioSelect(
choices=(
(0, "This upload does not contain explicit content"),
(1, "This upload contains explicit content")
),
),
"release_date": forms.DateInput(
format=("%y-%m-%d"),
attrs={"type": "date"}
),
"language": SlashSeparatedValueCheckboxWidget(
choices=LANGUAGE_CHOICES,
),
"file_license": SelectPlusCustomWidget(
choices=LICENSE_CHOICES
),
"license_source": SelectPlusCustomWidget(
choices=LICENSE_SOURCE_CHOICES
),
"zfile": UploadFileWidget(),
}
def clean_zfile(self):
zfile = self.cleaned_data["zfile"]
if zfile and zfile.name:
dupe = File.objects.filter(filename=zfile.name).first()
if dupe and dupe.id != self.expected_file_id:
raise forms.ValidationError(
"The selected filename is already in use. "
"Please rename your zipfile."
)
if zfile and zfile.size > self.max_upload_size:
raise forms.ValidationError(
"File exceeds your maximum upload size! "
"Contact Dr. Dos for a manual upload."
            )
        return zfile
def clean_author(self):
# Replace blank authors with "Unknown"
author = self.cleaned_data["author"]
if author == "":
author = "Unknown"
return author
def clean_genres(self):
# Make sure all requested genres exist
valid_genres = list(Genre.objects.filter(visible=True).values_list("title", flat=True))
requested_genres = self.cleaned_data["genres"].split("/")
for genre in requested_genres:
if genre not in valid_genres:
raise forms.ValidationError(
"An invalid genre was specified."
                )
        return self.cleaned_data["genres"]
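
# Illustrative sketch (not from the original code): how a view might drive ZGameForm.
# The request object, size limit, and file id below are hypothetical; is_valid() is
# what triggers the clean_zfile/clean_author/clean_genres hooks defined above.
#
#     form = ZGameForm(request.POST, request.FILES)
#     form.max_upload_size = 10 * 1024 * 1024   # enforced inside clean_zfile()
#     form.expected_file_id = 0                 # no existing zip is being replaced
#     if form.is_valid():
#         zfile = form.save(commit=False)       # then attach genres/authors and save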
class PlayForm(forms.Form):
zeta_config = forms.ChoiceField(
choices=Zeta_Config.objects.select_list(),
label="Configuration",
help_text=(
'Choose the intended configuration for playing the upload in the '
'browser. If this upload cannot be ran with Zeta, select '
'"Incompatible with Zeta" at the end of the list. For the vast '
'majority of ZZT worlds "ZZT v3.2 (Registered)" is the correct '
'choice.'
)
)
class UploadForm(forms.ModelForm):
generate_preview_image = forms.ChoiceField(
choices=[ # List rather than tuple so it can be modified later
("AUTO", "Automatic"),
("NONE", "None")
],
help_text=(
"Select a ZZT file whose title screen will be used for the world's "
"preview image. Leave set to 'Automatic' to use the oldest file in "
"the zip file. This image may be changed during publication."
),
)
edit_token = forms.CharField(required=False, widget=forms.HiddenInput())
class Meta:
model = Upload
fields = ["generate_preview_image", "notes", "announced", "edit_token"]
labels = {
"generate_preview_image": "Preview image",
"notes": "Upload notes",
"announced": "Announce on Discord",
}
help_texts = {
"notes": (
"Notes for staff to read before publication such as special "
"instructions before publishing. While not visible to users on "
"the site directly, consider anything entered in this field to "
"be public."
),
"announced": (
"New uploads are automatically shared to the Discord of ZZT's "
"announcements channel. You may choose to not announce the "
"upload. The upload will still appear publically in the upload "
"queue and on RSS feeds."
),
}
widgets = {
"announced": forms.RadioSelect(
choices=(
(0, "Announce this upload"),
(1, "Do not announce this upload")
),
),
}
class DownloadForm(forms.ModelForm):
use_required_attribute = False
url = forms.URLField(
required=False,
label="URL",
help_text=(
"An alternate location to acquire this file. The link "
"should lead to an active page where the file can be downloaded "
"<b>not</b> a direct link to the hosted file. The URL should "
"direct to a webpage with an official release by the file's "
"author, not an alternative ZZT archive, the Internet Archive, or "
"any unmaintained webpage."
)
)
class Meta:
model = Download
fields = ["url", "kind", "hosted_text"]
labels = {
"url": "URL",
"kind": "Category",
}
help_texts = {
"kind": (
"The type of webpage this file is hosted on. This is used to "
"determine an icon to display when selecting an alternate "
"download source."
),
"hosted_text": (
"For non-Itch download sources only. On the file's downloads "
"page, the text entered here will be prefixed with "
"\"Hosted on\"."
),
}
class AdvancedSearchForm(forms.Form):
use_required_attribute = False
required = False
title = forms.CharField(label="Title contains", required=False)
author = forms.CharField(label="Author contains", required=False)
filename = forms.CharField(label="Filename contains", required=False)
company = forms.CharField(label="Company contains", required=False)
genre = forms.ChoiceField(
choices=any_plus(zip(GENRE_LIST, GENRE_LIST)),
required=False
)
year = forms.ChoiceField(
choices=any_plus(((str(x), str(x)) for x in range(YEAR, 1990, -1))), # Earliest release year is 1991
required=False
)
board_min = forms.IntegerField(
required=False, label="Minimum/Maximum board count"
)
board_max = forms.IntegerField(required=False)
board_type = forms.ChoiceField(
widget=forms.RadioSelect,
choices=[
("playable", "Playable Boards"),
("total", "Total Boards"),
],
required=False,
)
language = forms.ChoiceField(
choices=any_plus(LANGUAGE_CHOICES),
required=False,
)
reviews = forms.ChoiceField(
widget=forms.RadioSelect(),
choices=(
("yes", "Show files with reviews"),
("no", "Show files without reviews"),
("any", "Show both")
),
required=False,
)
articles = forms.ChoiceField(
widget=forms.RadioSelect(),
choices=(
("yes", "Show files with articles"),
("no", "Show files without articles"),
("any", "Show both")
),
required=False
)
details = forms.MultipleChoiceField(
# widget=forms.SelectMultiple,
widget=GroupedCheckboxWidget,
choices=Detail.objects.form_list,
required=False,
)
sort = forms.ChoiceField(
label="Sort by",
choices=(
("title", "Title"),
("author", "Author"),
("company", "Company"),
("rating", "Rating"),
("release", "Release Date"),
),
required=False
)
class ArticleSearchForm(forms.Form):
use_required_attribute = False
required = False
YEARS = (
[("Any", "- ANY - ")] +
[(y, y) for y in range(YEAR, 1990, -1)] +
[("Unk", "Unknown")]
)
    # TODO this may need to be
<reponame>djfroofy/beatlounge
import sys
import math
import warnings
from collections import namedtuple
from twisted.python import log
from twisted.python.failure import Failure
from twisted.internet.selectreactor import SelectReactor
from twisted.internet.task import LoopingCall
from bl.debug import DEBUG
__all__ = ['Tempo', 'Beat', 'Meter', 'standardMeter', 'BeatClock',
'ScheduledEvent', 'clock']
_BeatBase = namedtuple('_BeatBase',
'measure quarter eighth sixteenth remainder')
class Tempo(object):
"""
Tempo gives the tempo in 3 forms for ready access:
bpm (beats per minute)
tpb (ticks per beat)
tpm (ticks per minute)
Do not set these attributes directly, but call reset() instead. Otherwise,
expect unexpected behaviors.
"""
def __init__(self, bpm=120, tpb=24):
self.bpm = bpm
self.tpb = tpb
self.tpm = self.bpm * self.tpb
def reset(self, bpm=None, tpb=None, tpm=None):
if bpm:
self.bpm = bpm
if tpb:
self.tpb = tpb
if tpm:
self.tpm = tpm
self.bpm = (tpm / self.tpb)
return
self.tpm = self.bpm * self.tpb
def __str__(self):
return 'Tempo(bpm=%s, tpb=%s)' % (self.bpm, self.tpb)
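# Illustrative sketch (not part of the original module): how the three tempo forms
# relate and how reset() keeps them consistent. The helper below is hypothetical.
def _demo_tempo_forms():
    t = Tempo(bpm=100, tpb=24)
    assert t.tpm == 2400      # ticks per minute = bpm * tpb
    t.reset(bpm=140)          # changing bpm recomputes tpm
    assert t.tpm == 140 * 24
    t.reset(tpm=2880)         # changing tpm back-solves bpm from the fixed tpb
    assert t.bpm == 120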
TEMPO_120_24 = Tempo()
STANDARD_TICKS_PER_MEASURE = 96
class Beat(_BeatBase):
"""
A named tuple representing the current beat as:
(measure no, quarter no, eigth number no, sixteenth no, remaning ticks)
If we are on the 15th semiquaver of the 2nd measure for example,
the Beat would be:
(1, 3, 1, 1, 0)
If we are the third 1/32 of the first measure:
(0, 0, 0, 1, 3)
"""
def __repr__(self):
return ('Beat(measure=%s, quarter=%s, eighth=%s, sixteenth=%s, '
'remainder=%s)' % (self.measure, self.quarter, self.eighth,
self.sixteenth, self.remainder))
class Meter(object):
"""
Representation of a Musical meter with methods for representing the current
Beat and converting to other related values: the current measure number
based on ticks, ticks into the current measure, etc.
"""
strict = True
clock = None
def __init__(self, length=4, division=4, number=1, tempo=TEMPO_120_24):
self.length = length
self.division = division
self.number = number
self._hash = hash((self.length, self.division, self.number))
self.resetTempo(tempo)
def resetTempo(self, tempo):
self.tempo = tempo
self.ticksPerMeasure = int(tempo.tpb * self.length * 4. / self.division
* self.number)
def beat(self, ticks):
"""
Return Beat tuple based on the given ticks.
ticks: the clock ticks (BeatClock.ticks)
"""
measure, ticks = divmod(ticks, self.ticksPerMeasure)
if not ticks:
return Beat(measure, 0, 0, 0, 0)
quarter, ticks = divmod(ticks, self.tempo.tpb)
if not ticks:
return Beat(measure, int(quarter), 0, 0, 0)
eighth, ticks = divmod(ticks, self.tempo.tpb / 2)
if not ticks:
return Beat(measure, int(quarter), int(eighth), 0, 0)
sixteenth, ticks = divmod(ticks, self.tempo.tpb / 4)
return Beat(measure, int(quarter), int(eighth), int(sixteenth),
int(ticks))
def ticks(self, ticks):
"""
Return the number of ticks that have elapsed
since the start of the current measure based on the total clock ticks.
ticks: the clock ticks (BeatClock.ticks)
"""
return ticks % self.ticksPerMeasure
def divisionToTicks(self, n, d):
"""
Convert n/d (examples 1/4, 3/4, 3/32, 8/4..) For example, if the
ticks-per-beat are 24, then n=1 and d=8 would return 12.
"""
tpm = self.tempo.tpb * 4 # Ticks per standard measure 4/4
ticks = float(n) / d * tpm
_, rem = divmod(ticks, 1)
if rem and self.strict:
raise ValueError('<divisionToTicks> %s/%s does not evenly divide '
'%s' % (n, d, tpm))
elif rem and not self.strict:
log.err(Failure(ValueError('<divisionToTicks> %s/%s does not '
'evenly divide %s'
% (n, d, tpm))))
return int(math.floor(ticks))
dtt = divisionToTicks
def nextDivision(self, ticks, n, d):
m = self.measure(ticks) * self.ticksPerMeasure
offset_ticks = self.divisionToTicks(n, d)
next = m + offset_ticks
if next < ticks:
next = next + self.ticksPerMeasure
return next
nd = nextDivision
def nextMeasure(self, ticks, measures=1):
m = self.measure(ticks) * self.ticksPerMeasure
r = m + measures * self.ticksPerMeasure
return r
nm = nextMeasure
def untilNextMeasure(self, ticks, measures=1):
        return self.nextMeasure(ticks, measures) - ticks
unm = untilNextMeasure
def measure(self, ticks):
"""
Return the current measure number based on ticks.
ticks. the clock ticks (BeatClock.ticks)
"""
return divmod(ticks, self.ticksPerMeasure)[0]
def __repr__(self):
return 'Meter(length=%s, division=%s, number=%s)' % (
self.length, self.division, self.number)
def __hash__(self):
return self._hash
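# Illustrative sketch (not part of the original module): Beat decomposition and
# division-to-ticks conversion for a plain 4/4 meter at 24 ticks per beat. The
# helper below is hypothetical and is never called by the library.
def _demo_meter_math():
    m = Meter(4, 4, tempo=Tempo(120, 24))      # 96 ticks per measure
    assert m.ticksPerMeasure == 96
    assert m.beat(96) == Beat(1, 0, 0, 0, 0)   # first tick of the second measure
    assert m.beat(36) == Beat(0, 1, 1, 0, 0)   # one quarter plus one eighth in
    assert m.divisionToTicks(1, 8) == 12       # an eighth note spans 12 ticks
    assert m.divisionToTicks(3, 4) == 72       # three quarter notes into a measure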
standardMeter = Meter(4, 4)
class SynthControllerMixin(object):
if sys.platform == 'darwin':
synthAudioDevice = 'coreaudio'
elif sys.platform == 'linux2':
synthAudioDevice = 'alsa'
else:
synthAudioDevice = 'portaudio'
synthChannels = 'stereo'
class BeatClock(SelectReactor, SynthControllerMixin):
"""
A BeatClock is a meta reactor based on a looping call which is used to keep
virtual time based on a given tempo and meter.
The current implementation assumes there are 24 ticks (pulses) per quarter
note (or 96 ticks per standard measure).
In general a runtime should only use one singleton BeatClck, though it's in
theory possible to have many running at the same time (waves hands).
"""
defaultClock = None
syncClock = None
def __init__(self, tempo=TEMPO_120_24, meter=None, meters=(), reactor=None,
syncClockClass=None, default=False):
"""
tempo: The tempo object (default: Tempo(120, 24))
meter: Meter used by the clock - default to Meter(4,4,tempo=tempo)
reactor: The underlying reactor to drive this BeatClock - this defaults
to the global reactor (i.e "from twisted.internet import
reactor")
syncClockClass: SyncClock class to use for synchronizing the clock's
ticks and scheduling offset (if None, no SyncClock will be used).
See bl.sync.
default: If True, BeatClock.defaultClock will be set to the instance -
this is used by other components to get the default global
BeatClock.
"""
global clock
self.tempo = tempo
self.ticks = 0
self.meters = meters
self._meter_schedule = {}
if not self.meters:
self.meters = [Meter(4, 4, 1, tempo=self.tempo)]
else:
warnings.warn('meters argument is deprecated, use '
'meter=oneMeterNotAList instead')
self.meter = meter or self.meters[0]
if not reactor:
from twisted.internet import reactor
self.reactor = reactor
if default or (self.defaultClock is None):
BeatClock.defaultClock = self
clock = self
if syncClockClass:
self.syncClock = syncClockClass(self)
lasttick, ts = self.syncClock.lastTick()
self.ticks = lasttick
SelectReactor.__init__(self)
def setTempo(self, tempo):
"""
Change the current tempo. Note that this has the side-effect of
restarting the underlying task driving the BeatClock and resyncing to
the syncClock if there is a syncClock.
BUG: resyncing to SyncClock on tempo changes causes scheduled events
not get called for an unreasonable amount of time. Hopefully this will
resolved soon, but for the time being don't change the tempo at runtime
and set before starting the clock (e.g. with beatlounge command use the
-t arg to set the tempo in advance).
tempo: The tempo (instance of Tempo)
"""
self.tempo = tempo
if hasattr(self, 'task'):
self.task.stop()
self.task.start(60. / self.tempo.tpm, True)
if self.syncClock:
lasttick, ignore = self.syncClock.lastTick()
self.ticks = lasttick
def run(self):
"""
Start the BeatClock. Note that if twisted's reactor has not been
started this will start it. This is done for you by bl/console.py
(beatlounge command) so you generally should not call this directly in
interpreter sessions.
"""
self._initBackends()
self.startTicking()
self.running = True
if not self.reactor.running:
self.reactor.run()
def _initBackends(self):
# XXX this should be refactored some - make backends pluggable and
# indicate which to start from a command line, etc.
try:
from bl.instrument import fsynth
if self.synthChannels == 'stereo':
return
if self.synthChannels == 'mono':
pool = fsynth.MonoPool()
elif self.synthChannels == 'quad':
pool = fsynth.QuadPool()
else:
try:
self.synthChannels = int(self.synthChannels)
except ValueError:
raise ValueError(
'synthChannels should be one of mono, '
'stereo, quad or an integer')
synths = dict(
(n, fsynth.Synth) for n in range(self.synthChannels))
pool = fsynth.NConnectionPool(synths)
fsynth.suggestDefaultPool(pool)
except ImportError:
log.msg('fluidsynth will not be available at runtime')
pass
def startTicking(self):
"""
Called by run - do not call me directly. Start the LoopingCall which
will drive the BeatClock.
"""
self.task = LoopingCall(self.tick)
self.on_stop = self.task.start(60. / self.tempo.tpm, True)
def tick(self):
"""
Advance ticks and run delayed calls.
"""
if self.syncClock:
ticks, ts = self.syncClock.lastTick()
if self.ticks > (ticks + 1):
if DEBUG:
log.msg("We're ahead by %s ticks, waiting" %
self.ticks - (ticks + 1))
return
self.ticks += 1
self.runUntilCurrent()
if self.syncClock:
tick, ts = self.syncClock.lastTick()
if tick > self.ticks:
self._syncToTick(tick, ts)
next = self.task._expectNextCallAt
if abs(next - ts) > 0.0005:
if DEBUG:
log.msg('Off by: %3.3fms; skewing time' %
(1000. * (next - ts)))
self.task._expectNextCallAt -= (next - ts)
def _syncToTick(self, tick, ts):
"""
Synchronize the current ticks based on tick and timestamp (ts) reported
by the SyncClock.
"""
# TODO - quiet everything somehow
delta = tick - self.ticks
if DEBUG:
log.msg("We're behind by %s ticks (ticks=%s expected=%s)" %
(delta, self.ticks, tick))
tpm = self.meter.ticksPerMeasure
if delta > tpm:
t = tick % tpm
ct = self.ticks % tpm
            if
from Bio import pairwise2
from Bio.SubsMat.MatrixInfo import blosum62
import numpy as np
import scipy
import pandas as pd
import regex as re
import pickle
def sub_pivot_df(pps, sdf, group=True):
"""function takes a long form datatable of extracts and peaks (input sdf), filters
for peptide plasmids of interest (input pps) and outputs a datatable with
one row per extract, with columns for 'unmod' and 'mod' (or any other peak type)
with the respective peak area. group option specifies if replicates should be grouped
    (by peptide sequence), with peak areas averaged across the grouped replicates."""
#filter for a sub-dataframe that includes just the peptide plasmids of interest
sub_df = sdf[sdf['pep_plasmid'].isin(pps)]
#Grab the set of sequences of interest (set to make non-redundant)
sequences = set(sub_df['sequence'])
#grab just the modification information (%mod fractions) for each extract
stats_df = sub_df.pivot_table(index='extract', columns='peak_type',
values='mod_area', fill_value=0).reset_index()
#metadata for all of the extracts
meta_df = sub_df.groupby('extract', group_keys=False).first().reset_index().sort_values('extract')
#merge metadata with stats data based on extract
extract_df = meta_df.merge(stats_df, on='extract', how='inner')
#if include_other:
# sub_data['mod'] = sub_data['mod'] + sub_data['other']
if group:
extract_df['replicate'] = 1
return extract_df.groupby(
['sequence', 'mod_plasmid', 'modification description'], group_keys=False).agg(
{'media':'first','ms':'first', 'pep_plasmid':'first', 'replicate':'sum', 'total_area':'mean',
'mod':'mean','unmod':'mean', 'extract':'first'}).reset_index().sort_values('mod', ascending=False)
else:
return extract_df
def seq_alignment(wt_sequence, sdf, score='ddg', penalties=(-15, -2)):
"""Function takes a wild-type sequence and a dataframe of extracts of sequence variants to align to.
Returns four lists, each list having one element per row of the input dataframe:
seq_alignments - a list of tuples. Each tuple is the variant sequence, it's alignment to the
wild-type sequence, and it's modification score (the type of score specified in 'score' input).
labels_sparse - the variant sequence aligned to the wild-type sequence, positions that match
wild-type are blank (space), positions that are mutated are the mutant amino acid (or '-' for
gap). Note that for the wild-type sequence, the full sequence is here, no spaces, as a reference.
labels - the variant sequence, unchanged/unaligned.
labels_aligned - the variant sequence, aligned (with gaps)
"""
seq_alignments = []
labels = [wt_sequence]
labels_sparse = [wt_sequence]
labels_aligned = [wt_sequence]
for ind, row in enumerate(sdf.iterrows()):
#get rid of the index
row = row[1]
seq = row['sequence']
mod_efficiency = row[score]
#align the sequences, this will be a list of alignments, we just take the first one, since they are all
# functionally equivalent for our purposes
alignments = pairwise2.align.globalds(wt_sequence, seq.split("*")[0], blosum62, penalties[0], penalties[1])[0]
#skip the wt sequence for the labels/order, so we added it at the beginning
if alignments[1] == wt_sequence:
seq_alignments.append((seq, alignments[1], mod_efficiency))
else:
seq_alignments.append((seq, alignments[1], mod_efficiency))
labels_sparse.append("".join([i if i != w else " " for i, w in zip(alignments[1], wt_sequence)]))
labels.append(seq)
labels_aligned.append(alignments[1])
return seq_alignments, labels_sparse, labels, labels_aligned
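# Note on the alignment call above: pairwise2.align.globalds performs a global alignment
# where match scores come from a substitution dictionary (here BLOSUM62) and a shared gap
# open/extend penalty pair is applied to both sequences (here -15 / -2 by default).
# Each returned alignment is a tuple (seqA_aligned, seqB_aligned, score, begin, end);
# element [1] is the variant aligned into the wild-type frame, which is what this
# function keeps.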
def aln2binary_df(wt_sequence, seq_alignments, invert=False):
"""function takes a wild-type sequence, and a list of sequence alignments from the seq_alignment function
(list should be a list of tuples, one tuple per variant: (variant sequence, its alignment to the
wild-type sequence, and its modification score)
Returns a new dataframe that is one row per variant, and one column per amino acid position. At each
position, the number 1 means that the variant sequence matches wild-type, 0 means the variant sequence
does not match wild-type
If invert, then the 1/0 assignment is switched.
DOES NOT WORK IF THERE ARE GAPS (or rather, it just assumes that a gap is not a match, it is not recorded
specially)
"""
#Making a new dataframe (seq_df) that has a column for each amino acid
indexes = [i for i in range(len(wt_sequence))]
#temporary list, 1 element for each variant
new_form = []
mod_scores = []
for variant_seq, aligned_seq, mod_eff in seq_alignments:
binary_seq = []
for s,w in zip(aligned_seq, wt_sequence):
if s == w:
binary_seq.append(0 if invert else 1)
else:
binary_seq.append(1 if invert else 0)
new_form.append(binary_seq)
mod_scores.append(mod_eff)
binary_df = pd.DataFrame(new_form, columns = indexes)
#convert modification scores into a numpy array and then into delta delta G for each variant
mod_scores = np.array(mod_scores)
return binary_df, mod_scores
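# Worked illustration of the encoding above (made-up three-residue example):
# wild type "MKT" vs. aligned variant "MAT" -> [1, 0, 1] (match, mismatch, match);
# with invert=True the same pair encodes as [0, 1, 0].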
def detection_threshold_adjust(extract_df, qqq_threshold=10000, qtof_threshold=1000):
"""Function takes a dataframe of extracts (each row is an extract) and adjusts for the noise level
of the lcms. If modified and unmodified peptide are unobserved, the extract is removed. If
unmodified or modified peptide is unobserved, its peak area is set to the detection threshold
so that the modified ratio or DDG of modification are real numbers.
Requires the following columns to be in the dataframe:
mod - the area of the peak corresponding to modified peptide in the extract
total_area - the sum of all modification state peak areas in the extract
ms - the mass spectrometer used
Adds the following columns to the dataframe:
mod_area - equal to the column 'mod'
mod_fraction - mod_area / total_area
mod_area_capped - the new mod_area, adjusted for the threshold
total_area_capped - the new total_area, adjusted for the threshold
mod_fraction_capped - mod_area_capped / total_area_capped
mod_ratio_capped - mod_area_capped / (total_area_capped - mod_area_capped)
"""
extract_df['mod_area'] = extract_df['mod']
extract_df['mod_fraction'] = extract_df['mod_area'] / extract_df['total_area']
extract_df['mod_area_capped'] = extract_df['mod_area']
extract_df['total_area_capped'] = extract_df['total_area']
#print(sub_df)
for eind, extract in extract_df.iterrows():
#if mod and total are zero, no peptide was observed, extract is removed since nothing
# can be said about modification.
if extract['mod_area'] == 0 and extract['total_area'] == 0:
extract_df.drop(eind, inplace=True)
#if mod was not observed, but unmod was, set the mod area to be the detection threshold
elif extract['mod_area'] == 0:
e_a = None
if extract['ms'] == 'qtof':
e_a = qtof_threshold
elif extract['ms'] == 'qqq':
e_a = qqq_threshold
#change the mod area, and the total area to match
extract_df.at[eind, 'mod_area_capped'] = e_a
extract_df.at[eind, 'total_area_capped'] = extract['total_area_capped'] + e_a
#if unmod was not observed, but mod was, set the unmod area to be the detection threshold
if extract['mod_area'] == extract['total_area']:
e_a = None
if extract['ms'] == 'qtof':
e_a = qtof_threshold
elif extract['ms'] == 'qqq':
e_a = qqq_threshold
extract_df.at[eind, 'total_area_capped'] = extract['total_area_capped'] + e_a
extract_df['mod_fraction_capped'] = extract_df['mod_area_capped'] / extract_df['total_area_capped']
extract_df['mod_ratio_capped'] = extract_df['mod_area_capped'] / (extract_df['total_area_capped'] -
extract_df['mod_area_capped'])
def wt_normalize(wt_plasmid, extract_df):
#Grab the wild-type amino acid sequence
wt_extracts = set(extract_df[extract_df['pep_plasmid'] == wt_plasmid]['extract'])
#Get the wild-type modification efficiency to normalize by
wt_mod_ratio = scipy.stats.gmean(extract_df[extract_df['extract'].isin(wt_extracts)]['mod_ratio_capped'])
extract_df['mod_ratio_normalized'] = extract_df['mod_ratio_capped'] / float(wt_mod_ratio)
def calculate_ddg(extract_df):
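    # The constants below are Boltzmann's constant (1.38e-23 J/K), T = 310 K and Avogadro's
    # number (6.022e23 / mol), so ddg = -RT * ln(mod_ratio_normalized), expressed in kJ/mol.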
extract_df['ddg'] = (-(1.38*10**-23*310)*np.log(extract_df['mod_ratio_normalized'])*6.022*10**23)/1000
extract_df['ddg'] = extract_df['ddg'].astype('float')
def ddgi(wt, extract_df):
"""function takes the wild-type precursor peptide plasmid number, a list of plasmid
numbers that correspond to alanine block scan mutants, and peak dataframe.
"""
detection_threshold_adjust(extract_df)
wt_normalize(wt, extract_df)
calculate_ddg(extract_df)
variants_ddgn = extract_df.groupby('sequence', group_keys=False).agg({'ddg':'mean'}).reset_index()
wt_sequence = extract_df[extract_df['pep_plasmid'] == wt]['sequence'].any()
seq_alignments, labels, _, _ = seq_alignment(wt_sequence, variants_ddgn, score='ddg')
binary_df, ddg_scores = aln2binary_df(wt_sequence, seq_alignments, invert=True)
#get individual DDGi scalars for each variant based on the number of mutated residues
ddgi_scalar = [s/d if d!=0 else 0 for
s,d in zip(ddg_scores, binary_df.sum(axis=1))]
#multiply that onto the binary_df to get the score contribution of each mutation
ddgi_scores = binary_df.multiply(ddgi_scalar, axis=0)
#replace with nan so 0 doesn't affect the mean, then take the mean to get mean ddgi per position across
# all the variants to initialize the scores
ddgi_scores = ddgi_scores.replace(0, np.nan).mean(axis=0)
moved = 1
while moved > 0.001:
moved = 0
movement = np.zeros(len(ddgi_scores))
#multiply score at each position onto mutated positions in the binary_df, then sum each variant's
# ddgi to get the full variant ddg. The difference between summed ddgi ('sum') and measured ddg ('ddg')
# is what will be fixed in the iteration.
score_df = binary_df.replace(0, np.nan).multiply(ddgi_scores, axis=1)
score_df['sum'] = score_df.sum(axis=1)
score_df['ddg'] = ddg_scores
for position in binary_df.columns:
if all(score_df[position].isnull()):
#if there are no variants with mutations at this position, then continue
continue
mutated_df = score_df[score_df[position].notnull()]
wrong_by = np.array(list(mutated_df['ddg'] - mutated_df['sum'])).mean()
#Adding a scaler to the wrong by amount that is one-third the value of the ddgi value of that
# position to discourage unlimited growth at each position.
wrong_by = wrong_by - (ddgi_scores[position]/3.0)
#move 1% of the total "wrong by" amount
to_move = wrong_by / 100.0
#sanity/bounding checks
if ddgi_scores[position]+to_move < 0:
if all(mutated_df['ddg']>0):
#don't allow a negative ddgi, if all variant ddg values are positive
to_move = 0
if ddgi_scores[position] < 0:
<reponame>DoofCoder/mesa
# -*- coding: utf-8 -*-
"""
Batchrunner
===========
A single class to manage a batch run or parameter sweep of a given model.
"""
import copy
from itertools import product, count
import pandas as pd
from tqdm import tqdm
import random
try:
from pathos.multiprocessing import ProcessPool
except ImportError:
pathos_support = False
else:
pathos_support = True
class ParameterError(TypeError):
MESSAGE = (
"parameters must map a name to a value. "
"These names did not match paramerets: {}"
)
def __init__(self, bad_names):
self.bad_names = bad_names
def __str__(self):
return self.MESSAGE.format(self.bad_names)
class VariableParameterError(ParameterError):
MESSAGE = (
"variable_parameters must map a name to a sequence of values. "
"These parameters were given with non-sequence values: {}"
)
def __init__(self, bad_names):
super().__init__(bad_names)
class FixedBatchRunner:
""" This class is instantiated with a model class, and model parameters
associated with one or more values. It is also instantiated with model and
agent-level reporters, dictionaries mapping a variable name to a function
which collects some data from the model or its agents at the end of the run
and stores it.
Note that by default, the reporters only collect data at the *end* of the
run. To get step by step data, simply have a reporter store the model's
entire DataCollector object.
"""
def __init__(
self,
model_cls,
parameters_list=None,
fixed_parameters=None,
iterations=1,
max_steps=1000,
model_reporters=None,
agent_reporters=None,
display_progress=True,
):
""" Create a new BatchRunner for a given model with the given
parameters.
Args:
model_cls: The class of model to batch-run.
parameters_list: A list of dictionaries of parameter sets.
The model will be run once with each dictionary of parameters.
For example, given parameters_list of
[{"homophily": 3, "density": 0.8, "minority_pc": 0.2},
{"homophily": 2, "density": 0.9, "minority_pc": 0.1},
{"homophily": 4, "density": 0.6, "minority_pc": 0.5}]
3 models will be run, one for each provided set of parameters.
fixed_parameters: Dictionary of parameters that stay same through
all batch runs. For example, given fixed_parameters of
{"constant_parameter": 3},
every instantiated model will be passed constant_parameter=3
as a kwarg.
iterations: The total number of times to run the model for each set
of parameters.
max_steps: Upper limit of steps above which each run will be halted
if it hasn't halted on its own.
model_reporters: The dictionary of variables to collect on each run
at the end, with variable names mapped to a function to collect
them. For example:
{"agent_count": lambda m: m.schedule.get_agent_count()}
agent_reporters: Like model_reporters, but each variable is now
collected at the level of each agent present in the model at
the end of the run.
display_progress: Display progress bar with time estimation?
"""
self.model_cls = model_cls
if parameters_list is None:
parameters_list = []
self.parameters_list = list(parameters_list)
self.fixed_parameters = fixed_parameters or {}
self._include_fixed = len(self.fixed_parameters.keys()) > 0
self.iterations = iterations
self.max_steps = max_steps
self.model_reporters = model_reporters
self.agent_reporters = agent_reporters
if self.model_reporters:
self.model_vars = {}
if self.agent_reporters:
self.agent_vars = {}
self.display_progress = display_progress
def _make_model_args(self):
"""Prepare all combinations of parameter values for `run_all`
Returns:
Tuple with the form:
(total_iterations, all_kwargs, all_param_values)
"""
total_iterations = self.iterations
all_kwargs = []
all_param_values = []
count = len(self.parameters_list)
if count:
for params in self.parameters_list:
kwargs = params.copy()
kwargs.update(self.fixed_parameters)
all_kwargs.append(kwargs)
all_param_values.append(params.values())
elif len(self.fixed_parameters):
count = 1
kwargs = self.fixed_parameters.copy()
all_kwargs.append(kwargs)
all_param_values.append(kwargs.values())
total_iterations *= count
return (total_iterations, all_kwargs, all_param_values)
def run_all(self):
""" Run the model at all parameter combinations and store results. """
run_count = count()
total_iterations, all_kwargs, all_param_values = self._make_model_args()
with tqdm(total_iterations, disable=not self.display_progress) as pbar:
for i, kwargs in enumerate(all_kwargs):
param_values = all_param_values[i]
for _ in range(self.iterations):
self.run_iteration(kwargs, param_values, next(run_count))
pbar.update()
def run_iteration(self, kwargs, param_values, run_count):
kwargscopy = copy.deepcopy(kwargs)
model = self.model_cls(**kwargscopy)
self.run_model(model)
# Collect and store results:
if param_values is not None:
model_key = tuple(param_values) + (run_count,)
else:
model_key = (run_count,)
if self.model_reporters:
self.model_vars[model_key] = self.collect_model_vars(model)
if self.agent_reporters:
agent_vars = self.collect_agent_vars(model)
for agent_id, reports in agent_vars.items():
agent_key = model_key + (agent_id,)
self.agent_vars[agent_key] = reports
return (getattr(self, "model_vars", None), getattr(self, "agent_vars", None))
def run_model(self, model):
""" Run a model object to completion, or until reaching max steps.
If your model runs in a non-standard way, this is the method to modify
in your subclass.
"""
while model.running and model.schedule.steps < self.max_steps:
model.step()
def collect_model_vars(self, model):
""" Run reporters and collect model-level variables. """
model_vars = {}
for var, reporter in self.model_reporters.items():
model_vars[var] = reporter(model)
return model_vars
def collect_agent_vars(self, model):
""" Run reporters and collect agent-level variables. """
agent_vars = {}
for agent in model.schedule._agents.values():
agent_record = {}
for var, reporter in self.agent_reporters.items():
agent_record[var] = getattr(agent, reporter)
agent_vars[agent.unique_id] = agent_record
return agent_vars
def get_model_vars_dataframe(self):
""" Generate a pandas DataFrame from the model-level variables
collected.
"""
return self._prepare_report_table(self.model_vars)
def get_agent_vars_dataframe(self):
""" Generate a pandas DataFrame from the agent-level variables
collected.
"""
return self._prepare_report_table(self.agent_vars, extra_cols=["AgentId"])
def _prepare_report_table(self, vars_dict, extra_cols=None):
"""
Creates a dataframe from collected records and sorts it using 'Run'
column as a key.
"""
extra_cols = ["Run"] + (extra_cols or [])
index_cols = set()
for params in self.parameters_list:
index_cols |= params.keys()
index_cols = list(index_cols) + extra_cols
records = []
for param_key, values in vars_dict.items():
record = dict(zip(index_cols, param_key))
record.update(values)
records.append(record)
df = pd.DataFrame(records)
rest_cols = set(df.columns) - set(index_cols)
ordered = df[index_cols + list(sorted(rest_cols))]
ordered.sort_values(by="Run", inplace=True)
if self._include_fixed:
for param in self.fixed_parameters.keys():
val = self.fixed_parameters[param]
# avoid error when val is an iterable
vallist = [val for i in range(ordered.shape[0])]
ordered[param] = vallist
return ordered
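# Hedged usage sketch (MyModel stands in for any user-defined Model subclass and is not
# part of this module):
#   batch = FixedBatchRunner(
#       MyModel,
#       parameters_list=[{"density": 0.8}, {"density": 0.9}],
#       fixed_parameters={"width": 10, "height": 10},
#       iterations=5,
#       model_reporters={"agent_count": lambda m: m.schedule.get_agent_count()},
#   )
#   batch.run_all()
#   results = batch.get_model_vars_dataframe()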
# This is kind of a useless class, but it does carry the 'source' parameters with it
class ParameterProduct:
def __init__(self, variable_parameters):
self.param_names, self.param_lists = zip(
*(copy.deepcopy(variable_parameters)).items()
)
self._product = product(*self.param_lists)
def __iter__(self):
return self
def __next__(self):
return dict(zip(self.param_names, next(self._product)))
# Roughly inspired by sklearn.model_selection.ParameterSampler. Does not handle
# distributions, only lists.
class ParameterSampler:
def __init__(self, parameter_lists, n, random_state=None):
self.param_names, self.param_lists = zip(
*(copy.deepcopy(parameter_lists)).items()
)
self.n = n
if random_state is None:
self.random_state = random.Random()
elif isinstance(random_state, int):
self.random_state = random.Random(random_state)
else:
self.random_state = random_state
self.count = 0
def __iter__(self):
return self
def __next__(self):
self.count += 1
if self.count <= self.n:
return dict(
zip(
self.param_names,
[self.random_state.choice(l) for l in self.param_lists],
)
)
raise StopIteration()
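# Illustration of the two parameter generators above (values are arbitrary):
#   list(ParameterProduct({"a": [1, 2], "b": [10, 20]}))
#   -> [{'a': 1, 'b': 10}, {'a': 1, 'b': 20}, {'a': 2, 'b': 10}, {'a': 2, 'b': 20}]
#   list(ParameterSampler({"a": [1, 2], "b": [10, 20]}, n=3, random_state=42))
#   -> three dicts, each with one randomly chosen value per parameter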
class BatchRunner(FixedBatchRunner):
""" This class is instantiated with a model class, and model parameters
associated with one or more values. It is also instantiated with model and
agent-level reporters, dictionaries mapping a variable name to a function
which collects some data from the model or its agents at the end of the run
and stores it.
Note that by default, the reporters only collect data at the *end* of the
run. To get step by step data, simply have a reporter store the model's
entire DataCollector object.
"""
def __init__(
self,
model_cls,
variable_parameters=None,
fixed_parameters=None,
iterations=1,
max_steps=1000,
model_reporters=None,
agent_reporters=None,
display_progress=True,
):
""" Create a new BatchRunner for a given model with the given
parameters.
Args:
model_cls: The class of model to batch-run.
variable_parameters: Dictionary of parameters to lists of values.
The model will be run with every combination of these parameters.
For example, given variable_parameters of
{"param_1": range(5),
"param_2": [1, 5, 10]}
models will be run with {param_1=1, param_2=1},
{param_1=2, param_2=1}, ..., {param_1=4, param_2=10}.
fixed_parameters: Dictionary of parameters that stay same through
all batch runs. For example, given fixed_parameters of
{"constant_parameter": 3},
every instantiated model will be passed constant_parameter=3
as a kwarg.
iterations: The total number of times to run the model for each
combination of parameters.
max_steps: Upper limit of steps above which each run will be halted
if it hasn't halted on its own.
model_reporters: The dictionary of variables to collect on each run
at the end, with variable names mapped to a function to collect
them. For example:
{"agent_count": lambda m: m.schedule.get_agent_count()}
agent_reporters: Like model_reporters, but each variable is now
collected at the level of each agent present in the model at
the end of the run.
display_progress: Display progress bar with time estimation?
"""
super().__init__(
model_cls,
ParameterProduct(variable_parameters),
fixed_parameters,
iterations,
max_steps,
model_reporters,
agent_reporters,
display_progress,
)
class MPSupport(Exception):
def __str__(self):
return (
"BatchRunnerMP depends on pathos, which is either not "
"installed, or the path can not be found. "
)
class BatchRunnerMP(BatchRunner):
""" Child class of BatchRunner, extended with multiprocessing support. | |
tracking efficiencies parametrization.
Args:
efficiency: Calculated tracking efficiencies.
centrality_range: Associated centrality range.
period: Data taking period.
system: Collision system.
output_info: Output info for saving figures.
Returns:
None.
"""
# Get the parameters
pt_values, eta_values, n_cent_bins, centrality_ranges = generate_parameters(system)
logger.debug(fr"Plotting efficiencies for {centrality_range.min}--{centrality_range.max}%")
fig, ax = plt.subplots(figsize = (8, 6))
im = ax.imshow(
efficiency.T,
extent = [np.min(pt_values), np.max(pt_values), np.min(eta_values), np.max(eta_values)],
interpolation = "nearest", aspect = "auto", origin = "lower",
norm = matplotlib.colors.Normalize(vmin = 0.5, vmax = 1), cmap = "viridis",
)
# Add the colorbar
fig.colorbar(im, ax = ax)
# Labels
ax.set_xlabel(fr"${labels.pt_display_label()}\:({labels.momentum_units_label_gev()})$")
ax.set_ylabel(r"$\eta$")
title = f"{period} tracking efficiency parametrization"
if system != params.CollisionSystem.pp:
title += rf", ${centrality_range.min} \textendash {centrality_range.max}\%$"
ax.set_title(title, size = 16)
# Final adjustments
fig.tight_layout()
name = f"efficiency_{period}"
if system != params.CollisionSystem.pp:
name += f"_centrality_parametrization_{centrality_range.min}_{centrality_range.max}"
plot_base.save_plot(output_info, fig, name)
# Cleanup
plt.close(fig)
def retrieve_efficiency_data(n_cent_bins: int, centrality_ranges: Dict[int, params.SelectedRange]) -> Tuple[List[Hist], List[Hist], List[Hist]]:
""" Retrieve efficiency data.
Args:
n_cent_bins: Number of centrality bins.
centrality_ranges: Map from centrality bin numbers to centrality ranges.
Returns:
(2D efficiency data, 1D pt efficiency data, 1D eta efficiency data)
"""
# Retrieve histograms
hists = histogram.get_histograms_in_list(
filename = "trains/PbPbMC/55/AnalysisResults.root",
list_name = "AliAnalysisTaskPWGJEQA_tracks_caloClusters_emcalCells_histos"
)
matched_sparse = hists["tracks_Matched"]
generator_sparse = hists["tracks_PhysPrim"]
# Retrieve the centrality dependent data
efficiency_data_1D_pt = []
efficiency_data_1D_eta = []
efficiency_data_2D = []
for dimension in ["1D", "2D"]:
for centrality_bin, centrality_range in centrality_ranges.items():
# Select in centrality
matched_sparse.GetAxis(0).SetRangeUser(centrality_range.min + epsilon, centrality_range.max - epsilon)
generator_sparse.GetAxis(0).SetRangeUser(centrality_range.min + epsilon, centrality_range.max - epsilon)
# Restrict pt range to < 10 GeV
matched_sparse.GetAxis(1).SetRangeUser(0.15, 10)
generator_sparse.GetAxis(1).SetRangeUser(0.15, 10)
if dimension == "2D":
# (pt_gen, eta_gen) - order is reversed because 2D API is backwards...
pt_gen_matched = matched_sparse.Projection(2, 1)
pt_gen_matched.SetName(f"pt_gen_matched_cent_{centrality_bin}")
# (pt_gen, eta_gen, findable)
pt_gen_2d = generator_sparse.Projection(1, 2, 4)
pt_gen_2d.SetName(f"pt_gen_2D_cent_{centrality_bin}")
# Select only findable particles and use that efficiency.
pt_gen_2d.GetZaxis().SetRange(2, 2)
pt_gen_findable = pt_gen_2d.Project3D("yx")
logger.debug(f"pt_gen_matched: {pt_gen_matched}, pt_gen_findable: {pt_gen_findable}")
efficiency_hist = pt_gen_findable.Clone()
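# Option "B" tells ROOT's TH1::Divide to treat the division as an efficiency
# (numerator is a subset of the denominator) and assign binomial errors.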
efficiency_hist.Divide(pt_gen_matched, pt_gen_findable, 1.0, 1.0, "B")
efficiency_data_2D.append(efficiency_hist)
elif dimension == "1D":
# pT 1D efficiency
# NOTE: We can't just project from the 2D efficiency. Integrating over eta would give the
# wrong values. Here, we project before we divide to get the right answer.
pt_gen_matched1D = matched_sparse.Projection(1)
pt_gen_matched1D.SetName(f"pt_gen_matched_1D_cent_{centrality_bin}")
pt_gen_1d = generator_sparse.Projection(4, 1)
pt_gen_1d.SetName(f"pt_gen_1D_cent_{centrality_bin}")
# Select only findable particles and use that efficiency.
pt_gen_1d.GetYaxis().SetRange(2, 2)
pt_gen_findable = pt_gen_1d.ProjectionX()
logger.debug(f"pt_gen_matched1D: {pt_gen_matched1D}, pt_gen_findable: {pt_gen_findable}")
efficiency_1D = pt_gen_findable.Clone()
efficiency_1D.Divide(pt_gen_matched1D, pt_gen_findable, 1.0, 1.0, "B")
efficiency_data_1D_pt.append(efficiency_1D)
# Eta 1D
eta_gen_matched_1D = matched_sparse.Projection(2)
eta_gen_matched_1D.SetName(f"eta_gen_matched_1D_cent_{centrality_bin}")
eta_gen_1D = generator_sparse.Projection(4, 2)
eta_gen_1D.SetName(f"eta_gen_1D_cent_{centrality_bin}")
# Select only findable particles and use that efficiency.
eta_gen_1D.GetYaxis().SetRange(2, 2)
eta_gen_findable = eta_gen_1D.ProjectionX()
logger.debug(f"eta_gen_matched_1D: {eta_gen_matched_1D}, eta_gen_findable: {eta_gen_findable}")
efficiency_1D = eta_gen_findable.Clone()
efficiency_1D.Divide(eta_gen_matched_1D, eta_gen_findable, 1.0, 1.0, "B")
efficiency_data_1D_eta.append(efficiency_1D)
else:
# Shouldn't ever really happen, but just for sanity.
raise RuntimeError(f"Invalid dimension {dimension}")
return efficiency_data_2D, efficiency_data_1D_pt, efficiency_data_1D_eta
def calculate_residual_2D(efficiency_data: Hist, efficiency_function: Callable[..., float],
efficiency_period: Any, centrality_bin: int) -> Tuple[np.ndarray, List[float], List[float]]:
""" Calculate residual for 2D tracking efficiency.
There is a separate 1D and 2D function for convenience. If there is no entries for a particular
bin, we set the value to NaN so that it can be ignored later when plotting.
Args:
efficiency_data: 2D efficiency data.
efficiency_function: Efficiency function.
efficiency_period: Efficiency period.
centrality_bin: Centrality bin.
Returns:
Calculated residual, pt values where it was evaluated, eta values where it was evaluated.
"""
pts = [efficiency_data.GetXaxis().GetBinCenter(x) for x in range(1, efficiency_data.GetXaxis().GetNbins() + 1)]
etas = [efficiency_data.GetYaxis().GetBinCenter(y) for y in range(1, efficiency_data.GetYaxis().GetNbins() + 1)]
residual = np.zeros(shape = (efficiency_data.GetXaxis().GetNbins(),
efficiency_data.GetYaxis().GetNbins()))
# Loop over all of the bins in the data histogram.
chi_2 = []
for pt_index, pt in enumerate(pts):
for eta_index, eta in enumerate(etas):
x = pt_index + 1
y = eta_index + 1
# Calculate the efficiency. It's calculated again here to ensure that it's evaluated at exactly
# the same location as in the data histogram.
efficiency_at_value = efficiency_function(pt, eta, centrality_bin, efficiency_period, "task_name")
# Determine the histogram value, setting it to NaN if there's no entries.
if np.abs(efficiency_data.GetBinContent(x, y)) < epsilon:
value = np.nan
else:
value = (efficiency_data.GetBinContent(x, y) - efficiency_at_value) / efficiency_at_value * 100.
# The points around the edges aren't super reliable for calculating chi squared
if pt > 1 and np.abs(eta) < 0.8:
chi_2.append(np.power(efficiency_data.GetBinContent(x, y) - efficiency_at_value, 2) / np.power(efficiency_data.GetBinError(x, y), 2))
residual[pt_index, eta_index] = value
# Check max values
logger.debug(f"min efficiency_data: {efficiency_data.GetMinimum()}, "
f"max efficiency_data: {efficiency_data.GetMaximum()}")
logger.debug(f"min residual: {np.nanmin(residual)}, max residual: {np.nanmax(residual)}")
logger.debug(f"standard mean: {np.nanmean(residual)}")
logger.debug(f"restricted mean: {np.nanmean(residual[:,np.abs(etas) < 0.8])}")
logger.debug(f"len(pts): {len(pts)}, len(etas): {len(etas)}")
# Check chi squared
chi_squared = np.sum(chi_2)
# 23 is the number of parameters (10 + 13) at any given point
ndf = len(chi_2) - 23
logger.warning("NOTE: The restricted chi squared value calculated here may not be super reliable.")
logger.info(f"Chi squared: {chi_squared}")
logger.info(f"NDF: {ndf}")
logger.info(f"chi2/ndf: {chi_squared / ndf}")
return residual, pts, etas
def plot_residual(residual: np.ndarray, pts: List[float], etas: List[float],
period: str, centrality_bin: int, centrality_ranges: Dict[int, params.SelectedRange],
output_info: analysis_objects.PlottingOutputWrapper) -> None:
""" Plot the residual between the data and the parametrization.
Args:
residual: Calculated residual.
pts: Pt values where the residual was evaluated.
etas: Eta values where the residual was evaluated.
period: Name of the data taking period.
centrality_bin: Centrality bin.
centrality_ranges: Map of centrality bins to ranges.
output_info: Output info for saving figures.
Returns:
None.
"""
fig, ax = plt.subplots(figsize = (8, 6))
im = ax.imshow(
residual.T, extent = [np.nanmin(pts), np.nanmax(pts), np.nanmin(etas), np.nanmax(etas)],
interpolation = "nearest", aspect = "auto", origin = "lower",
# An even normalization is better for the colorscheme.
# NOTE: This causes clipping at the lowest pt values, but I don't think this is a big problem.
norm = matplotlib.colors.Normalize(
#vmin = np.nanmin(residuals[centrality_bin]), vmax = np.nanmax(residuals[centrality_bin])
vmin = -40, vmax = 40
),
# This is a good diverging color scheme when it's centered at 0.
cmap = "RdBu",
)
# Add the colorbar
color_bar = fig.colorbar(im, ax = ax)
color_bar.set_label(r"(data - fit)/fit (\%)")
# Labels
ax.set_xlabel(fr"${labels.pt_display_label()}\:({labels.momentum_units_label_gev()})$")
ax.set_ylabel(r"$\eta$")
title = f"{period} tracking efficiency residuals"
if system != params.CollisionSystem.pp:
centrality_range = centrality_ranges[centrality_bin]
title += rf", ${centrality_range.min} \textendash {centrality_range.max}\%$"
ax.set_title(title, size = 16)
# Final adjustments
fig.tight_layout()
name = f"efficiency_residuals_{period}"
if system != params.CollisionSystem.pp:
centrality_range = centrality_ranges[centrality_bin]
name += f"_centrality_{centrality_range.min}_{centrality_range.max}"
plot_base.save_plot(output_info, fig, name)
# Cleanup
plt.close(fig)
def plot_2D_efficiency_data(efficiency_hist: Hist, centrality_range: params.SelectedRange, output_info: analysis_objects.PlottingOutputWrapper) -> None:
""" Plot the 2D efficiency data
Args:
efficiency_hist: Efficiency histogram.
centrality_range: Centrality range.
output_info: Output info for saving figures.
Returns:
None.
"""
X, Y, efficiency_data = histogram.get_array_from_hist2D(hist = efficiency_hist)
logger.debug(f"efficiency data min: {np.nanmin(efficiency_data)}, max: {np.nanmax(efficiency_data)}")
fig, ax = plt.subplots(figsize = (8, 6))
im = ax.imshow(
efficiency_data.T, extent = [np.nanmin(X), np.nanmax(X), np.nanmin(Y), np.nanmax(Y)],
interpolation = "nearest", aspect = "auto", origin = "lower",
norm = matplotlib.colors.Normalize(
vmin = np.nanmin(efficiency_data), vmax = np.nanmax(efficiency_data)
#vmin = 0.5, vmax = 1,
),
cmap = "viridis",
)
# Add the colorbar
color_bar = fig.colorbar(im, ax = ax)
color_bar.set_label("Efficiency")
# Labels
ax.set_xlabel(fr"${labels.pt_display_label()}\:({labels.momentum_units_label_gev()})$")
ax.set_ylabel(r"$\eta$")
title = f"{period} tracking efficiency data"
if system != params.CollisionSystem.pp:
title += rf", ${centrality_range.min} \textendash {centrality_range.max}\%$"
ax.set_title(title, size = 16)
# Final adjustments
fig.tight_layout()
name = f"efficiency_{period}"
if system != params.CollisionSystem.pp:
name += f"_centrality_{centrality_range.min}_{centrality_range.max}"
plot_base.save_plot(output_info, fig, name)
# Cleanup
plt.close(fig)
def plot_1D_pt_efficiency(efficiency: Hist, PublicUtils: T_PublicUtils, efficiency_period: Any,
centrality_bin: int, centrality_range: params.SelectedRange,
output_info: analysis_objects.PlottingOutputWrapper) -> None:
""" Plot 1D pt efficiency.
Args:
efficiency: Pt efficiency hist.
PublicUtils: Jet-H public utils class.
efficiency_period: Data taking period in the efficiency enum.
centrality_bin: int
centrality_range: Centrality range.
output_info: Output info for saving figures.
Returns:
None.
"""
# 1D efficiency as a function of pt
logger.debug(f"max efficiency_1D: {efficiency.GetMaximum()}")
h = histogram.Histogram1D.from_existing_hist(efficiency)
fig, ax = plt.subplots(figsize = (8, 6))
ax.errorbar(
h.x, h.y, yerr = h.errors,
label = "${labels.pt_display_label()}$",
color = "black", marker = ".", linestyle = "",
)
# Efficiency function
parametrization = []
for x in h.x:
parametrization.append(PublicUtils.LHC15oPtEfficiency(x, centrality_bin))
ax.plot(
h.x, parametrization,
label = "${labels.pt_display_label()}$ param.",
color = "red",
)
# Ensure that it's on a consistent axis
ax.set_ylim(0.6, 1)
# Labels
ax.set_xlabel(fr"${labels.pt_display_label()}\:({labels.momentum_units_label_gev()})$")
ax.set_ylabel(r"Efficiency")
title = f"{period} ${labels.pt_display_label()}$ tracking efficiency"
if system != params.CollisionSystem.pp:
title += rf", ${centrality_range.min} \textendash {centrality_range.max}\%$"
ax.set_title(title, size = 16)
# Final adjustments
fig.tight_layout()
name = f"efficiency_pt_{period}"
<reponame>mon4ter/aiostaticmap<filename>src/aiostaticmap/aiostaticmap.py
from asyncio import ALL_COMPLETED, CancelledError, FIRST_COMPLETED, sleep, wait
from collections import deque
from io import BytesIO
from itertools import count
from logging import getLogger
from math import atan, ceil, cos, floor, log, pi, sinh, sqrt, tan
from typing import Dict, Iterable, List, Optional, Tuple, Union
from PIL import Image, ImageDraw
from aiohttp import ClientError, ClientSession
__all__ = [
'CircleMarker',
'IconMarker',
'Line',
'Polygon',
'StaticMap'
]
logger = getLogger(__package__)
class Line:
def __init__(self, coords: Iterable[Tuple[float, float]], color: str, width: int, simplify: bool = True):
"""
Line that can be drawn in a static map
:param coords: an iterable of lon-lat pairs, e.g. ((0.0, 0.0), (175.0, 0.0), (175.0, -85.1))
:type coords: Iterable[Tuple[float, float]]
:param color: color suitable for PIL / Pillow
:type color: str
:param width: width in pixel
:type width: int
:param simplify: whether to simplify coordinates, looks less shaky, default is true
:type simplify: bool
"""
self.coords = coords
self.color = color
self.width = width
self.simplify = simplify
@property
def extent(self) -> Tuple[float, float, float, float]:
"""
calculate the coordinates of the envelope / bounding box: (min_lon, min_lat, max_lon, max_lat)
:rtype: Tuple[float, float, float, float]
"""
return (
min((c[0] for c in self.coords)),
min((c[1] for c in self.coords)),
max((c[0] for c in self.coords)),
max((c[1] for c in self.coords)),
)
class CircleMarker:
def __init__(self, coord: Tuple[float, float], color: str, width: int):
"""
:param coord: a lon-lat pair, eg (175.0, 0.0)
:type coord: Tuple[float, float]
:param color: color suitable for PIL / Pillow
:type color: str
:param width: marker width
:type width: int
"""
self.coord = coord
self.color = color
self.width = width
@property
def extent_px(self) -> Tuple[int, int, int, int]:
return (self.width,) * 4
class IconMarker:
def __init__(self, coord: Tuple[float, float], file_path: str, offset_x: int, offset_y: int):
"""
:param coord: a lon-lat pair, eg (175.0, 0.0)
:type coord: Tuple[float, float]
:param file_path: path to icon
:type file_path: str
:param offset_x: x position of the tip of the icon. relative to left bottom, in pixel
:type offset_x: int
:param offset_y: y position of the tip of the icon. relative to left bottom, in pixel
:type offset_y: int
"""
self.coord = coord
self.img = Image.open(file_path, 'r')
self.offset = (offset_x, offset_y)
@property
def extent_px(self) -> Tuple[int, int, int, int]:
w, h = self.img.size
return (
self.offset[0],
h - self.offset[1],
w - self.offset[0],
self.offset[1],
)
class Polygon:
"""
Polygon that can be drawn on map
:param coords: an iterable of lon-lat pairs, e.g. ((0.0, 0.0), (175.0, 0.0), (175.0, -85.1))
:type coords: Iterable[Tuple[float, float]]
:param fill_color: color suitable for PIL / Pillow, can be None (transparent)
:type fill_color: str
:param outline_color: color suitable for PIL / Pillow, can be None (transparent)
:type outline_color: str
:param simplify: whether to simplify coordinates, looks less shaky, default is true
:type simplify: bool
"""
def __init__(self, coords: Iterable[Tuple[float, float]], fill_color: str, outline_color: str,
simplify: bool = True):
self.coords = coords
self.fill_color = fill_color
self.outline_color = outline_color
self.simplify = simplify
@property
def extent(self) -> Tuple[float, float, float, float]:
return (
min((c[0] for c in self.coords)),
min((c[1] for c in self.coords)),
max((c[0] for c in self.coords)),
max((c[1] for c in self.coords)),
)
def _lon_to_x(lon: float, zoom: int) -> float:
"""
transform longitude to tile number
:type lon: float
:type zoom: int
:rtype: float
"""
if not (-180 <= lon <= 180):
lon = (lon + 180) % 360 - 180
return ((lon + 180.) / 360) * pow(2, zoom)
def _lat_to_y(lat: float, zoom: int) -> float:
"""
transform latitude to tile number
:type lat: float
:type zoom: int
:rtype: float
"""
if not (-90 <= lat <= 90):
lat = (lat + 90) % 180 - 90
return (1 - log(tan(lat * pi / 180) + 1 / cos(lat * pi / 180)) / pi) / 2 * pow(2, zoom)
def _y_to_lat(y: float, zoom: int) -> float:
return atan(sinh(pi * (1 - 2 * y / pow(2, zoom)))) / pi * 180
def _x_to_lon(x: float, zoom: int) -> float:
return x / pow(2, zoom) * 360.0 - 180.0
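# Quick sanity notes for the tile-coordinate helpers above:
#   _lon_to_x(0.0, 0) == 0.5 and _lat_to_y(0.0, 0) == 0.5   # centre of the single zoom-0 tile
#   _x_to_lon(_lon_to_x(13.4, 10), 10) ~= 13.4              # x <-> lon round-trips (up to float error)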
def _simplify(points: List[Tuple[float, float]], tolerance: float = 11.0) -> Iterable[Tuple[float, float]]:
"""
:param points: list of lon-lat pairs
:type points: list
:param tolerance: tolerance in pixel
:type tolerance: float
:return: list of lon-lat pairs
:rtype: list
"""
if not points:
return points
new_coords = [points[0]]
for p in points[1:-1]:
last = new_coords[-1]
dist = sqrt(pow(last[0] - p[0], 2) + pow(last[1] - p[1], 2))
if dist > tolerance:
new_coords.append(p)
new_coords.append(points[-1])
return new_coords
class StaticMap:
def __init__(self, width: int, height: int, padding_x: int = 0, padding_y: int = 0,
url_template: str = "http://a.tile.komoot.de/komoot-2/{z}/{x}/{y}.png", tile_size: int = 256,
tile_request_timeout: Optional[float] = None, headers: Optional[Dict[str, str]] = None,
reverse_y: bool = False, background_color: str = "#fff", delay_between_retries: int = 0):
"""
:param width: map width in pixel
:type width: int
:param height: map height in pixel
:type height: int
:param padding_x: min distance in pixel from map features to border of map
:type padding_x: int
:param padding_y: min distance in pixel from map features to border of map
:type padding_y: int
:param url_template: tile URL
:type url_template: str
:param tile_size: the size of the map tiles in pixel
:type tile_size: int
:param tile_request_timeout: time in seconds to wait for requesting map tiles
:type tile_request_timeout: float
:param headers: additional headers to add to http requests
:type headers: dict
:param reverse_y: tile source has TMS y origin
:type reverse_y: bool
:param background_color: Image background color, only visible when tiles are transparent
:type background_color: str
:param delay_between_retries: number of seconds to wait between retries of map tile requests
:type delay_between_retries: int
"""
self.width = width
self.height = height
self.padding = (padding_x, padding_y)
self.url_template = url_template
self.headers = headers
self.tile_size = tile_size
self.request_timeout = tile_request_timeout
self.reverse_y = reverse_y
self.background_color = background_color
# features
self.markers = []
self.lines = []
self.polygons = []
# fields that get set when map is rendered
self.x_center = 0
self.y_center = 0
self.zoom = 0
self.delay_between_retries = delay_between_retries
def add_line(self, line: Line):
"""
:param line: line to draw
:type line: Line
"""
self.lines.append(line)
def add_marker(self, marker: Union[IconMarker, CircleMarker]):
"""
:param marker: marker to draw
:type marker: IconMarker or CircleMarker
"""
self.markers.append(marker)
def add_polygon(self, polygon: Polygon):
"""
:param polygon: polygon to be drawn
:type polygon: Polygon
"""
self.polygons.append(polygon)
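# Hedged usage sketch (the tile URL and coordinates are placeholders; render() below is a
# coroutine, so it must be awaited inside an event loop):
#   m = StaticMap(400, 300, url_template="https://tiles.example.org/{z}/{x}/{y}.png")
#   m.add_marker(CircleMarker((13.4, 52.5), 'red', 12))
#   image = await m.render(zoom=12)
#   image.save('map.png')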
async def render(self, zoom: Optional[int] = None, center: Optional[Tuple[float, float]] = None) -> Image:
"""
render static map with all map features that were added to map before
:param zoom: optional zoom level, will be optimized automatically if not given.
:type zoom: int
:param center: optional center of map, will be set automatically from markers if not given.
:type center: list
:return: PIL image instance
:rtype: Image.Image
"""
if not self.lines and not self.markers and not self.polygons and not (center and zoom):
raise RuntimeError("cannot render empty map, add lines / markers / polygons first")
if zoom is None:
self.zoom = self._calculate_zoom()
else:
self.zoom = zoom
if center:
self.x_center = _lon_to_x(center[0], self.zoom)
self.y_center = _lat_to_y(center[1], self.zoom)
else:
# get extent of all lines
extent = self.determine_extent(zoom=self.zoom)
# calculate center point of map
lon_center, lat_center = (extent[0] + extent[2]) / 2, (extent[1] + extent[3]) / 2
self.x_center = _lon_to_x(lon_center, self.zoom)
self.y_center = _lat_to_y(lat_center, self.zoom)
image = Image.new('RGB', (self.width, self.height), self.background_color)
await self._draw_base_layer(image)
self._draw_features(image)
return image
def determine_extent(self, zoom: Optional[int] = None) -> Tuple[float, float, float, float]:
"""
calculate common extent of all current map features
:param zoom: optional parameter, when set extent of markers can be considered
:type zoom: int
:return: extent (min_lon, min_lat, max_lon, max_lat)
:rtype: tuple
"""
extents = [line.extent for line in self.lines]
for m in self.markers:
e = (m.coord[0], m.coord[1])
if zoom is None:
extents.append(e * 2)
continue
# consider dimension of marker
e_px = m.extent_px
x = _lon_to_x(e[0], zoom)
y = _lat_to_y(e[1], zoom)
extents += [(
_x_to_lon(x - float(e_px[0]) / self.tile_size, zoom),
_y_to_lat(y + float(e_px[1]) / self.tile_size, zoom),
_x_to_lon(x + float(e_px[2]) / self.tile_size, zoom),
_y_to_lat(y - float(e_px[3]) / self.tile_size, zoom)
)]
extents += [p.extent for p in self.polygons]
return (
    min(e[0] for e in extents),
    min(e[1] for e in extents),
    max(e[2] for e in extents),
    max(e[3] for e in extents),
)
# -*- coding: utf-8 -*-
# Name: fourth_day.py
# Authors: <NAME>
# Main interface to the fourth_day module. This package calculates the light yields and emission specta
# of organisms in the deep sea using a combination of modelling and data obtained by deep sea Cherenkov telescopes. Multiple
# calculation routines are provided.
# Imports
# Native modules
import logging
import sys
import numpy as np
import yaml
from time import time
import pandas as pd
import pickle
import os
from tqdm import tqdm
from pyDataverse.api import NativeApi, DataAccessApi
from pathlib import Path
# -----------------------------------------
# Package modules
from .config import config
from .genesis import Genesis
from .adamah import Adamah
from .current import Current
from .mc_sim import MC_sim
from .vtu_npy_handlers import vtu_npy_converter
from .lucifer import Lucifer
from .providence import Providence
# unless we put this class in __init__, __name__ will be fourth_day.fourth_day
_log = logging.getLogger("fourth_day")
class Fourth_Day(object):
"""
class: Fourth_Day
Interface to the FD package. This class
stores all methods required to run the simulation
of the bioluminescence
Parameters
----------
config : dic
Configuration dictionary for the simulation
Returns
-------
None
"""
def __init__(self, userconfig=None):
"""
function: __init__
Initializes the class FD.
Here all run parameters are set.
Parameters
----------
config : dic
Configuration dictionary for the simulation
Returns
-------
None
"""
# Inputs
if userconfig is not None:
if isinstance(userconfig, dict):
config.from_dict(userconfig)
else:
config.from_yaml(userconfig)
# Create RandomState
if config["general"]["random state seed"] is None:
_log.warning("No random state seed given, constructing new state")
rstate = np.random.RandomState()
else:
rstate = np.random.RandomState(
config["general"]["random state seed"]
)
config["runtime"] = {"random state": rstate}
# Logger
# creating file handler with debug messages
if config["general"]["enable logging"]:
fh = logging.FileHandler(
config["general"]["log file handler"], mode="w"
)
fh.setLevel(logging.DEBUG)
# console logger with a higher log level
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(config["general"]["debug level"])
# Logging formatter
fmt = "%(levelname)s: %(message)s"
fmt_with_name = "[%(name)s] " + fmt
formatter_with_name = logging.Formatter(fmt=fmt_with_name)
fh.setFormatter(formatter_with_name)
# add class name to ch only when debugging
if config["general"]["debug level"] == logging.DEBUG:
ch.setFormatter(formatter_with_name)
else:
formatter = logging.Formatter(fmt=fmt)
ch.setFormatter(formatter)
_log.addHandler(fh)
_log.addHandler(ch)
_log.setLevel(logging.DEBUG)
if not config["general"]["enable logging"]:
_log.disabled = True
_log.info('---------------------------------------------------')
_log.info('---------------------------------------------------')
_log.info('Welcome to FD!')
_log.info('This package will help you model deep sea' +
' bioluminescence!')
_log.info('---------------------------------------------------')
_log.info('---------------------------------------------------')
_log.info('Creating life...')
# Life creation
self._life = Genesis()
_log.info('Creation finished')
_log.info('---------------------------------------------------')
_log.info('---------------------------------------------------')
_log.info('Creating the world')
# The volume of interest
self._world = Adamah()
_log.info('Finished world building')
_log.info('---------------------------------------------------')
_log.info('---------------------------------------------------')
_log.info('Constructing the current')
# The current
try:
self._current = Current()
except FileNotFoundError:
_log.error('Water current not constructed!')
_log.error('Need to download the missing files!')
_log.error("Please use self.load_data()")
print('Water current not constructed!')
print('Need to download the missing files!')
print("Please use " + self.__class__.__name__ + ".load_data()")
pass
# This needs to be called explicitely vor conversion of vtu files
self._current_construction = vtu_npy_converter()
_log.info('Finished the current')
_log.info('---------------------------------------------------')
_log.info('---------------------------------------------------')
_log.info('To run the simulation use the sim method')
_log.info('---------------------------------------------------')
_log.info('---------------------------------------------------')
def sim(self):
""" Calculates the light yields depending on input
Parameters
----------
None
Returns
-------
None
"""
_log.info('---------------------------------------------------')
_log.info('---------------------------------------------------')
# A new simulation
if config["scenario"]["class"] == "New":
_log.info("Starting MC simulation")
_log.info("This may take a long time")
_log.info('Calculating photon bursts')
# The simulation
self._mc_run = MC_sim(
self._life,
self._world,
self._current
)
self._statistics = self._mc_run.statistics
self._t = (
np.arange(self._mc_run.iterations) *
config['water']['model']['time step']
)
if config["scenario"]["statistics storage"]["store"]:
_log.info("Storing data for future use")
save_string = (
config["scenario"]["statistics storage"]["location"] +
config["scenario"]["statistics storage"]["name"]
)
_log.debug("Storing under " + save_string)
_log.debug("Storing statistics")
pickle.dump(self._statistics, open(save_string + ".pkl", "wb"))
_log.debug("Storing times")
pickle.dump(self._t, open(save_string + "_t.pkl", "wb"))
_log.debug("Finished storing")
# Re-use a previous simulation
elif config["scenario"]["class"] == "Stored":
_log.info("Loading statistics from previous run")
save_string = (
config["scenario"]["statistics storage"]["location"] +
config["scenario"]["statistics storage"]["name"]
)
_log.debug("Loading from " + save_string)
_log.debug("Loading statistics")
try:
self._statistics = pickle.load(
open(save_string + ".pkl", "rb")
)
except Exception:
    raise ValueError("Statistics file not found! Check the file!")
_log.debug("Loading times")
try:
self._t = pickle.load(open(save_string + "_t.pkl", "rb"))
except Exception:
    raise ValueError("Time file not found! Check the file!")
_log.debug("Finished Loading")
# Calibration run
elif config['scenario']['class'] == 'Calibration':
_log.info("A calibration run")
_log.debug("Population simulation is not required here")
self._statistics = pd.DataFrame({'I am empty dummy' : []})
self._t = np.array(list(range(
len(config["calibration"]["light curve"][
list(config["calibration"]["light curve"].keys())[0]
])
)))
else:
raise ValueError(
    ("Unrecognized scenario class! The set class is %s. " +
     "Only New, Stored or Calibration are supported!") % (
        config["scenario"]["class"]
    )
)
_log.info('---------------------------------------------------')
_log.info('---------------------------------------------------')
if config['scenario']["detector"]["switch"]:
_log.info("Calculating photon yields at the detector")
self._lucifer = Lucifer()
self._light_yields = self._lucifer.light_bringer(
self._statistics,
self._life
)
if config['scenario']["detector"]["response"]:
_log.info("Folding detection probability")
self._providence = Providence()
tmp_measured = self._providence.detection_efficiency(
self._light_yields
)
# Converting to pandas dataframe
detector_names = [
"Detector %d" %i
for i in range(0, tmp_measured.shape[1])
]
self._measured = pd.DataFrame(
tmp_measured, columns=detector_names
)
_log.info('---------------------------------------------------')
_log.info('---------------------------------------------------')
_log.info('Finished calculation')
if config["scenario"]["class"] != "Calibration":
_log.info(self._statistics[0].keys())
_log.info('Get the results by typing self.statistics')
_log.info('Structure of dictionary:')
_log.debug(
"Dumping run settings into %s",
config["general"]["config location"],
)
if config["general"]["enable config dump"]:
with open(config["general"]["config location"], "w") as f:
yaml.dump(config, f)
_log.debug("Finished dump")
_log.info('---------------------------------------------------')
_log.info('---------------------------------------------------')
_log.info("Have a great day and until next time!")
_log.info(' /"*._ _')
_log.info(" .-*'` `*-.._.-'/")
_log.info(' < * )) , ( ')
_log.info(' `*-._`._(__.--*"`.\ ')
_log.info('---------------------------------------------------')
_log.info('---------------------------------------------------')
# Closing log
logging.shutdown()
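# Hedged usage sketch (the seed value is arbitrary; any settings not given are assumed to
# fall back to the package defaults in the config module):
#   fd = Fourth_Day({"general": {"random state seed": 1337}})
#   fd.sim()
#   results = fd.statistics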
@property
def statistics(self):
""" Getter functions for the simulation results
from the simulation
Parameters
----------
None
Returns
-------
statistics : dic
Stores the results from the simulation
"""
return self._statistics
@property
def t(self):
""" Getter functions for the simulation time
Parameters
----------
None
Returns
-------
t : np.array
The time array
"""
return (
self._t / config['water']['model']['time step']
)
@property
def light_yields(self):
""" Getter function for the light yields. The switch needs to be true
Parameters
----------
None
Returns
-------
light_yields : np.array
The light yield of the detector
Raises
------
ValueError
When the correct switches weren't set in the config
"""
if config['scenario']["light prop"]["switch"]:
return self._light_yields
else:
raise ValueError(
"Light yields not calculated! Check the config file"
)
@property
def measured(self):
""" Getter function for the measured light yields.
The switch needs to be true
Parameters
----------
None
Returns
-------
light_yields : np.array
The light yield of the detector
Raises
------
ValueError
When the correct switches weren't set in the config
"""
if config['scenario']["detector"]["response"]:
return self._measured
else:
raise ValueError(
"Detector not simulated! Check the config file"
)
@property
def wavelengths(self):
""" Getter functions for the wavelengths of the emitted light used
Parameters
----------
None
Returns
-------
statistics : dic
Stores the results from the simulation
"""
return config['advanced']['nm range']
def load_data(self):
""" Loads water current data from the server.
"""
base_url = config["data loader"]["base_url"]
DOI = config["data loader"]["DOI"]
# Setting up some apis
api = NativeApi(base_url)
data_api = DataAccessApi(base_url)
# Downloading the data. The size is approximately 4.3 GB so please make sure you have room
dataset = api.get_dataset(DOI)
# The list of all files in the data set
files_list = dataset.json()['data']['latestVersion']['files']
# The storage location, this can be customized
module_directory = os.path.abspath(os.path.dirname(__file__))
storage_location = (
module_directory + config["data loader"]["storage location"]
)
# Function to make sure the required directory structure is created
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
# Writing files
# Connection is required here.
print("Starting the download. Please note this will take a while!")
print("Depending on the current server usage this takes a few hours!")
for file in tqdm(files_list):
filename = file["dataFile"]["filename"]
file_id = file["dataFile"]["id"]
file_path = file["directoryLabel"]
storage_path = Path(storage_location + file_path + "/" + filename)
ensure_dir(storage_path)
if os.path.isfile(storage_path):
continue
else:
response = data_api.get_datafile(file_id)
with open(storage_path, "wb") as f:
f.write(response.content)
# Unpacking
# print("Unpacking the files")
# zip_files = glob.glob(storage_location + file_path + '*.zip')
# for zip_filename in zip_files:
# dir_name = os.path.splitext(zip_filename)[0]
# os.mkdir(dir_name)
# zip_handler = zipfile.ZipFile(zip_filename, "r")
# zip_handler.extractall(dir_name)
@property
def hidden_function(self):
""" You found me!
"""
print(" , ")
print(" ,, (( ,, ")
print(" ,,,, (( (( ,,, ")
print(" ( (( *( ( ")
print(" ( ( ( ( ,,,, ")
print(" (( (( ( ( ,,,, ")
print(" ,, ( ((( (( ( ")
print(" (( ( ((( ")
print(" ( ( ( ( ")
print(" /( ( ( ((( ")
print(" ,,,,,, ( ( ( (((((( ")
print(" , ( (( (( ( ")
print(" ( ( ( ((( ")
print(" (* (/ ")
print(" ,, *((( ,,,, | |
the private IP should be returned
if there is no public one
compartment_id (str): OCID of the compartment.
config (object): An OCI config object or None.
interactive (bool): Indicates whether to execute in interactive mode
raise_exceptions (bool): If set to true exceptions are raised
Returns:
The IP as string or None
"""
instance_name = kwargs.get("instance_name")
instance_id = kwargs.get("instance_id")
private_ip_fallback = kwargs.get("private_ip_fallback")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
config_profile = kwargs.get("config_profile")
interactive = kwargs.get("interactive", core.get_interactive_default())
raise_exceptions = kwargs.get("raise_exceptions", not interactive)
# Get the active config, compartment and instance
try:
config = configuration.get_current_config(
config=config, config_profile=config_profile,
interactive=interactive)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
instance_id = configuration.get_current_instance_id(
instance_id=instance_id, config=config)
import oci.exceptions
try:
instance = get_instance(
instance_name=instance_name, instance_id=instance_id,
compartment_id=compartment_id, config=config,
return_python_object=True)
if instance is None:
raise ValueError("No instance given."
"Operation cancelled.")
# Initialize the identity client
compute = core.get_oci_compute_client(config=config)
# Get all VNICs of the instance
try:
attached_vnics = oci.pagination.list_call_get_all_results(
compute.list_vnic_attachments,
instance_id=instance.id,
compartment_id=compartment_id).data
except Exception as e:
raise Exception(
"Cannot get VNICs of the given instance.\n"
f"{str(e)}")
instance_ip = None
if attached_vnics:
virtual_network = core.get_oci_virtual_network_client(
config=config)
for attached_vnic in attached_vnics:
vnic = virtual_network.get_vnic(
attached_vnic.vnic_id).data
instance_ip = vnic.public_ip
if instance_ip:
break
if not instance_ip and private_ip_fallback:
for attached_vnic in attached_vnics:
vnic = virtual_network.get_vnic(
attached_vnic.vnic_id).data
instance_ip = vnic.private_ip
if instance_ip:
break
return instance_ip
except oci.exceptions.ServiceError as e:
if raise_exceptions:
raise
print(f'Could not get the VNIC of {instance.display_name}\n'
f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
except ValueError as e:
if raise_exceptions:
raise
print(f"ERROR: {str(e)}")
@plugin_function('mds.list.computeShapes', shell=True, cli=True, web=True)
def list_shapes(**kwargs):
"""Returns a list of all available compute shapes
This list is specific for the given compartment and availability_domain
Args:
**kwargs: Additional options
Keyword Args:
limit_shapes_to (list): A list of shape names
availability_domain (str): The name of the availability_domain to use
compartment_id (str): OCID of the compartment
config (dict): An OCI config object or None
config_profile (str): The name of an OCI config profile
interactive (bool): Indicates whether to execute in interactive mode
raise_exceptions (bool): If true exceptions are raised
return_formatted (bool): If true a human readable string is returned
return_python_object (bool): Used for internal plugin calls
Returns:
A list of shapes
"""
limit_shapes_to = kwargs.get("limit_shapes_to")
availability_domain = kwargs.get("availability_domain")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
config_profile = kwargs.get("config_profile")
interactive = kwargs.get("interactive", core.get_interactive_default())
raise_exceptions = kwargs.get("raise_exceptions", not interactive)
return_formatted = kwargs.get("return_formatted", interactive)
return_python_object = kwargs.get("return_python_object", False)
try:
# Get the active config and compartment
config = configuration.get_current_config(
config=config, config_profile=config_profile,
interactive=interactive)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
import oci.util
import oci.exceptions
from mds_plugin import compartment
try:
# Get the availability_domain name
availability_domain_obj = compartment.get_availability_domain(
random_selection=not interactive,
compartment_id=compartment_id,
availability_domain=availability_domain,
config=config, interactive=interactive,
raise_exceptions=raise_exceptions,
return_formatted=return_formatted,
return_python_object=True)
if availability_domain_obj:
availability_domain = availability_domain_obj.name
if not availability_domain:
raise ValueError("No availability domain given. "
"Operation cancelled.")
# Initialize the identity client
compute_client = core.get_oci_compute_client(config=config)
# Get list of available shapes
shapes = compute_client.list_shapes(
compartment_id=compartment_id,
availability_domain=availability_domain).data
# If a list of shape names was given, filter according to that list
if limit_shapes_to is not None:
shapes = [s for s in shapes if any(
s.shape in l_s for l_s in limit_shapes_to)]
return core.return_oci_object(
oci_object=shapes,
return_formatted=return_formatted,
return_python_object=return_python_object,
format_function=format_shapes)
except oci.exceptions.ServiceError as e:
if raise_exceptions:
raise e
print(f'Could not list the shapes for this compartment.\n'
f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
except Exception as e:
if raise_exceptions:
raise e
print(f'Could not list the shapes for this compartment.\n'
f'ERROR: {str(e)}')
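# Illustrative use of list_shapes() (sketch; the OCID and shape name below are
# hypothetical):
#
#     shapes = list_shapes(
#         limit_shapes_to=["VM.Standard.E2.1.Micro"],
#         compartment_id="ocid1.compartment.oc1..example",
#         interactive=False, raise_exceptions=True,
#         return_python_object=True)
#     for s in shapes:
#         print(s.shape)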
@plugin_function('mds.get.computeShape')
def get_shape(**kwargs):
"""Gets a certain shape specified by name
The shape is specific for the given compartment and availability_domain
Args:
**kwargs: Additional options
Keyword Args:
shape_name (str): Name of the shape
limit_shapes_to (list): List of strings to limit the shape selection
availability_domain (str): The name of the availability_domain to use
compartment_id (str): OCID of the parent compartment.
config (dict): An OCI config object or None
config_profile (str): The name of an OCI config profile
interactive (bool): Indicates whether to execute in interactive mode
raise_exceptions (bool): If true exceptions are raised
return_formatted (bool): If true a human readable string is returned
return_python_object (bool): Used for internal plugin calls
Returns:
A shape object or None
"""
shape_name = kwargs.get("shape_name")
limit_shapes_to = kwargs.get("limit_shapes_to")
availability_domain = kwargs.get("availability_domain")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
config_profile = kwargs.get("config_profile")
interactive = kwargs.get("interactive", core.get_interactive_default())
raise_exceptions = kwargs.get("raise_exceptions", not interactive)
return_formatted = kwargs.get("return_formatted", interactive)
return_python_object = kwargs.get("return_python_object", False)
try:
# Get the active config and compartment
config = configuration.get_current_config(
config=config, config_profile=config_profile,
interactive=interactive)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
# Get the list of available shapes
shapes = list_shapes(
limit_shapes_to=limit_shapes_to,
compartment_id=compartment_id,
availability_domain=availability_domain,
config=config, interactive=interactive,
raise_exceptions=True,
return_python_object=True)
if not shapes:
raise Exception("No shapes found.")
# Let the user choose from the list
shape = core.prompt_for_list_item(
item_list=shapes,
prompt_caption="Please enter the name or index of the shape: ",
item_name_property="shape", given_value=shape_name,
print_list=True)
return core.return_oci_object(
oci_object=shape,
return_formatted=return_formatted,
return_python_object=return_python_object,
format_function=format_shapes)
except Exception as e:
if raise_exceptions:
raise
print(f'ERROR: {str(e)}')
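# Illustrative use of get_shape() (sketch; values are hypothetical). When
# shape_name is omitted and interactive mode is on, the user is prompted to
# pick from the listed shapes:
#
#     shape = get_shape(shape_name="VM.Standard.E2.1.Micro",
#                       compartment_id="ocid1.compartment.oc1..example",
#                       interactive=False, return_python_object=True)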
@plugin_function('mds.get.computeShapeName')
def get_shape_name(**kwargs):
"""Gets a certain shape id specified by name for the given compartment and
availability_domain
Args:
**kwargs: Additional options
Keyword Args:
shape_name (str): Name of the shape
limit_shapes_to (list): List of strings to limit the shape selection
availability_domain (str): The name of the availability_domain to use
compartment_id (str): OCID of the parent compartment.
config (dict): An OCI config object or None
config_profile (str): The name of an OCI config profile
interactive (bool): Indicates whether to execute in interactive mode
raise_exceptions (bool): If true exceptions are raised
Returns:
The shape's name (which is the shape's id) or None
"""
shape_name = kwargs.get("shape_name")
limit_shapes_to = kwargs.get("limit_shapes_to")
availability_domain = kwargs.get("availability_domain")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
config_profile = kwargs.get("config_profile")
interactive = kwargs.get("interactive", core.get_interactive_default())
raise_exceptions = kwargs.get("raise_exceptions", not interactive)
shape = get_shape(
shape_name=shape_name, limit_shapes_to=limit_shapes_to,
availability_domain=availability_domain,
compartment_id=compartment_id,
config=config, config_profile=config_profile,
interactive=interactive, raise_exceptions=raise_exceptions,
return_python_object=True)
return None if shape is None else shape.shape
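# Example (sketch; values are hypothetical). Since a shape's name doubles as
# its id, get_shape_name() simply resolves a user-supplied name or selection:
#
#     shape_id = get_shape_name(shape_name="VM.Standard.E2.1.Micro",
#                               compartment_id="ocid1.compartment.oc1..example",
#                               interactive=False)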
@plugin_function('mds.list.computeImages')
def list_images(**kwargs):
"""Gets a compute image
Args:
**kwargs: Additional options
Keyword Args:
operating_system (str): The name of the operating system
operating_system_version (str): The version of the operating system
image_caption (str): The caption of the compute image to use
shape (str): The name of the shape to use.
compartment_id (str): OCID of the parent compartment.
config (dict): An OCI config object or None
config_profile (str): The name of an OCI config profile
interactive (bool): Indicates whether to execute in interactive mode
raise_exceptions (bool): If true exceptions are raised
return_formatted (bool): If true a human readable string is returned
return_python_object (bool): Used for internal plugin calls
Returns:
A list of compute image objects
"""
operating_system = kwargs.get("operating_system")
operating_system_version = kwargs.get("operating_system_version")
image_caption = kwargs.get("image_caption")
shape = kwargs.get("shape")
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
config_profile = kwargs.get("config_profile")
interactive = kwargs.get("interactive", core.get_interactive_default())
raise_exceptions = kwargs.get("raise_exceptions", not interactive)
return_formatted = kwargs.get("return_formatted", interactive)
return_python_object = kwargs.get("return_python_object", False)
try:
# Get the active config and compartment
config = configuration.get_current_config(
config=config, config_profile=config_profile,
interactive=interactive)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
import oci.pagination
import oci.exceptions
try:
# Initialize the oci client
compute_client = core.get_oci_compute_client(config=config)
# Get list of images
images = oci.pagination.list_call_get_all_results(
compute_client.list_images,
compartment_id=compartment_id,
lifecycle_state="AVAILABLE",
shape=shape,
operating_system=operating_system,
operating_system_version=operating_system_version,
sort_by="DISPLAYNAME",
sort_order="ASC").data
# If no image_caption was given, let the user select an
# operating system first, then the actual image
if not image_caption and not operating_system and interactive:
os_list = sorted({img.operating_system for img in images})
operating_system = core.prompt_for_list_item(
item_list=os_list,
prompt_caption=(
"Please enter the name or index of the operating "
"system: "),
print_list=True)
if operating_system is None:
raise ValueError("No operation system given. "
"Operation cancelled.")
# Filter by given operating_system and sort by operating_system,
# operating_system_version DESC, time_created
images = sorted(sorted(
[i for i in images if i.operating_system == operating_system],
key=lambda img: (img.operating_system,
img.operating_system_version,
img.time_created),
reverse=True), key=lambda img: img.operating_system)
return core.return_oci_object(
oci_object=images,
return_formatted=return_formatted,
return_python_object=return_python_object,
format_function=format_compute_images)
except oci.exceptions.ServiceError as e:
if raise_exceptions:
raise
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
except Exception as e:
if raise_exceptions:
raise
print(f"ERROR: {str(e)}")
@plugin_function('mds.get.computeImage')
def get_image(**kwargs):
"""Gets a compute image
Args:
**kwargs: Additional options
Keyword Args:
operating_system (str): The name of the operating system
operating_system_version (str): The version of the operating system
image_caption (str): The caption of the compute image to use
shape (str): The name of the | |
a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError, 'emit must be implemented '\
'by Handler subclasses'
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal list
of handlers which is closed when shutdown() is called. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
del _handlers[self]
_handlerList.remove(self)
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions:
ei = sys.exc_info()
traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)
del ei
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
def __init__(self, strm=None):
"""
Initialize the handler.
If strm is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if strm is None:
strm = sys.stderr
self.stream = strm
self.formatter = None
def flush(self):
"""
Flushes the stream.
"""
self.stream.flush()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline
[N.B. this may be removed depending on feedback]. If exception
information is present, it is formatted using
traceback.print_exception and appended to the stream.
"""
try:
msg = self.format(record)
fs = "%s\n"
if not hasattr(types, "UnicodeType"): #if no unicode support...
self.stream.write(fs % msg)
else:
try:
self.stream.write(fs % msg)
except UnicodeError:
self.stream.write(fs % msg.encode("UTF-8"))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None):
"""
Open the specified file and use it as the stream for logging.
"""
if codecs is None:
encoding = None
if encoding is None:
stream = open(filename, mode)
else:
stream = codecs.open(filename, mode, encoding)
StreamHandler.__init__(self, stream)
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
def close(self):
"""
Closes the stream.
"""
self.flush()
self.stream.close()
StreamHandler.close(self)
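# Minimal usage sketch for the handler classes above (wired through the public
# logging API; Formatter and getLogger are defined elsewhere in this module):
#
#     import logging, sys
#     handler = logging.StreamHandler(sys.stdout)
#     handler.setFormatter(logging.Formatter("%(levelname)s:%(name)s:%(message)s"))
#     logging.getLogger("demo").addHandler(handler)
#     logging.getLogger("demo").warning("hello")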
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder:
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
#self.loggers = [alogger]
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
#if alogger not in self.loggers:
if not self.loggerMap.has_key(alogger):
#self.loggers.append(alogger)
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError, "logger not derived from logging.Logger: " + \
klass.__name__
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
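# Example of swapping in a custom logger class (sketch; MyLogger is a
# hypothetical subclass used only for illustration):
#
#     class MyLogger(Logger):
#         def __init__(self, name):
#             Logger.__init__(self, name)
#             self.tag = "custom"
#
#     setLoggerClass(MyLogger)
#     # subsequent getLogger() calls for new names now return MyLogger instances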
class Manager:
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = 0
self.loggerDict = {}
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
_acquireLock()
try:
if self.loggerDict.has_key(name):
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = _loggerClass(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = _loggerClass(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = string.rfind(name, ".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if not self.loggerDict.has_key(substr):
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = string.rfind(name, ".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
#The if means ... if not c.parent.name.startswith(nm)
#if string.find(c.parent.name, nm) <> 0:
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
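# Placeholder resolution, illustrated: if getLogger("a.b.c") is requested
# before getLogger("a"), the manager records a PlaceHolder at "a". When "a"
# is later created, _fixupChildren() re-points "a.b.c" (and any other children
# noted in the placeholder) at the new logger, and _fixupParents() links "a"
# back to the root.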
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = level
self.parent = None
self.propagate = 1
self.handlers = []
self.disabled = 0
def setLevel(self, level):
"""
Set the logging level of this logger.
"""
self.level = level
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, | |
be stacked together
5. smoothing - boolean, will clip extremal values according to the smoothing_array
6. normalisation - str. Will normalise according to 'range', 'std' or 'None'
Range is from -1 to 1
Std is mean = 0, std = 1
7. output_as_torch - boolean:
outputs values as torch Tensors if you want (usually yes)
"""
self.input_arr = input_arr
self.image_files = input_arr[:,0]
self.label = input_arr[:,-1]
self.rotations = rotations
self.projected = projected
self.number_of_warps = number_of_warps
self.parity = parity_choice
self.smoothing = smoothing
self.normalisation = normalisation
self.sample_only = sample_only
self.output_as_torch = output_as_torch
if self.number_of_warps != 0 and self.number_of_warps != None:
self.directory = warped_files_directory
else:
self.directory = unwarped_files_directory
def __len__(self):
L = len(self.input_arr)
if self.number_of_warps !=0:
if self.sample_only == False:
L = L*self.number_of_warps
if self.parity == 'both':
L = 2* L
return L
def __test_input_params__(self):
assert self.input_arr.shape[1] >=2, 'check your input array is a numpy array of files and labels'
assert type(self.number_of_warps) == int, "number of warps must be an integer (can be 0)"
assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both"
if self.number_of_rotations != 0:
assert self.rotation_arr != None,'Must specify a rotation file containing rotation vertex ids if rotations are non-zero'
assert isinstance(self.rotations, bool), 'rotations must be boolean'
assert self.normalisation in [None, 'none', 'std', 'range'], 'Normalisation must be either std or range'
def __genfilename__(self,idx, right):
"""
gets the appropriate file based on input parameters on PARITY and on WARPS
"""
# grab raw filename
raw_filename = self.image_files[idx]
# add parity to it. IN THE FORM OF A LIST! If requires both, will output a list of length 2
filename = []
if self.parity != 'combined':
if right == True:
filename.append(raw_filename + '_R')
else:
filename.append(raw_filename + '_L')
# if self.parity == 'left':
# filename.append(raw_filename + '_L')
#
# elif self.parity == 'both':
# coin_flip = random.randint(0,1)
# if coin_flip == 0:
# filename.append(raw_filename + '_L')
# elif coin_flip == 1:
# filename.append(raw_filename + '_R')
# right = True
if self.parity == 'combined':
filename.append(raw_filename + '_L')
filename.append(raw_filename+'_R')
# filename is now a list of the correct filenames.
# now add warps if required
if self.number_of_warps != 0:
warp_choice = str(random.randint(0,self.number_of_warps))
if warp_choice !='0':
filename = [s + '_W'+warp_choice for s in filename ]
return filename
def __getitem__(self, idx):
"""
First load the images and collect them as numpy arrays
then collect the label
then collect the metadata (though might be None)
"""
if self.parity == 'both':
T = self.__len__()//2
idx, right = idx % T, idx // T
filename = self.__genfilename__(idx, right)
else:
right = False
filename = self.__genfilename__(idx, right)
image_gifti = [nb.load(self.directory + '/'+individual_filename+'.shape.gii').darrays for individual_filename in filename]
image = []
if self.rotations == True:
rotation_choice = random.randint(0, len(test_rotation_arr)-1)
if rotation_choice !=0:
for file in image_gifti:
image.extend(item.data[test_rotation_arr[rotation_choice]] for item in file)
else:
for file in image_gifti:
image.extend(item.data for item in file)
else:
for file in image_gifti:
image.extend(item.data for item in file)
if right == True:
image = [item[reversing_arr] for item in image]
### labels
# if self.number_of_warps != 0:
#
# idx = idx%len(self.input_arr)
# label = self.label[idx]
###### metadata grabbing if necessary
label = self.label[idx]
if self.input_arr.shape[1] > 2:
self.metadata = self.input_arr[:,1:-1]
metadata = self.metadata[idx]
else:
metadata = None
if self.smoothing != False:
for i in range(len(image)):
image[i] = np.clip(image[i], lower_bound[i%len(lower_bound)].item(), upper_bound[i%len(upper_bound)].item())
# torchify if required:
if self.normalisation != None:
if self.normalisation == 'std':
for i in range(len(image)):
image[i] = ( image[i] - means[i%len(means)].item( )) / stds[i%len(stds)].item()
elif self.normalisation == 'range':
for i in range(len(image)):
image[i] = image[i] - minima[i%len(minima)].item()
image[i] = image[i] / (maxima[i%len(maxima)].item()- minima[i%len(minima)].item())
if self.output_as_torch:
image = torch.Tensor( image )
label = torch.Tensor( [label] )
if isinstance(metadata,np.ndarray):
metadata = torch.Tensor( [metadata] ).squeeze(1)
if self.projected == True:
image = griddata(xy_points, image.T, grid, 'nearest')
image = torch.Tensor(image.reshape(170,170,4)).permute(2,0,1)
if hasattr(metadata,'shape'):
sample = {'image': image, 'metadata' : metadata, 'label': label}
else:
sample = {'image': image,'label': label}
return sample
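# Illustrative training-loader setup for the dataset class defined above
# (sketch; `dataset_cls` stands for that class, and the paths and array
# contents are hypothetical):
#
#     import numpy as np
#     import torch
#     train_arr = np.array([["sub-1_ses-1", 40.2]], dtype=object)
#     train_ds = dataset_cls(train_arr,
#                            warped_files_directory="/data/warped",
#                            unwarped_files_directory="/data/unwarped",
#                            number_of_warps=0, parity_choice="both",
#                            normalisation="std")
#     loader = torch.utils.data.DataLoader(train_ds, batch_size=8, shuffle=True)
#     batch = next(iter(loader))   # dict with 'image', 'label' (and 'metadata' if given)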
class My_dHCP_Data_Graph(torch.utils.data.Dataset):
def __init__(self, input_arr, warped_files_directory, unwarped_files_directory, edges, rotations = False,
number_of_warps = 0, parity_choice = 'left', smoothing = False, normalisation = None, projected =False,
sample_only = True, output_as_torch = True):
"""
A Full Dataset for the dHCP Data. Can include warps, rotations and parity flips.
Filename style:
in the array: only 'sub-X-ses-Y'
but for the filenames themselves
Left = 'sub-X_ses-Y_L'
Right = 'sub-X_ses-Y_R'
if warped:
'sub-X_ses-Y_L_W1'
INPUT ARGS:
1. input_arr:
Numpy array size Nx2
FIRST index MUST be the filename (excluding directory AND L or R ) of MERGED nibabel files
LAST index must be the (float) label
(OPTIONAL) Middle index if size 3 (optional) is any confounding metadata (Also float, e.g scan age for predicting birth age)
2. rotations - boolean: whether or not to add rotations
3. number of warps to include - INT
NB WARPS ARE INCLUDED AS FILENAME CHANGES. WARP NUMBER X IS WRITTEN AS filename_WX
NUMBER OF WARPS CANNOT EXCEED NUMBER OF WARPS PRESENT IN FILES
4. Parity Choice (IMPORTANT!) - defines left and right-ness
If 'left' - will output ONLY LEFT
If 'both' - will randomly choose L or R
If 'combined' - will output a combined array (left first), which will eventually be read as a file with twice the number of input channels, as they will be stacked together
5. smoothing - boolean, will clip extremal values according to the smoothing_array
6. normalisation - str. Will normalise according to 'range', 'std' or 'None'
Range is from -1 to 1
Std is mean = 0, std = 1
7. output_as_torch - boolean:
outputs values as torch Tensors if you want (usually yes)
"""
self.input_arr = input_arr
self.image_files = input_arr[:,0]
self.label = input_arr[:,-1]
self.edges = edges
self.rotations = rotations
self.projected = False
self.number_of_warps = number_of_warps
self.parity = parity_choice
self.smoothing = smoothing
self.normalisation = normalisation
self.sample_only = sample_only
self.output_as_torch = output_as_torch
if self.number_of_warps != 0 and self.number_of_warps != None:
self.directory = warped_files_directory
else:
self.directory = unwarped_files_directory
def __len__(self):
L = len(self.input_arr)
if self.number_of_warps !=0:
if self.sample_only == False:
L = L*self.number_of_warps
if self.parity == 'both':
L = 2* L
return L
def __test_input_params__(self):
assert self.input_arr.shape[1] >=2, 'check your input array is a numpy array of files and labels'
assert type(self.number_of_warps) == int, "number of warps must be an integer (can be 0)"
assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both"
if self.number_of_rotations != 0:
assert self.rotation_arr != None,'Must specify a rotation file containing rotation vertex ids if rotations are non-zero'
assert isinstance(self.rotations, bool), 'rotations must be boolean'
assert self.normalisation in [None, 'none', 'std', 'range'], 'Normalisation must be either std or range'
def __genfilename__(self,idx, right):
"""
gets the appropriate file based on input parameters on PARITY and on WARPS
"""
# grab raw filename
raw_filename = self.image_files[idx]
# add parity to it. IN THE FORM OF A LIST! If requires both, will output a list of length 2
filename = []
if self.parity != 'combined':
if right == True:
filename.append(raw_filename + '_R')
else:
filename.append(raw_filename + '_L')
# if self.parity == 'left':
# filename.append(raw_filename + '_L')
#
# elif self.parity == 'both':
# coin_flip = random.randint(0,1)
# if coin_flip == 0:
# filename.append(raw_filename + '_L')
# elif | |
``SDE`` subclass and the chosen integration method.
Defaults to a process of ``numpy.nan`` along the given timeline.
See Also
--------
paths_generator
SDE
SDEs
Notes
-----
The equation to be integrated is exposed to the integration
algorithm in a standardized form, via methods ``A`` and ``dZ``
delegated to a cooperating ``SDE`` class. The latter should take care
of equation parameters, initial conditions, expected paths and shapes,
and should instantiate all necessary stochasticity sources.
The integration method is exposed as the ``next`` method to the
``paths_generator`` parent class.
If the ``getinfo`` attribute is set to ``True``, at each integration
step the following items are added to the ``itervars`` dictionary,
made available to subclasses to track the integration progress:
* ``last_t``: starting time point of the last integration step.
* ``last_dt``: time increment of the last integration step.
* ``last_x`` : starting value of the process, at time ``last_t``.
* ``last_A``: dictionary of the last computed values of the SDE
terms, at time ``last_t``.
* ``last_dZ``: dictionary of the last realized SDE stochasticity
source values, cumulated in the interval from ``last_t``
to ``last_t + last_dt``.
* ``new_x`` : computed value of the process, at time
``last_t + last_dt``.
This becomes relevant in case the output timeline is coarse
(e.g. just the initial and final time) but diagnostic information
is needed about all integration steps performed
(e.g., to track how often the process has changed sign, or to
count the number of realized jumps).
Methods
-------
A
dZ
next
euler_next
"""
def _check_integration_method(self, id):
if not hasattr(self, id + '_next'):
raise ValueError(
'unrecognized integration method {}: '
"use 'euler' "
'or provide a properly defined '
'`{}_next` integrator class method'
.format(id, id))
def _get_integration_method(self, id):
return getattr(self, id + '_next')
def __init__(self, *, paths=1,
xshape=(), wshape=(),
dtype=None, steps=None, i0=0,
info=None, getinfo=True,
method='euler'):
# setup the required integration method
self.method = method
self._check_integration_method(method)
self._method_next = self._get_integration_method(method)
# set up the paths_generator parent class
super().__init__(paths=paths,
xshape=xshape, wshape=wshape,
dtype=dtype, steps=steps, i0=i0,
info=info, getinfo=getinfo)
# integration methods
# -------------------
def euler_next(self):
"""
Euler-Maruyama integration step.
"""
iv = self.itervars
sw, xw = iv['sw'], iv['xw']
s, ds = sw[0], sw[1] - sw[0]
x = xw[0]
# compute A, dZ and make them available as attributes
A, dZ = self.A(s, x), self.dZ(s, ds)
xw[1][...] = x + sum(A.get(id, 0)*dZ[id] for id in A.keys())
if self.getinfo:
iv.update(last_t=s, last_dt=ds,
last_x=xw[0], last_A=A,
last_dZ=dZ, new_x=xw[1])
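# In formula form, the step above computes, separately for each path,
#     x(t + dt) = x(t) + sum_k A_k(t, x(t)) * dZ_k(t, dt)
# where k runs over the differentials declared by the SDE ('dt', 'dw', ...).
# For dx = (1 - x)*dt + 2*dw this reduces to
#     x_new = x + (1 - x)*dt + 2*dW,
# with dW a Wiener increment cumulated over the interval dt.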
# interface vs parent paths_generator class, and
# delegation to cooperating class methods
# ----------------------------------------------
depth = 2
def next(self):
"""Perform an integration step with the requested method."""
super().next()
self._method_next()
def exit(self, tt, xx):
"""See documentation of paths_generator.exit"""
return process(t=tt, x=xx)
# interface vs cooperating SDE class
# ----------------------------------
def A(self, t, x):
"""Value of the SDE terms at time t and process value x.
Example of expected code for the SDE ``dx = (1 - x)*dt + 2*dw(t)``::
return {
'dt': (1 - x),
'dw': 2
}
The ``SDE`` class takes care of casting user-specified
equations into this format.
"""
return {'dt': x + np.nan}
def dZ(self, t, dt):
"""Value of the SDE differentials at time t, for
time increment dt.
Example of expected code for the SDE ``dx = (1 - x)*dt + 2*dw(t)``,
where ``x`` has two components::
shape = (2, self.paths)
return {
'dt': dt,
'dw': wiener_source(vshape=2, paths=self.paths)(0, dt)
}
The ``SDE`` class takes care of instantiating user-specified
stochasticity sources and casting them into this format.
"""
return {'dt': dt + np.nan}
# ------------------------------------------------------------
# The SDE (one equation) and SDEs (multiple equations) classes
# ------------------------------------------------------------
class SDE:
"""
Class representation of a user defined Stochastic Differential
Equation (SDE), intended for subclassing.
This class aims to provide an easy to use and flexible interface,
allowing to specify user-defined SDEs and expose them in a standardized
form to the cooperating ``integrator`` class (the latter should
always follow in method resolution order). A minimal
definition of an Ornstein-Uhlenbeck process is as follows:
>>> from sdepy import SDE, integrator
>>> class my_process(SDE, integrator):
... def sde(self, t, x, theta=1., k=1., sigma=1.):
... return {'dt': k*(theta - x), 'dw': sigma}
An SDE is stated as a dictionary, containing for each differential
the value of the corresponding coefficient::
dx = f(t, x)*dt + g(t, x)*dw + h(t, x)*dj
translates to::
{'dt': f(t, x), 'dw': g(t, x), 'dj': h(t, x)}
Instances are callables with signature ``(timeline)`` that integrate
the SDE along the given timeline, using the configuration set out in
the instantiation parameters:
>>> P = my_process(x0=1, sigma=0.5, paths=100*1000, steps=100)
>>> x = P(timeline=(0., 0.5, 1.))
>>> x.shape
(3, 100000)
Subclasses can specify or customize:
the equation and its parameters (``sde`` method),
initial conditions and preprocessing (``init`` method and
``log`` attribute), shape of the values to be computed and stored
(``shapes`` method), stochastic differentials appearing in the equation
(``sources`` attribute) and their parameters and initialization (methods
``source_dt``, ``source_dw``, ``source_dn``, ``source_dj``, or any custom
``source_{id}`` method for a corresponding differential ``'{id}'``
declared in ``sources`` and used as a key in ``sde`` return values),
optional non array-like parameters (``more`` method), how to store results
at points on the requested timeline (``let`` method), and
postprocessing (``result`` method and ``log`` attribute).
Parameters
----------
paths : int
Number of paths of the process.
vshape : int or tuple of int
Shape of the values of the process.
dtype : data-type, optional
Data-type of the process. Defaults to the numpy default.
rng : numpy.random.Generator, or numpy.random.RandomState, or None
Random numbers generator used to instantiate sources.
If ``None``, defaults to
``sdepy.infrastructure.default_rng``, a global variable
initialized on import to ``numpy.random.default_rng()``.
steps : iterable, or int, or None
Specification of the time points to be touched during integration
(as accepted by a cooperating ``integrator`` class).
Default behaviour is:
- if ``None``, the simulated steps coincide with the timeline;
- if ``int``, the simulated steps touch all timeline points,
as well as ``steps`` equally spaced points between the minimum
and maximum point in the timeline;
- if iterable, the simulated steps touch all timeline points,
as well as all values in ``steps`` between the minimum and maximum
points in the timeline.
i0 : int
Index along the timeline at which the integration starts. The timeline
is assumed to be in ascending order. Initial conditions are set
at ``timeline[i0]``, the integration is performed
backwards from ``timeline[i0]`` to ``timeline[0]``, and forwards
from ``timeline[i0]`` to ``timeline[-1]``.
info : dict, optional
Diagnostic information about the integration is stored in this
dictionary and is accessible as the ``info`` attribute.
Defaults to a new empty ``dict``.
getinfo : bool
If ``True``, subclass methods ``info_begin``, ``info_next``,
``info_store``, ``info_end`` are invoked during integration.
Defaults to ``True``.
method : str
Integration method, as accepted by the ``integrator``
cooperating class.
**args : SDE-specific parameters
SDE parameters and initial conditions, as implied by the signature
of ``sde``, ``init`` and ``more`` methods, and stochasticity sources
parameters, as implied by the signature of ``source_{id}`` methods.
Each keyword should be used once (e.g. ``corr``, a ``source_dw``
parameter, should not be used as the name of a SDE parameter) or,
if repeated, must have consistent default values.
Returns
-------
process
Once instantiated as ``p``, ``p(timeline)`` performs the integration
along the given timeline, based on parameters of instantiation,
and returns the resulting process as defined by subclass methods.
Defaults to a process of ``numpy.nan`` along the given timeline.
See Also
--------
paths_generator
integrator
SDEs
Notes
-----
Custom stochastic differentials used in the SDE should be recognized,
and treated appropriately, by the chosen integration method. This
may require customization of the ``next`` method of the ``integrator``
class.
All named initialization parameters (``paths``, ``steps`` etc.)
are stored as attributes.
Notes on SDE-specific parameters:
* ``init`` parameters are converted to arrays via ``np.asarray``.
* ``sde`` and source quantitative parameters may be array-like,
or time dependent with signature ``(t)``.
* both are converted to arrays | |
None:
plabel = label
else:
plabel = 'Flux'
""" Plot the spectrum """
drawstyle = 'steps'
ls = "%s" % linestyle
self.ax1.plot(self['wav'], flux, color, linestyle=ls,
drawstyle=drawstyle, label=plabel, **kwargs)
self.ax1.tick_params(labelsize=fontsize)
self.ax1.set_xlabel(xlabel, fontsize=fontsize)
"""
Plot the model, given as an astropy.modeling model, if requested
"""
if model is not None:
fmod = model(wav)
self.ax1.plot(wav, fmod, color=modcolor)
""" Plot the RMS spectrum if the variance spectrum exists """
if var is not None:
rms = np.sqrt(var) + rmsoffset
if rmsls is None:
if docolor:
rlinestyle = 'solid'
rlw = 1
else:
rlinestyle = 'dotted'
rlw = 2
else:
rlinestyle = '%s' % rmsls
self.ax1.plot(self['wav'], rms, rmscolor, linestyle=rlinestyle,
drawstyle=drawstyle, label='RMS', lw=rlw)
""" More plot labels """
self.ax1.set_ylabel(ylabel, fontsize=fontsize)
if title is not None:
self.ax1.set_title(title)
if(wav[0] > wav[-1]):
self.ax1.set_xlim([wav[-1], wav[0]])
else:
self.ax1.set_xlim([wav[0], wav[-1]])
# print(self['wav'][0], self['wav'][-1])
""" Plot the atmospheric transmission if requested """
if add_atm_trans:
self.plot_atm_trans(mode=mode, ls=atmls, scale=atmscale,
offset=atmoffset, fwhm=atmfwhm,
modfile=atmmodfile)
return self.fig
# -----------------------------------------------------------------------
def plot_sky(self, color='g', linestyle='-', xlabel='default',
title='default', label='default', verbose=True):
"""
Plots the sky spectrum, if the appropriate information is available.
There are two ways in which the sky spectrum may be stored:
1. In the sky variable
2. In the var variable if the sky variable is not available. In this
case the rms spectrum (i.e., the square root of the variance
spectrum) should be a decent representation of the sky if the
object was not super bright.
"""
""" Check to make sure that there is a spectrum to plot """
if self.sky:
skyflux = self['sky']
skylab = 'Sky spectrum'
elif 'var' in self.colnames:
skyflux = np.sqrt(self['var'])
print('Using RMS spectrum as a proxy for the sky spectrum')
skylab = 'RMS spectrum'
else:
if verbose:
print('')
print('Cannot plot sky spectrum.')
print('The spectrum must have either a sky column or'
' a variance column')
print('')
raise KeyError('No sky or variance information in the spectrum')
""" Set up for plotting """
ls = '%s' % linestyle
ds = 'steps'
if xlabel == 'default':
xlab = 'Wavelength (Angstroms)'
else:
xlab = xlabel
if label == 'default':
lab = skylab
else:
lab = label
""" Plot the spectrum """
if lab is not None:
plt.plot(self['wav'], skyflux, ls=ls, ds=ds, color=color,
label=lab)
else:
plt.plot(self['wav'], skyflux, ls=ls, ds=ds, color=color)
if title == 'default':
plttitle = 'Sky Spectrum'
else:
plttitle = title
if title is not None:
plt.title(plttitle)
plt.xlabel(xlab)
plt.ylabel('Relative flux')
if(self['wav'][0] > self['wav'][-1]):
plt.xlim([self['wav'][-1], self['wav'][0]])
else:
plt.xlim([self['wav'][0], self['wav'][-1]])
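# Illustrative call (assuming `myspec` is an instance of this spectrum class
# whose table contains 'wav' and either a 'sky' or a 'var' column):
#
#     myspec.plot_sky(color='b', title='Night-sky spectrum')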
# -----------------------------------------------------------------------
def smooth(self, filtwidth, smfunc='boxcar', mode='input', doplot=True,
outfile=None, **kwargs):
"""
Smooths the spectrum using the requested function. The smoothing
function is set by the smfunc parameter. Available functions are:
'boxcar' - the default value and only available value for now
Simple usage example, for a spectrum called "myspec":
myspec.smooth(7)
This will do a variance weighted boxcar smoothing with a 7-pixel
smoothing width, if the variance spectrum is available. Otherwise
it will do a uniformly-weighted boxcar smoothing
"""
""" Select the spectrum to be smoothed """
spec = self.select_mode(mode)
"""
Smooth the spectrum using the requested smoothing function
[For now, only boxcar smoothing is allowed]
The smoothing functions are inherited from the Data1d class
"""
if smfunc == 'boxcar':
ysmooth, varsmooth = spec.smooth_boxcar(filtwidth, verbose=False)
else:
print('')
print('For smoothing, smfunc can only be one of the following:')
print(" 'boxcar'")
print('')
raise ValueError
""" Put the smoothed spectrum into a Data1d container """
if varsmooth is None:
names = self.names0[:-1]
else:
names = self.names0
self.smospec = df.Data1d(self['wav'], ysmooth, varsmooth,
names=names)
""" Plot the smoothed spectrum if desired """
if doplot:
self.plot(mode='smooth', **kwargs)
# -----------------------------------------------------------------------
def load_linelist(self, linefile='default'):
linefmt = [('name', 'S10'), ('wavelength', float), ('label', 'S10'),
('dxlab', float), ('type', int), ('plot', bool)]
lineinfo = np.array([
('He II', 256.32, 'HeII', 0.0, 2, True),
('He II', 303.78, 'HeII', 0.0, 2, True),
('He I', 537.03, 'HeI', 0.0, 2, True),
('He I', 584.33, 'HeI', 0.0, 2, True),
('Ly-gamma', 972.54, r'Ly$\gamma$', 0.0, 3, True),
('Ly-beta', 1025.7, r'Ly$\beta$', 0.0, 3, True),
('O VI', 1035.07, 'OVI', 0.0, 2, True),
("Ly-alpha", 1216., r"Ly$\alpha$", 0.0, 4, True),
('N V', 1240.1, 'NV', 0.0, 4, True),
('Si II', 1263.3, 'SiII', 0.0, 4, True),
('O I', 1303.5, 'OI', 0.0, 4, True),
('C II', 1334.53, 'CII', 0.0, 4, True),
('Si IV', 1396.7, 'SiIV', 0.0, 2, False),
('Si IV/O IV]', 1400, 'SiIV/OIV]', 0.0, 4, True),
('O IV]', 1402.2, 'OIV]', 0.0, 2, False),
('N IV]', 1486.5, 'NIV]', 0.0, 4, True),
("C IV", 1549.1, "C IV", 0.0, 4, True),
('He II ', 1640.5, 'HeII', 0.0, 2, True),
('O III]', 1663.0, 'OIII]', 0.0, 2, True),
('N III]', 1750.4, 'NIII]', 0.0, 2, True),
('Al III', 1858.7, 'AlIII', 0.0, 4, True),
('Si III]', 1892.0, 'SiIII]', 0.0, 4, True),
("C III]", 1908.7, "C III]", 100.0, 4, True),
('Fe III', 2075, 'FeIII', 0.0, 0, True),
('C II] ', 2326, 'CII]', 0.0, 2, True),
('Fe II', 2375, 'FeII', -10.0, 0, True),
('Fe II', 2383, 'FeII', 20.0, 0, True),
('[Ne IV]', 2423, '[NeIV]', 0.0, 2, True),
('Fe II', 2587, 'FeII', -10.0, 0, True),
('Fe II', 2600, 'FeII', 20.0, 0, True),
('Fe II', 2750.3, 'FeII', 0.0, 0, False),
('Mg II', 2799.8, 'MgII', 0.0, 4, True),
('Mg II', 2795.53, 'MgII', 0.0, 0, False),
('Mg II', 2802.71, 'MgII', 0.0, 0, True),
('Mg I', 2852, 'MgI', 0.0, 0, True),
('O III', 3047, 'OIII', 0.0, 2, True),
('O III ', 3133, 'OIII', 0.0, 2, True),
('[Ne V]', 3346, '[NeV]', 0.0, 2, True),
('[Ne V]', 3426, '[NeV]', 0.0, 2, True),
('[O II]', 3726.03, '[O II]', 0.0, 4, True),
('[O II]', 3728.82, '[O II]', 0.0, 4, False),
('H-kappa', 3750, r'H$\kappa$', 0.0, 0, True),
('[Fe VII]', 3761.4, '[FeVII]', 0.0, 0, True),
('H-iota', 3770, r'H$\iota$', 0.0, 0, True),
('H-theta', 3797, r'H$\theta$', 0.0, 0, True),
('H-eta', 3835, r'H$\eta$', 0.0, 0, True),
('CN bandhd', 3883, 'CN', 0.0, 0, True),
('CaII K', 3933.67, 'CaII K', 0.0, 0, True),
('CaII H', 3968.47, 'CaII H', 0.0, 0, True),
('H-delta', 4101, r'H$\delta$', 0.0, 1, True),
('G-band', 4305, 'G-band', 0.0, 0, True),
('H-gamma', 4340, r'H$\gamma$', 0.0, 1, True),
('Fe4383', 4383, 'Fe4383', 0.0, 0, True),
('Ca4455', 4455, 'Ca4455', 0.0, 0, True),
('Fe4531', 4531, 'Fe4531', 0.0, 0, True),
('H-beta', 4861, r'H$\beta$', 0.0, 3, True),
('[O III]', 4962., '[O III]', 0.0, 4, False),
('[O III]', 5007., '[O III]', 0.0, 4, True),
('Mg I (b)', 5176, 'Mg b', 0.0, 0, True),
('[N I]', 5199., '[N I]', 0.0, 2, True),
('HeI', 5876., 'He I', 0.0, 2, True),
('Na I (D)', 5889.95, ' ', 0.0, 0, True),
('Na I (D)', 5895.92, 'Na D ', 0.0, 0, True),
('[O I]', 6300., '[O I]', 0.0, 2, True),
('[N II]', 6548., '[N II]', 0.0, 2, False),
('H-alpha', 6562.8, r'H$\alpha$', 0.0, 3, True),
('[N II]', 6583.5, '[N II]', 0.0, 2, False),
('[S II]', 6716.4, '[S II]', 0.0, 2, False),
('[S II]', 6730.8, '[S II]', 0.0, 2, True),
('Ca triplet', 8498.03, 'CaII', 0.0, 0, True),
('Ca triplet', 8542.09, 'CaII', 0.0, 0, True),
('Ca triplet', 8662.14, 'CaII', 0.0, 0, True),
('[S III]', 9069, '[S III]', 0.0, 2, True),
('[S III]', 9532, '[S III]', 0.0, 2, True),
('Pa-delta', 10050., r'Pa$\delta$', 0.0, 3, True),
('Pa-gamma', 10940., r'Pa$\gamma$', 0.0, 3, True),
('Pa-beta', 12820., r'Pa$\beta$', 0.0, 3, True),
('Br-12', 15552., 'Br12', 0.0, 3, True),
('Br-11', 15696., 'Br11', 0.0, 3, True),
('Br-10', 15876., 'Br10', 0.0, 3, True),
('Br-9', 16105., 'Br9', 0.0, 3, True),
('Br-8', 16400., 'Br8', 0.0, 3, True),
('Br-7', 16800., 'Br7', 0.0, 3, True),
('Br-6', 17357., 'Br6', 0.0, 3, True),
('Br-5', 18170., 'Br5', 0.0, 3, True),
('Pa-alpha', 18750., r'Pa$\alpha$', 0.0, 3, True),
('Br-delta', 19440., r'Br$\delta$', 0.0, 3, True),
('Br-gamma', 21660., r'Br$\gamma$', 0.0, 3, True),
('Br-beta', 26250., r'Br$\beta$', 0.0, 3, True),
('Br-alpha', 40510., r'Br$\alpha$', 0.0, 3, True),
], dtype=linefmt)
self.lineinfo = Table(lineinfo)
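# Example of querying the line list once load_linelist() has run (sketch;
# uses the astropy Table stored in self.lineinfo with the columns defined
# above):
#
#     myspec.load_linelist()
#     to_plot = myspec.lineinfo[myspec.lineinfo['plot']]
#     print(to_plot['name', 'wavelength'])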
# -----------------------------------------------------------------------
def draw_tick(self, lam, linetype, ticklen, axes, usesmooth=False,
labww=20., tickfac=0.75):
"""
This method is called by mark_lines
It labels a spectral line by drawing a tickmark above or below the
spectrum at the given wavelength (lam).
"""
""" Choose whether to use the smoothed flux or not """
if usesmooth:
flux = self.smospec['flux']
| |
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class TestTransaction(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.firestore_v1.transaction import Transaction
return Transaction
def _make_one(self, *args, **kwargs):
klass = self._get_target_class()
return klass(*args, **kwargs)
def test_constructor_defaults(self):
from google.cloud.firestore_v1.transaction import MAX_ATTEMPTS
transaction = self._make_one(mock.sentinel.client)
self.assertIs(transaction._client, mock.sentinel.client)
self.assertEqual(transaction._write_pbs, [])
self.assertEqual(transaction._max_attempts, MAX_ATTEMPTS)
self.assertFalse(transaction._read_only)
self.assertIsNone(transaction._id)
def test_constructor_explicit(self):
transaction = self._make_one(
mock.sentinel.client, max_attempts=10, read_only=True
)
self.assertIs(transaction._client, mock.sentinel.client)
self.assertEqual(transaction._write_pbs, [])
self.assertEqual(transaction._max_attempts, 10)
self.assertTrue(transaction._read_only)
self.assertIsNone(transaction._id)
def test__add_write_pbs_failure(self):
from google.cloud.firestore_v1.transaction import _WRITE_READ_ONLY
batch = self._make_one(mock.sentinel.client, read_only=True)
self.assertEqual(batch._write_pbs, [])
with self.assertRaises(ValueError) as exc_info:
batch._add_write_pbs([mock.sentinel.write])
self.assertEqual(exc_info.exception.args, (_WRITE_READ_ONLY,))
self.assertEqual(batch._write_pbs, [])
def test__add_write_pbs(self):
batch = self._make_one(mock.sentinel.client)
self.assertEqual(batch._write_pbs, [])
batch._add_write_pbs([mock.sentinel.write])
self.assertEqual(batch._write_pbs, [mock.sentinel.write])
def test__options_protobuf_read_only(self):
from google.cloud.firestore_v1.proto import common_pb2
transaction = self._make_one(mock.sentinel.client, read_only=True)
options_pb = transaction._options_protobuf(None)
expected_pb = common_pb2.TransactionOptions(
read_only=common_pb2.TransactionOptions.ReadOnly()
)
self.assertEqual(options_pb, expected_pb)
def test__options_protobuf_read_only_retry(self):
from google.cloud.firestore_v1.transaction import _CANT_RETRY_READ_ONLY
transaction = self._make_one(mock.sentinel.client, read_only=True)
retry_id = b"illuminate"
with self.assertRaises(ValueError) as exc_info:
transaction._options_protobuf(retry_id)
self.assertEqual(exc_info.exception.args, (_CANT_RETRY_READ_ONLY,))
def test__options_protobuf_read_write(self):
transaction = self._make_one(mock.sentinel.client)
options_pb = transaction._options_protobuf(None)
self.assertIsNone(options_pb)
def test__options_protobuf_on_retry(self):
from google.cloud.firestore_v1.proto import common_pb2
transaction = self._make_one(mock.sentinel.client)
retry_id = b"hocus-pocus"
options_pb = transaction._options_protobuf(retry_id)
expected_pb = common_pb2.TransactionOptions(
read_write=common_pb2.TransactionOptions.ReadWrite(
retry_transaction=retry_id
)
)
self.assertEqual(options_pb, expected_pb)
def test_in_progress_property(self):
transaction = self._make_one(mock.sentinel.client)
self.assertFalse(transaction.in_progress)
transaction._id = b"not-none-bites"
self.assertTrue(transaction.in_progress)
def test_id_property(self):
transaction = self._make_one(mock.sentinel.client)
transaction._id = mock.sentinel.eye_dee
self.assertIs(transaction.id, mock.sentinel.eye_dee)
def test__begin(self):
from google.cloud.firestore_v1.gapic import firestore_client
from google.cloud.firestore_v1.proto import firestore_pb2
# Create a minimal fake GAPIC with a dummy result.
firestore_api = mock.create_autospec(
firestore_client.FirestoreClient, instance=True
)
txn_id = b"to-begin"
response = firestore_pb2.BeginTransactionResponse(transaction=txn_id)
firestore_api.begin_transaction.return_value = response
# Attach the fake GAPIC to a real client.
client = _make_client()
client._firestore_api_internal = firestore_api
# Actually make a transaction and ``begin()`` it.
transaction = self._make_one(client)
self.assertIsNone(transaction._id)
ret_val = transaction._begin()
self.assertIsNone(ret_val)
self.assertEqual(transaction._id, txn_id)
# Verify the called mock.
firestore_api.begin_transaction.assert_called_once_with(
client._database_string, options_=None, metadata=client._rpc_metadata
)
def test__begin_failure(self):
from google.cloud.firestore_v1.transaction import _CANT_BEGIN
client = _make_client()
transaction = self._make_one(client)
transaction._id = b"not-none"
with self.assertRaises(ValueError) as exc_info:
transaction._begin()
err_msg = _CANT_BEGIN.format(transaction._id)
self.assertEqual(exc_info.exception.args, (err_msg,))
def test__clean_up(self):
transaction = self._make_one(mock.sentinel.client)
transaction._write_pbs.extend(
[mock.sentinel.write_pb1, mock.sentinel.write_pb2]
)
transaction._id = b"not-this-time-my-friend"
ret_val = transaction._clean_up()
self.assertIsNone(ret_val)
self.assertEqual(transaction._write_pbs, [])
self.assertIsNone(transaction._id)
def test__rollback(self):
from google.protobuf import empty_pb2
from google.cloud.firestore_v1.gapic import firestore_client
# Create a minimal fake GAPIC with a dummy result.
firestore_api = mock.create_autospec(
firestore_client.FirestoreClient, instance=True
)
firestore_api.rollback.return_value = empty_pb2.Empty()
# Attach the fake GAPIC to a real client.
client = _make_client()
client._firestore_api_internal = firestore_api
# Actually make a transaction and roll it back.
transaction = self._make_one(client)
txn_id = b"to-be-r\x00lled"
transaction._id = txn_id
ret_val = transaction._rollback()
self.assertIsNone(ret_val)
self.assertIsNone(transaction._id)
# Verify the called mock.
firestore_api.rollback.assert_called_once_with(
client._database_string, txn_id, metadata=client._rpc_metadata
)
def test__rollback_not_allowed(self):
from google.cloud.firestore_v1.transaction import _CANT_ROLLBACK
client = _make_client()
transaction = self._make_one(client)
self.assertIsNone(transaction._id)
with self.assertRaises(ValueError) as exc_info:
transaction._rollback()
self.assertEqual(exc_info.exception.args, (_CANT_ROLLBACK,))
def test__rollback_failure(self):
from google.api_core import exceptions
from google.cloud.firestore_v1.gapic import firestore_client
# Create a minimal fake GAPIC with a dummy failure.
firestore_api = mock.create_autospec(
firestore_client.FirestoreClient, instance=True
)
exc = exceptions.InternalServerError("Fire during rollback.")
firestore_api.rollback.side_effect = exc
# Attach the fake GAPIC to a real client.
client = _make_client()
client._firestore_api_internal = firestore_api
# Actually make a transaction and roll it back.
transaction = self._make_one(client)
txn_id = b"roll-bad-server"
transaction._id = txn_id
with self.assertRaises(exceptions.InternalServerError) as exc_info:
transaction._rollback()
self.assertIs(exc_info.exception, exc)
self.assertIsNone(transaction._id)
self.assertEqual(transaction._write_pbs, [])
# Verify the called mock.
firestore_api.rollback.assert_called_once_with(
client._database_string, txn_id, metadata=client._rpc_metadata
)
def test__commit(self):
from google.cloud.firestore_v1.gapic import firestore_client
from google.cloud.firestore_v1.proto import firestore_pb2
from google.cloud.firestore_v1.proto import write_pb2
# Create a minimal fake GAPIC with a dummy result.
firestore_api = mock.create_autospec(
firestore_client.FirestoreClient, instance=True
)
commit_response = firestore_pb2.CommitResponse(
write_results=[write_pb2.WriteResult()]
)
firestore_api.commit.return_value = commit_response
# Attach the fake GAPIC to a real client.
client = _make_client("phone-joe")
client._firestore_api_internal = firestore_api
# Actually make a transaction with some mutations and call _commit().
transaction = self._make_one(client)
txn_id = b"under-over-thru-woods"
transaction._id = txn_id
document = client.document("zap", "galaxy", "ship", "space")
transaction.set(document, {"apple": 4.5})
write_pbs = transaction._write_pbs[::]
write_results = transaction._commit()
self.assertEqual(write_results, list(commit_response.write_results))
# Make sure transaction has no more "changes".
self.assertIsNone(transaction._id)
self.assertEqual(transaction._write_pbs, [])
# Verify the mocks.
firestore_api.commit.assert_called_once_with(
client._database_string,
write_pbs,
transaction=txn_id,
metadata=client._rpc_metadata,
)
def test__commit_not_allowed(self):
from google.cloud.firestore_v1.transaction import _CANT_COMMIT
transaction = self._make_one(mock.sentinel.client)
self.assertIsNone(transaction._id)
with self.assertRaises(ValueError) as exc_info:
transaction._commit()
self.assertEqual(exc_info.exception.args, (_CANT_COMMIT,))
def test__commit_failure(self):
from google.api_core import exceptions
from google.cloud.firestore_v1.gapic import firestore_client
# Create a minimal fake GAPIC with a dummy failure.
firestore_api = mock.create_autospec(
firestore_client.FirestoreClient, instance=True
)
exc = exceptions.InternalServerError("Fire during commit.")
firestore_api.commit.side_effect = exc
# Attach the fake GAPIC to a real client.
client = _make_client()
client._firestore_api_internal = firestore_api
# Actually make a transaction with some mutations and call _commit().
transaction = self._make_one(client)
txn_id = b"beep-fail-commit"
transaction._id = txn_id
transaction.create(client.document("up", "down"), {"water": 1.0})
transaction.delete(client.document("up", "left"))
write_pbs = transaction._write_pbs[::]
with self.assertRaises(exceptions.InternalServerError) as exc_info:
transaction._commit()
self.assertIs(exc_info.exception, exc)
self.assertEqual(transaction._id, txn_id)
self.assertEqual(transaction._write_pbs, write_pbs)
# Verify the called mock.
firestore_api.commit.assert_called_once_with(
client._database_string,
write_pbs,
transaction=txn_id,
metadata=client._rpc_metadata,
)
class Test_Transactional(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.firestore_v1.transaction import _Transactional
return _Transactional
def _make_one(self, *args, **kwargs):
klass = self._get_target_class()
return klass(*args, **kwargs)
def test_constructor(self):
wrapped = self._make_one(mock.sentinel.callable_)
self.assertIs(wrapped.to_wrap, mock.sentinel.callable_)
self.assertIsNone(wrapped.current_id)
self.assertIsNone(wrapped.retry_id)
def test__reset(self):
wrapped = self._make_one(mock.sentinel.callable_)
wrapped.current_id = b"not-none"
wrapped.retry_id = b"also-not"
ret_val = wrapped._reset()
self.assertIsNone(ret_val)
self.assertIsNone(wrapped.current_id)
self.assertIsNone(wrapped.retry_id)
def test__pre_commit_success(self):
to_wrap = mock.Mock(return_value=mock.sentinel.result, spec=[])
wrapped = self._make_one(to_wrap)
txn_id = b"totes-began"
transaction = _make_transaction(txn_id)
result = wrapped._pre_commit(transaction, "pos", key="word")
self.assertIs(result, mock.sentinel.result)
self.assertEqual(transaction._id, txn_id)
self.assertEqual(wrapped.current_id, txn_id)
self.assertEqual(wrapped.retry_id, txn_id)
# Verify mocks.
to_wrap.assert_called_once_with(transaction, "pos", key="word")
firestore_api = transaction._client._firestore_api
firestore_api.begin_transaction.assert_called_once_with(
transaction._client._database_string,
options_=None,
metadata=transaction._client._rpc_metadata,
)
firestore_api.rollback.assert_not_called()
firestore_api.commit.assert_not_called()
def test__pre_commit_retry_id_already_set_success(self):
from google.cloud.firestore_v1.proto import common_pb2
to_wrap = mock.Mock(return_value=mock.sentinel.result, spec=[])
wrapped = self._make_one(to_wrap)
txn_id1 = b"already-set"
wrapped.retry_id = txn_id1
txn_id2 = b"ok-here-too"
transaction = _make_transaction(txn_id2)
result = wrapped._pre_commit(transaction)
self.assertIs(result, mock.sentinel.result)
self.assertEqual(transaction._id, txn_id2)
self.assertEqual(wrapped.current_id, txn_id2)
self.assertEqual(wrapped.retry_id, txn_id1)
# Verify mocks.
to_wrap.assert_called_once_with(transaction)
firestore_api = transaction._client._firestore_api
options_ = common_pb2.TransactionOptions(
read_write=common_pb2.TransactionOptions.ReadWrite(
retry_transaction=txn_id1
)
)
firestore_api.begin_transaction.assert_called_once_with(
transaction._client._database_string,
options_=options_,
metadata=transaction._client._rpc_metadata,
)
firestore_api.rollback.assert_not_called()
firestore_api.commit.assert_not_called()
def test__pre_commit_failure(self):
exc = RuntimeError("Nope not today.")
to_wrap = mock.Mock(side_effect=exc, spec=[])
wrapped = self._make_one(to_wrap)
txn_id = b"gotta-fail"
transaction = _make_transaction(txn_id)
with self.assertRaises(RuntimeError) as exc_info:
wrapped._pre_commit(transaction, 10, 20)
self.assertIs(exc_info.exception, exc)
self.assertIsNone(transaction._id)
self.assertEqual(wrapped.current_id, txn_id)
self.assertEqual(wrapped.retry_id, txn_id)
# Verify mocks.
to_wrap.assert_called_once_with(transaction, 10, 20)
firestore_api = transaction._client._firestore_api
firestore_api.begin_transaction.assert_called_once_with(
transaction._client._database_string,
options_=None,
metadata=transaction._client._rpc_metadata,
)
firestore_api.rollback.assert_called_once_with(
transaction._client._database_string,
txn_id,
metadata=transaction._client._rpc_metadata,
)
firestore_api.commit.assert_not_called()
def test__pre_commit_failure_with_rollback_failure(self):
from google.api_core import exceptions
exc1 = ValueError("I will not be only failure.")
to_wrap = mock.Mock(side_effect=exc1, spec=[])
wrapped = self._make_one(to_wrap)
txn_id = b"both-will-fail"
transaction = _make_transaction(txn_id)
# Actually force the ``rollback`` to fail as well.
exc2 = exceptions.InternalServerError("Rollback blues.")
firestore_api = transaction._client._firestore_api
firestore_api.rollback.side_effect = exc2
# Try to ``_pre_commit``
with self.assertRaises(exceptions.InternalServerError) as exc_info:
wrapped._pre_commit(transaction, a="b", c="zebra")
self.assertIs(exc_info.exception, exc2)
self.assertIsNone(transaction._id)
self.assertEqual(wrapped.current_id, txn_id)
self.assertEqual(wrapped.retry_id, txn_id)
# Verify mocks.
to_wrap.assert_called_once_with(transaction, a="b", c="zebra")
firestore_api.begin_transaction.assert_called_once_with(
transaction._client._database_string,
options_=None,
metadata=transaction._client._rpc_metadata,
)
firestore_api.rollback.assert_called_once_with(
transaction._client._database_string,
txn_id,
metadata=transaction._client._rpc_metadata,
)
firestore_api.commit.assert_not_called()
def test__maybe_commit_success(self):
wrapped = self._make_one(mock.sentinel.callable_)
txn_id = b"nyet"
transaction = _make_transaction(txn_id)
transaction._id = txn_id # We won't call ``begin()``.
succeeded = wrapped._maybe_commit(transaction)
self.assertTrue(succeeded)
# On success, _id is reset.
self.assertIsNone(transaction._id)
# Verify mocks.
firestore_api = transaction._client._firestore_api
firestore_api.begin_transaction.assert_not_called()
firestore_api.rollback.assert_not_called()
firestore_api.commit.assert_called_once_with(
transaction._client._database_string,
[],
transaction=txn_id,
metadata=transaction._client._rpc_metadata,
)
def test__maybe_commit_failure_read_only(self):
from google.api_core import exceptions
wrapped = self._make_one(mock.sentinel.callable_)
txn_id = b"failed"
transaction = _make_transaction(txn_id, read_only=True)
transaction._id = txn_id # We won't call ``begin()``.
wrapped.current_id = txn_id # We won't call ``_pre_commit()``.
wrapped.retry_id = txn_id # We won't call ``_pre_commit()``.
# Actually force the ``commit`` to fail (use ABORTED, but cannot
# retry since read-only).
exc = exceptions.Aborted("Read-only did a bad.")
firestore_api = transaction._client._firestore_api
firestore_api.commit.side_effect = exc
with self.assertRaises(exceptions.Aborted) as exc_info:
wrapped._maybe_commit(transaction)
self.assertIs(exc_info.exception, exc)
self.assertEqual(transaction._id, txn_id)
self.assertEqual(wrapped.current_id, txn_id)
self.assertEqual(wrapped.retry_id, txn_id)
# Verify mocks.
firestore_api.begin_transaction.assert_not_called()
firestore_api.rollback.assert_not_called()
firestore_api.commit.assert_called_once_with(
transaction._client._database_string,
[],
transaction=txn_id,
metadata=transaction._client._rpc_metadata,
)
def test__maybe_commit_failure_can_retry(self):
from google.api_core import exceptions
wrapped = self._make_one(mock.sentinel.callable_)
txn_id = b"failed-but-retry"
transaction = _make_transaction(txn_id)
transaction._id = txn_id # We won't call ``begin()``.
wrapped.current_id = txn_id # We won't call ``_pre_commit()``.
wrapped.retry_id = txn_id # We won't call ``_pre_commit()``.
# Actually force the ``commit`` to fail.
exc = exceptions.Aborted("Read-write did a bad.")
firestore_api = transaction._client._firestore_api
firestore_api.commit.side_effect = exc
succeeded = wrapped._maybe_commit(transaction)
self.assertFalse(succeeded)
        self.assertEqual(transaction._id, txn_id)
            one_bend2 = bend.Bend(2, 3, 4)  # theta_B
one_tors = tors.Tors(1, 2, 3, 4) # tau
self._D_on[4] = False # NO phi_A
self._D_on[5] = False # NO phi_B
elif nA == 2 and nB == 1:
one_stre = stre.Stre(2, 3) # RAB
one_bend = bend.Bend(1, 2, 3) # theta_A
self._D_on[2] = False # NO theta_B
self._D_on[3] = False # NO tau
self._D_on[4] = False # NO phi_A
self._D_on[5] = False # NO phi_B
elif nA == 1 and nB == 2:
one_stre = stre.Stre(2, 3) # RAB
            self._D_on[1] = False  # NO theta_A
one_bend2 = bend.Bend(2, 3, 4) # theta_B
self._D_on[3] = False # NO tau
self._D_on[4] = False # NO phi_A
self._D_on[5] = False # NO phi_B
elif nA == 1 and nB == 1:
one_stre = stre.Stre(2, 3) # RAB
self._D_on[1] = False
self._D_on[2] = False
self._D_on[3] = False
self._D_on[4] = False
self._D_on[5] = False
else:
raise OptError("No reference points present")
if op.Params.interfrag_dist_inv:
one_stre.inverse = True
if one_stre is not None:
self.pseudo_frag.intcos.append(one_stre)
if one_bend is not None:
self.pseudo_frag.intcos.append(one_bend)
if one_bend2 is not None:
self.pseudo_frag.intcos.append(one_bend2)
if one_tors is not None:
self.pseudo_frag.intcos.append(one_tors)
if one_tors2 is not None:
self.pseudo_frag.intcos.append(one_tors2)
if one_tors3 is not None:
self.pseudo_frag.intcos.append(one_tors3)
def __str__(self):
s = "\tFragment %s\n" % self._A_lbl
for i,r in enumerate(self._Arefs):
s += "\t\tReference point %d\n" % (3-i)
s += r.__str__()
s += "\n\tFragment %s\n" % self._B_lbl
for i,r in enumerate(self._Brefs):
s += "\t\tReference point %d\n" % (i+4)
s += r.__str__()
s += self._pseudo_frag.__str__()
return s
@property
def n_arefs(self): # number of reference points
return len(self._Arefs)
@property
def n_brefs(self):
return len(self._Brefs)
@property
def a_idx(self):
return self._A_idx
@property
def b_idx(self):
return self._B_idx
@property
def pseudo_frag(self):
        return self._pseudo_frag
def d_on(self, i):
return self._D_on[i]
def set_ref_geom(self, ArefGeom, BrefGeom): # for debugging
self.pseudo_frag.geom[:] = 0.0
for i, row in enumerate(ArefGeom):
self.pseudo_frag.geom[2-i][:] = row
for i, row in enumerate(BrefGeom):
self.pseudo_frag.geom[3+i][:] = row
return
def q(self):
return [i.q(self.pseudo_frag.geom) for i in self._pseudo_frag.intcos]
def q_show(self):
return [i.q_show(self.pseudo_frag.geom) for i in self._pseudo_frag.intcos]
def update_reference_geometry(self, Ageom, Bgeom):
self.pseudo_frag.geom[:] = 0.0
for i, rp in enumerate(self._Arefs): # First reference atom goes in 3rd row!
for w in rp:
self.pseudo_frag.geom[2-i][:] += w.weight * Ageom[w.atom]
for i, rp in enumerate(self._Brefs):
for w in rp:
self.pseudo_frag.geom[3+i][:] += w.weight * Bgeom[w.atom]
return
def get_ref_geom(self):
return self.pseudo_frag.geom.copy()
def a_ref_geom(self): # returns reference atoms in order dA1, dA2, dA3
x = np.zeros((self.n_arefs, 3))
for i in range(self.n_arefs):
x[i] = self.pseudo_frag.geom[2-i]
return x
def b_ref_geom(self):
x = np.zeros((self.n_brefs, 3))
x[:] = self.pseudo_frag.geom[3:(3 + self.n_brefs)]
return x
def active_labels(self):
lbls = []
# to add later
# if (inter_frag->coords.simples[0]->is_inverse_stre()): # lbl[0] += "1/R_AB"
# if (inter_frag->coords.simples[i]->is_frozen()) lbl[i] = "*";
if self.d_on(0): lbls.append("R_AB")
if self.d_on(1): lbls.append("theta_A")
if self.d_on(2): lbls.append("theta_B")
if self.d_on(3): lbls.append("tau")
if self.d_on(4): lbls.append("phi_A")
if self.d_on(5): lbls.append("phi_B")
return lbls
@property
def num_intcos(self):
return len(self.pseudo_frag.intcos)
def orient_fragment(self, Ageom_in, Bgeom_in, q_target, printCoords=False):
""" orient_fragment() moves the geometry of fragment B so that the
interfragment coordinates have the given values
        Parameters
        ----------
        Ageom_in : numpy array
            geometry of fragment A
        Bgeom_in : numpy array
            geometry of fragment B
        q_target : numpy array float[6]
            target values for the active interfragment coordinates
        printCoords : bool
            if True, log the coordinate step taken

        Returns
        -------
        numpy array
            new geometry for fragment B
"""
logger = logging.getLogger(__name__)
nArefs = self.n_arefs # of ref pts on A to worry about
nBrefs = self.n_brefs # of ref pts on B to worry about
self.update_reference_geometry(Ageom_in, Bgeom_in)
q_orig = self.q()
if len(q_orig) != len(q_target):
raise OptError("Unexpected number of target interfragment coordinates")
dq_target = q_target - q_orig
# These values are arbitrary; used to determine ref. point locations
# below only if a fragment doesn't have 3 of them.
R_AB, theta_A, theta_B, tau, phi_A, phi_B = 1.0, 0.8, 0.8, 0.8, 0.8, 0.8
cnt = 0
active_lbls = self.active_labels()
if self._D_on[0]:
R_AB = q_target[cnt]
cnt += 1
if self._D_on[1]:
theta_A = q_target[cnt]
cnt += 1
if self._D_on[2]:
theta_B = q_target[cnt]
cnt += 1
if self._D_on[3]:
tau = q_target[cnt]
cnt += 1
if self._D_on[4]:
phi_A = q_target[cnt]
cnt += 1
if self._D_on[5]:
phi_B = q_target[cnt]
cnt += 1
if printCoords:
s = "\t---DimerFrag coordinates between fragments %s and %s" % (self._A_lbl, self._B_lbl)
s += "\t---Internal Coordinate Step in ANG or DEG, aJ/ANG or AJ/DEG ---"
s += "\t ----------------------------------------------------------------------"
s += "\t Coordinate Previous Change Target"
s += "\t ---------- -------- ----- ------"
for i in range(self.num_intcos):
c = self.pseudo_frag.intcos[i].q_show_factor # for printing to Angstroms/degrees
s += "\t%-20s%12.5f%13.5f%13.5f" % (active_lbls[i],
c * q_orig[i], c * dq_target[i], c * q_target[i])
s += "\t ----------------------------------------------------------------------"
logger.info(s)
# From here on, for simplicity we include 3 reference atom rows, even if we don't
# have 3 reference atoms. So, stick SOMETHING non-linear/non-0 in for
# non-specified reference atoms so zmat function works.
ref_A = np.zeros( (3,3) )
ref_A[0:nArefs] = self.a_ref_geom()
#print("ref_A:")
#print(ref_A)
if nArefs < 3: # pad ref_A with arbitrary entries
for xyz in range(3):
ref_A[2,xyz] = xyz+1
if nArefs < 2:
for xyz in range(3):
ref_A[1,xyz] = xyz+2
ref_B = np.zeros( (3,3) )
ref_B[0:nBrefs] = self.b_ref_geom()
ref_B_final = np.zeros( (nBrefs,3) )
# compute B1-B2 distance, B2-B3 distance, and B1-B2-B3 angle
if nBrefs>1:
R_B1B2 = v3d.dist(ref_B[0], ref_B[1])
if nBrefs>2:
R_B2B3 = v3d.dist(ref_B[1], ref_B[2])
B_angle = v3d.angle(ref_B[0], ref_B[1], ref_B[2])
# Determine target location of reference pts for B in coordinate system of A
ref_B_final[0][:] = orient.zmat_point(
ref_A[2], ref_A[1], ref_A[0], R_AB, theta_A, phi_A)
if nBrefs>1:
ref_B_final[1][:] = orient.zmat_point(
ref_A[1], ref_A[0], ref_B_final[0], R_B1B2, theta_B, tau)
if nBrefs>2:
ref_B_final[2][:] = orient.zmat_point(
ref_A[0], ref_B_final[0], ref_B_final[1], R_B2B3, B_angle, phi_B)
#print("ref_B_final target:")
#print(ref_B_final)
# Can use to test if target reference points give correct values.
#self.set_ref_geom(ref_A, ref_B_final)
#print(self._pseudo_frag)
nBatoms = len(Bgeom_in)
Bgeom = Bgeom_in.copy()
self.update_reference_geometry(Ageom_in, Bgeom)
ref_B[0:nBrefs] = self.b_ref_geom()
# 1) Translate B->geom to place B1 in correct location.
for i in range(nBatoms):
Bgeom[i] += ref_B_final[0] - ref_B[0]
# recompute B reference points
self.update_reference_geometry(Ageom_in, Bgeom)
ref_B[0:nBrefs] = self.b_ref_geom()
#print("ref_B after positioning B1:")
#print(ref_B)
# 2) Move fragment B to place reference point B2 in correct location
if nBrefs>1:
# Determine rotational angle and axis
e12 = v3d.eAB(ref_B[0], ref_B[1]) # normalized B1 -> B2
e12b = v3d.eAB(ref_B[0], ref_B_final[1]) # normalized B1 -> B2target
B_angle = acos(v3d.dot(e12b,e12))
if fabs(B_angle) > 1.0e-7:
erot = v3d.cross(e12,e12b)
# Move B to put B1 at origin
for i in range(nBatoms):
Bgeom[i] -= ref_B[0]
# Rotate B
orient.rotate_vector(erot, B_angle, Bgeom)
# Move B back to coordinate system of A
for i in range(nBatoms):
Bgeom[i] += ref_B[0]
# recompute current B reference points
self.update_reference_geometry(Ageom_in, Bgeom)
ref_B[0:nBrefs] = self.b_ref_geom()
#print("ref_B after positioning B2:");
#print(ref_B)
# 3) Move fragment B to place reference point B3 in correct location.
if nBrefs==3:
# Determine rotational angle and axis
erot = v3d.eAB(ref_B[0], ref_B[1]) # B1 -> B2 is rotation axis
# Calculate B3-B1-B2-B3' torsion angle
B_angle = v3d.tors(ref_B[2], ref_B[0], ref_B[1], ref_B_final[2])
if fabs(B_angle) > 1.0e-10:
# Move B to put B2 at origin
for i in range(nBatoms):
Bgeom[i] -= ref_B[1]
orient.rotate_vector(erot, B_angle, Bgeom)
# Translate B1 back to coordinate system of A
for i in range(nBatoms):
Bgeom[i] += ref_B[1]
self.update_reference_geometry(Ageom_in, Bgeom)
ref_B[0:nBrefs] = self.b_ref_geom()
#print("ref_B after positioning B3:");
#print(ref_B)
# Check to see if desired reference points were obtained.
tval = 0.0
for i in range(nBrefs):
tval += np.dot(ref_B[i] - ref_B_final[i], ref_B[i] - ref_B_final[i])
tval = sqrt(tval)
#print("orient_fragment: |x_target - x_achieved| = %.2e" % tval)
return Bgeom
# end def orient_fragment()
def compute_b_mat(self, A_geom, B_geom, Bmat_in, A_xyz_off=None, B_xyz_off=None):
""" This function adds interfragment rows into an existing B matrix.
B is (internals, Cartesians). Often, 6 x 3*(Natoms).
Parameters
----------
A_geom : numpy array
geometry of fragment A, array is (A atoms,3)
B_geom : numpy array that is (B atoms,3)
geometry of fragment B, array is (B atoms,3)
        Bmat_in : numpy array
provided B matrix
intco_off : int
index of first row of Bmatrix to start writing the interfragment rows.
        A_xyz_off : int
Column of B matrix at which the cartesian coordinates of atoms in fragment A begin.
            Needed since the columns for fragment A may not start at the beginning of the full B matrix.
def test_id_k007_id_k007_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : keyref fields
locating an attribute refers to a key locating an attribute
"""
assert_bindings(
schema="msData/identityConstraint/idK007.xsd",
instance="msData/identityConstraint/idK007.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_k006_id_k006_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref fields
locating an element refers to a unique locating an attribute
"""
assert_bindings(
schema="msData/identityConstraint/idK006.xsd",
instance="msData/identityConstraint/idK006.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_k005_id_k005_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref fields
locating an element refers to a key locating an attribute
"""
assert_bindings(
schema="msData/identityConstraint/idK005.xsd",
instance="msData/identityConstraint/idK005.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_k004_id_k004_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref fields
locating an attribute subject to normalization refers to a key
locating an element that is not normalized , postnormalization values
are the same
"""
assert_bindings(
schema="msData/identityConstraint/idK004.xsd",
instance="msData/identityConstraint/idK004.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_k002_id_k002_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref fields
locating an attribute refers to a unique locating an element
"""
assert_bindings(
schema="msData/identityConstraint/idK002.xsd",
instance="msData/identityConstraint/idK002.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_k001_id_k001_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref fields
locating an attribute refers to a key locating an element
"""
assert_bindings(
schema="msData/identityConstraint/idK001.xsd",
instance="msData/identityConstraint/idK001.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h034_id_h034_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, field points to attribute used only within xsi:type
substitution
"""
assert_bindings(
schema="msData/identityConstraint/idH034.xsd",
instance="msData/identityConstraint/idH034.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h032_id_h032_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, field points to attribute from redefined schema
"""
assert_bindings(
schema="msData/identityConstraint/idH032.xsd",
instance="msData/identityConstraint/idH032.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h031a_id_h031_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, field points to attribute from imported schema Resolution
pending decision about issue 5780 against the 1.0 spec.
"""
assert_bindings(
schema="msData/identityConstraint/idH031.xsd",
instance="msData/identityConstraint/idH031.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h031_id_h031_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, field points to attribute from imported schema Resolution
pending decision about issue 5780 against the 1.0 spec.
"""
assert_bindings(
schema="msData/identityConstraint/idH031.xsd",
instance="msData/identityConstraint/idH031.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h030_id_h030_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, field points to attribute within targetNamespace
"""
assert_bindings(
schema="msData/identityConstraint/idH030.xsd",
instance="msData/identityConstraint/idH030.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h029_id_h029_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, field points to element redefined by use of
substitutionGroup
"""
assert_bindings(
schema="msData/identityConstraint/idH029.xsd",
instance="msData/identityConstraint/idH029.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h028_id_h028_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, field points to element from redefined schema
"""
assert_bindings(
schema="msData/identityConstraint/idH028.xsd",
instance="msData/identityConstraint/idH028.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h027_id_h027_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, field points to element from imported schema
"""
assert_bindings(
schema="msData/identityConstraint/idH027.xsd",
instance="msData/identityConstraint/idH027.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h026_id_h026_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, field points to element outside targetNamespace in non-
imported schema
"""
assert_bindings(
schema="msData/identityConstraint/idH026.xsd",
instance="msData/identityConstraint/idH026.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h025_id_h025_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, field points to element within targetNamespace
"""
assert_bindings(
schema="msData/identityConstraint/idH025.xsd",
instance="msData/identityConstraint/idH025.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h024_id_h024_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, selector points to element redefined by use of
substitutionGroup
"""
assert_bindings(
schema="msData/identityConstraint/idH024.xsd",
instance="msData/identityConstraint/idH024.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h023_id_h023_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, selector points to element from redefined schema
"""
assert_bindings(
schema="msData/identityConstraint/idH023.xsd",
instance="msData/identityConstraint/idH023.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h022_id_h022_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, selector points to element from imported schema
"""
assert_bindings(
schema="msData/identityConstraint/idH022.xsd",
instance="msData/identityConstraint/idH022.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h021_id_h021_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, selector points to element outside of targetNamespace in a
non-imported schema
"""
assert_bindings(
schema="msData/identityConstraint/idH021.xsd",
instance="msData/identityConstraint/idH021.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h020_id_h020_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, selector points to element within targetNamespace
"""
assert_bindings(
schema="msData/identityConstraint/idH020.xsd",
instance="msData/identityConstraint/idH020.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h019_id_h019_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, qualified node set defined with the use of multiple field
schema elements pointing to a mix of elements and attributes
"""
assert_bindings(
schema="msData/identityConstraint/idH019.xsd",
instance="msData/identityConstraint/idH019.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h018_id_h018_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, qualified node set defined with the use of multiple field
schema elements pointing to only attributes
"""
assert_bindings(
schema="msData/identityConstraint/idH018.xsd",
instance="msData/identityConstraint/idH018.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h017_id_h017_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, qualified node set defined with the use of multiple field
schema elements pointing to only elements
"""
assert_bindings(
schema="msData/identityConstraint/idH017.xsd",
instance="msData/identityConstraint/idH017.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h016_id_h016_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, instance member (a)=test, string; instance member (b)='',
string defined using fixed='test'
"""
assert_bindings(
schema="msData/identityConstraint/idH016.xsd",
instance="msData/identityConstraint/idH016.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h015_id_h015_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, instance member (a)=test, string; instance member (b)='',
string defined using default='test'
"""
assert_bindings(
schema="msData/identityConstraint/idH015.xsd",
instance="msData/identityConstraint/idH015.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h009_id_h009_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, keyref refers to unique element
"""
assert_bindings(
schema="msData/identityConstraint/idH009.xsd",
instance="msData/identityConstraint/idH009.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h008_id_h008_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, keyref refers to existing key element defined post to keyref
"""
assert_bindings(
schema="msData/identityConstraint/idH008.xsd",
instance="msData/identityConstraint/idH008.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h007_id_h007_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, keyref refers to existing key element defined prior to
keyref
"""
assert_bindings(
schema="msData/identityConstraint/idH007.xsd",
instance="msData/identityConstraint/idH007.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h004_id_h004_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, field element evaluates to a node-set with only one member
"""
assert_bindings(
schema="msData/identityConstraint/idH004.xsd",
instance="msData/identityConstraint/idH004.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h003_id_h003_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, field element evaluates to an empty-node set
"""
assert_bindings(
schema="msData/identityConstraint/idH003.xsd",
instance="msData/identityConstraint/idH003.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_h001_id_h001_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : keyref
category, selector element evaluates to a node-set
"""
assert_bindings(
schema="msData/identityConstraint/idH001.xsd",
instance="msData/identityConstraint/idH001.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_g030_id_g030_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : key category,
field points to attribute used only within xsi:type substitution
"""
assert_bindings(
schema="msData/identityConstraint/idG030.xsd",
instance="msData/identityConstraint/idG030.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_g029_id_g029_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : key category,
field points to attribute outside targetNamespace in non-imported
schema
"""
assert_bindings(
schema="msData/identityConstraint/idG029.xsd",
instance="msData/identityConstraint/idG029.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_g028_id_g028_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : key category,
field points to attribute from redefined schema
"""
assert_bindings(
schema="msData/identityConstraint/idG028.xsd",
instance="msData/identityConstraint/idG028.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_g027_id_g027_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : key category,
field points to attribute from imported schema
"""
assert_bindings(
schema="msData/identityConstraint/idG027.xsd",
instance="msData/identityConstraint/idG027.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_g026_id_g026_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : key category,
field points to attribute within targetNamespace
"""
assert_bindings(
schema="msData/identityConstraint/idG026.xsd",
instance="msData/identityConstraint/idG026.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_g024_id_g024_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : key category,
field points to element from redefined schema
"""
assert_bindings(
schema="msData/identityConstraint/idG024.xsd",
instance="msData/identityConstraint/idG024.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_g023_id_g023_v(mode, save_output, output_format):
"""
TEST :Identity-constraint Definition Schema Component : key category,
field points to element from imported schema
"""
assert_bindings(
schema="msData/identityConstraint/idG023.xsd",
instance="msData/identityConstraint/idG023.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_id_g022_id_g022_v(mode, save_output, output_format):
"""
    TEST :Identity-constraint Definition
# -*- coding: utf-8 -*-
""" Hi-C analysis of ChIP-seq after multi-targeting Cas9
"""
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "0.9"
__maintainer__ = "<NAME>"
import h5py
import pysam
import os
import re
import numpy as np
from scipy import sparse, stats
import matplotlib.pyplot as plt
from . import chipseq as c
from . import mtss as m
CHR = ['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9', 'chr10', 'chr11',
'chr12', 'chr13', 'chr14', 'chr15', 'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21',
'chr22', 'chrX']
def get_span_width(generator, genome, f_test, f_ctrl, outpath, w_rad=10000, skip=5000, false_ct=10):
""" Determine width of 53BP1 or gH2AX enrichment by comparing test sample to negative control
sample. Extending from the cut site at fixed intervals, enrichment width on either end of
the cut site is defined to be where there are under 'false_ct' evaluations of negative
control sample enrichment that is higher than test sample enrichment.
:param generator: generator that outputs target sites in the following tuple format:
( span_rs = region string in "chr1:100-200" format, centered at cut site
cut_i = cut site (int)
sen_i = sense/antisense (+/- str)
pam_i = PAM (str)
gui_i = genomic target sequence (str)
mis_i = # mismatches (int)
guide = intended target sequence (str)
:param genome: [genome name, path to genome with .fa extension], i.e. ['hg38', path/to/hg38.fa]
:param f_test: test sample BAM file
:param f_ctrl: negative control BAM file
:param outpath: path to output BED file (.bed extension omitted)
:param w_rad: radius of window of enrichment evaluation at each site
:param skip: number of bases to skip per evaluation of enrichment over control
:param false_ct: maximum number of times control sample has higher enrichment than test sample
for a region to be included in enrichment span width centered at the cut site
"""
hgsize = c.get_genome_dict(genome[0])
outbed = open(outpath + ".bed", 'w')
outnpy = []
bam_test, bam_ctrl = pysam.AlignmentFile(f_test, 'rb'), pysam.AlignmentFile(f_ctrl, 'rb')
for rs, cut, sen, pam, gui, mis, guide in generator:
[chr_i, sta_i, end_i] = re.split('[:-]', rs)
index_neg, count_neg, width_neg = 0, 0, 0
while True:
index_neg -= skip
ind_lt_neg, ind_rt_neg = cut + index_neg - w_rad, cut + index_neg + w_rad
if ind_lt_neg >= 0:
rs_neg = chr_i + ":" + str(ind_lt_neg) + "-" + str(ind_rt_neg)
rpm_neg_test = bam_test.count(region=rs_neg) / bam_test.mapped * 1E6
rpm_neg_ctrl = bam_ctrl.count(region=rs_neg) / bam_ctrl.mapped * 1E6
if rpm_neg_test <= rpm_neg_ctrl:
count_neg += 1
if count_neg >= false_ct:
break
else:
break
index_pos, count_pos, width_pos = 0, 0, 0
while True:
index_pos += skip
ind_lt_pos, ind_rt_pos = cut + index_pos - w_rad, cut + index_pos + w_rad
if ind_rt_pos <= hgsize[chr_i]:
rs_pos = chr_i + ":" + str(ind_lt_pos) + "-" + str(ind_rt_pos)
rpm_pos_test = bam_test.count(region=rs_pos) / bam_test.mapped * 1E6
rpm_pos_ctrl = bam_ctrl.count(region=rs_pos) / bam_ctrl.mapped * 1E6
if rpm_pos_test <= rpm_pos_ctrl:
count_pos += 1
if count_pos >= false_ct:
break
else:
break
span_rs = chr_i + ":" + str(cut + index_neg) + "-" + str(cut + index_pos)
enrich_test = bam_test.count(region=span_rs) / bam_test.mapped * 1E6
enrich_ctrl = bam_ctrl.count(region=span_rs) / bam_ctrl.mapped * 1E6
bed_1, bed_2, bed_3 = chr_i, str(cut + index_neg), str(cut + index_pos)
bed_4, bed_5, bed_6 = chr_i + ":" + str(cut), "%0.6f" % (enrich_test - enrich_ctrl), "+"
bed_7, bed_8 = str(sta_i), str(end_i)
outbed.write("\t".join([bed_1, bed_2, bed_3, bed_4, bed_5, bed_6, bed_7, bed_8]) + "\n")
outnpy.append([rs, chr_i, str(cut), bed_2, bed_3, str(int(bed_3) - int(bed_2))])
header = "region_string, chromosome, cut coordinate, " \
"start coordinate, end coordinate, span width"
np.savetxt(outpath + ".csv", np.asarray(outnpy), fmt='%s', delimiter=',', header=header)
bam_test.close()
bam_ctrl.close()
outbed.close()
def gen_filter_dist(generator, distance):
""" Given cut site generator, filter for cut sites that are separated by at least 'distance'.
:param generator: generator that outputs target sites in the following tuple format:
( span_rs = region string in "chr1:100-200" format, centered at cut site
cut_i = cut site (int)
sen_i = sense/antisense (+/- str)
pam_i = PAM (str)
gui_i = genomic target sequence (str)
mis_i = # mismatches (int)
guide = intended target sequence (str)
:param distance: minimum acceptable distance between adjacent cut sites on the same chromosome
    :yield: target-site tuples, with cut sites too close to adjacent ones filtered out
"""
chr_prev = None
gen_prev, gen_curr = None, None
for gen in generator:
rs, cut = gen[0], gen[1]
chr_i = re.split('[:-]', rs)[0]
if chr_prev != chr_i:
if gen_prev and gen_prev[2]:
yield gen_prev[0]
chr_prev = chr_i
gen_prev = (gen, cut, True)
else:
if cut - gen_prev[1] > distance:
if gen_prev[2]:
yield gen_prev[0]
gen_prev = (gen, cut, True)
else:
gen_prev = (gen, cut, False)
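# Sketch (values are placeholders): keep only target sites at least 2 Mb away from
# their nearest neighbour before computing per-site profiles.
#
#     spaced_gen = gen_filter_dist(target_gen, 2000000)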
def rao_fourCseq_gen(generator, path_out, path_hic, kb_resolution, radius):
""" Determine 4C-seq profiles using Hi-C data from Rao et al., 2014 at all viewpoints from a
cut site generator. All data is written to a merged wiggle file.
:param generator: generator that outputs target sites in the following tuple format:
( span_rs = region string in "chr1:100-200" format, centered at cut site
cut_i = cut site (int)
sen_i = sense/antisense (+/- str)
pam_i = PAM (str)
gui_i = genomic target sequence (str)
mis_i = # mismatches (int)
guide = intended target sequence (str)
:param path_out: path to wiggle file (extension omitted) to write 4C-seq profile data
:param path_hic: path to Hi-C "root" path from Rao et al., 2014 (e.g. "K562")
:param kb_resolution: [integer] Hi-C resolution in kilobases {5, 10, 25, 50, 100, 250, 500}
:param radius: [integer] 4C-seq profile radius centered at coordinate to write on wiggle file
"""
chr_prev = None
chr_vals = None
wigout = open(path_out + ".wig", 'w')
for rs, cut, sen, pam, gui, mis, tar in generator: # iterate over each target site
chr_i = re.split('[:-]', rs)[0]
if chr_i in CHR:
if chr_prev != chr_i:
print("rao_fourCseq_gen(): Processing %s." % chr_i)
if chr_prev: # save the first to second-to-last chromosome
wigout.write("variableStep\tchrom=%s\n" % chr_prev)
chr_vals = sorted(list(chr_vals), key=lambda x: x[0])
for val in chr_vals:
wigout.write("%i\t%0.5f\n" % val)
chr_vals = set(_rao_fourCseq_helper(path_hic, kb_resolution, chr_i, cut, radius))
chr_prev = chr_i
else:
chr_vals |= set(_rao_fourCseq_helper(path_hic, kb_resolution, chr_i, cut, radius))
# save last chromosome
if chr_prev:
wigout.write("variableStep\tchrom=%s\n" % chr_prev)
chr_vals = sorted(list(chr_vals), key=lambda x: x[0])
for val in chr_vals:
wigout.write("%i\t%0.5f\n" % val)
wigout.close()
def rao_fourCseq_single(path_out, path_hic, kb_resolution, chromosome, coordinate, radius=None):
""" Determine 4C-seq profile at a single viewpoint using Hi-C data from Rao et al., 2014
:param path_out: path to wiggle file (extension omitted) to write 4C-seq profile data
:param path_hic: path to Hi-C "root" path from Rao et al., 2014 (e.g. "K562")
:param kb_resolution: [integer] Hi-C resolution in kilobases {5, 10, 25, 50, 100, 250, 500}
:param chromosome: [string] chromosome of viewpoint (e.g. "chr7")
:param coordinate: [integer] coordinate of view point (e.g. 5529660)
:param radius: [integer] 4C-seq profile radius centered at coordinate to write on wiggle file
"""
wigout = open(path_out + "rao_%i_%s_%i.wig" % (kb_resolution, chromosome, coordinate), 'w')
wigout.write("variableStep\tchrom=%s\n" % chromosome)
outvals = _rao_fourCseq_helper(path_hic, kb_resolution, chromosome, coordinate, radius)
for val in outvals:
wigout.write("%i\t%0.5f\n" % val)
wigout.close()
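# Illustrative call (paths are placeholders; chr7:5529660 is the example viewpoint
# from the docstring): write a 4C-seq-like profile from 10 kb K562 Hi-C data.
#
#     rao_fourCseq_single('/out/', '/data/K562', 10, 'chr7', 5529660, radius=2000000)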
def _rao_fourCseq_helper(path_hic, kb_resolution, chromosome, coordinate, radius=None):
""" Determine 4C-seq profile at a specific viewpoint using Hi-C data from Rao et al., 2014
Helper file with input being an open wiggle file in which to enter 4C-seq profile data
:param path_hic: path to Hi-C "root" path from Rao et al., 2014 (e.g. "K562")
:param kb_resolution: [integer] Hi-C resolution in kilobases {5, 10, 25, 50, 100, 250, 500}
:param chromosome: [string] chromosome of viewpoint (e.g. "chr7")
:param coordinate: [integer] coordinate of view point (e.g. 5529660)
:param radius: [integer] 4C-seq profile radius centered at coordinate to write on wiggle file
:return outvals: [array] of (coordinate, value) tuples that correspond to the coordinate and
values for display in wiggle format.
"""
path_file = os.path.join(path_hic, "%ikb_resolution_intrachromosomal" % kb_resolution,
chromosome,
"MAPQGE30", "%s_%ikb.RAWobserved" % (chromosome, kb_resolution))
round_coord = myround(coordinate, kb_resolution*1000)
dM = np.loadtxt(path_file) # load distance matrix
outvals = []
for i in range(dM.shape[0]):
dm_i = dM[i, :]
if dm_i[0] == round_coord or dm_i[1] == round_coord:
dm_view, dm_coor, dm_valu = None, None, None
if dm_i[0] == round_coord:
dm_view, dm_coor, dm_valu = dm_i[0], dm_i[1], dm_i[2]
elif dm_i[1] == round_coord:
dm_view, dm_coor, dm_valu = dm_i[1], dm_i[0], dm_i[2]
            if not radius or not
"""
This module provides local and cloud storage of computed values. The main
point of entry is the PersistentCache, which encapsulates this functionality.
"""
import attr
import os
import shutil
import tempfile
import yaml
import warnings
from uuid import uuid4
from pathlib import Path
from bionic.exception import EntitySerializationError, UnsupportedSerializedValueError
from .datatypes import Result
from .gcs import GcsTool
from .utils.files import (
ensure_dir_exists,
ensure_parent_dir_exists,
recursively_copy_path,
)
from .utils.misc import hash_simple_obj_to_hex, oneline
from .utils.urls import (
derelativize_url,
path_from_url,
relativize_url,
url_from_path,
)
from .tokenization import tokenize
import logging
logger = logging.getLogger(__name__)
try:
# The C-based YAML emitter is much faster, but requires separate bindings
# which may not be installed.
YamlDumper = yaml.CDumper
YamlLoader = yaml.CLoader
except AttributeError:
running_under_readthedocs = os.environ.get("READTHEDOCS") == "True"
if not running_under_readthedocs:
warnings.warn(
oneline(
"""
Failed to find LibYAML bindings;
falling back to slower Python implementation.
This may reduce performance on large flows.
Installing LibYAML should resolve this."""
)
)
YamlDumper = yaml.Dumper
YamlLoader = yaml.Loader
class PersistentCache:
"""
Provides a persistent mapping between Queries (things we could compute) and
saved Results (computed Queries). You use it by getting a CacheAccessor
for your specific query, and then performing load/save operations on the
accessor.
When looking up a Query, the cache searches for a saved artifact with a
matching Query. The Query may not match exactly: each Query contains a
Provenance, which represents all the code and data used to compute a value,
and two Provenances can match at different levels of precision, from a
"functional" match to an "exact" one. A functional match is sufficient to
treat two artifacts as interchangeable; the finer levels of matching are
only used by the "assisted versioning" system, which tries to detect
situations where a function's bytecode has changed but its version hasn't.
The cache has two tiers: a "local" tier on disk, which is cheap to access,
and an optional "cloud" tier backed by GCS, which is more expensive to
access (but globally accessible). For load operations, the cache returns
the cheapest artifact that functionally matches the Query. For save
operations, the cache records an exact entry in both tiers.
The cache actually has two distinct responsibilities: (a) translating
between in-memory Python objects and serialized files or blobs, and (b)
maintaining an "inventory" of these files and blobs. Currently it makes
sense to group these responsibilities together at each tier, where the
local inventory tracks the local files and the cloud inventory tracks the
cloud blobs. Each of these tiers is handled by a "store" class. However,
in the future we may have other types of persistent artifacts (like
database tables) which don't have their own inventory type. In this case
we might want to split these responsibilities out.
"""
def __init__(self, local_store, cloud_store):
self._local_store = local_store
self._cloud_store = cloud_store
def get_accessor(self, query):
return CacheAccessor(self, query)
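# Rough usage sketch of the accessor API defined below; `query`, `result` and
# `compute_result` stand in for objects produced elsewhere in the framework and
# are not the exact names used by the execution engine:
#
#     accessor = cache.get_accessor(query)
#     if accessor.can_load():
#         result = accessor.load_result()
#     else:
#         result = compute_result(query)   # hypothetical helper
#         accessor.save_result(result)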
class CacheAccessor:
"""
Provides a reference to the cache entries for a specific query. This
interface is convenient, and it also allows us to maintain some memoized
state for each query, saving redundant lookups.
"""
def __init__(self, parent_cache, query):
self.query = query
self.value_filename_stem = valid_filename_from_query(self.query) + "."
self._local = parent_cache._local_store
self._cloud = parent_cache._cloud_store
# These values are memoized to avoid roundtrips.
self._stored_local_entry = None
self._stored_cloud_entry = None
def flush_stored_entries(self):
"""
Flushes the stored local and cloud cached entries.
"""
self._stored_local_entry = None
self._stored_cloud_entry = None
def can_load(self):
"""
Indicates whether there are any cached artifacts for this query.
"""
try:
return self._get_nearest_entry_with_artifact() is not None
except InternalCacheStateError as e:
self._raise_state_error_with_explanation(e)
def load_provenance(self):
"""
Returns the provenance of the nearest cached artifact for this query,
if one exists.
"""
try:
entry = self._get_nearest_entry_with_artifact()
if entry is None:
return None
return entry.provenance
except InternalCacheStateError as e:
self._raise_state_error_with_explanation(e)
def load_result(self):
"""
Returns a Result for the nearest cached artifact for this query, if one
exists.
"""
try:
entry = self._get_nearest_entry_with_artifact()
if entry is None:
return None
if entry.tier == "local":
file_path = path_from_url(entry.artifact_url)
elif entry.tier == "cloud":
blob_url = entry.artifact_url
file_path = self._file_from_blob(blob_url)
else:
raise AssertionError("Unrecognized tier: " + entry.tier)
value = self._value_from_file(file_path)
value_hash = self.query.protocol.tokenize_file(file_path)
return Result(
query=self.query,
value=value,
file_path=file_path,
value_hash=value_hash,
)
except InternalCacheStateError as e:
self._raise_state_error_with_explanation(e)
def load_result_value_hash(self):
"""
Returns only the value hash for the nearest cached artifact for
this query, if one exists.
"""
try:
entry = self._get_nearest_entry_with_artifact()
if entry is None:
return None
return entry.value_hash
except InternalCacheStateError as e:
self._raise_state_error_with_explanation(e)
def save_result(self, result):
"""
Saves a Result in each cache layer that doesn't already have an exact
match.
"""
try:
self._save_or_reregister_result(result)
except InternalCacheStateError as e:
self._raise_state_error_with_explanation(e)
def update_provenance(self):
"""
Adds an entry to each cache layer that doesn't already have an exact
match for this query. There must be already be at least one cached
functional match -- i.e., ``can_load()`` must already return True.
"""
try:
self._save_or_reregister_result(None)
except InternalCacheStateError as e:
self._raise_state_error_with_explanation(e)
def _save_or_reregister_result(self, result):
local_entry = self._get_local_entry()
cloud_entry = self._get_cloud_entry()
self.flush_stored_entries()
if result is not None:
value_wrapper = NullableWrapper(result.value)
file_path = result.file_path
value_hash = result.value_hash
else:
value_wrapper = None
file_path = None
value_hash = None
blob_url = None
if file_path is None:
if local_entry.has_artifact:
file_path = path_from_url(local_entry.artifact_url)
value_hash = local_entry.value_hash
elif value_wrapper is not None:
file_path = self._file_from_value(value_wrapper.value)
value_hash = self.query.protocol.tokenize_file(file_path)
else:
if cloud_entry is None or not cloud_entry.has_artifact:
raise AssertionError(
oneline(
"""
Attempted to register metadata with no result
argument and no previously saved values;
this suggests we called update_provenance() without
previously finding a cached value, which shouldn't
happen."""
)
)
blob_url = cloud_entry.artifact_url
file_path = self._file_from_blob(blob_url)
value_hash = cloud_entry.value_hash
if not local_entry.exactly_matches_query:
file_url = url_from_path(file_path)
local_entry = self._local.inventory.register_url(
self.query, file_url, value_hash,
)
self._stored_local_entry = local_entry
if self._cloud:
assert cloud_entry is not None
if not cloud_entry.exactly_matches_query:
if blob_url is None:
if cloud_entry.has_artifact:
blob_url = cloud_entry.artifact_url
else:
blob_url = self._blob_from_file(file_path)
cloud_entry = self._cloud.inventory.register_url(
self.query, blob_url, value_hash,
)
self._stored_cloud_entry = cloud_entry
def _get_nearest_entry_with_artifact(self):
"""
Returns the "nearest" -- i.e., most local -- cache entry for this
query.
"""
local_entry = self._get_local_entry()
if local_entry.has_artifact:
return local_entry
cloud_entry = self._get_cloud_entry()
if cloud_entry is not None and cloud_entry.has_artifact:
return cloud_entry
return None
def _get_local_entry(self):
if self._stored_local_entry is None:
self._stored_local_entry = self._local.inventory.find_entry(self.query)
return self._stored_local_entry
def _get_cloud_entry(self):
if self._stored_cloud_entry is None:
if self._cloud is None:
return None
self._stored_cloud_entry = self._cloud.inventory.find_entry(self.query)
return self._stored_cloud_entry
def _file_from_blob(self, blob_url):
dir_path = self._local.generate_unique_dir_path(self.query)
filename = path_from_url(blob_url).name
file_path = dir_path / filename
ensure_parent_dir_exists(file_path)
logger.info("Downloading %s from GCS ...", self.query.task_key)
try:
self._cloud.download(file_path, blob_url)
except Exception as e:
raise InternalCacheStateError.from_failure("artifact blob", blob_url, e)
return file_path
def _blob_from_file(self, file_path):
url_prefix = self._cloud.generate_unique_url_prefix(self.query)
blob_url = url_prefix + "/" + file_path.name
logger.info("Uploading %s to GCS ...", self.query.task_key)
try:
self._cloud.upload(file_path, blob_url)
except Exception as e:
raise InternalCacheStateError.from_failure("artifact file", file_path, e)
return blob_url
def _file_from_value(self, value):
dir_path = self._local.generate_unique_dir_path(self.query)
extension = self.query.protocol.file_extension_for_value(value)
value_filename = self.value_filename_stem + extension
value_path = dir_path / value_filename
ensure_parent_dir_exists(value_path)
try:
self.query.protocol.write(value, value_path)
except Exception as e:
# TODO Should we rename this to just SerializationError?
raise EntitySerializationError(
oneline(
f"""
Value of descriptor {self.query.dnode.to_descriptor()!r}
could not be serialized to disk
"""
)
) from e
return value_path
def _value_from_file(self, file_path):
value_filename = file_path.name
extension = value_filename[len(self.value_filename_stem) :]
try:
return self.query.protocol.read_with_extension(file_path, extension)
except UnsupportedSerializedValueError:
raise
except Exception as e:
raise InternalCacheStateError.from_failure("artifact file", file_path, e)
def _raise_state_error_with_explanation(self, source_exc):
stores = [self._local]
if self._cloud:
stores.append(self._cloud)
inventory_root_urls = " and ".join(store.inventory.root_url for store in stores)
raise InvalidCacheStateError(
oneline(
f"""
Cached data may be in an invalid state; this should be
impossible but could have resulted from either a bug or a
change to the cached files. You should be able to repair
the problem by removing all cached files under
{inventory_root_urls}."""
)
) from source_exc
@attr.s(frozen=True)
class NullableWrapper:
"""
A simple wrapper for a value that might be None. We use this when we want
to distinguish between "we have a value which is None" from "we don't have a
value".
"""
value = attr.ib()
@attr.s(frozen=True)
class InventoryEntry:
"""
Represents a saved artifact tracked by an Inventory; returned by Inventory
to CacheAccessor.
"""
tier = attr.ib()
has_artifact = attr.ib()
artifact_url = attr.ib()
provenance = attr.ib()
exactly_matches_query = attr.ib()
value_hash = attr.ib()
@attr.s(frozen=True)
class MetadataMatch:
"""
| |
        try:
            strategy = source_fields[2].split('-')[0]
except IndexError:
strategy = source_fields[2]
try:
values = source_fields[2].split('-')[1].split(',')
try:
value_min = values[0]
value_max = values[1]
value = None
except IndexError:
rospy.loginfo("Detected a singe value from source_string: %s" % source_string)
value = values[0]
value_min = None
value_max = None
except IndexError:
rospy.loginfo("Could not get value_min/value_max nor single value from sources for source_string: %s" % source_string)
value_min, value_max, value = None, None, None
single_source['topic'] = topic
single_source['message_type'] = message_type
single_source['strategy'] = strategy
single_source['slot'] = slot
single_source['value_min'] = value_min
single_source['value_max'] = value_max
single_source['value'] = value
sources.append(single_source)
return sources
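# Rough illustration (the topic below is invented): a source string such as
# '/proximity_sensor/distance:sensor_msgs/Range-range:average-0.2,1.5'
# unpacks into one dict with topic='/proximity_sensor/distance',
# message_type='sensor_msgs/Range', slot='range', strategy='average',
# value_min='0.2', value_max='1.5' and value=None.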
def build_source_string(topic, message_type, strategy, slot=None, value_min=None, value_max=None):
"""
    Builds a source string of the form 'topic:message_type-slot:strategy-value_min,value_max'.
"""
# both are required
ret = topic + ':' + message_type
# only add sub slots if parent slots exist
if slot:
ret += '-' + slot
# required
ret += ':' + strategy
# only do min & max if both are there
if value_min and value_max:
ret += '-' + str(value_min) + ',' + str(value_max)
return ret
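# Minimal sketch (topic and values are made up) showing the format produced above,
# the inverse of the unpacking helper earlier in this module:
#
#     >>> build_source_string('/proximity_sensor/distance', 'sensor_msgs/Range',
#     ...                     'average', slot='range', value_min=0.2, value_max=1.5)
#     '/proximity_sensor/distance:sensor_msgs/Range-range:average-0.2,1.5'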
def check_registration(e):
"""\
Shuts down this ROS node if it is not registered on the master.
This will effectively kill the director each time the ROS master is
restarted, preventing silent and subtle publishing failure.
This should cause a shutdown *only* if the master can be contacted and the
node is not registered.
"""
import rosnode
try:
nodes = rosnode.get_node_names()
except rosnode.ROSNodeIOException:
rospy.logdebug("Could not contact master for registration check")
return
if rospy.get_name() not in nodes:
rospy.logwarn("Node no longer registered, shutting down")
rospy.signal_shutdown("Node no longer registered")
os.kill(os.getpid(), signal.SIGTERM)
def begin_checking_registration(interval=1):
"""
Periodically check the health of this node on the master.
"""
    rospy.Timer(rospy.Duration(interval), check_registration)
def next_scene_uri(presentation, scene):
"""
Read two JSON-encoded strings: a Presentation and a Scene.
Decode, find the Scene within the Presentation's script,
then return the URI for the next Scene in the script.
"""
try:
resource_uri = json.loads(scene)['resource_uri']
scenes = json.loads(presentation)['scenes']
script = map(lambda x: x['resource_uri'], scenes)
except KeyError:
return None
try:
return script[script.index(resource_uri) + 1]
except IndexError:
rospy.loginfo("Already at last Scene in this Presentation.")
return None
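# Small sketch with hand-written JSON (not a real director message) showing how the
# next resource_uri is resolved:
#
#     >>> presentation = '{"scenes": [{"resource_uri": "/scene/1"}, {"resource_uri": "/scene/2"}]}'
#     >>> scene = '{"resource_uri": "/scene/1"}'
#     >>> next_scene_uri(presentation, scene)
#     '/scene/2'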
def get_message_type_from_string(string):
"""
Return msg_type module (e.g. GenericMessage) from string like 'interactivespaces_msgs/GenericMessage'
"""
module = string.split('/')[0]
# e.g. 'interactivespaces_msgs'
message = string.split('/')[1]
# e.g. GenericMessage
module_obj = __import__('%s.msg' % module)
globals()[module] = module_obj
message_type_final = getattr(getattr(sys.modules[module], 'msg'), message)
return message_type_final
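# Illustrative only; assuming the interactivespaces_msgs package is built and on the
# Python path, the call below returns the GenericMessage class object:
#
#     >>> msg_class = get_message_type_from_string('interactivespaces_msgs/GenericMessage')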
def x_available(timeout=None):
if not timeout:
return
import commands
while timeout >= 0:
x_check = commands.getstatusoutput("DISPLAY=:0 xset q")
if x_check[0] == 0:
return True
else:
rospy.loginfo("X not available - sleeping for %s more seconds" % timeout)
timeout -= 1
rospy.sleep(1)
def dependency_available(server, port, name, timeout=None):
"""
Wait for network service to appear. Provide addres, port and name.
If timeout is set to none then wait forever.
"""
import socket
import errno
from socket import error as socket_error
s = socket.socket()
if timeout:
from time import time as now
end = now() + timeout
while True:
try:
if timeout:
next_timeout = end - now()
if next_timeout < 0:
return False
else:
s.settimeout(next_timeout)
s.connect((server, port))
except socket_error as serr:
# this exception occurs only if timeout is set
if serr.errno == errno.ECONNREFUSED:
rospy.logwarn("%s not yet available - waiting %s seconds more" % (name, next_timeout))
rospy.sleep(1)
else:
rospy.logwarn("%s not available because: %s" % (name, serr))
rospy.sleep(1)
        except socket.error as err:
# catch timeout exception from underlying network library
# this one is different from socket.timeout
rospy.loginfo("%s not yet available - waiting %s secs more" % (name, next_timeout))
rospy.sleep(1)
if type(err.args) != tuple or err[0] != errno.ETIMEDOUT:
raise
else:
s.close()
rospy.loginfo("%s is available" % name)
return True
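# Hedged usage sketch (host, port and timeout are illustrative): block for at
# most 30 seconds while waiting for a rosbridge websocket to come up.
def _example_dependency_available():
    return dependency_available('127.0.0.1', 9090, 'rosbridge', timeout=30)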
def discover_host_from_url(url):
from urlparse import urlparse
data = urlparse(url)
return data.hostname
def discover_port_from_url(url):
from urlparse import urlparse
data = urlparse(url)
return data.port
def find_device(name):
import os
for device in os.listdir('/dev/input/'):
device = '/dev/input/' + device
if 'event' not in device:
continue
if check_device(device, name):
return device
# did not find an event device with the name provided
return None
def check_device(device, name):
import os
from stat import ST_MODE
from evdev import InputDevice
if os.access(device, os.W_OK | os.R_OK):
return (InputDevice(device).name == name)
    original_mode = os.stat(device)[ST_MODE]
os.system('sudo chmod 0666 %s' % device)
flag = (InputDevice(device).name == name)
if not flag:
os.chmod(device, original_mode)
return flag
def is_valid_state(state):
from lg_common.msg import ApplicationState
return state == ApplicationState.HIDDEN or \
state == ApplicationState.STOPPED or \
state == ApplicationState.STARTED or \
state == ApplicationState.SUSPENDED or \
state == ApplicationState.VISIBLE
def make_soft_relaunch_callback(func, *args, **kwargs):
"""
    Creates a callback on the /soft_relaunch topic. The argument normally
    passed is an array of strings called 'groups'. ROS nodes can be put
    into groups like "streetview" and "earth"; the "all" group applies to
    all ROS nodes.
"""
from std_msgs.msg import String
rospy.logdebug('creating callback %s' % kwargs.get('groups', 'no group'))
def cb(msg):
if msg.data == 'all':
rospy.loginfo('calling callback for data: (%s) kwargs: (%s)' % (msg.data, kwargs))
func(msg)
return
if 'groups' in kwargs and msg.data in kwargs['groups']:
rospy.loginfo('calling callback for data: (%s) kwargs: (%s)' % (msg.data, kwargs))
func(msg)
return
return rospy.Subscriber('/soft_relaunch', String, cb)
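# Hedged usage sketch (the group name is illustrative): register a callback
# that fires when this node's group, or 'all', is soft-relaunched.
def _example_make_soft_relaunch_callback():
    def relaunch(msg):
        rospy.loginfo("soft relaunch requested: %s" % msg.data)
    return make_soft_relaunch_callback(relaunch, groups=['earth'])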
def get_nested_slot_value(slot, message):
"""
    Accepts a dot-separated string naming the slot and subslots that must be
    traversed to get the value of a message's nested attribute.
    The slot string is converted to a list of strings, one per subslot, which
    is then used to read the value from the msg. Example:
For sensor_msg/Range:
---
header:
seq: 1414798
stamp:
secs: 1461247209
nsecs: 611480951
frame_id: ''
radiation_type: 0
field_of_view: 0.0
min_range: 0.0
max_range: 0.0
range: 0.685800015926
list of slots to get nsecs will look like:
['header', 'stamp', 'nsecs']
Returns a dictionary with slots name and value e.g.:
{'header.stamp.nsecs': 611480951}
"""
slot_tree = slot.split('.')
if len(slot_tree) == 1:
return {slot: getattr(message, slot)}
elif len(slot_tree) > 1:
deserialized_msg = message
for subslot in slot_tree:
try:
deserialized_msg = getattr(deserialized_msg, subslot)
except AttributeError:
if type(deserialized_msg) == str:
try:
# try to convert string to dict (works only for genericmessage)
deserialized_msg = json.loads(deserialized_msg)
deserialized_msg = deserialized_msg[subslot]
except KeyError:
                        msg = "Subslot %s does not exist in message: %s" % (subslot, deserialized_msg)
rospy.logerr(msg)
except ValueError:
msg = "Could not convert message '%s' to dict using subslot: '%s'" % (subslot, deserialized_msg)
rospy.logerr(msg)
elif type(deserialized_msg) == dict:
try:
deserialized_msg = deserialized_msg[subslot]
except KeyError:
msg = "Could not get value for slot %s from message %s" % (subslot, deserialized_msg)
rospy.logerr(msg)
else:
msg = "Could not get subslot value '%s' from message '%s'" % (subslot, deserialized_msg)
rospy.logerr(msg)
return {slot: deserialized_msg}
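# Hedged usage sketch: for a sensor_msgs/Range message like the one shown in
# the docstring above, the nested nanoseconds field can be read as follows.
def _example_get_nested_slot_value(range_msg):
    # expected result shape: {'header.stamp.nsecs': 611480951}
    return get_nested_slot_value('header.stamp.nsecs', range_msg)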
def get_activity_config(scene, activity_name, window_viewport):
"""
Returns configuration for the given activity on the given viewport in the given scene.
Args:
scene (interactivespaces_msgs.msg.GenericMessage)
activity_name (str)
window_viewport (str)
Returns:
dict: Configuration for the activity.
None: Activity not present on this viewport.
"""
import json
scene = json.loads(scene.message)
def is_activity_window(window):
        return window['activity'] == activity_name and \
            window['presentation_viewport'] == window_viewport
try:
windows = [w for w in scene['windows'] if is_activity_window(w)]
activity_config = windows[0]['activity_config']
except KeyError:
return None
except AttributeError:
return None
except IndexError:
return None
return activity_config
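# Hedged usage sketch (the scene JSON is illustrative): look up the config of
# a 'browser' activity on the 'center' viewport.
def _example_get_activity_config():
    class FakeScene(object):
        message = ('{"windows": [{"activity": "browser", '
                   '"presentation_viewport": "center", '
                   '"activity_config": {"url": "http://example.com"}}]}')
    return get_activity_config(FakeScene(), 'browser', 'center')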
def check_www_dependency(should_depend, host, port, name, timeout):
"""
Check if www dependency is available, or raise an exception
"""
if should_depend:
        rospy.loginfo("Waiting for %s to become available" % name)
if not dependency_available(host, port, name, timeout):
msg = "Service: %s (%s:%s) hasn't become accessible within %s seconds" % (name, host, port, timeout)
rospy.logfatal(msg)
raise DependencyException(msg)
else:
rospy.loginfo("%s is online" % name)
def x_available_or_raise(timeout):
"""
Checks if x is available, or will raise an error
"""
if x_available(timeout):
rospy.loginfo("X available")
else:
msg = "X server is not available"
rospy.logfatal(msg)
raise DependencyException(msg)
def browser_eligible_for_reuse(current_browser, future_browser):
"""
type current_browser: ManagedAdhocBrowser
type future_browser: AdhocBrowser.msg
    Compares two browsers and returns a bool telling whether one browser
    can be updated to the other.
    It can't be updated if there's a difference in:
    - command line args
    - extensions
    - user agent
    - binary
"""
future_browser_extensions = [ext.name for ext in future_browser.extensions]
future_browser_cmd_args = [arg.argument for arg in future_browser.command_line_args]
return current_browser.user_agent == future_browser.user_agent and\
current_browser.binary == future_browser.binary and\
current_browser.extensions == future_browser_extensions and\
current_browser.command_line_args == future_browser_cmd_args
def get_random_string(N=6, uppercase=True):
"""
Generate random string.
"""
if uppercase:
string_range = string.ascii_uppercase
else:
string_range = string.ascii_letters
    return ''.join(random.choice(string_range) for _ in range(N))
to the nearest millisecond
year : int
Year
month : int
Month (range 1 .. 12)
day : int
days in the month (range 1-31)
hours : int
hours of day (range 0-23)
minutes : int
minutes of hour (range 0-59)
seconds : int
seconds of minute (range 0-60) rounded down
ns : int
nanoseconds of second (range 0-999999999)
sender : int
Optional sender ID, defaults to SENDER_ID (see sbp/msg.py).
"""
_parser = construct.Struct(
'flags' / construct.Int8ul,
'tow' / construct.Int32ul,
'year' / construct.Int16ul,
'month' / construct.Int8ul,
'day' / construct.Int8ul,
'hours' / construct.Int8ul,
'minutes' / construct.Int8ul,
'seconds' / construct.Int8ul,
'ns' / construct.Int32ul,)
__slots__ = [
'flags',
'tow',
'year',
'month',
'day',
'hours',
'minutes',
'seconds',
'ns',
]
def __init__(self, sbp=None, **kwargs):
if sbp:
super( MsgUtcTime,
self).__init__(sbp.msg_type, sbp.sender, sbp.length,
sbp.payload, sbp.crc)
self.from_binary(sbp.payload)
else:
super( MsgUtcTime, self).__init__()
self.msg_type = SBP_MSG_UTC_TIME
self.sender = kwargs.pop('sender', SENDER_ID)
self.flags = kwargs.pop('flags')
self.tow = kwargs.pop('tow')
self.year = kwargs.pop('year')
self.month = kwargs.pop('month')
self.day = kwargs.pop('day')
self.hours = kwargs.pop('hours')
self.minutes = kwargs.pop('minutes')
self.seconds = kwargs.pop('seconds')
self.ns = kwargs.pop('ns')
def __repr__(self):
return fmt_repr(self)
@staticmethod
def from_json(s):
"""Given a JSON-encoded string s, build a message object.
"""
d = json.loads(s)
return MsgUtcTime.from_json_dict(d)
@staticmethod
def from_json_dict(d):
sbp = SBP.from_json_dict(d)
return MsgUtcTime(sbp, **d)
def from_binary(self, d):
"""Given a binary payload d, update the appropriate payload fields of
the message.
"""
p = MsgUtcTime._parser.parse(d)
for n in self.__class__.__slots__:
setattr(self, n, getattr(p, n))
def to_binary(self):
"""Produce a framed/packed SBP message.
"""
c = containerize(exclude_fields(self))
self.payload = MsgUtcTime._parser.build(c)
return self.pack()
def into_buffer(self, buf, offset):
"""Produce a framed/packed SBP message into the provided buffer and offset.
"""
self.payload = containerize(exclude_fields(self))
self.parser = MsgUtcTime._parser
self.stream_payload.reset(buf, offset)
return self.pack_into(buf, offset, self._build_payload)
def to_json_dict(self):
self.to_binary()
d = super( MsgUtcTime, self).to_json_dict()
j = walk_json_dict(exclude_fields(self))
d.update(j)
return d
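# Hedged usage sketch (field values are illustrative; relies on the SBP
# framing helpers imported at the top of this module): build a MsgUtcTime
# from keyword arguments and frame it for transmission.
def _example_msg_utc_time():
    msg = MsgUtcTime(flags=1, tow=501867000, year=2021, month=6, day=15,
                     hours=12, minutes=31, seconds=7, ns=0)
    return msg.to_binary()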
SBP_MSG_UTC_TIME_GNSS = 0x0105
class MsgUtcTimeGnss(SBP):
"""SBP class for message MSG_UTC_TIME_GNSS (0x0105).
You can have MSG_UTC_TIME_GNSS inherit its fields directly
from an inherited SBP object, or construct it inline using a dict
of its fields.
This message reports the Universal Coordinated Time (UTC). Note the flags
which indicate the source of the UTC offset value and source of the time
fix.
Parameters
----------
sbp : SBP
SBP parent object to inherit from.
flags : int
Indicates source and time validity
tow : int
GPS time of week rounded to the nearest millisecond
year : int
Year
month : int
Month (range 1 .. 12)
day : int
days in the month (range 1-31)
hours : int
hours of day (range 0-23)
minutes : int
minutes of hour (range 0-59)
seconds : int
seconds of minute (range 0-60) rounded down
ns : int
nanoseconds of second (range 0-999999999)
sender : int
Optional sender ID, defaults to SENDER_ID (see sbp/msg.py).
"""
_parser = construct.Struct(
'flags' / construct.Int8ul,
'tow' / construct.Int32ul,
'year' / construct.Int16ul,
'month' / construct.Int8ul,
'day' / construct.Int8ul,
'hours' / construct.Int8ul,
'minutes' / construct.Int8ul,
'seconds' / construct.Int8ul,
'ns' / construct.Int32ul,)
__slots__ = [
'flags',
'tow',
'year',
'month',
'day',
'hours',
'minutes',
'seconds',
'ns',
]
def __init__(self, sbp=None, **kwargs):
if sbp:
super( MsgUtcTimeGnss,
self).__init__(sbp.msg_type, sbp.sender, sbp.length,
sbp.payload, sbp.crc)
self.from_binary(sbp.payload)
else:
super( MsgUtcTimeGnss, self).__init__()
self.msg_type = SBP_MSG_UTC_TIME_GNSS
self.sender = kwargs.pop('sender', SENDER_ID)
self.flags = kwargs.pop('flags')
self.tow = kwargs.pop('tow')
self.year = kwargs.pop('year')
self.month = kwargs.pop('month')
self.day = kwargs.pop('day')
self.hours = kwargs.pop('hours')
self.minutes = kwargs.pop('minutes')
self.seconds = kwargs.pop('seconds')
self.ns = kwargs.pop('ns')
def __repr__(self):
return fmt_repr(self)
@staticmethod
def from_json(s):
"""Given a JSON-encoded string s, build a message object.
"""
d = json.loads(s)
return MsgUtcTimeGnss.from_json_dict(d)
@staticmethod
def from_json_dict(d):
sbp = SBP.from_json_dict(d)
return MsgUtcTimeGnss(sbp, **d)
def from_binary(self, d):
"""Given a binary payload d, update the appropriate payload fields of
the message.
"""
p = MsgUtcTimeGnss._parser.parse(d)
for n in self.__class__.__slots__:
setattr(self, n, getattr(p, n))
def to_binary(self):
"""Produce a framed/packed SBP message.
"""
c = containerize(exclude_fields(self))
self.payload = MsgUtcTimeGnss._parser.build(c)
return self.pack()
def into_buffer(self, buf, offset):
"""Produce a framed/packed SBP message into the provided buffer and offset.
"""
self.payload = containerize(exclude_fields(self))
self.parser = MsgUtcTimeGnss._parser
self.stream_payload.reset(buf, offset)
return self.pack_into(buf, offset, self._build_payload)
def to_json_dict(self):
self.to_binary()
d = super( MsgUtcTimeGnss, self).to_json_dict()
j = walk_json_dict(exclude_fields(self))
d.update(j)
return d
SBP_MSG_DOPS = 0x0208
class MsgDops(SBP):
"""SBP class for message MSG_DOPS (0x0208).
You can have MSG_DOPS inherit its fields directly
from an inherited SBP object, or construct it inline using a dict
of its fields.
This dilution of precision (DOP) message describes the effect of navigation
satellite geometry on positional measurement precision. The flags field
indicated whether the DOP reported corresponds to differential or SPP
solution.
Parameters
----------
sbp : SBP
SBP parent object to inherit from.
tow : int
GPS Time of Week
gdop : int
Geometric Dilution of Precision
pdop : int
Position Dilution of Precision
tdop : int
Time Dilution of Precision
hdop : int
Horizontal Dilution of Precision
vdop : int
Vertical Dilution of Precision
flags : int
Indicates the position solution with which the DOPS message corresponds
sender : int
Optional sender ID, defaults to SENDER_ID (see sbp/msg.py).
"""
_parser = construct.Struct(
'tow' / construct.Int32ul,
'gdop' / construct.Int16ul,
'pdop' / construct.Int16ul,
'tdop' / construct.Int16ul,
'hdop' / construct.Int16ul,
'vdop' / construct.Int16ul,
'flags' / construct.Int8ul,)
__slots__ = [
'tow',
'gdop',
'pdop',
'tdop',
'hdop',
'vdop',
'flags',
]
def __init__(self, sbp=None, **kwargs):
if sbp:
super( MsgDops,
self).__init__(sbp.msg_type, sbp.sender, sbp.length,
sbp.payload, sbp.crc)
self.from_binary(sbp.payload)
else:
super( MsgDops, self).__init__()
self.msg_type = SBP_MSG_DOPS
self.sender = kwargs.pop('sender', SENDER_ID)
self.tow = kwargs.pop('tow')
self.gdop = kwargs.pop('gdop')
self.pdop = kwargs.pop('pdop')
self.tdop = kwargs.pop('tdop')
self.hdop = kwargs.pop('hdop')
self.vdop = kwargs.pop('vdop')
self.flags = kwargs.pop('flags')
def __repr__(self):
return fmt_repr(self)
@staticmethod
def from_json(s):
"""Given a JSON-encoded string s, build a message object.
"""
d = json.loads(s)
return MsgDops.from_json_dict(d)
@staticmethod
def from_json_dict(d):
sbp = SBP.from_json_dict(d)
return MsgDops(sbp, **d)
def from_binary(self, d):
"""Given a binary payload d, update the appropriate payload fields of
the message.
"""
p = MsgDops._parser.parse(d)
for n in self.__class__.__slots__:
setattr(self, n, getattr(p, n))
def to_binary(self):
"""Produce a framed/packed SBP message.
"""
c = containerize(exclude_fields(self))
self.payload = MsgDops._parser.build(c)
return self.pack()
def into_buffer(self, buf, offset):
"""Produce a framed/packed SBP message into the provided buffer and offset.
"""
self.payload = containerize(exclude_fields(self))
self.parser = MsgDops._parser
self.stream_payload.reset(buf, offset)
return self.pack_into(buf, offset, self._build_payload)
def to_json_dict(self):
self.to_binary()
d = super( MsgDops, self).to_json_dict()
j = walk_json_dict(exclude_fields(self))
d.update(j)
return d
SBP_MSG_POS_ECEF = 0x0209
class MsgPosECEF(SBP):
"""SBP class for message MSG_POS_ECEF (0x0209).
You can have MSG_POS_ECEF inherit its fields directly
from an inherited SBP object, or construct it inline using a dict
of its fields.
The position solution message reports absolute Earth Centered Earth Fixed
(ECEF) coordinates and the status (single point vs pseudo-absolute RTK) of
the position solution. If the rover receiver knows the surveyed position of
the base station and has an RTK solution, this reports a pseudo-absolute
position solution using the base station position and the rover's RTK
baseline vector. The full GPS time is given by the preceding MSG_GPS_TIME
with the matching time-of-week (tow).
Parameters
----------
sbp : SBP
SBP parent object to inherit from.
tow : int
GPS Time of Week
x : double
ECEF X coordinate
y : double
ECEF Y coordinate
z : double
ECEF Z coordinate
accuracy : int
Position estimated standard deviation
n_sats : int
Number of satellites used in solution
flags : int
Status flags
sender : int
Optional sender ID, defaults to SENDER_ID (see sbp/msg.py).
"""
_parser = construct.Struct(
'tow' / construct.Int32ul,
'x' / construct.Float64l,
'y' / construct.Float64l,
'z' / construct.Float64l,
'accuracy' / construct.Int16ul,
'n_sats' / construct.Int8ul,
'flags' / construct.Int8ul,)
__slots__ = [
'tow',
'x',
'y',
'z',
'accuracy',
'n_sats',
'flags',
]
def __init__(self, sbp=None, **kwargs):
if sbp:
super( MsgPosECEF,
self).__init__(sbp.msg_type, sbp.sender, sbp.length,
sbp.payload, sbp.crc)
self.from_binary(sbp.payload)
else:
super( MsgPosECEF, self).__init__()
self.msg_type = SBP_MSG_POS_ECEF
self.sender = kwargs.pop('sender', SENDER_ID)
self.tow = kwargs.pop('tow')
self.x = kwargs.pop('x')
self.y = kwargs.pop('y')
self.z = kwargs.pop('z')
self.accuracy = kwargs.pop('accuracy')
self.n_sats = kwargs.pop('n_sats')
self.flags = kwargs.pop('flags')
def __repr__(self):
return fmt_repr(self)
@staticmethod
def from_json(s):
"""Given a JSON-encoded string s, build a message object.
"""
d = json.loads(s)
return MsgPosECEF.from_json_dict(d)
@staticmethod
def from_json_dict(d):
sbp | |
<filename>build/lib/bigwing/crawler.py
from bs4 import BeautifulSoup
import warnings; warnings.filterwarnings("ignore")
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from IPython.display import clear_output
import re, os, time, pickle, errno
import pandas as pd
import numpy as np
import threading
class BigwingCrawler():
def __init__(self, url='about:blank', page_range=None, page_type=None, browser='Chrome', headless=True, n_jobs=1, verbose=True):
        '''
        Crawler class constructor.
        :param url:
        :param browser: headless browser to use, Chrome (Default) or PhantomJS
        :param headless: headless mode flag, True (Default) or False
        '''
try :
self.url = url
self.page_type = page_type
self.browser = browser
self.headless = headless
self.n_jobs = n_jobs
self.data = None
self.thread = []
self.verbose = verbose
if page_range != None:
self.partitioner(page_range[0], page_range[1], n_jobs)
self.start_page = page_range[0]
self.end_page = page_range[1]
self.error_page_list = self.load("error_pages")
self.success_page_list = self.load("success_pages")
except Exception as e:
print(e)
self.close()
def partitioner(self, start, end, divide):
partition_sp = np.linspace(start - 1, end, divide + 1).astype(int)
        # Create per-partition storage
        self.partitions = {}     # scraped data per partition
        self.error_pages = {}    # error pages per partition
        self.success_pages = {}  # successful pages per partition
        self.status = {}         # progress status per partition
        self.successes = {}      # success count per partition
        self.processeds = {}     # processed count per partition
        self.errors = {}         # error count per partition
        self.run_flags = {}      # run/pause flag per partition
        self.stop_flags = {}     # stop flag per partition
        self.zip_flag = 0        # whether the partitions have been merged
        self.drivers = {}        # browser driver per partition
        self.htmls = {}          # html document per partition
        self.soups = {}          # BeautifulSoup object per partition
        self.processes = {}      # worker thread per partition
        # Initialize the storage for each partition
        for i in range(len(partition_sp) - 1):
            # Build the partition key (the key encodes the partition's page range)
            partition_key = (partition_sp[i] + 1, partition_sp[i + 1])
            self.open(partition_key)  # open a browser for this partition
self.partitions[partition_key] = pd.DataFrame()
self.error_pages[partition_key] = []
self.success_pages[partition_key] = []
self.status[partition_key] = "준비완료"
self.successes[partition_key] = 0
self.processeds[partition_key] = 0
self.errors[partition_key] = 0
self.processes[partition_key] = None
self.run_flags[partition_key] = False
self.stop_flags[partition_key] = True
def start(self):
if self.verbose == True: print("{} 개 프로세스로 작동합니다.".format(len(self.partitions.keys())))
for partition_key in self.partitions:
self.status[partition_key] = "진행중"
self.processes[partition_key] = threading.Thread(target=self.crawl, args=(partition_key,))
self.run_flags[partition_key] = True
self.stop_flags[partition_key] = False
for process in self.processes.values() :
process.start()
# for process in self.processes.values() :
# process.join()
def restart(self, part_nm=None):
keys = list(self.partitions.keys())
if part_nm != None :
if part_nm > len(keys) : print("{}번 프로세스는 없습니다."); return;
partition_key = keys[part_nm + 1]
self.run_flags[partition_key] = True
self.status[partition_key] = "진행중"
print("{} 프로세스 재시작".format(partition_key))
else :
for partition_key in keys :
self.run_flags[partition_key] = True
self.status[partition_key] = "진행중"
print("{} 프로세스 재시작".format(partition_key))
def pause(self, part_nm=None):
keys = list(self.partitions.keys())
if part_nm != None :
if part_nm > len(keys) : print("{}번 프로세스는 없습니다."); return;
partition_key = keys[part_nm + 1]
self.run_flags[partition_key] = False
self.status[partition_key] = "일시정지"
print("{} 프로세스 일시정지".format(partition_key))
else :
for partition_key in keys :
self.run_flags[partition_key] = False
self.status[partition_key] = "일시정지"
print("{} 프로세스 일시정지".format(partition_key))
def stop(self, part_nm=None):
keys = list(self.partitions.keys())
if part_nm != None:
if part_nm > len(keys): print("{}번 프로세스는 없습니다."); return;
partition_key = keys[part_nm + 1]
self.stop_flags[partition_key] = True
self.status[partition_key] = "중단"
print("{} 프로세스 중단".format(partition_key))
else:
for partition_key in keys:
self.stop_flags[partition_key] = True
self.status[partition_key] = "중단"
print("{} 프로세스 중단".format(partition_key))
time.sleep(2)
self.close()
def set_verbose(self, verbose):
self.verbose = verbose
def open(self, partition_key):
self.drivers[partition_key] = self.set_driver(self.url)
self.htmls[partition_key] = self.set_html(partition_key)
self.soups[partition_key] = self.set_soup(partition_key)
print("{} 페이지 브라우저를 오픈했습니다.".format(partition_key))
def clear(self):
import shutil
try :
shutil.rmtree("tmpdata/{}".format(self.page_type))
print("데이터 삭제")
except FileNotFoundError as e :
print("기록이 없습니다.")
def backup(self):
import shutil
from datetime import datetime
timestamp = datetime.strftime(datetime.now(), "%m%d_%H%M")
tmpdir = os.path.join(os.path.abspath(os.path.curdir), "tmpdata")
backupdir = os.path.join(os.path.abspath(os.path.curdir), "backup")
dstdir = os.path.join(backupdir, timestamp)
if not os.path.isdir(backupdir):
os.makedirs(backupdir)
try :
shutil.move(tmpdir, dstdir)
print("{} 로 데이터를 백업했습니다.".format(
os.path.join(dstdir, self.page_type)))
except :
pass
def refresh(self, partition_key):
for i in range(self.n_jobs) :
self.htmls[partition_key] = self.set_html(partition_key)
self.soups[partition_key] = self.set_soup(partition_key)
def picker(self, partition_key, parant_tag, child_tag=None):
        '''
        Selects the tags that hold the target data and collects the matching
        data from the whole web page.
        :param parant_tag: parent tag selector
        :param child_tag: child tag selector (Default : None)
        :return: list of lists
        '''
tags = self.soups[partition_key].select(parant_tag)
results = []
for tag in tags :
if child_tag != None :
tag = tag.select(child_tag)
tag = [data.text.strip() for data in tag]
if tag == [] :
continue
results.append(tag)
return results
def fetch(self, partition_key, keyword):
        '''
        Abstract method: crawls a single record.
        :param keyword: search keyword
        :return: None
        '''
pass
def insert(self, input_data, col):
pass
def takeout(self):
        '''
        Returns the crawled dataset.
        :return: data (a DataFrame, or a dict of DataFrames)
        '''
        if self.n_jobs == 1:
            # dict.pop() requires a key; return the single partition's DataFrame
            return list(self.partitions.values())[0]
else:
if self.zip_flag == 0:
return self.partitions
else:
return self.data
def save(self):
self.data = pd.DataFrame()
for partition in self.partitions.values():
self.data = self.data.append(partition)
self.data = self.data.reset_index(drop=True)
print("데이터 병합")
self.record()
print("스크랩 로그기록")
self.log()
self.zip_flag = 1
def monitor(self, second=2):
self.set_verbose(False)
while True:
try:
self.summary()
clear_output(wait=True)
time.sleep(second)
except KeyboardInterrupt:
break;
self.set_verbose(True)
print("모니터링 종료")
def summary(self):
print("-" * 108)
for partition_key in self.partitions:
line = "{:>15} 스크랩프로세스 | {:>5}% {} | 총 {:>6}건 | 성공 {:>6}건 | 실패 {:>6}건".format(
str(partition_key),
("%.1f" % (self.processeds[partition_key] / (partition_key[1] - partition_key[0] + 1) * 100)),
self.status[partition_key],
partition_key[1] - partition_key[0] + 1,
self.successes[partition_key],
self.errors[partition_key],
)
print("|{:>82} |".format(line))
print("-" * 108)
total_processeds = 0
for i in self.processeds.values() : total_processeds += i
total_successes = 0
for i in self.successes.values(): total_successes += i
total_errors = 0
for i in self.errors.values(): total_errors += i
total_status = "준비완료"
for status in self.status.values() :
if "진행중" in status : total_status = "진행중"
cnt = 0
for status in self.status.values() :
if "종료" in status : cnt +=1
if cnt == len(self.status.values()) :
total_status = "종료"
percentage = (total_processeds / (self.end_page - self.start_page + 1)) * 100
line = "{:>12} 스크랩프로세스 | {:>5}% {} | 총 {:>6}건 | 성공 {:>6}건 | 실패 {:>6}건".format(
"전체",
"%.1f" % percentage,
total_status,
self.end_page - self.start_page + 1,
total_successes,
total_errors,
)
print("|{:>80} |".format(line))
print("-" * 108)
def record(self):
filename = "total_{}_{}_{}".format(self.page_type, self.start_page, self.end_page)
try:
if not (os.path.isdir(os.path.join("tmpdata", self.page_type))):
os.makedirs(os.path.join("tmpdata", self.page_type))
if not (os.path.isdir(os.path.join("tmpdata", self.page_type, "data"))):
os.makedirs(os.path.join("tmpdata", self.page_type, "data"))
except OSError as e:
if e.errno != errno.EEXIST:
print("디렉토리 생성 실패.")
raise
try :
with open("tmpdata/{}/data/{}.pkl".format(self.page_type, filename), "rb") as f:
dump_data = pickle.load(f)
except:
dump_data = pd.DataFrame()
dump_data = dump_data.append(self.data).reset_index(drop=True)
with open("tmpdata/{}/data/{}.pkl".format(self.page_type, filename), "wb") as f:
pickle.dump(dump_data, f)
        # merge with the existing data on disk
try :
file_data = pd.read_csv("tmpdata/{}/data/{}.csv".format(self.page_type, filename), encoding="utf8", index_col=False)
except FileNotFoundError :
file_data = pd.DataFrame()
file_data = file_data.append(self.data).reset_index(drop=True)
file_data.to_csv("tmpdata/{}/data/{}.csv".format(self.page_type, filename), encoding="utf8", index=False)
print("{} 로 데이터를 저장했습니다.".format(os.path.join(os.path.abspath(os.path.curdir),"tmpdata",self.page_type, "data", filename + ".csv")))
def load(self, filename):
import pickle
try :
with open("tmpdata/{}/log/{}.pkl".format(self.page_type, filename), "rb") as f:
data = pickle.load(f)
return data
except :
return []
def crawl(self, partition_key):
pass
def scrap(self, partition_key):
pass
def set_page(self, partition_key, page_nm):
pass
def _check(self, attr) :
        '''
        Checks that a class attribute exists (for internal use).
        :param attr: attribute name
        :return: None
        '''
try:
getattr(self, attr)
except AttributeError:
raise RuntimeError("FAILED : {} 를 확인해주세요.".format(attr))
def set_soup(self, partition_key):
        '''
        Setter that creates the BeautifulSoup object.
        :param url: url string argument
        :param browser: headless browser to use (Default : Chrome) # PhantomJS also possible
        :return: None
        '''
return BeautifulSoup(self.htmls[partition_key], 'html.parser')
def set_html(self, partition_key):
        '''
        Setter that stores the html document as a string.
        :param url:
        :param browser:
        :return: None
        '''
return self.drivers[partition_key].page_source
def set_driver(self, url):
        '''
        Sets up the browser driver module from the selenium package.
        :param url: url address string
        :param browser: browser to use (Default : Chrome) # PhantomJS also possible
        :return: None
        '''
driver = None
option = Options()
option.add_argument('headless')
option.add_argument('window-size=1920x1080')
option.add_argument("disable-gpu")
        # Hide the fact that the browser is headless (1)
option.add_argument(
"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")
option.add_argument("lang=ko_KR")
cur_dir = os.path.abspath(os.path.dirname(__file__))
browser_dir = os.path.join(cur_dir, "browser")
if self.browser == "Chrome":
browser_file = browser_dir + "/chromedriver.exe"
if self.headless == True :
driver = webdriver.Chrome(browser_file, chrome_options=option)
else :
driver = webdriver.Chrome(browser_file)
driver.get('about:blank')
driver.execute_script("Object.defineProperty(navigator, 'plugins', {get: function() {return[1, 2, 3, 4, 5]}})")
driver.execute_script("const getParameter = WebGLRenderingContext.getParameter;WebGLRenderingContext.prototype.getParameter = function(parameter) {if (parameter === 37445) {return 'NVIDIA Corporation'} if (parameter === 37446) {return 'NVIDIA GeForce GTX 980 Ti OpenGL Engine';}return getParameter(parameter);};")
else:
browser_file = browser_dir + "/PhantomJS.exe"
driver = webdriver.PhantomJS(browser_file)
driver.execute_script("Object.defineProperty(navigator, 'languages', {get: function() {return ['ko-KR', 'ko']}})")
driver.implicitly_wait(3)
driver.get(url)
return driver
def get_text(self, partition_key):
        '''
        Returns the text content of the instance's html document.
        :return: text (str)
        '''
        # completion of a truncated line: assumes the text is taken from the
        # partition's BeautifulSoup object
        text = self.soups[partition_key].get_text()
        return text
<reponame>martino-vic/pylexibank
"""Functionality to load a set of CLDF datasets into a sqlite db.
Notes:
- Only CLDF components will be loaded into the db.
- The names of the columns in the database are the names from the CSV files, not the
preferred labels for the corresponding CLDF properties.
"""
import json
import pathlib
import sqlite3
import contextlib
import collections
import attr
from csvw.datatypes import DATATYPES
from clldutils.misc import nfilter
from clldutils.jsonlib import load
from pycldf.terms import term_uri
from pycldf.sources import Sources
def identity(s):
return s
TYPE_MAP = {
'string': ('TEXT', identity),
'integer': ('INTEGER', identity),
'boolean': ('INTEGER', lambda s: s if s is None else int(s)),
'decimal': ('REAL', lambda s: s if s is None else float(s)),
}
BIBTEX_FIELDS = [
'address', # Publisher's address
'annote', # An annotation for annotated bibliography styles (not typical)
'author', # The name(s) of the author(s) (separated by and)
'booktitle', # The title of the book, if only part of it is being cited
'chapter', # The chapter number
'crossref', # The key of the cross-referenced entry
'edition', # The edition of a book, long form (such as "First" or "Second")
'editor', # The name(s) of the editor(s)
'eprint', # A specification of electronic publication, preprint or technical report
'howpublished', # How it was published, if the publishing method is nonstandard
'institution', # institution involved in the publishing,not necessarily the publisher
'journal', # The journal or magazine the work was published in
    'key',  # A hidden field used for specifying or overriding the alphabetical order of entries
'month', # The month of publication (or, if unpublished, the month of creation)
'note', # Miscellaneous extra information
'number', # The "(issue) number" of a journal, magazine, or tech-report
'organization', # The conference sponsor
'pages', # Page numbers, separated either by commas or double-hyphens.
'publisher', # The publisher's name
'school', # The school where the thesis was written
'series', # The series of books the book was published in
'title', # The title of the work
'type', # The field overriding the default type of publication
'url', # The WWW address
'volume', # The volume of a journal or multi-volume book
'year',
]
PROPERTY_URL_TO_COL = collections.defaultdict(dict)
for table in load(pathlib.Path(__file__).parent / 'cldf-metadata.json')['tables']:
for col in table['tableSchema']['columns']:
if col.get('propertyUrl'):
PROPERTY_URL_TO_COL[table['dc:conformsTo'].split('#')[1]][col['propertyUrl']] = \
col['name']
def insert(db, table, keys, *rows, **kw):
if rows:
if isinstance(keys, str):
keys = [k.strip() for k in keys.split(',')]
sql = "INSERT INTO {0} ({1}) VALUES ({2})".format(
table, ','.join(keys), ','.join(['?' for _ in keys]))
db.executemany(sql, rows)
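# Hedged usage sketch (dataset IDs and values are illustrative): add two rows
# to the `dataset` table through the helper above; `db` is an open sqlite3
# connection.
def _example_insert(db):
    insert(db, 'dataset', 'ID,name,version,metadata_json',
           ('wold', 'WOLD', '1.0', '{}'),
           ('ids', 'IDS', '2.0', '{}'))
    db.commit()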
def quoted(*names):
return ','.join('`{0}`'.format(name) for name in names)
@attr.s
class ColSpec(object):
"""
A `ColSpec` captures sufficient information about a `Column` for the DB schema.
"""
name = attr.ib()
csvw_type = attr.ib(default='string', converter=lambda s: s if s else 'string')
separator = attr.ib(default=None)
primary_key = attr.ib(default=None)
db_type = attr.ib(default=None)
convert = attr.ib(default=None)
cldf_name = attr.ib(default=None)
def __attrs_post_init__(self):
if self.csvw_type in TYPE_MAP:
self.db_type, self.convert = TYPE_MAP[self.csvw_type]
else:
self.db_type = 'TEXT'
self.convert = DATATYPES[self.csvw_type].to_string
if not self.cldf_name:
self.cldf_name = self.name
@property
def sql(self):
return '`{0.name}` {0.db_type}'.format(self)
@attr.s
class TableSpec(object):
"""
A `TableSpec` captures sufficient information about a `Table` for the DB schema.
"""
name = attr.ib()
columns = attr.ib(default=attr.Factory(list))
foreign_keys = attr.ib(default=attr.Factory(list))
consumes = attr.ib(default=None)
primary_key = attr.ib(default=None)
@property
def sql(self):
clauses = [col.sql for col in self.columns] # limit to non-local columns!
clauses.append('`dataset_ID` TEXT NOT NULL')
if self.primary_key:
clauses.append('PRIMARY KEY(`dataset_ID`, `{0}`)'.format(self.primary_key))
clauses.append('FOREIGN KEY(`dataset_ID`) REFERENCES dataset(`ID`) ON DELETE CASCADE')
for fk, ref, refcols in self.foreign_keys:
clauses.append('FOREIGN KEY({0}) REFERENCES {1}({2}) ON DELETE CASCADE'.format(
quoted(*fk), ref, quoted(*refcols)))
return "CREATE TABLE {0} (\n {1}\n)".format(self.name, ',\n '.join(clauses))
def schema(ds):
"""
Convert the table and column descriptions of a `Dataset` into specifications for the
DB schema.
:param ds:
:return: A pair (tables, reference_tables).
"""
tables, ref_tables = {}, {}
table_lookup = {t.url.string: t for t in ds.tables if ds.get_tabletype(t)}
for table in table_lookup.values():
spec = TableSpec(ds.get_tabletype(table))
spec.primary_key = [
c for c in table.tableSchema.columns if
c.propertyUrl and c.propertyUrl.uri == term_uri('id')][0].name
# Map the column name to the default:
if spec.name in PROPERTY_URL_TO_COL:
spec.primary_key = PROPERTY_URL_TO_COL[spec.name][term_uri('id')]
for c in table.tableSchema.columns:
if c.propertyUrl and c.propertyUrl.uri == term_uri('source'):
# A column referencing sources is replaced by an association table.
otype = ds.get_tabletype(table).replace('Table', '')
ref_tables[ds.get_tabletype(table)] = TableSpec(
'{0}Source'.format(otype), # The name of the association table.
[ColSpec(otype + '_ID'), ColSpec('Source_ID'), ColSpec('Context')],
[
( # The foreign key to the referencing object:
['dataset_ID', otype + '_ID'],
ds.get_tabletype(table),
['dataset_ID', spec.primary_key]),
( # The foreign key to the referenced source:
['dataset_ID', 'Source_ID'],
'SourceTable',
['dataset_ID', 'ID']),
],
c.name)
else:
cname = c.header
if c.propertyUrl and spec.name in PROPERTY_URL_TO_COL:
if c.propertyUrl.uri in PROPERTY_URL_TO_COL[spec.name]:
cname = PROPERTY_URL_TO_COL[spec.name][c.propertyUrl.uri]
spec.columns.append(ColSpec(
cname,
c.datatype.base if c.datatype else c.datatype,
c.separator,
cname == spec.primary_key,
cldf_name=c.header))
listvalued = set(c.name for c in table.tableSchema.columns if c.separator)
for fk in table.tableSchema.foreignKeys:
if fk.reference.schemaReference:
# We only support Foreign Key references between tables!
continue # pragma: no cover
ref = table_lookup[fk.reference.resource.string]
ref_type = ds.get_tabletype(ref)
if ref_type:
colRefs = sorted(fk.columnReference)
if any(c in listvalued for c in colRefs):
# We drop list-valued foreign keys
continue # pragma: no cover
if spec.name in PROPERTY_URL_TO_COL: # Current table is a standard CLDF component.
# Must map foreign keys
colRefs = []
for c in sorted(fk.columnReference):
c = ds[spec.name, c]
if c.propertyUrl and c.propertyUrl.uri in PROPERTY_URL_TO_COL[spec.name]:
colRefs.append(PROPERTY_URL_TO_COL[spec.name][c.propertyUrl.uri])
else:
colRefs.append(c.header) # pragma: no cover
rcolRefs = sorted(fk.reference.columnReference)
if ref_type in PROPERTY_URL_TO_COL:
# Must map foreign key targets!
rcolRefs = []
for c in sorted(fk.reference.columnReference):
c = ds[ref_type, c]
if c.propertyUrl and c.propertyUrl.uri in PROPERTY_URL_TO_COL[ref_type]:
rcolRefs.append(PROPERTY_URL_TO_COL[ref_type][c.propertyUrl.uri])
else:
rcolRefs.append(c.header) # pragma: no cover
spec.foreign_keys.append((
tuple(['dataset_ID'] + colRefs),
ds.get_tabletype(table_lookup[fk.reference.resource.string]),
tuple(['dataset_ID'] + rcolRefs)))
tables[spec.name] = spec
# must determine the order in which tables must be created!
ordered = collections.OrderedDict()
i = 0
#
# We loop through the tables repeatedly, and whenever we find one, which has all
# referenced tables already in ordered, we move it from tables to ordered.
#
while tables and i < 100:
i += 1
for table in list(tables.keys()):
if all(ref[1] in ordered for ref in tables[table].foreign_keys):
# All referenced tables are already created.
ordered[table] = tables.pop(table)
break
if tables: # pragma: no cover
raise ValueError('there seem to be cyclic dependencies between the tables')
return list(ordered.values()), ref_tables
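# Hedged usage sketch (the metadata path is illustrative; assumes a pycldf
# version providing Dataset.from_metadata and get_tabletype): derive the DB
# schema specs for a CLDF dataset on disk.
def _example_schema():
    from pycldf import Dataset as CLDFDataset
    ds = CLDFDataset.from_metadata('cldf/cldf-metadata.json')
    tables, ref_tables = schema(ds)
    return [t.name for t in tables], sorted(ref_tables)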
class Database(object):
def __init__(self, fname):
"""
A `Database` instance is initialized with a file path.
:param fname: Path to a file in the file system where the db is to be stored.
"""
self.fname = pathlib.Path(fname)
def drop(self):
if self.fname.exists():
self.fname.unlink()
def connection(self):
return contextlib.closing(sqlite3.connect(self.fname.as_posix()))
def create(self, force=False, exists_ok=False):
"""
Creates a db file with the core schema.
:param force: If `True` an existing db file will be overwritten.
"""
if self.fname and self.fname.exists():
if force:
self.drop()
elif exists_ok:
return
else:
raise ValueError('db file already exists, use force=True to overwrite')
with self.connection() as db:
db.execute(
"""\
CREATE TABLE dataset (
ID TEXT PRIMARY KEY NOT NULL,
name TEXT,
version TEXT,
metadata_json TEXT
)""")
db.execute("""\
CREATE TABLE datasetmeta (
dataset_ID TEXT ,
key TEXT,
value TEXT,
PRIMARY KEY (dataset_ID, key),
FOREIGN KEY(dataset_ID) REFERENCES dataset(ID)
)""")
db.execute("""\
CREATE TABLE SourceTable (
dataset_ID TEXT ,
ID TEXT ,
bibtex_type TEXT,
{0}
extra TEXT,
PRIMARY KEY (dataset_ID, ID),
FOREIGN KEY(dataset_ID) REFERENCES dataset(ID)
)""".format('\n '.join('`{0}` TEXT,'.format(f) for f in BIBTEX_FIELDS)))
def fetchone(self, sql, params=None, conn=None, verbose=False):
return self._fetch(sql, 'fetchone', params, conn, verbose=verbose)
def fetchall(self, sql, params=None, conn=None, verbose=False):
return self._fetch(sql, 'fetchall', params, conn, verbose=verbose)
def _fetch(self, sql, method, params, conn, verbose=False):
sql = self.sql.get(sql, sql)
def _do(conn, sql, method):
cu = conn.cursor()
if verbose:
print(sql)
cu.execute(sql, params or ())
return getattr(cu, method)()
if not conn:
with self.connection() as conn:
return _do(conn, sql, method)
else:
return _do(conn, sql, method)
@property
def tables(self):
res = {r[0]: {} for r in self.fetchall(
"SELECT name FROM sqlite_master WHERE type='table'")}
for t in res:
res[t] = {r[1]: r[2] for r in self.fetchall(
"PRAGMA table_info({0})".format(t))}
return res
def unload(self, dataset_id, args=None):
dataset_id = getattr(dataset_id, 'id', dataset_id)
with self.connection() as db:
for table in self.tables:
if table != 'dataset':
db.execute(
"DELETE FROM {0} WHERE dataset_ID = ?".format(table),
(dataset_id,))
db.execute("DELETE FROM dataset WHERE ID = ?", (dataset_id,))
db.commit()
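    # Hedged usage sketch (file name and dataset ID are illustrative):
    #     db = Database('lexibank.sqlite')
    #     db.create(exists_ok=True)
    #     print(db.tables)      # {table name: {column: type}}
    #     db.unload('wold')     # remove one dataset's rows again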
def _create_table_if_not_exists(self, table):
if table.name in self.tables:
return False
| |
# -*- coding: utf-8 -*-
"""NASNet-A models for Keras.
NASNet refers to Neural Architecture Search Network, a family of models
that were designed automatically by learning the model architectures
directly on the dataset of interest.
Here we consider NASNet-A, the highest performance model that was found
for the CIFAR-10 dataset, and then extended to ImageNet 2012 dataset,
obtaining state of the art performance on CIFAR-10 and ImageNet 2012.
Only the NASNet-A models, and their respective weights, which are suited
for ImageNet 2012 are provided.
The below table describes the performance on ImageNet 2012:
--------------------------------------------------------------------------------
Architecture | Top-1 Acc | Top-5 Acc | Multiply-Adds | Params (M)
--------------------------------------------------------------------------------
| NASNet-A (4 @ 1056) | 74.0 % | 91.6 % | 564 M | 5.3 |
| NASNet-A (6 @ 4032) | 82.7 % | 96.2 % | 23.8 B | 88.9 |
--------------------------------------------------------------------------------
Reference:
- [Learning Transferable Architectures for Scalable Image Recognition](
https://arxiv.org/abs/1707.07012) (CVPR 2018)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
import h5py
import numpy as np
import tensorflow as tf
from keras_applications.imagenet_utils import _obtain_input_shape
from models.nasnet_utils_do import ScheduledDropout, ConcreteDroppath
from tensorflow.keras import backend as K
from tensorflow.keras import layers
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Cropping2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout,SpatialDropout2D
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import SeparableConv2D
from tensorflow.keras.layers import ZeroPadding2D
from tensorflow.keras.models import Model
from tensorflow.keras.utils import get_file
from tensorflow.keras.utils import get_source_inputs
from tensorflow.python.keras.applications.imagenet_utils import correct_pad
from . import NetType
TF_NASNET_LARGE_WEIGHT_PATH = 'https://storage.googleapis.com/tensorflow/keras-applications/nasnet/NASNet-large.h5'
TF_NASNET_LARGE_WEIGHT_PATH_NO_TOP = 'https://storage.googleapis.com/tensorflow/keras-applications/nasnet/NASNet-large-no-top.h5'
def NASNet_large_do(net_type, include_top=True, do_p=0.3, weights='imagenet', input_tensor=None,
input_shape=None, total_training_steps=None, penultimate_filters=4032, num_blocks=6, stem_block_filters=96,
skip_reduction=True, filter_multiplier=2, pooling=None, classes=1000, activation='softmax'):
"""Instantiates a NASNet model.
Optionally loads weights pre-trained
on ImageNet. This model is available for TensorFlow only,
and can only be used with inputs following the TensorFlow
data format `(width, height, channels)`.
You should set `image_data_format='channels_last'` in your Keras config
located at ~/.keras/keras.json.
Note that the default input image size for this model is 331x331.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
For loading `imagenet` weights, `input_shape` should be (331, 331, 3)
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(331, 331, 3)` for NASNetLarge.
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
penultimate_filters: Number of filters in the penultimate layer.
NASNet models use the notation `NASNet (N @ P)`, where:
- N is the number of blocks
- P is the number of penultimate filters
num_blocks: Number of repeated blocks of the NASNet model.
NASNet models use the notation `NASNet (N @ P)`, where:
- N is the number of blocks
- P is the number of penultimate filters
stem_block_filters: Number of filters in the initial stem block
skip_reduction: Whether to skip the reduction step at the tail
end of the network.
filter_multiplier: Controls the width of the network.
- If `filter_multiplier` < 1.0, proportionally decreases the number
of filters in each layer.
- If `filter_multiplier` > 1.0, proportionally increases the number
of filters in each layer.
- If `filter_multiplier` = 1, default number of filters from the
paper are used at each layer.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
if K.backend() != 'tensorflow':
raise RuntimeError('The Xception model is only available with '
'the TensorFlow backend.')
if K.image_data_format() != 'channels_last':
warnings.warn('The NASNet model is only available for the '
'input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height). '
'You should set `image_data_format="channels_last"` in your Keras '
'config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=331,
min_size=32,
data_format=K.image_data_format(),
require_flatten=False,
weights=None) # weights=None to prevent input channels equality check
print('input shape', input_shape)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if penultimate_filters % (24 * (filter_multiplier ** 2)) != 0:
raise ValueError(
'For NASNet-A models, the `penultimate_filters` must be a multiple '
'of 24 * (`filter_multiplier` ** 2). Current value: %d' %
penultimate_filters)
filters = penultimate_filters // 24
x = Conv2D(stem_block_filters, (3, 3), strides=(2, 2), padding="same",
use_bias=False, name='stem_conv1', kernel_initializer='he_normal')(img_input)
x = BatchNormalization(momentum=0.9997, epsilon=1e-3, name='stem_bn1')(x)
if net_type == NetType.mc:
x = Dropout(do_p)(x, training=True)
elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
#conv1
total_num_cells = 4 + 3 * num_blocks
cell_counter = 0
p = None
x, p = _reduction_a_cell_do(x, p, filters // (filter_multiplier ** 2), net_type=net_type, cell_num=cell_counter,
total_num_cells=total_num_cells, total_training_steps=total_training_steps,
do_p=do_p, block_id='stem_1')
cell_counter += 1
#conv2
x, p = _reduction_a_cell_do(x, p, filters // filter_multiplier, net_type=net_type, cell_num=cell_counter,
total_num_cells=total_num_cells, total_training_steps=total_training_steps,
do_p=do_p, block_id='stem_2')
cell_counter += 1
for i in range(num_blocks):
x, p = _normal_a_cell_do(x, p, filters, net_type=net_type, cell_num=cell_counter,
total_num_cells=total_num_cells, total_training_steps=total_training_steps,
do_p=do_p, block_id='%d' % (i))
cell_counter += 1
#conv3
x, p0 = _reduction_a_cell_do(x, p, filters * filter_multiplier, net_type=net_type, cell_num=cell_counter,
total_num_cells=total_num_cells, total_training_steps=total_training_steps,
do_p=do_p, block_id='reduce_%d' % (num_blocks))
cell_counter += 1
p = p0 if not skip_reduction else p
for i in range(num_blocks):
x, p = _normal_a_cell_do(x, p, filters * filter_multiplier, net_type=net_type, cell_num=cell_counter,
total_num_cells=total_num_cells, total_training_steps=total_training_steps,
do_p=do_p, block_id='%d' % (num_blocks + i + 1))
cell_counter += 1
#conv4
x, p0 = _reduction_a_cell_do(x, p, filters * filter_multiplier ** 2, net_type=net_type, cell_num=cell_counter,
total_num_cells=total_num_cells, total_training_steps=total_training_steps,
do_p=do_p, block_id='reduce_%d' % (2 * num_blocks))
cell_counter += 1
p = p0 if not skip_reduction else p
for i in range(num_blocks):
x, p = _normal_a_cell_do(x, p, filters * filter_multiplier ** 2, net_type=net_type, cell_num=cell_counter,
total_num_cells=total_num_cells, total_training_steps=total_training_steps,
do_p=do_p, block_id='%d' % (2 * num_blocks + i + 1))
cell_counter += 1
#conv5
x = Activation('relu')(x)
# if include_top:
x = GlobalAveragePooling2D()(x)
x = Dense(classes, activation=activation, name='predictions')(x) #!!!
# else:
# if pooling == 'avg':
# x = GlobalAveragePooling2D()(x)
# elif pooling == 'max':
# x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='NASNet')
# Create donor model
if input_shape[-1] > 3 and weights is not None:
input_shape1 = (*input_shape[:-1], 3)
donor_model = get_donor_model(include_top, input_tensor=None,
input_shape=input_shape1,
penultimate_filters=penultimate_filters,
num_blocks=num_blocks,
stem_block_filters=stem_block_filters,
skip_reduction=skip_reduction,
pooling=pooling,
classes=classes)
#return model #TODO: remove to implement weighs copy by name
# load weights
if weights is not None and input_shape[-1] >= 3:
if weights == 'imagenet':
if include_top:
print('Loading pretrained ImageNet weights, include top for NASNet backbone')
weights_path = get_file('nasnet_large.h5',
TF_NASNET_LARGE_WEIGHT_PATH,
cache_subdir='models',
file_hash='11577c9a518f0070763c2b964a382f17')
else:
print('Loading pretrained ImageNet weights, exclude top for NASNet backbone')
                weights_path
# Copyright 2014 SAP AG or an SAP affiliate company.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# While not a requirement of the license, if you do modify this file, we
# would appreciate hearing about it. Please email <EMAIL>
"""SQLAnydb - A DB API v2.0 compatible interface to SQL Anywhere.
This package provides a DB API v2.0 interface
http://www.python.org/dev/peps/pep-0249
to the sqlanywhere dbcapi library.
"""
__version__ = '1.0.6.1'
import os
import sys
import atexit
import time
import logging
try:
import exceptions
# pre 3.0
Exception = exceptions.StandardError
bytes = str
str = unicode
v3list = lambda x: x
except:
# 3.0 or later
xrange = range
v3list = list
import codecs
from ctypes import *
from struct import pack, unpack, calcsize
lg = logging.getLogger(__name__)
API_VERSION = 1
API_VERSION_EX = 2
# NB: The following must match those in sacapi.h for the specified API_VERSION!
A_INVALID_TYPE = 0
A_BINARY = 1
A_STRING = 2
A_DOUBLE = 3
A_VAL64 = 4
A_UVAL64 = 5
A_VAL32 = 6
A_UVAL32 = 7
A_VAL16 = 8
A_UVAL16 = 9
A_VAL8 = 10
A_UVAL8 = 11
DT_NOTYPE = 0
DT_DATE = 384
DT_TIME = 388
DT_TIMESTAMP = 392
DT_VARCHAR = 448
DT_FIXCHAR = 452
DT_LONGVARCHAR = 456
DT_STRING = 460
DT_DOUBLE = 480
DT_FLOAT = 482
DT_DECIMAL = 484
DT_INT = 496
DT_SMALLINT = 500
DT_BINARY = 524
DT_LONGBINARY = 528
DT_TINYINT = 604
DT_BIGINT = 608
DT_UNSINT = 612
DT_UNSSMALLINT = 616
DT_UNSBIGINT = 620
DT_BIT = 624
DT_LONGNVARCHAR = 640
DD_INVALID = 0x0
DD_INPUT = 0x1
DD_OUTPUT = 0x2
DD_INPUT_OUTPUT = 0x3
class DataValue(Structure):
"""Must match a_sqlany_data_value."""
_fields_ = [("buffer", POINTER(c_char)),
("buffer_size", c_size_t),
("length", POINTER(c_size_t)),
("type", c_int),
("is_null", POINTER(c_int))]
class BindParam(Structure):
"""Must match a_sqlany_bind_param."""
_fields_ = [("direction", c_int),
("value", DataValue),
("name", c_char_p)]
class ColumnInfo(Structure):
"""Must match a_sqlany_column_info."""
_fields_ = [("name", c_char_p),
("type", c_int),
("native_type", c_int),
("precision", c_short),
("scale", c_short),
("max_size", c_size_t),
("nullable", c_int32)]
class DataInfo(Structure):
"""Must match a_sqlany_data_info."""
_fields_ = [("index", c_int),
("type", c_int),
("is_null", c_int),
("data_size", c_size_t)]
def init_sacapi(api):
sacapi_i32 = c_int32
sacapi_bool = sacapi_i32
sacapi_u32 = c_uint32
p_sacapi_u32 = POINTER(sacapi_u32)
p_sqlany_interface_context = c_void_p
p_sqlany_connection = c_void_p
p_sqlany_stmt = c_void_p
p_sqlany_bind_param = c_void_p
p_sqlany_bind_param_info = c_void_p
p_sqlany_data_value = c_void_p
p_sqlany_data_info = c_void_p
p_sqlany_column_info = c_void_p
def defun(name, *types):
try:
setattr(api, name, CFUNCTYPE(*types)((name, api),))
except:
pass
defun("sqlany_init",
sacapi_bool, c_char_p, sacapi_u32, p_sacapi_u32)
defun("sqlany_init_ex",
p_sqlany_interface_context, c_char_p, sacapi_u32, p_sacapi_u32)
defun("sqlany_fini",
None)
defun("sqlany_fini_ex",
None, p_sqlany_interface_context)
defun("sqlany_new_connection",
p_sqlany_connection)
defun("sqlany_new_connection_ex",
p_sqlany_connection, p_sqlany_interface_context)
defun("sqlany_free_connection",
None, p_sqlany_connection)
defun("sqlany_make_connection",
p_sqlany_connection, c_void_p)
defun("sqlany_make_connection_ex",
p_sqlany_connection, p_sqlany_interface_context, c_void_p)
defun("sqlany_connect",
sacapi_bool, p_sqlany_connection, c_char_p)
defun("sqlany_disconnect",
sacapi_bool, p_sqlany_connection)
defun("sqlany_cancel",
None, p_sqlany_connection)
defun("sqlany_execute_immediate",
sacapi_bool, p_sqlany_connection, c_char_p)
defun("sqlany_prepare",
p_sqlany_stmt, p_sqlany_connection, c_char_p)
defun("sqlany_free_stmt",
None, p_sqlany_stmt)
defun("sqlany_num_params",
sacapi_i32, p_sqlany_stmt)
defun("sqlany_describe_bind_param",
sacapi_bool, p_sqlany_stmt, sacapi_u32, p_sqlany_bind_param)
defun("sqlany_bind_param",
sacapi_bool, p_sqlany_stmt, sacapi_u32, p_sqlany_bind_param)
defun("sqlany_send_param_data",
sacapi_bool, p_sqlany_stmt, sacapi_u32, c_void_p, c_size_t)
defun("sqlany_reset",
sacapi_bool, p_sqlany_stmt)
defun("sqlany_get_bind_param_info",
sacapi_bool, p_sqlany_stmt, sacapi_u32, p_sqlany_bind_param_info)
defun("sqlany_execute",
sacapi_bool, p_sqlany_stmt)
defun("sqlany_execute_direct",
p_sqlany_stmt, p_sqlany_connection, c_char_p)
defun("sqlany_fetch_absolute",
sacapi_bool, p_sqlany_stmt, sacapi_i32)
defun("sqlany_fetch_next",
sacapi_bool, p_sqlany_stmt)
defun("sqlany_get_next_result",
sacapi_bool, p_sqlany_stmt)
defun("sqlany_affected_rows",
sacapi_i32, p_sqlany_stmt)
defun("sqlany_num_cols",
sacapi_i32, p_sqlany_stmt)
defun("sqlany_num_rows",
sacapi_i32, p_sqlany_stmt)
defun("sqlany_get_column",
sacapi_bool, p_sqlany_stmt, sacapi_u32, p_sqlany_data_value)
defun("sqlany_get_data",
sacapi_i32, p_sqlany_stmt, sacapi_u32, c_size_t, c_void_p, c_size_t)
defun("sqlany_get_data_info",
sacapi_bool, p_sqlany_stmt, sacapi_u32, p_sqlany_data_info)
defun("sqlany_get_column_info",
sacapi_bool, p_sqlany_stmt, sacapi_u32, p_sqlany_column_info)
defun("sqlany_commit",
sacapi_bool, p_sqlany_connection)
defun("sqlany_rollback",
sacapi_bool, p_sqlany_connection)
defun("sqlany_client_version",
sacapi_bool, c_void_p, c_size_t)
defun("sqlany_client_version_ex",
sacapi_bool, p_sqlany_interface_context, c_void_p, c_size_t)
defun("sqlany_error",
sacapi_i32, p_sqlany_connection, c_void_p, c_size_t)
defun("sqlany_sqlstate",
c_size_t, p_sqlany_connection, c_void_p, c_size_t)
defun("sqlany_clear_error",
None, p_sqlany_connection)
return api
# NB: The preceding must match those in sacapi.h for the specified API_VERSION!
class DBAPISet(frozenset):
"""A special type of set for which A == x is true if A is a
DBAPISet and x is a member of that set."""
def __eq__(self, other):
if isinstance(other, DBAPISet):
return frozenset.__eq__(self, other)
else:
return other in self
def __ne__(self, other):
return not self == other
def __hash__(self):
return frozenset.__hash__(self)
STRING = DBAPISet([DT_VARCHAR,
DT_FIXCHAR,
DT_LONGVARCHAR,
DT_STRING,
DT_LONGNVARCHAR])
BINARY = DBAPISet([DT_BINARY,
DT_LONGBINARY])
NUMBER = DBAPISet([DT_DOUBLE,
DT_FLOAT,
DT_DECIMAL,
DT_INT,
DT_SMALLINT,
DT_TINYINT])
DATE = DBAPISet([DT_DATE])
TIME = DBAPISet([DT_TIME])
TIMESTAMP = DBAPISet([DT_TIMESTAMP])
DATETIME = TIMESTAMP
ROWID = DBAPISet()
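# These PEP 249 type objects compare equal to their member type codes, e.g.
# STRING == DT_VARCHAR is True (a membership test), while STRING == BINARY is
# plain frozenset equality and is False.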
ToPyType = {DT_DATE : DATE,
DT_TIME : TIME,
DT_TIMESTAMP : TIMESTAMP,
DT_VARCHAR : STRING,
DT_FIXCHAR : STRING,
DT_LONGVARCHAR : STRING,
DT_STRING : STRING,
DT_DOUBLE : NUMBER,
DT_FLOAT : NUMBER,
DT_DECIMAL : NUMBER,
DT_INT : NUMBER,
DT_SMALLINT : NUMBER,
DT_BINARY : BINARY,
DT_LONGBINARY : BINARY,
DT_TINYINT : NUMBER,
DT_BIGINT : NUMBER,
DT_UNSINT : NUMBER,
DT_UNSSMALLINT : NUMBER,
DT_UNSBIGINT : NUMBER,
DT_BIT : NUMBER,
DT_LONGNVARCHAR : STRING}
class Error(Exception):
pass
class Warning(Exception):
"""Raise for important warnings like data truncation while inserting."""
pass
class InterfaceError(Error):
"""Raise for interface, not database, related errors."""
pass
class DatabaseError(Error):
pass
class InternalError(DatabaseError):
"""Raise for internal errors: cursor not valid, etc."""
pass
class OperationalError(DatabaseError):
"""Raise for database related errors, not under programmer's control:
unexpected disconnect, memory allocation error, etc."""
pass
class ProgrammingError(DatabaseError):
"""Raise for programming errors: table not found, incorrect syntax, etc."""
pass
class IntegrityError(DatabaseError):
"""Raise for database constraint failures: missing primary key, etc."""
pass
class DataError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
"""Raise for methods or APIs not supported by database."""
pass
def standardErrorHandler(connection, cursor, errorclass, errorvalue):
error=(errorclass, errorvalue)
if connection:
connection.messages.append(error)
if cursor:
cursor.messages.append(error)
if errorclass != Warning:
raise errorclass(errorvalue)
# format types indexed by A_* values
format = 'xxxdqQiIhHbB'
def mk_valueof(raw, char_set):
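    # The returned valueof() converts one dbcapi data value into a Python
    # object: NULL columns become None, type codes listed in `raw` come back as
    # raw bytes, A_STRING buffers are decoded with `char_set`, and the
    # remaining numeric types are unpacked using the struct format character
    # looked up in `format` above.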
def valueof(data):
if data.is_null.contents:
return None
elif data.type in raw:
return data.buffer[:data.length.contents.value]
elif data.type in (A_STRING,):
return data.buffer[:data.length.contents.value].decode(char_set)
else:
fmt = format[data.type]
return unpack(fmt, data.buffer[:calcsize(fmt)])[0]
return valueof
def mk_assign(char_set):
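    # The returned assign() copies a Python value into a dbcapi bind parameter:
    # untyped (A_INVALID_TYPE) parameters get a type inferred from the value
    # (A_VAL32/A_VAL64 for ints depending on magnitude, A_DOUBLE for floats,
    # A_BINARY for Binary, A_STRING otherwise), strings are encoded with
    # `char_set`, and buffer, buffer_size and length are filled in so the C
    # layer can read the data (and write it back for output parameters).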
def assign(param, value):
is_null = value is None
param.value.is_null = pointer(c_int(is_null))
if is_null and param.direction == DD_INPUT:
value = 0
if param.value.type == A_INVALID_TYPE:
if isinstance(value, int):
if abs(value) > 4294967295:
param.value.type = A_VAL64
else:
param.value.type = A_VAL32
elif isinstance(value, float):
param.value.type = A_DOUBLE
elif isinstance(value, Binary):
param.value.type = A_BINARY
else:
param.value.type = A_STRING
fmt = format[param.value.type]
if fmt == 'x':
if isinstance(value, bytes):
pass
elif isinstance(value, str):
value = value.encode(char_set)
else:
value = str(value).encode(char_set)
size = length = len(value)
if param.direction != DD_INPUT:
if size < param.value.buffer_size:
size = param.value.buffer_size
buffer = create_string_buffer(value)
else:
buffer = create_string_buffer(pack(fmt, value))
size = length = calcsize(fmt)
param.value.buffer = cast(buffer, POINTER(c_char))
param.value.buffer_size = c_size_t(size)
param.value.length = pointer(c_size_t(length))
return assign
threadsafety = 1
apilevel = '2.0'
paramstyle = 'qmark'
__all__ = [ 'threadsafety', 'apilevel', 'paramstyle', 'connect']
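# PEP 249 module globals: threadsafety 1 means threads may share the module but
# not connections, and paramstyle 'qmark' means placeholders are written as
# question marks, e.g. cursor.execute("SELECT * FROM t WHERE id = ?", (42,)).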
def load_library(*names):
for name in names:
try:
dll = cdll.LoadLibrary(name)
lg.debug("Successfully loaded dbcapi library '%s' with name '%s'", dll, name)
return init_sacapi(dll)
except OSError as ose:
continue
raise InterfaceError("Could not load dbcapi. Tried: " + ','.join(names))
class Root(object):
def __init__(self, name):
lg.debug("Attempting to load dbcapi library")
self.api = load_library('dbcapi.dll', 'libdbcapi_r.so',
'libdbcapi_r.dylib')
ver = c_uint(0)
try:
self.api.sqlany_init_ex.restype = POINTER(c_int)
            lg.debug("Attempting to initialize dbcapi context (self.api.sqlany_init_ex) with arguments:" \
" app name: '%s', api version: '%s'",
name, API_VERSION_EX)
context = self.api.sqlany_init_ex(name.encode('utf-8'), API_VERSION_EX, byref(ver))
if not context:
                lg.error("Failed to initialize dbcapi context (self.api.sqlany_init_ex returned NULL)," \
                         " perhaps you are missing some required sqlanywhere libraries?")
                raise InterfaceError("Failed to initialize dbcapi context, dbcapi version %d required." \
                                     " Perhaps you are missing some sqlanywhere libraries?" %
                                     API_VERSION_EX)
else:
                lg.debug("Initialization of dbcapi context successful, max api version supported: %s", ver)
def new_connection():
return self.api.sqlany_new_connection_ex(context)
self.api.sqlany_new_connection = new_connection
def fini():
self.api.sqlany_fini_ex(context)
self.api.sqlany_fini = fini
def client_version():
length = 1000
buffer = create_string_buffer(length)
ret = self.api.sqlany_client_version_ex(context, buffer, length)
if ret:
vers = buffer.value
else:
vers = None
return vers
self.api.sqlany_client_version = client_version
except InterfaceError:
raise
except:
if (not self.api.sqlany_init(name.encode('utf-8'), API_VERSION, byref(ver))):
raise InterfaceError("dbcapi version %d required." %
API_VERSION)
self.api.sqlany_new_connection.restype = POINTER(c_int)
# Need to set return type to some pointer type other than void
# to avoid automatic conversion to a (32 bit) int.
self.api.sqlany_prepare.restype = POINTER(c_int)
atexit.register(self.__del__)
def __del__(self):
# if we fail to load the library, then we won't get a chance
# to even set the 'api' instance variable
if hasattr(self, "api") and self.api:
            lg.debug("__del__ called on sqlany.Root object, finalizing dbcapi context")
self.api.sqlany_fini()
self.api = None
def connect(*args, **kwargs):
"""Constructor for creating a connection to a database."""
return Connection(args, kwargs)
class Connection(object):
# cache the api object so we don't have to load and unload every single time
cls_parent = None
def __init__(self, args, kwargs, parent = None):
# make it so we don't load Root() and therefore the | |
self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "DbVaultProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class RestoreJob(
ros_cdk_core.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-hbr.RestoreJob",
):
'''A ROS resource type: ``ALIYUN::HBR::RestoreJob``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "RestoreJobProps",
enable_resource_property_constraint: typing.Optional[builtins.bool] = None,
) -> None:
'''Create a new ``ALIYUN::HBR::RestoreJob``.
        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param props: - resource properties.
        :param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrErrorMessage")
def attr_error_message(self) -> ros_cdk_core.IResolvable:
'''Attribute ErrorMessage: Error message of restore job.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrErrorMessage"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrRestoreId")
def attr_restore_id(self) -> ros_cdk_core.IResolvable:
'''Attribute RestoreId: Restore job ID.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRestoreId"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrRestoreType")
def attr_restore_type(self) -> ros_cdk_core.IResolvable:
'''Attribute RestoreType: Restore type.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRestoreType"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrSourceType")
def attr_source_type(self) -> ros_cdk_core.IResolvable:
'''Attribute SourceType: Source type.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrSourceType"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrStatus")
def attr_status(self) -> ros_cdk_core.IResolvable:
'''Attribute Status: Restore job status.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrStatus"))
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.RestoreJobProps",
jsii_struct_bases=[],
name_mapping={
"restore_type": "restoreType",
"snapshot_id": "snapshotId",
"source_client_id": "sourceClientId",
"source_instance_id": "sourceInstanceId",
"source_type": "sourceType",
"target_client_id": "targetClientId",
"target_instance_id": "targetInstanceId",
"target_path": "targetPath",
"vault_id": "vaultId",
},
)
class RestoreJobProps:
def __init__(
self,
*,
restore_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
snapshot_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_client_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_instance_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
target_client_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
target_instance_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
target_path: typing.Union[builtins.str, ros_cdk_core.IResolvable],
vault_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
'''Properties for defining a ``ALIYUN::HBR::RestoreJob``.
:param restore_type: Property restoreType: Restore type.
:param snapshot_id: Property snapshotId: Snapshot ID.
:param source_client_id: Property sourceClientId: Source client ID. It should be provided when SourceType=FILE.
:param source_instance_id: Property sourceInstanceId: Source instance ID. It should be provided when SourceType=ECS_FILE.
:param source_type: Property sourceType: Source type.
:param target_client_id: Property targetClientId: Target client ID. It should be provided when RestoreType=FILE.
:param target_instance_id: Property targetInstanceId: Target instance ID. It should be provided when RestoreType=ECS_FILE.
:param target_path: Property targetPath: Target path. For instance, "/".
:param vault_id: Property vaultId: Vault ID.
'''
self._values: typing.Dict[str, typing.Any] = {
"restore_type": restore_type,
"snapshot_id": snapshot_id,
"source_client_id": source_client_id,
"source_instance_id": source_instance_id,
"source_type": source_type,
"target_client_id": target_client_id,
"target_instance_id": target_instance_id,
"target_path": target_path,
"vault_id": vault_id,
}
@builtins.property
def restore_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property restoreType: Restore type.'''
result = self._values.get("restore_type")
assert result is not None, "Required property 'restore_type' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def snapshot_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property snapshotId: Snapshot ID.'''
result = self._values.get("snapshot_id")
assert result is not None, "Required property 'snapshot_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property sourceClientId: Source client ID.
It should be provided when SourceType=FILE.
'''
result = self._values.get("source_client_id")
assert result is not None, "Required property 'source_client_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_instance_id(
self,
) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property sourceInstanceId: Source instance ID.
It should be provided when SourceType=ECS_FILE.
'''
result = self._values.get("source_instance_id")
assert result is not None, "Required property 'source_instance_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property sourceType: Source type.'''
result = self._values.get("source_type")
assert result is not None, "Required property 'source_type' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def target_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property targetClientId: Target client ID.
It should be provided when RestoreType=FILE.
'''
result = self._values.get("target_client_id")
assert result is not None, "Required property 'target_client_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def target_instance_id(
self,
) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property targetInstanceId: Target instance ID.
It should be provided when RestoreType=ECS_FILE.
'''
result = self._values.get("target_instance_id")
assert result is not None, "Required property 'target_instance_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def target_path(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property targetPath: Target path.
For instance, "/".
'''
result = self._values.get("target_path")
assert result is not None, "Required property 'target_path' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def vault_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property vaultId: Vault ID.'''
result = self._values.get("vault_id")
assert result is not None, "Required property 'vault_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RestoreJobProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
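# Usage sketch (hypothetical values: `scope` would be a ros_cdk_core Construct
# from an enclosing stack, and the IDs below are placeholders, not real ones):
#
#     props = RestoreJobProps(
#         restore_type="ECS_FILE", snapshot_id="s-xxxxxxxx",
#         source_client_id="c-xxxxxxxx", source_instance_id="i-source",
#         source_type="ECS_FILE", target_client_id="c-yyyyyyyy",
#         target_instance_id="i-target", target_path="/", vault_id="v-xxxxxxxx",
#     )
#     RestoreJob(scope, "MyRestoreJob", props)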
class RosBackupClients(
ros_cdk_core.RosResource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-hbr.RosBackupClients",
):
'''A ROS template type: ``ALIYUN::HBR::BackupClients``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "RosBackupClientsProps",
enable_resource_property_constraint: builtins.bool,
) -> None:
'''Create a new ``ALIYUN::HBR::BackupClients``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
'''
:param props: -
'''
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
@jsii.python.classproperty # type: ignore[misc]
@jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
'''The resource type name for this resource class.'''
return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrClientIds")
def attr_client_ids(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: ClientIds: ID list of clients installed in instances
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrClientIds"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrInstanceIds")
def attr_instance_ids(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: InstanceIds: ID list of instances to install backup client
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceIds"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="rosProperties")
def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="enableResourcePropertyConstraint")
def enable_resource_property_constraint(self) -> builtins.bool:
return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))
@enable_resource_property_constraint.setter
def enable_resource_property_constraint(self, value: builtins.bool) -> None:
jsii.set(self, "enableResourcePropertyConstraint", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="instanceIds")
def instance_ids(
self,
) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]:
'''
:Property: instanceIds: ID list of instances to install backup client
'''
return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]], jsii.get(self, "instanceIds"))
@instance_ids.setter
def instance_ids(
self,
value: typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]],
) -> None:
jsii.set(self, "instanceIds", value)
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.RosBackupClientsProps",
jsii_struct_bases=[],
name_mapping={"instance_ids": "instanceIds"},
)
class RosBackupClientsProps:
def __init__(
self,
*,
instance_ids: typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[builtins.str, ros_cdk_core.IResolvable]]],
) -> None:
'''Properties for defining a ``ALIYUN::HBR::BackupClients``.
:param instance_ids:
'''
self._values: typing.Dict[str, typing.Any] = {
"instance_ids": instance_ids,
}
@builtins.property
def instance_ids(
self,
) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]:
'''
:Property: instanceIds: ID list of instances to install backup client
'''
result = self._values.get("instance_ids")
assert result is not None, "Required property 'instance_ids' is missing"
return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RosBackupClientsProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class RosDbAgent(
ros_cdk_core.RosResource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-hbr.RosDbAgent",
):
'''A ROS template type: ``ALIYUN::HBR::DbAgent``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "RosDbAgentProps",
enable_resource_property_constraint: builtins.bool,
) -> None:
'''Create a new ``ALIYUN::HBR::DbAgent``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
'''
:param props: -
'''
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
@jsii.python.classproperty # type: ignore[misc]
@jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
'''The resource type name for this resource class.'''
return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrInstanceIds")
def attr_instance_ids(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: InstanceIds: Uni backup agent instance ids
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceIds"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrTaskId")
def attr_task_id(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: TaskId: Uni backup agent install task id.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrTaskId"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrUniBackupInstanceDetails")
def attr_uni_backup_instance_details(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: UniBackupInstanceDetails: Uni backup agent instance info details
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrUniBackupInstanceDetails"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrUniBackupInstances")
def attr_uni_backup_instances(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: UniBackupInstances: Uni backup agent instance info
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrUniBackupInstances"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="rosProperties")
def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="enableResourcePropertyConstraint")
def enable_resource_property_constraint(self) -> builtins.bool:
return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))
@enable_resource_property_constraint.setter
def enable_resource_property_constraint(self, value: builtins.bool) -> None:
jsii.set(self, "enableResourcePropertyConstraint", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="instanceInfo")
def | |
{"names": ["x", "y", "z", "v_x", "v_y", "v_z", "M_200c"],
"formats": 7 * [np.float32]}
catalog = np.zeros(n_tot, dtype)
return pd.DataFrame(catalog)
@staticmethod
def _set_octant(df, octant):
"""Affix an octant column to a copy of the data frame """
df_copy = df.copy() #FIXME: Make sure shallow copy is safe
df_copy["octant"] = octant
return df_copy
@staticmethod
def _tile_by_shifting(coords, boxsize, move_signature):
"""tile a 3d box by shifting coordinates according to the move signatures in
_octant_shift_signature
e.g. (0,0,-1) shifts the box down one unit in the -z direction"""
#TODO: assert 3d arrays
#TODO: assert array move signature
x, y, z = coords
dx, dy, dz = move_signature * boxsize
x += dx
y += dy
z += dz
return pd.Series([x, y, z])
@staticmethod
def _tile_by_mirroring(coords, boxsize, move_signature):
"""tile a 3d box by reflecting coordinates according to the move signatures in
_octant_mirror_signature
e.g. (+1,+1,-1) reflects the box along the x-y plane (in the -z direction)"""
# TODO: assert 3d arrays
# TODO: assert array move signature
x, y, z = coords
        # boxsize is unused here; it is kept for a signature consistent with the other tiling functions
dx, dy, dz = move_signature
x *= dx
y *= dy
z *= dz
return pd.Series([x, y, z])
@staticmethod
def _tile_by_rotating(coords, boxsize, move_signature):
"""tile a 3d box by rotating coordinates according to the move signatures in
_octant_rotate_signature
e.g. (1,1) rotates the box counter-clockwise around the z axis"""
# TODO: assert 3d arrays
# TODO: assert array move signature
x, y, z = coords
        # boxsize is unused here; it is kept for a signature consistent with the other tiling functions
n, z_sign = move_signature
        # rotate the coordinates according to the transformation in Sehgal et al. 2010, Eq. 28
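        # worked example: for move_signature (n=1, z_sign=+1) the factor is
        # exp(1j*pi/2) = 1j, so (x + 1j*y)*1j = -y + 1j*x, i.e. (x, y) -> (-y, x),
        # a 90-degree counter-clockwise rotation about the z axis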
xiy = (x + np.sign(z_sign)*1.j*y)*np.exp(1.j*(n-0.5*np.sign(z_sign)+0.5)*np.pi/2)
x = xiy.real
y = xiy.imag
z *= z_sign
return pd.Series([x, y, z])
def replicate(self,
mode="rotate"):
"""
Replicate an octant to get a whole-sky box
Parameters
----------
mode
Returns
-------
"""
        assert mode in ["shift", "rotate", "mirror"], "mode can be either 'shift', 'rotate', " \
                                                      "or 'mirror'."
# add the octant column to data
# define local variable data to force catalog rebuilding at the end
data = pd.concat([self._set_octant(f, i) for (i, f) in enumerate(8*[self.data])],
axis=0, ignore_index=True)
# set the replication mode and signature based on the given kwarg
if mode.lower() == "shift":
tile = self._tile_by_shifting
move_signature = self._octant_shift_signature
elif mode.lower() == "mirror":
tile = self._tile_by_mirroring
move_signature = self._octant_mirror_signature
elif mode.lower() == "rotate":
tile = self._tile_by_rotating
move_signature = self._octant_rotate_signature
# replicate the octants using the tiling function set above
data[["x", "y", "z"]] = \
data.apply(lambda row:
tile(
row[["x", "y", "z"]],
self.box_size,
np.array(move_signature[row["octant"]])
),
axis=1)
# reset data and rebuild the dataframe
self.data = data
def move_to_box_center(self):
"""move the observer from (0,0,0) to the center of the box (Lx/2, Ly/2, Lz/2) to make
coordinates symmetric
*Not recommended for light-cone catalogs"""
data = self.data # trick for forcing catalog rebuilding at the end
Lx, Ly, Lz = self.box_size
data["x"] -= Lx / 2
data["y"] -= Ly / 2
data["z"] -= Lz / 2
# reset data and rebuild the dataframe
self.data = data
#TODO: for the cutting methods, avoid rebuilding the dataframe after every cut
def cut_M_200c(self, mass_min=0., mass_max=np.inf, inplace=True):
"""
        Cut the catalog according to the given mass range
Parameters
----------
mass_min [M_sun]
minimum halo mass to keep
mass_max [M_sun]
maximum halo mass to keep
Returns
-------
None
catalog.data will only contain halos with mass M in the range mass_min < M < mass_max
"""
data = self.data[(self.data.M_200c > mass_min) & (self.data.M_200c < mass_max)]
if inplace:
self.data = data
else:
return data
def cut_R_200c(self, R_min=0., R_max=np.inf, inplace=True):
"""
        Cut the catalog according to the given radius range
Parameters
----------
R_min [Mpc]
minimum halo radius to keep
R_max [Mpc]
maximum halo radius to keep
Returns
-------
None
catalog.data will only contain halos with radius R in the range R_min < R < R_max
"""
data = self.data[(self.data.R_200c > R_min) & (self.data.R_200c < R_max)]
if inplace:
self.data = data
else:
return data
def cut_R_ang_200c(self, R_ang_min=0., R_ang_max=np.inf, inplace=True):
"""
        Cut the catalog according to the given angular radius range
Parameters
----------
R_ang_min [arcmin]
minimum halo angular radius to keep
R_ang_max [arcmin]
maximum halo angular radius to keep
Returns
-------
None
catalog.data will only contain halos with angular radius R_ang_200c in the range
R_ang_min < R_ang_200c < R_ang_max
"""
data = self.data[(self.data.R_ang_200c > R_ang_min) & (self.data.R_ang_200c < R_ang_max)]
if inplace:
self.data = data
else:
return data
def cut_D_c(self, D_min=0., D_max=np.inf, inplace=True):
"""
        Cut the catalog according to the given comoving distance range
Parameters
----------
D_min [Mpc]
minimum halo comoving distance to keep
D_max [Mpc]
maximum halo comoving distance to keep
Returns
-------
None
catalog.data will only contain halos with comoving distance D_c in the range D_min < D_c <
D_max
"""
data = self.data[(self.data.D_c > D_min) & (self.data.D_c < D_max)]
if inplace:
self.data = data
else:
return data
def cut_D_a(self, D_min=0., D_max=np.inf, inplace=True):
"""
        Cut the catalog according to the given angular diameter distance range
Parameters
----------
D_min [Mpc]
minimum halo angular diameter distance to keep
D_max [Mpc]
maximum halo angular diameter distance to keep
Returns
-------
None
catalog.data will only contain halos with angular diameter distance D_a in the range
D_min < D_a < D_max
"""
data = self.data[(self.data.D_a > D_min) & (self.data.D_a < D_max)]
if inplace:
self.data = data
else:
return data
def cut_redshift(self, redshift_min=0., redshift_max=np.inf, inplace=True):
"""
        Cut the catalog according to the given redshift range
Parameters
----------
redshift_min
minimum halo redshift to keep
redshift_max
maximum halo redshift to keep
Returns
-------
None
catalog.data will only contain halos with redshift in the range
redshift_min < redshift < redshift_max
"""
data = self.data[(self.data.redshift > redshift_min) &
(self.data.redshift < redshift_max)]
if inplace:
self.data = data
else:
return data
def cut_lon_lat(self,
lon_range=[0, 360],
lat_range=[-90, 90],
inplace=True):
"""
        Cut the catalog according to the given longitude and latitude range
Parameters
----------
lon_range [deg]
            range of longitudes to keep
        lat_range [deg]
            range of latitudes to keep
Returns
-------
None
        catalog.data will only contain halos with longitudes in the range lon_range and
latitudes in the range lat_range
"""
data = self.data[(self.data.lon > lon_range[0]) &
(self.data.lon < lon_range[1]) &
(self.data.lat > lat_range[0]) &
(self.data.lat < lat_range[1])]
if inplace:
self.data = data
else:
return data
def cut_theta_phi(self,
theta_range=[0, np.pi],
phi_range=[0, 2 * np.pi],
inplace=True):
"""
        Cut the catalog according to the given theta and phi range
Parameters
----------
theta_range [rad]
            range of theta values to keep
        phi_range [rad]
            range of phi values to keep
Returns
-------
None
catalog.data will only contain halos with theta in the range theta_range and
phi in the range phi_range
"""
data = self.data[(self.data.theta > theta_range[0]) &
(self.data.theta < theta_range[1]) &
(self.data.phi > phi_range[0]) &
(self.data.phi < phi_range[1])]
if inplace:
self.data = data
else:
return data
def cut_mask(self, mask, threshold=0.5, inplace=True):
"""cut the catalog according to the input mask
halos outside of the mask (mask<threshold) will be discarded"""
# make sure npix is valid and then get the corresponding nside
npix = len(mask)
assert hp.isnpixok(npix), "bad number of pixels"
nside = hp.npix2nside(npix)
        # find the healpix pixel that each halo falls in
angs = hp.ang2pix(nside, *self.data[["lon", "lat"]].values.T, lonlat=True)
# check to see if the pixel is masked
is_in_mask = mask[angs] >= threshold
# slice the halos outside the mask
data = self.data[is_in_mask]
if inplace:
self.data = data
else:
return data
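    # Minimal usage sketch for the mask cut (assumes `catalog` is an instance of
    # this class whose data frame already has `lon`/`lat` columns in degrees):
    #
    #     import healpy as hp
    #     import numpy as np
    #     mask = np.zeros(hp.nside2npix(64))
    #     mask[:hp.nside2npix(64) // 2] = 1.0      # keep roughly half of the sky
    #     catalog.cut_mask(mask, threshold=0.5)    # drops halos where mask < 0.5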
#########################################################
# Canvas Object
#########################################################
class Canvas:
"""healpy or flat-sky canvas with the location of the halos to paint the signal on"""
def __init__(self,
catalog,
nside,
mode="healpy",
#analyze=True,
R_times=1, # the discs will be found around R_times x virial radius,
inclusive=False,
):
#TODO: define attribute dictionary with __slots__
assert mode == "healpy", "currently only full sky is supported"
self._nside = nside
self._npix = hp.nside2npix(self.nside)
self._lmax = 3 * self.nside-1
self._ell = np.arange(self.lmax+1)
self._cmap = cm.RdBu_r
self.R_times = R_times
self.inclusive = inclusive
# set all the healpy pixels to zero initially
self._pixels = np.zeros(self.npix)
self._pixel_is_outdated = False
self._alm = None
self._alm_is_outdated = True
self._Cl = | |
'''
Created on 10 aug. 2015
@author: Maurits
'''
import random
import time
import string
import os
import math
import pygame
import basic_shape
import textutil
import enemy
import coin
from collections import OrderedDict
class GUI(object):
'''
    Main game object: owns the pygame window, menus, settings and game state.
'''
def __init__(self):
'''
Constructor
'''
self.screen = None
def start(self):
os.environ["SDL_VIDEO_WINDOW_POS"] = "50,50"
self.score = 0
self.trans = False
pygame.init()
self.screen = pygame.display.set_mode((1200, 600))
self.font = pygame.font.Font("font/SUBWAY.ttf", 30)
textutil.drawtextcentered(self.screen, (600, 300), self.font, "Loading...", 0, (255, 255, 255))
pygame.display.flip()
pygame.mixer.music.set_volume(0.5)
pygame.mixer.music.load("sound/Ouroboros.ogg")
pygame.mixer.music.play(-1)
self.endtime = int(time.time())
self.planeimage = pygame.image.load("art/plane.png").convert_alpha()
self.astroid_image = pygame.image.load("art/astroid.png").convert_alpha()
self.redguy_image = pygame.image.load("art/redguy.png").convert_alpha()
self.backimage = pygame.image.load("art/background.png").convert()
self.bullet_img = pygame.image.load("art/bullet.png").convert_alpha()
self.coin_img = pygame.image.load("art/coin.png").convert_alpha()
self.life_img = pygame.image.load("art/life.png").convert_alpha()
self.glowring_img = pygame.image.load("art/glowring.png").convert_alpha()
self.player_image = pygame.image.load("art/player_plane.png").convert_alpha()
self.instructions_image = pygame.image.load("art/instructions.png").convert_alpha()
self.background_special_image = pygame.image.load("art/background_special.png").convert()
self.turtle_image = pygame.image.load("art/turtle.png").convert_alpha()
self.orange_square_image = pygame.image.load("art/orange_rect.png").convert_alpha()
self.white_circle_image = pygame.image.load("art/neon_tube.png").convert_alpha()
self.arrow_right_pic = pygame.image.load("art/Arrow_right.png").convert_alpha()
self.coin_snd = pygame.mixer.Sound("sound/coin2.wav")
self.coin_snd_chan = pygame.mixer.Channel(0)
self.expl_snd = pygame.mixer.Sound("sound/explosion.wav")
self.expl_snd_chan = tuple(pygame.mixer.Channel(i + 1) for i in range(2))
self.door_snd = pygame.mixer.Sound("sound/door2.wav")
self.door_snd_chan = pygame.mixer.Channel(3)
self.menu_index = 0
self.easy = True
self.help_text = None
self.help_text_ID = 0
        self.running = True
self.world_size = (3000, 3000)
# self.player.gui=self
pygame.display.set_caption("Lazer of Death")
pygame.display.set_icon(self.player_image)
self.menu_items = (("Help", self.startHelp),
("Settings", self.startSettings),
["Sound: [ON]", self.toggleSound],
["Music: [ON]", self.toggleMusic]
)
self.settings = OrderedDict((("Music Volume", 1.0),
("Sound Volume", 1.0),
("Precise FPS", False)))
self.player = None
self.state = "menu"
self.objects = pygame.sprite.Group()
self.enemies = pygame.sprite.Group()
self.bullets = pygame.sprite.Group()
self.coins = pygame.sprite.Group()
self.astroids = pygame.sprite.Group()
self.special_coins = pygame.sprite.Group()
pygame.key.set_repeat(100, 100)
self.clock = pygame.time.Clock()
self.fpsfunc = self.clock.tick
self.transsurface = pygame.Surface(self.screen.get_size())
self.menu = False
self.target_monster = None
for i in range(50):
self.addrandommonster()
self.state = "menu"
self.startmenu(False)
print("load time=" + str(time.time() - (self.endtime)) + "s")
def playcoin(self):
# pygame.mixer.get_busy()
if not self.coin_snd_chan.get_busy():
self.coin_snd_chan.play(self.coin_snd)
def playdoor(self):
if not self.door_snd_chan.get_busy():
self.door_snd_chan.play(self.door_snd)
def play_explode(self):
for i in self.expl_snd_chan:
if not i.get_busy():
i.play(self.expl_snd)
return
def startgame(self):
print("FPS: ", self.clock.get_fps())
self.transstart()
self.player = enemy.Player((self.world_size[0] / 2, self.world_size[1] / 2), self.player_image, 45,
(10, 10), gui=self, maxpos=self.world_size,
)
self.state = "game"
self.objects.empty()
self.enemies.empty()
self.bullets.empty()
self.coins.empty()
self.astroids.empty()
self.objects.add(self.player)
if self.easy:
num = 25
else:
num = 50
for i in range(num):
self.addrandommonster()
self.score = 0
self.endtime = time.time() + 60
self.deaths = 0
self.deathscore = 0
self.player_immune = False
self.lives = 3
if self.easy:
self.pen = 50
else:
self.pen = 100
def startmenu(self, trans=True):
print("FPS: ", self.clock.get_fps())
if trans:
self.transstart()
self.state = "menu"
size = self.screen.get_size()
self.offset = tuple(-self.world_size[i] // 2 + size[i] // 2 for i in range(2))
self.menusurf = pygame.Surface(size, pygame.SRCALPHA)
self.menusurf.fill((0, 255, 0, 200), (size[0] // 2 - 200, 0, 400, 600))
textutil.drawtextcentered(self.menusurf, (size[0] // 2, 60), self.font, "Lazer of death")
textutil.drawtextcentered(self.menusurf, (size[0] // 2, 470), self.font,
"\"z\" to " + ("select menu item" if self.menu else"change difficulty"))
textutil.drawtextcentered(self.menusurf, (size[0] // 2, size[1] - 30), self.font, "Press Space to Play")
if self.easy:
self.highscores = textutil.loadhighscores()
else:
self.highscores = textutil.loadhighscores("highscores/hardhiscores.csv")
pygame.draw.rect(self.menusurf, (255, 255, 255), (size[0] // 2 - 180, 100, 360, 335), 2)
for num, i in enumerate(sorted(list(self.highscores.keys()), reverse=True)):
if num < 10:
textutil.drawtextcentered(self.menusurf, (size[0] // 2 - 175, 130 + 30 * num), self.font,
"{0: >2d}:{1}".format(num + 1, self.highscores[i]), alignment=(0, 1))
textutil.drawtextcentered(self.menusurf, (size[0] // 2 + 175, 130 + 30 * num), self.font, str(i),
alignment=(2, 1))
if not self.menu:
self.menusurf.fill((200, 200, 0, 200), (size[0] // 2 + 200, size[1] - 110, 180, 50))
textutil.drawtextcentered(self.menusurf, (size[0] // 2 + 290, size[1] - 85), self.font, "menu", True,
(255, 255, 255), (1, 1))
self.menusurf.blit(self.arrow_right_pic, (size[0] // 2 + 205, size[1] - 105))
self.menusurf.blit(self.arrow_right_pic, (size[0] // 2 + 335, size[1] - 105))
else:
self.menusurf.fill((200, 200, 0, 200), (size[0] // 2 + 200, 50, 220, 500))
for index, (name, func) in enumerate(self.menu_items):
textutil.drawtextcentered(self.menusurf, (size[0] // 2 + 310, 75 + 70 * index), self.font, name, True
, (255, 255, 255))
pygame.draw.rect(self.menusurf, (255, 255, 255), (size[0] // 2 + 205, 55 + 70 * self.menu_index, 210, 60),
2)
if self.player:
self.player.kill()
self.player = None
def startHelp(self):
self.endtime = int(time.time())
# print "Menu index: ",self.menu_index,"Gui page",self.help_text_ID,"Text len", \
# len(self.help_text) if self.help_text else "None"
IDchange = False
self.state = "help"
size = self.screen.get_size()
self.menusurf = pygame.Surface(size, pygame.SRCALPHA)
self.menusurf.fill((0, 255, 0, 200), (size[0] // 2 - 200, 0, 400, 600))
textutil.drawtextcentered(self.menusurf, (size[0] // 2, 40), self.font,
"Instructions")
if self.menu_index < 0:
self.help_text_ID -= 1
self.menu_index = 0
IDchange = True
if self.help_text_ID == 0:
self.target_monster = None
if self.help_text_ID < 0:
self.help_text_ID = 0
if self.help_text_ID == 0:
if self.help_text == None or IDchange:
self.menu_index = 0
f = open("data/Help.txt")
self.help_text = f.read().splitlines()
f.close()
if self.menu_index - len(self.help_text) + 15 > 0:
self.help_text_ID += 1
self.menu_index = 0
IDchange = True
else:
for i in range(16):
if (i + self.menu_index < len(self.help_text)):
textutil.drawtextcentered(self.menusurf, (size[0] // 2, 90 + 30 * i), self.font,
self.help_text[i + self.menu_index])
if self.help_text_ID > 0:
self.menusurf.fill((0, 0, 0, 0), (size[0] // 2 - 170, 70, 340, 340))
if self.menu_index > len(self.help_text) - 4:
if self.help_text_ID < 5:
self.menu_index = 0
self.help_text_ID += 1
IDchange = True
else:
self.menu_index -= 1
ID = self.help_text_ID - 1
if (self.target_monster == None or self.target_monster.ID != ID or
not self.target_monster.alive):
# print ("Changing Monster, current is "+str(self.target_monster)+" ID: "+
# (str(self.target_monster.ID) if self.target_monster !=None else "N/A"))
self.target_monster = None
for i in self.enemies:
if i.ID == ID:
self.target_monster = i
break
if self.target_monster == None:
self.target_monster = self.addrandommonster(ID)
self.offset = list(self.offset)
if IDchange or self.help_text == None:
f = open("Data/" + str(ID + 1) + ".txt")
self.help_text = f.read().splitlines()
f.close()
for index in range(5):
if index + self.menu_index < len(self.help_text):
text = self.help_text[index + self.menu_index]
textutil.drawtextcentered(self.menusurf, (size[0] // 2, 430 + 30 * index), self.font,
text)
pygame.draw.rect(self.menusurf, (255, 255, 255), (size[0] // 2 - 180, 60, 360, size[1] - 80), 1)
def startSettings(self):
self.state = "conf"
size = self.screen.get_size()
self.menusurf = pygame.Surface(size, pygame.SRCALPHA)
self.menusurf.fill((0, 255, 0, 200), (size[0] // 2 - 200, 0, 400, 600))
textutil.drawtextcentered(self.menusurf, (size[0] // 2, 40), self.font,
"Settings")
pygame.draw.rect(self.menusurf, (255, 255, 255), (size[0] // 2 - 180, 60, 360, size[1] - 80), 2)
for index, (text, value) in enumerate(self.settings.items()):
textutil.drawtextcentered(self.menusurf, (size[0] // 2, 90 + 100 * index), self.font,
text)
if self.menu_index == index:
pygame.draw.rect(self.menusurf, (255, 255, 255), (size[0] // 2 - 175, 70 + 100 * index, 350, 80), 2)
if isinstance(value, float):
textutil.drawtextcentered(self.menusurf, (size[0] // 2 - 170, 120 + 100 * index),
self.font, "|" * int(value * 16), alignment=(0, 1))
elif isinstance(value, bool):
if value:
textutil.drawtextcentered(self.menusurf, (size[0] // 2, 120 + 100 * index), self.font,
"Yes", color=(0, 255, 0))
else:
textutil.drawtextcentered(self.menusurf, (size[0] // 2, 120 + 100 * index), self.font,
"No", color=(255, 0, 0))
def applySettings(self):
pygame.mixer.music.set_volume(self.settings["Music Volume"])
self.toggleSound()
self.toggleSound()
if self.settings["Precise FPS"]:
self.fpsfunc = self.clock.tick_busy_loop
else:
self.fpsfunc = self.clock.tick
def starthighscore(self):
self.transstart()
if self.player:
self.player.kill()
self.player = None
self.state = "score"
self.name = ""
print("deaths: " + str(self.deaths))
print("score while dead " + str(self.deathscore))
if self.deaths > 0:
print("score per death: " + str(self.deathscore / self.deaths))
print(self.score - (self.pen * self.deaths) + self.deathscore)
size = self.screen.get_size()
self.offset = tuple(-self.world_size[i] // 2 + size[i] // 2 for i in range(2))
self.menusurf = pygame.Surface(size, pygame.SRCALPHA)
self.menusurf.fill((0, 255, 0, 200), (size[0] // 2 - 200, 0, 400, 600))
textutil.drawtextcentered(self.menusurf, (size[0] // 2, 40), self.font, "Lazer of death")
textutil.drawtextcentered(self.menusurf, (size[0] // 2 - 180, 100), self.font, "Base score:", alignment=(0, 1))
textutil.drawtextcentered(self.menusurf, (size[0] // 2 + 180, 100), self.font, "{0:05d}".format(self.score),
alignment=(2, 1))
textutil.drawtextcentered(self.menusurf, (size[0] // 2 - 180, 160), self.font, "Death Penalty:",
alignment=(0, 1))
textutil.drawtextcentered(self.menusurf, (size[0] // 2 + 180, 160), self.font,
"{0:05d}".format(self.pen * self.deaths), color=(255, 0, 0),
alignment=(2, 1))
textutil.drawtextcentered(self.menusurf, (size[0] // 2 - 180, 220), self.font, "Death coins:", alignment=(0, 1))
textutil.drawtextcentered(self.menusurf, (size[0] // 2 + 180, 220), self.font,
"{0:05d}".format(self.deathscore), color=(0, 0, 255),
alignment=(2, 1))
print(self.score - (self.pen * self.deaths) + self.deathscore)
textutil.drawtextcentered(self.menusurf, (size[0] // 2 - 180, 280), self.font, "Total score:", alignment=(0, 1))
textutil.drawtextcentered(self.menusurf, (size[0] // 2 + 180, 280), self.font,
"{0:05d}".format(self.score - self.pen * self.deaths + self.deathscore),
alignment=(2, 1))
print(self.score - (self.pen * self.deaths) + self.deathscore)
pygame.draw.line(self.menusurf, (255, 255, 255), (size[0] // 2 - 185, 250), (size[0] // 2 + 185, 250), 2)
pygame.draw.line(self.menusurf, (255, 255, 255), (size[0] // | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import hashlib
import html
import os
import re
import dateutil.parser
from pathlib import Path
from datetime import datetime
import gzip
import requests
import atoma
from urllib import request
from http.cookiejar import CookieJar
from lxml.html.soupparser import fromstring
from lxml.ElementInclude import etree
from PIL import Image
from fake_useragent import UserAgent
import blog2epub
from blog2epub.Book import Book
class Crawler(object):
"""
Universal blog crawler.
"""
article_class = "Article"
content_xpath = "//div[contains(concat(' ',normalize-space(@class),' '),'post-body')]"
images_regex = r'<table[^>]*><tbody>[\s]*<tr><td[^>]*><a href="([^"]*)"[^>]*><img[^>]*></a></td></tr>[\s]*<tr><td class="tr-caption" style="[^"]*">([^<]*)'
articles_regex = r'<h3 class=\'post-title entry-title\' itemprop=\'name\'>[\s]*<a href=\'([^\']*)\'>([^>^<]*)</a>[\s]*</h3>'
def __init__(self, url, include_images=True, images_height=800, images_width=600, images_quality=40, start=None,
end=None, limit=None, skip=False, force_download=False, file_name=None, destination_folder='./',
cache_folder=None, language=None, interface=None):
self.url = self._prepare_url(url)
self.url_to_crawl = self._prepare_url_to_crawl(self.url)
self.port = self._prepare_port(self.url_to_crawl)
self.file_name = self._prepare_file_name(file_name, self.url)
self.destination_folder = destination_folder
self.cache_folder = cache_folder
if cache_folder is None:
self.cache_folder = os.path.join(str(Path.home()), '.blog2epub')
self.include_images = include_images
self.images_quality = images_quality
self.images_height = images_height
self.images_width = images_width
self.start = start
self.end = end
self.limit = limit
self.skip = skip
self.force_download = force_download
self.interface = self._get_the_interface(interface)
self.dirs = Dirs(self.cache_folder, self.url.replace('/', '_'))
self.book = None
self.title = None
self.description = None
self.language = language
self.atom_feed = False
self.articles = []
self.article_counter = 0
self.images = []
self.downloader = Downloader(self)
self.tags = {}
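    # Typical use (sketch; the address below is just a placeholder blog URL):
    #
    #     crawler = Crawler("examplename.blogspot.com", limit=10)
    #     crawler.save()   # crawl the blog, then build and write the EPUB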
def _prepare_url(self, url):
return url.replace('http:', '').replace('https:', '').strip('/')
def _prepare_file_name(self, file_name, url):
if file_name:
return file_name
return url.replace('/', '_')
def _prepare_url_to_crawl(self, url):
r = request.urlopen('http://' + url)
return r.geturl()
def _prepare_port(self, url):
if url.startswith("https://"):
return 443
else:
return 80
def _get_the_interface(self, interface):
if interface:
return interface
else:
return EmptyInterface()
def get_cover_title(self):
cover_title = self.title + ' '
if self.start == self.end:
cover_title = cover_title + str(self.start)
else:
end_date = self.end.split(' ')
start_date = self.start.split(' ')
if len(end_date) == len(start_date):
ed = []
for i, d in enumerate(end_date):
if d != start_date[i]:
ed.append(d)
ed = ' '.join(ed)
cover_title = cover_title + ed + '-' + self.start
return cover_title
@staticmethod
def get_date(str_date):
        return re.sub(r'[^,]*, ', '', str_date)
def _set_blog_language(self, content):
        if self.language is None and re.search(r"'lang':[\s]*'([a-z^']+)'", content):
            self.language = re.search(r"'lang':[\s]*'([a-z^']+)'", content).group(1).strip()
        if self.language is None and re.search(r'lang=[\'"]([a-z]+)[\'"]', content):
            self.language = re.search(r'lang=[\'"]([a-z]+)[\'"]', content).group(1).strip()
        if self.language is None and re.search(r'locale[\'"]:[\s]*[\'"]([a-z]+)[\'"]', content):
            self.language = re.search(r'locale[\'"]:[\s]*[\'"]([a-z]+)[\'"]', content).group(1).strip()
if self.language is None:
self.language = 'en'
def _get_blog_title(self, content):
if re.search("<title>([^>^<]*)</title>", content):
return re.search("<title>([^>^<]*)</title>", content).group(1).strip()
return ''
def _get_blog_description(self, tree):
return tree.xpath('//div[@id="header"]/div/div/div/p[@class="description"]/span/text()')
def _get_header_images(self, tree):
header_images = []
for img in tree.xpath('//div[@id="header"]/div/div/div/p[@class="description"]/span/img/@src'):
header_images.append(self.downloader.download_image(img))
return header_images
def _get_articles(self, content):
"""
:param content: web page content
:return: list of Article class objects
"""
tree = fromstring(content)
art_urls = tree.xpath("//h3[contains(@class, 'entry-title')]/a/@href")
art_titles = tree.xpath("//h3[contains(@class, 'entry-title')]/a/text()")
output = []
if art_urls and len(art_urls) == len(art_titles):
for i in range(len(art_urls)):
output.append(eval(self.article_class)(art_urls[i], art_titles[i], self))
else:
articles_list = re.findall(self.articles_regex, content)
for art in articles_list:
output.append(eval(self.article_class)(art[0], art[1], self))
return output
    def _get_atom_content(self):
        """ Try to load the blog's Atom feed; return True if it could be parsed.
"""
atom_content = self.downloader.get_content('https://' + self.url + '/feeds/posts/default')
try:
self.atom_feed = atoma.parse_atom_bytes(bytes(atom_content, encoding="utf-8"))
return True
except Exception as e:
self.interface.print(e)
return False
def _get_url_to_crawl(self, tree):
url_to_crawl = None
if tree.xpath('//a[@class="blog-pager-older-link"]/@href'):
url_to_crawl = tree.xpath('//a[@class="blog-pager-older-link"]/@href')[0]
return url_to_crawl
def _add_tags(self, tags):
for tag in tags:
if tag in self.tags:
self.tags[tag] = self.tags[tag]+1
else:
self.tags[tag] = 1
def _atom_feed_loop(self):
self.url_to_crawl = None
for item in self.atom_feed.entries:
try:
self.article_counter += 1
art = eval(self.article_class)(item.links[0].href, item.title.value, self)
self.interface.print(str(len(self.articles) + 1) + '. ' + art.title)
art.date = item.updated
if self.start:
self.end = art.date
else:
self.start = art.date
if item.content:
art.set_content(item.content.value)
art.get_images()
art.set_content(art.html)
self.images = self.images + art.images
self.articles.append(art)
self._add_tags(art.tags)
if self.limit and len(self.articles) >= self.limit:
break
except Exception as e:
self.interface.print(e)
self.interface.print("[article not recognized - skipping]")
def _articles_loop(self, content):
for art in self._get_articles(content):
self.article_counter += 1
if not self.skip or self.article_counter > self.skip:
art.process()
self.images = self.images + art.images
self.interface.print(str(len(self.articles) + 1) + '. ' + art.title)
if self.start:
self.end = art.date
else:
self.start = art.date
self.articles.append(art)
self._add_tags(art.tags)
self._check_limit()
else:
self.interface.print('[skipping] ' + art.title)
if not self.url_to_crawl:
break
def _check_limit(self):
if self.limit and len(self.articles) >= self.limit:
self.url_to_crawl = None
def _prepare_content(self, content):
return content
def _crawl(self):
while self.url_to_crawl:
content = self.downloader.get_content(self.url_to_crawl)
tree = fromstring(content)
self._set_blog_language(content)
self.images = self.images + self._get_header_images(tree)
self.description = self._get_blog_description(tree)
self.title = self._get_blog_title(content)
if self._get_atom_content():
self._atom_feed_loop()
else:
content = self._prepare_content(content)
self._articles_loop(content)
self.url_to_crawl = self._get_url_to_crawl(tree)
self._check_limit()
def save(self):
self._crawl()
if self.articles:
self.book = Book(self)
self.book.save()
else:
self.interface.print("No articles found.")
class Dirs(object):
"""
    Tiny class holding the temporary directory configuration.
"""
def _prepare_directories(self):
paths = [self.html, self.images, self.originals]
for p in paths:
if not os.path.exists(p):
os.makedirs(p)
def __init__(self, cache_folder, name):
self.path = os.path.join(cache_folder, name)
self.html = os.path.join(self.path, 'html')
self.images = os.path.join(self.path, 'images')
self.originals = os.path.join(self.path, 'originals')
self.assets = os.path.join(str(os.path.realpath(blog2epub.__file__).replace('__init__.py','')), 'assets')
self._prepare_directories()
class Downloader(object):
def __init__(self, crawler):
self.dirs = crawler.dirs
self.crawler_url = crawler.url
self.crawler_port = crawler.port
self.interface = crawler.interface
self.force_download = crawler.force_download
self.images_width = crawler.images_width
self.images_height = crawler.images_height
self.images_quality = crawler.images_quality
self.cookies = CookieJar()
self.session = requests.session()
ua = UserAgent()
self.headers = {
'User-Agent': ua.chrome,
}
def get_urlhash(self, url):
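        # MD5 is used only as a stable cache key: the 32-character hex digest
        # of the URL names the cached page and image files on disk.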
m = hashlib.md5()
m.update(url.encode('utf-8'))
return m.hexdigest()
def file_write(self, contents, filepath):
filepath = filepath + ".gz"
with gzip.open(filepath, 'wb') as f:
f.write(contents.encode('utf-8'))
def file_read(self, filepath):
if os.path.isfile(filepath + ".gz"):
with gzip.open(filepath + ".gz", 'rb') as f:
contents = f.read().decode('utf-8')
else:
with open(filepath, 'rb') as html_file:
contents = html_file.read().decode('utf-8')
self.file_write(contents, filepath)
os.remove(filepath)
return contents
def get_filepath(self, url):
return os.path.join(self.dirs.html, self.get_urlhash(url) + '.html')
def file_download(self, url, filepath):
self.dirs._prepare_directories()
response = self.session.get(url, cookies=self.cookies, headers=self.headers)
self.cookies = response.cookies
data = response.content
try:
contents = data.decode('utf-8')
except Exception as e:
contents = data
self.interface.print(e)
self.file_write(contents, filepath)
return contents
def image_download(self, url, filepath):
try:
self.dirs._prepare_directories()
f = open(filepath, 'wb')
response = self.session.get(url, cookies=self.cookies, headers=self.headers)
f.write(response.content)
f.close()
return True
except Exception:
return False
def checkInterstitial(self, contents):
interstitial = re.findall('interstitial=([^"]+)', contents)
if interstitial:
return interstitial[0]
else:
return False
def get_content(self, url):
filepath = self.get_filepath(url)
if self.force_download or (not os.path.isfile(filepath) and not os.path.isfile(filepath + ".gz")):
contents = self.file_download(url, filepath)
else:
contents = self.file_read(filepath)
interstitial = self.checkInterstitial(contents)
if interstitial:
interstitial_url = "http://" + self.crawler_url + "?interstitial="+interstitial
self.file_download(interstitial_url, self.get_filepath(interstitial_url))
            contents = self.file_download("http://" + self.crawler_url, self.get_filepath("http://" + self.crawler_url))
return contents
def download_image(self, img):
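        # Pipeline: fetch the original into dirs.originals, shrink it to fit
        # images_width x images_height, convert it to greyscale ('L') and
        # re-encode it as JPEG at images_quality into dirs.images, then delete
        # the original and return only the '<md5>.jpg' name used in the book.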
if img.startswith("//"):
img = "http:" + img
img_hash = self.get_urlhash(img)
img_type = os.path.splitext(img)[1].lower()
        original_fn = os.path.join(self.dirs.originals, img_hash + img_type)  # img_type already contains the leading dot
resized_fn = os.path.join(self.dirs.images, img_hash + ".jpg")
if not os.path.isfile(resized_fn) or self.force_download:
self.image_download(img, original_fn)
if os.path.isfile(original_fn):
try:
picture = Image.open(original_fn)
if picture.size[0] > self.images_width or picture.size[1] > self.images_height:
                        picture.thumbnail([self.images_width, self.images_height], Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
picture = picture.convert('L')
picture.save(resized_fn, format='JPEG', quality=self.images_quality)
except Exception:
return None
os.remove(original_fn)
return img_hash + ".jpg"
class Article(object):
"""
    Blog post or article that becomes a book chapter.
"""
def __init__(self, url, title, crawler):
self.url = url
self.title = title
self.tags = []
self.interface = crawler.interface
self.dirs = crawler.dirs
self.comments = ''
self.include_images = crawler.include_images
self.content_xpath = crawler.content_xpath
self.images_regex = crawler.images_regex
self.language = crawler.language
self.images = []
self.images_captions = []
self.html = None
self.content = None
self.date = None
self.tree = None
self.downloader = Downloader(crawler)
def get_title(self):
self.title = html.unescape(self.title.strip())
def get_date(self):
if isinstance(self.date, datetime):
return
date = self.tree.xpath('//abbr[@itemprop="datePublished"]/@title')
if date:
self.date = date[0]
else:
date = self.tree.xpath('//h2[@class="date-header"]/span/text()')
if len(date) > 0:
self.date = re.sub('(.*?, )', '', date[0])
if self.date is None:
d = self.url.split('/')
if len(d) > 4:
self.date = "%s-%s-01 00:00" % (d[3], d[4])
else:
self.date = str(datetime.now())
else:
self.date = self._translate_month(self.date)
print("DATA: {}".format(self.date))
try:
self.date = dateutil.parser.parse(self.date)
except:
self.interface.print("Date not parsed: {}".format(self.date))
def _translate_month(self, date):
        # TODO: needs to be refactored, or the month names moved to a parameter of the dateutil parser
date = date.lower()
if self.language == 'pl':
date = date.replace('stycznia', 'january')
date = date.replace('lutego', 'february')
date = date.replace('marca', 'march')
date = date.replace('kwietnia', 'april')
date = date.replace('maja', 'may')
date = date.replace('czerwca', 'june')
date = date.replace('lipca', 'july')
date = date.replace('sierpnia', 'august')
date = date.replace('września', 'september')
date = date.replace('października', 'october')
date = date.replace('listopada', 'november')
date = date.replace('grudnia', 'december')
date = date.replace(' sty ', ' january ')
date = date.replace(' lut ', ' february ')
date = date.replace(' mar ', ' march ')
date = date.replace(' kwi ', ' april ')
date = date.replace(' maj ', ' may ')
date = date.replace(' cze ', ' june ')
date = date.replace(' lip ', ' july ')
date = | |
from __future__ import annotations
import math
from collections import deque
from typing import Optional, Callable
import numpy as np
import pygame
from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, \
PIECE_INDICES, init_zobrist, MoveFlags, GameState
from chess.utils import load_image, load_font
class Chessboard:
"""Chessboard interface (8x8 field)"""
def __init__(self, light_colour="#F0D9B5", dark_colour="#B58863") -> None:
# Board itself
self._board = np.array([Piece.empty()] * 64)
# Active colour
self._active_colour = PieceColour.White
# Castling rights
self._castling_rights = {
PieceColour.White: {
CastlingType.KingSide: False,
CastlingType.QueenSide: False
},
PieceColour.Black: {
CastlingType.KingSide: False,
CastlingType.QueenSide: False
}
}
# Store piece types as strings
self._get_piece_str = {PieceType.Pawn: "pawn",
PieceType.Knight: "knight",
PieceType.Bishop: "bishop",
PieceType.Rook: "rook",
PieceType.Queen: "queen",
PieceType.King: "king"}
# Store piece move validators
self._get_validator: dict[
PieceType, Callable[[int, int, int, int], bool]] \
= {PieceType.Pawn: self._can_pawn_make,
PieceType.Knight: self._can_knight_make,
PieceType.Bishop: self._can_bishop_make,
PieceType.Rook: self._can_rook_make,
PieceType.Queen: self._can_queen_make,
PieceType.King: self._can_king_make}
# En Passant target
self._en_passant_target: Optional[int] = None
# Half-move clock
self._halfmoves = 0
# Init zobrist hash
self._z_table = init_zobrist()
# Board appearance
self._light_colour = pygame.Color(light_colour)
self._dark_colour = pygame.Color(dark_colour)
self._light_complementary = pygame.Color("#DBAB84")
self._dark_complementary = pygame.Color("#DBC095")
self._move_colour = pygame.Color("#8D80AD")
self._bg_colour = pygame.Color("#443742")
self._side = 100 # px
self._font_size = 45
self._font_gap = 15
self._font = load_font("ubuntumono/UbuntuMono-R.ttf", self._font_size)
self._font_colour = pygame.Color("white")
@property
def board(self) -> np.ndarray:
return self._board
@property
def halfmoves(self) -> int:
return self._halfmoves
@property
def active_colour(self) -> PieceColour:
return self._active_colour
@property
def passive_colour(self) -> PieceColour:
return PieceColour.White if self._active_colour == PieceColour.Black else PieceColour.Black
def hash(self) -> int:
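"""Return the Zobrist hash of the current piece placement."""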
h = 0
for i in range(64):
piece = self._board[i]
if piece.Type != PieceType.Empty:
j = PIECE_INDICES[piece.Type.value | piece.Colour.value]
h ^= self._z_table[i][j]
return h
def set_colours(self, light_colour: str, dark_colour: str,
light_complementary: str, dark_complementary: str) -> None:
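"""Override the default square and highlight colours of the board."""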
self._light_colour = pygame.Color(light_colour)
self._dark_colour = pygame.Color(dark_colour)
self._light_complementary = pygame.Color(light_complementary)
self._dark_complementary = pygame.Color(dark_complementary)
def render(self, screen: pygame.Surface,
last_move=None, skip=None, pos=None, game_info=None) -> None:
"""Render chessboard"""
if skip is not None and pos is None:
raise ValueError("skip is not None but pos is None")
screen.fill(self._bg_colour)
group = pygame.sprite.Group()
grabbed_data = None
skip: Optional[tuple[int, int]]
can_move_now = None if skip is None else self._get_all_piece_moves(skip[0] + skip[1] * 8)
for i, piece in enumerate(self._board):
x, y = i % 8, i // 8
if pos is not None and i in can_move_now:
pygame.draw.rect(screen, self._move_colour,
(x * self._side, y * self._side,
self._side, self._side))
elif last_move is not None and last_move.From == i:
pygame.draw.rect(screen, self._light_complementary,
(x * self._side, y * self._side,
self._side, self._side))
elif last_move is not None and last_move.To == i or (x, y) == skip:
pygame.draw.rect(screen, self._dark_complementary,
(x * self._side, y * self._side,
self._side, self._side))
else:
if (x + y) % 2 == 0:
colour = self._light_colour
else:
colour = self._dark_colour
pygame.draw.rect(screen, colour,
(x * self._side, y * self._side,
self._side, self._side))
if piece.Type == PieceType.Empty:
continue
elif (x, y) == skip:
grabbed_data = f"{self._get_piece_str[piece.Type]}_" \
f"{'w' if piece.Colour == PieceColour.White else 'b'}.png", i, group
else:
PieceSprite(
f"{self._get_piece_str[piece.Type]}_"
f"{'w' if piece.Colour == PieceColour.White else 'b'}"
f".png", i, group)
if grabbed_data is not None:
grabbed_piece = PieceSprite(*grabbed_data)
grabbed_piece.rect.x = pos[0] - 50 # type: ignore
grabbed_piece.rect.y = pos[1] - 50 # type: ignore
group.draw(screen)
text = ["Ход " + ("белых"
if self._active_colour == PieceColour.White
else "чёрных")]
if game_info is not None:
text.extend([f"Оценка: {game_info[0]}",
f"Позиций: {game_info[2]}",
f"Глубина: {game_info[3]}",
f"Время: {game_info[1]}с"])
line_pos = (screen.get_rect().h -
len(text) * (self._font_size + self._font_gap) -
self._font_gap) // 2
for line in text:
line_rendered = self._font.render(line, True, self._font_colour)
l_rect = line_rendered.get_rect()
screen.blit(line_rendered, (800 + (400 - l_rect.w) // 2, line_pos))
line_pos += self._font_size + self._font_gap
def at(self, x: int, y: int) -> Piece:
"""Get piece from position on the board"""
if 0 <= x <= 7 and 0 <= y <= 7:
return self._board[x + y * 8]
return Piece.empty()
def toggle_state(self) -> GameState:
"""Return game state after active colour move"""
other_colour = PieceColour.Black \
if self._active_colour == PieceColour.White \
else PieceColour.White
self._active_colour = other_colour
if self.get_all_moves(other_colour):
return GameState.Continue
elif self.king_is_safe(other_colour):
return GameState.Stalemate
else:
return GameState.Checkmate
def _force_can_make(self, move: Move) -> Optional[Move]:
"""
Check if the move is correct with adding corresponding flags
(!) Without checking king safety and turn order
"""
# Can't make incorrect move
if move.Captured != self._board[move.To]:
return None
this_piece: Piece = self._board[move.From]
other_piece: Piece = self._board[move.To]
# Can't make move w/o piece itself
if this_piece.Type == PieceType.Empty:
return None
# Can't eat pieces of your colour
if other_piece.Type != PieceType.Empty and \
other_piece.Colour == this_piece.Colour:
return None
# Resolving piece xy coordinates to calculate move possibility
y1, y2 = move.From // 8, move.To // 8
x1, x2 = move.From % 8, move.To % 8
# Castling
if this_piece.Type == PieceType.King and \
y1 == y2 and abs(x1 - x2) == 2 \
and move.Captured == Piece.empty():
castling = CastlingType.QueenSide if x1 - x2 == 2 \
else CastlingType.KingSide
if castling == CastlingType.QueenSide and (
self._board[move.To - 1] != Piece.empty() or
self._board[move.From - 1] != Piece.empty() or
self._board[move.From - 2] != Piece.empty()):
return None
elif castling == CastlingType.KingSide and (
self._board[move.From + 1] != Piece.empty() or
self._board[move.From + 2] != Piece.empty()):
return None
if self._castling_rights[this_piece.Colour][castling]:
lost_castling = {castling}
other_side = CastlingType.KingSide \
if castling == CastlingType.QueenSide \
else CastlingType.QueenSide
if self._castling_rights[this_piece.Colour][other_side]:
lost_castling.add(other_side)
move.Flags = MoveFlags(Castling=castling,
LoseCastling=lost_castling)
else:
return None
elif this_piece.Type == PieceType.King:
# Losing castling rights after king move
lost_castling = set()
if self._castling_rights[this_piece.Colour][CastlingType.KingSide]:
lost_castling.add(CastlingType.KingSide)
if self._castling_rights[this_piece.Colour][CastlingType.QueenSide]:
lost_castling.add(CastlingType.QueenSide)
move.Flags = MoveFlags(LoseCastling=lost_castling)
elif this_piece.Type == PieceType.Rook:
# Losing castling rights after rook move
if x1 == 0 and self._castling_rights[this_piece.Colour][CastlingType.QueenSide]:
move.Flags = MoveFlags(LoseCastling={CastlingType.QueenSide})
elif x1 == 7 and self._castling_rights[this_piece.Colour][CastlingType.KingSide]:
move.Flags = MoveFlags(LoseCastling={CastlingType.KingSide})
elif this_piece.Type == PieceType.Pawn and 0 <= move.To <= 7:
move.Flags = MoveFlags(PawnPromotion=PieceType.Queen)
if self._get_validator[this_piece.Type](x1, y1, x2, y2):
return move
return None
def can_make(self, move: Move) -> Optional[Move]:
"""Check if the move is correct"""
# Checking basic move correctness
completed_move = self._force_can_make(move)
if completed_move is not None:
# Can't capture the king
if self._board[move.To].Type == PieceType.King:
return None
# Checking king safety
self.make_move(move)
safety = self.king_is_safe(self._board[move.To].Colour)
self.unmake_move(move)
return completed_move if safety else None
return None
def make_move(self, move: Move) -> None:
"""
Make move on the board
Use board.can_make() first to check that the move is legal
"""
# Removing castling rights
if move.Flags.LoseCastling is not None:
this_colour = self._board[move.From].Colour
for castling in move.Flags.LoseCastling:
self._castling_rights[this_colour][castling] = False
# Moving piece
self._halfmoves += 1
self._board[move.To] = self._board[move.From]
self._board[move.From] = Piece.empty()
if move.Flags.PawnPromotion is not None:
self._board[move.To] = Piece(move.Flags.PawnPromotion,
self._board[move.To].Colour)
# Doing castling
if move.Flags.Castling is not None:
if move.Flags.Castling == CastlingType.KingSide:
self._board[move.From + 1] = self._board[move.To + 1]
self._board[move.To + 1] = Piece.empty()
else:
self._board[move.From - 1] = self._board[move.To - 2]
self._board[move.To - 2] = Piece.empty()
def unmake_move(self, move: Move) -> None:
"""Unmake move on the board (no additional checking)"""
# Returning castling rights
if move.Flags.LoseCastling is not None:
this_colour = self._board[move.To].Colour
for castling in move.Flags.LoseCastling:
self._castling_rights[this_colour][castling] = True
# Unmoving piece
self._halfmoves -= 1
self._board[move.From] = self._board[move.To]
self._board[move.To] = move.Captured
# Demoting pawn
if move.Flags.PawnPromotion is not None:
self._board[move.From] = Piece(PieceType.Pawn,
self._board[move.From].Colour)
# Undoing castling
if move.Flags.Castling is not None:
if move.Flags.Castling == CastlingType.KingSide:
self._board[move.To + 1] = self._board[move.From + 1]
self._board[move.From + 1] = Piece.empty()
else:
self._board[move.To - 2] = self._board[move.From - 1]
self._board[move.From - 1] = Piece.empty()
def get_all_moves(self, colour: PieceColour, no_castling=False) -> deque[Move]:
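"""Collect every legal move for ``colour``; castling moves are skipped when ``no_castling`` is set."""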
moves: deque[Move] = deque()
for i, piece_from in enumerate(self._board):
if piece_from.Type == PieceType.Empty or \
piece_from.Colour != colour:
continue
for j, piece_to in enumerate(self._board):
move = self.can_make(Move(i, j, piece_to))
if move is not None and (not no_castling or move.Flags.Castling is None):
moves.append(move)
return moves
def _get_all_piece_moves(self, pos: int) -> deque[int]:
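"""Return the destination squares of all legal moves for the piece at board index ``pos``."""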
moves: deque[int] = deque()
for i, piece_to in enumerate(self._board):
move = self.can_make(Move(pos, i, piece_to))
if move is not None:
moves.append(move.To)
return moves
def king_is_safe(self, colour: PieceColour) -> bool:
"""Check if king is safe on current board state"""
king_pos = np.where(self._board == Piece(PieceType.King, colour))[0][0]
king_x, king_y = king_pos % 8, king_pos // 8
right_side = range(king_x + 1, 8)
left_side = range(king_x - 1, -1, -1)
bottom_side = range(king_y + 1, 8)
top_side = range(king_y - 1, -1, -1)
for chunk in lists.group(context.bytes, slot):
stack.append(chunk)
elif isinstance(slot, srop.SigreturnFrame):
stack.describe("Sigreturn Frame")
if slot.sp in (0, None) and self.base:
slot.sp = stack.next + len(slot)
registers = [slot.registers[i] for i in sorted(slot.registers.keys())]
for register in registers:
value = slot[register]
description = self.describe(value)
if description:
stack.describe('%s = %s' % (register, description))
else:
stack.describe('%s' % (register))
stack.append(value)
elif isinstance(slot, Call):
stack.describe(self.describe(slot))
registers = dict(zip(slot.abi.register_arguments, slot.args))
for value, name in self.setRegisters(registers):
if name in registers:
index = slot.abi.register_arguments.index(name)
description = self.describe(value) or repr(value)
stack.describe('[arg%d] %s = %s' % (index, name, description))
elif isinstance(name, Gadget):
stack.describe('; '.join(name.insns))
elif isinstance(name, str):
stack.describe(name)
stack.append(value)
if address != stack.next:
stack.describe(slot.name)
stack.append(slot.target)
# For any remaining arguments, put them on the stack
stackArguments = slot.args[len(slot.abi.register_arguments):]
nextGadgetAddr = stack.next + (context.bytes * len(stackArguments))
# Generally, stack-based arguments assume there's a return
# address on the stack.
#
# We need to at least put padding there so that things line up
# properly, but likely also need to adjust the stack past the
# arguments.
if slot.abi.returns:
# Save off the address of the next gadget
if remaining or stackArguments:
nextGadgetAddr = stack.next
# If there were arguments on the stack, we need to stick something
# in the slot where the return address goes.
if len(stackArguments) > 0:
if remaining:
fix_size = (1 + len(stackArguments))
fix_bytes = fix_size * context.bytes
adjust = self.search(move = fix_bytes)
if not adjust:
log.error("Could not find gadget to adjust stack by %#x bytes" % fix_bytes)
nextGadgetAddr += adjust.move
stack.describe('<adjust @%#x> %s' % (nextGadgetAddr, self.describe(adjust)))
stack.append(adjust.address)
for pad in range(fix_bytes, adjust.move, context.bytes):
stackArguments.append(Padding())
# We could not find a proper "adjust" gadget, but also didn't need one.
else:
stack.append(Padding("<return address>"))
for i, argument in enumerate(stackArguments):
if isinstance(argument, NextGadgetAddress):
stack.describe("<next gadget>")
stack.append(nextGadgetAddr)
else:
description = self.describe(argument) or 'arg%i' % (i + len(registers))
stack.describe(description)
stack.append(argument)
else:
stack.append(slot)
#
# Second pass
#
# All of the register-loading, stack arguments, and call addresses
# are on the stack. We can now start loading in absolute addresses.
#
start = base
end = stack.next
size = (stack.next - base)
for i, slot in enumerate(stack):
slot_address = stack.address + (i * context.bytes)
if isinstance(slot, six.integer_types):
pass
elif isinstance(slot, (bytes, six.text_type)):
pass
elif isinstance(slot, AppendedArgument):
stack[i] = stack.next
stack.extend(slot.resolve(stack.next))
elif isinstance(slot, CurrentStackPointer):
stack[i] = slot_address
elif isinstance(slot, Padding):
stack[i] = self.generatePadding(i * context.bytes, context.bytes)
stack.describe(slot.name, slot_address)
elif isinstance(slot, Gadget):
stack[i] = slot.address
stack.describe(self.describe(slot), slot_address)
# Everything else we can just leave in place.
# Maybe the user put in something on purpose?
# Also, it may work in pwnlib.util.packing.flat()
else:
pass
return stack
def find_stack_adjustment(self, slots):
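"""Find a gadget that moves the stack pointer by ``slots`` machine words."""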
# context.bytes is the word size in bytes; context.arch is a string and cannot size the move
return self.search(move=slots * context.bytes)
def chain(self):
"""Build the ROP chain
Returns:
bytes containing the packed ROP chain
"""
return packing.flat(self.build())
def dump(self):
"""Dump the ROP chain in an easy-to-read manner"""
return self.build().dump()
def regs(self, registers=None, **kw):
if registers is None:
registers = {}
registers.update(kw)
def call(self, resolvable, arguments = (), abi = None, **kwargs):
"""Add a call to the ROP chain
Arguments:
resolvable(str,int): Value which can be looked up via 'resolve',
or is already an integer.
arguments(list): List of arguments which can be passed to pack().
Alternately, if a base address is set, arbitrarily nested
structures of strings or integers can be provided.
"""
if self.migrated:
log.error('Cannot append to a migrated chain')
# If we can find a function with that name, just call it
if isinstance(resolvable, str):
addr = self.resolve(resolvable)
elif hasattr(resolvable, 'name') and hasattr(resolvable, 'address'):
addr = resolvable.address
resolvable = str(resolvable.name)
else:
addr = resolvable
resolvable = ''
if addr:
self.raw(Call(resolvable, addr, arguments, abi))
# Otherwise, if it is a syscall we might be able to call it
elif not self._srop_call(resolvable, arguments):
log.error('Could not resolve %r.' % resolvable)
def _srop_call(self, resolvable, arguments):
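"""Fallback for call(): emit a sigreturn (SROP) frame that performs ``resolvable`` as a raw syscall; returns True if the frame was added, False if the name is not a known syscall."""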
# Check that the call is a valid syscall
resolvable = 'SYS_' + resolvable.lower()
syscall_number = getattr(constants, resolvable, None)
if syscall_number is None:
return False
log.info_once("Using sigreturn for %r" % resolvable)
# Find an int 0x80 or similar instruction we can use
syscall_gadget = None
syscall_instructions = srop.syscall_instructions[context.arch]
for instruction in syscall_instructions:
syscall_gadget = self.find_gadget([instruction])
if syscall_gadget:
break
else:
log.error("Could not find any instructions in %r" % syscall_instructions)
# Generate the SROP frame which would invoke the syscall
with context.local(arch=self.elfs[0].arch):
frame = srop.SigreturnFrame()
frame.pc = syscall_gadget
frame.syscall = syscall_number
try:
SYS_sigreturn = constants.SYS_sigreturn
except AttributeError:
SYS_sigreturn = constants.SYS_rt_sigreturn
for register, value in zip(frame.arguments, arguments):
frame[register] = value
# Set up a call frame which will set EAX and invoke the syscall
call = Call('SYS_sigreturn',
syscall_gadget,
[SYS_sigreturn],
abi.ABI.sigreturn())
self.raw(call)
self.raw(frame)
# We do not expect to ever recover after the syscall, as it would
# require something like 'int 0x80; ret' which does not ever occur
# in the wild.
self.migrated = True
return True
def find_gadget(self, instructions):
"""
Returns a gadget with the exact sequence of instructions specified
in the ``instructions`` argument.
"""
n = len(instructions)
for gadget in self.gadgets.values():
if tuple(gadget.insns)[:n] == tuple(instructions):
return gadget
def raw(self, value):
"""Adds a raw integer or string to the ROP chain.
If your architecture requires aligned values, then make
sure that any given string is aligned!
Arguments:
data(int/str): The raw value to put onto the rop chain.
>>> rop = ROP([])
>>> rop.raw('AAAAAAAA')
>>> rop.raw('BBBBBBBB')
>>> rop.raw('CCCCCCCC')
>>> print(rop.dump())
0x0000: b'AAAA' 'AAAAAAAA'
0x0004: b'AAAA'
0x0008: b'BBBB' 'BBBBBBBB'
0x000c: b'BBBB'
0x0010: b'CCCC' 'CCCCCCCC'
0x0014: b'CCCC'
"""
if self.migrated:
log.error('Cannot append to a migrated chain')
self._chain.append(value)
def migrate(self, next_base):
"""Explicitly set $sp, by using a ``leave; ret`` gadget"""
if isinstance(next_base, ROP):
next_base = next_base.base
pop_sp = self.rsp or self.esp
pop_bp = self.rbp or self.ebp
leave = self.leave
if pop_sp and len(pop_sp.regs) == 1:
self.raw(pop_sp)
self.raw(next_base)
elif pop_bp and leave and len(pop_bp.regs) == 1:
self.raw(pop_bp)
self.raw(next_base - context.bytes)
self.raw(leave)
else:
log.error('Cannot find the gadgets to migrate')
self.migrated = True
def __bytes__(self):
"""Returns: Raw bytes of the ROP chain"""
return self.chain()
def __str__(self):
return str(self.chain())
def __get_cachefile_name(self, files):
"""Given an ELF or list of ELF objects, return a cache file for the set of files"""
cachedir = os.path.join(tempfile.gettempdir(), 'pwntools-rop-cache-%d.%d' % sys.version_info[:2])
if not os.path.exists(cachedir):
os.mkdir(cachedir)
if isinstance(files, ELF):
files = [files]
hashes = []
# Hash the requested files (normalized to a list above), not the whole ELF set
for elf in files:
sha256 = hashlib.sha256(elf.get_data()).hexdigest()
hashes.append(sha256)
return os.path.join(cachedir, '_'.join(hashes))
def __cache_load(self, elf):
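"""Return the cached gadgets for ``elf`` rebased to its current load address, or None when no cache file exists."""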
filename = self.__get_cachefile_name(elf)
if not os.path.exists(filename):
return None
log.info_once('Loaded cached gadgets for %r' % elf.file.name)
gadgets = eval(open(filename).read())
gadgets = {k - elf.load_addr + elf.address:v for k, v in gadgets.items()}
return gadgets
def __cache_save(self, elf, data):
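"""Write the gadgets found for ``elf`` to the cache, keyed by their original (unrebased) virtual addresses."""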
data = {k + elf.load_addr - elf.address:v for k, v in data.items()}
open(self.__get_cachefile_name(elf), 'w+').write(repr(data))
def __load(self):
"""Load all ROP gadgets for the selected ELF files"""
#
# We accept only instructions that look like these.
#
# - leave
# - pop reg
# - add $sp, value
# - ret
#
# Currently, ROPgadget does not detect multi-byte "C2" ret.
# https://github.com/JonathanSalwan/ROPgadget/issues/53
#
pop = re.compile(r'^pop (.{3})')
add = re.compile(r'^add [er]sp, (\S+)$')
ret = re.compile(r'^ret$')
leave = re.compile(r'^leave$')
int80 = re.compile(r'int +0x80')
syscall = re.compile(r'^syscall$')
sysenter = re.compile(r'^sysenter$')
#
# Validation routine
#
# >>> valid('pop eax')
# True
# >>> valid('add rax, 0x24')
# False
# >>> valid('add esp, 0x24')
# True
#
valid = lambda insn: any(map(lambda pattern: pattern.match(insn), [pop,add,ret,leave,int80,syscall,sysenter]))
#
# Currently, ropgadget.args.Args() doesn't take any arguments, and pulls
# only from sys.argv. Preserve it through this call. We also
# monkey-patch sys.stdout to suppress output from ropgadget.
#
argv = sys.argv
stdout = sys.stdout
class Wrapper:
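"""File-object proxy that swallows writes (used to mute ropgadget's console output) while delegating every other attribute to the wrapped stream."""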
def __init__(self, fd):
self._fd = fd
def write(self, s):
pass
def __getattr__(self, k):
return self._fd.__getattribute__(k)
gadgets = {}
for elf in self.elfs:
cache = self.__cache_load(elf)
if cache:
gadgets.update(cache)
continue
log.info_once('Loading gadgets for %r' % elf.path)
try:
sys.stdout = Wrapper(sys.stdout)
import ropgadget
sys.argv = ['ropgadget', '--binary', elf.path, '--only', 'sysenter|syscall|int|add|pop|leave|ret', '--nojop']
args = ropgadget.args.Args().getArgs()
core = ropgadget.core.Core(args)
core.do_binary(elf.path)
core.do_load(0)
finally:
sys.argv = argv
sys.stdout = stdout
elf_gadgets = {}
for gadget in core._Core__gadgets:
address = gadget['vaddr'] - |